Diffstat (limited to 'drivers/scsi')
-rw-r--r-- drivers/scsi/.gitignore | 1
-rw-r--r-- drivers/scsi/3w-9xxx.c | 192
-rw-r--r-- drivers/scsi/3w-9xxx.h | 273
-rw-r--r-- drivers/scsi/3w-sas.c | 117
-rw-r--r-- drivers/scsi/3w-sas.h | 118
-rw-r--r-- drivers/scsi/3w-xxxx.c | 299
-rw-r--r-- drivers/scsi/3w-xxxx.h | 201
-rw-r--r-- drivers/scsi/53c700.c | 163
-rw-r--r-- drivers/scsi/53c700.h | 18
-rw-r--r-- drivers/scsi/BusLogic.c | 283
-rw-r--r-- drivers/scsi/BusLogic.h | 13
-rw-r--r-- drivers/scsi/FlashPoint.c | 216
-rw-r--r-- drivers/scsi/Kconfig | 206
-rw-r--r-- drivers/scsi/Makefile | 15
-rw-r--r-- drivers/scsi/NCR5380.c | 212
-rw-r--r-- drivers/scsi/NCR5380.h | 16
-rw-r--r-- drivers/scsi/a100u2w.c | 20
-rw-r--r-- drivers/scsi/a2091.c | 79
-rw-r--r-- drivers/scsi/a3000.c | 73
-rw-r--r-- drivers/scsi/aacraid/TODO | 3
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 418
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 28
-rw-r--r-- drivers/scsi/aacraid/commctrl.c | 71
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 35
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 85
-rw-r--r-- drivers/scsi/aacraid/dpcsup.c | 15
-rw-r--r-- drivers/scsi/aacraid/linit.c | 362
-rw-r--r-- drivers/scsi/aacraid/nark.c | 1
-rw-r--r-- drivers/scsi/aacraid/rkt.c | 5
-rw-r--r-- drivers/scsi/aacraid/rx.c | 14
-rw-r--r-- drivers/scsi/aacraid/sa.c | 19
-rw-r--r-- drivers/scsi/aacraid/src.c | 13
-rw-r--r-- drivers/scsi/advansys.c | 456
-rw-r--r-- drivers/scsi/aha152x.c | 328
-rw-r--r-- drivers/scsi/aha1542.c | 272
-rw-r--r-- drivers/scsi/aha1542.h | 33
-rw-r--r-- drivers/scsi/aha1740.c | 19
-rw-r--r-- drivers/scsi/aic7xxx/.gitignore | 1
-rw-r--r-- drivers/scsi/aic7xxx/Kconfig.aic79xx | 6
-rw-r--r-- drivers/scsi/aic7xxx/Kconfig.aic7xxx | 8
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx.h | 50
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_core.c | 372
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 111
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.h | 41
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 49
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_pci.c | 12
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_proc.c | 13
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx.h | 12
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_core.c | 348
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 125
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.h | 43
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 46
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_pci.c | 8
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_proc.c | 15
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm.c | 2
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 1
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 2
-rw-r--r-- drivers/scsi/aic7xxx/aiclib.h | 15
-rw-r--r-- drivers/scsi/aic7xxx/scsi_message.h | 30
-rw-r--r-- drivers/scsi/aic94xx/aic94xx.h | 11
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_dev.c | 6
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_dump.c | 186
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_hwi.c | 7
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 5
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_scb.c | 42
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_sds.c | 24
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_seq.c | 6
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_task.c | 14
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_tmf.c | 15
-rw-r--r-- drivers/scsi/arcmsr/arcmsr.h | 105
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_attr.c | 35
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 528
-rw-r--r-- drivers/scsi/arm/Kconfig | 11
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 216
-rw-r--r-- drivers/scsi/arm/arm_scsi.h (renamed from drivers/scsi/arm/scsi.h) | 37
-rw-r--r-- drivers/scsi/arm/arxescsi.c | 7
-rw-r--r-- drivers/scsi/arm/cumana_1.c | 2
-rw-r--r-- drivers/scsi/arm/cumana_2.c | 32
-rw-r--r-- drivers/scsi/arm/eesox.c | 22
-rw-r--r-- drivers/scsi/arm/fas216.c | 123
-rw-r--r-- drivers/scsi/arm/fas216.h | 14
-rw-r--r-- drivers/scsi/arm/oak.c | 4
-rw-r--r-- drivers/scsi/arm/powertec.c | 23
-rw-r--r-- drivers/scsi/arm/queue.c | 8
-rw-r--r-- drivers/scsi/atari_scsi.c | 16
-rw-r--r-- drivers/scsi/atp870u.c | 470
-rw-r--r-- drivers/scsi/atp870u.h | 14
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.c | 40
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 167
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.c | 113
-rw-r--r-- drivers/scsi/bfa/bfa_core.c | 4
-rw-r--r-- drivers/scsi/bfa/bfa_defs_svc.h | 2
-rw-r--r-- drivers/scsi/bfa/bfa_fc.h | 19
-rw-r--r-- drivers/scsi/bfa/bfa_fcpim.c | 22
-rw-r--r-- drivers/scsi/bfa/bfa_fcs.h | 3
-rw-r--r-- drivers/scsi/bfa/bfa_fcs_lport.c | 30
-rw-r--r-- drivers/scsi/bfa/bfa_fcs_rport.c | 21
-rw-r--r-- drivers/scsi/bfa/bfa_ioc.c | 69
-rw-r--r-- drivers/scsi/bfa/bfa_ioc_ct.c | 10
-rw-r--r-- drivers/scsi/bfa/bfa_port.c | 4
-rw-r--r-- drivers/scsi/bfa/bfa_svc.c | 21
-rw-r--r-- drivers/scsi/bfa/bfad.c | 9
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 98
-rw-r--r-- drivers/scsi/bfa/bfad_bsg.c | 226
-rw-r--r-- drivers/scsi/bfa/bfad_debugfs.c | 3
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 46
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 20
-rw-r--r-- drivers/scsi/bnx2fc/Kconfig | 3
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc.h | 23
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 115
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 131
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 92
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 11
-rw-r--r-- drivers/scsi/bnx2i/Kconfig | 3
-rw-r--r-- drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 146
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_init.c | 4
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 60
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_sysfs.c | 21
-rw-r--r-- drivers/scsi/ch.c | 132
-rw-r--r-- drivers/scsi/constants.c | 19
-rw-r--r-- drivers/scsi/csiostor/csio_hw.c | 6
-rw-r--r-- drivers/scsi/csiostor/csio_hw_t5.c | 8
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 3
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.c | 6
-rw-r--r-- drivers/scsi/csiostor/csio_rnode.c | 2
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.c | 78
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.h | 10
-rw-r--r-- drivers/scsi/csiostor/csio_wr.c | 3
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/Kconfig | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 41
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/Kconfig | 4
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 257
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 705
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.h | 60
-rw-r--r-- drivers/scsi/cxlflash/main.c | 125
-rw-r--r-- drivers/scsi/cxlflash/ocxl_hw.c | 28
-rw-r--r-- drivers/scsi/cxlflash/ocxl_hw.h | 1
-rw-r--r-- drivers/scsi/cxlflash/superpipe.c | 19
-rw-r--r-- drivers/scsi/cxlflash/vlun.c | 12
-rw-r--r-- drivers/scsi/dc395x.c | 244
-rw-r--r-- drivers/scsi/dc395x.h | 38
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 144
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 6
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 8
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 10
-rw-r--r-- drivers/scsi/dmx3191d.c | 2
-rw-r--r-- drivers/scsi/dpt/dpti_i2o.h | 441
-rw-r--r-- drivers/scsi/dpt/dpti_ioctl.h | 136
-rw-r--r-- drivers/scsi/dpt/dptsig.h | 336
-rw-r--r-- drivers/scsi/dpt/osd_defs.h | 79
-rw-r--r-- drivers/scsi/dpt/osd_util.h | 358
-rw-r--r-- drivers/scsi/dpt/sys_info.h | 417
-rw-r--r-- drivers/scsi/dpt_i2o.c | 3575
-rw-r--r-- drivers/scsi/dpti.h | 332
-rw-r--r-- drivers/scsi/elx/Kconfig | 9
-rw-r--r-- drivers/scsi/elx/Makefile | 18
-rw-r--r-- drivers/scsi/elx/efct/efct_driver.c | 781
-rw-r--r-- drivers/scsi/elx/efct/efct_driver.h | 108
-rw-r--r-- drivers/scsi/elx/efct/efct_hw.c | 3580
-rw-r--r-- drivers/scsi/elx/efct/efct_hw.h | 764
-rw-r--r-- drivers/scsi/elx/efct/efct_hw_queues.c | 677
-rw-r--r-- drivers/scsi/elx/efct/efct_io.c | 190
-rw-r--r-- drivers/scsi/elx/efct/efct_io.h | 174
-rw-r--r-- drivers/scsi/elx/efct/efct_lio.c | 1695
-rw-r--r-- drivers/scsi/elx/efct/efct_lio.h | 189
-rw-r--r-- drivers/scsi/elx/efct/efct_scsi.c | 1157
-rw-r--r-- drivers/scsi/elx/efct/efct_scsi.h | 203
-rw-r--r-- drivers/scsi/elx/efct/efct_unsol.c | 492
-rw-r--r-- drivers/scsi/elx/efct/efct_unsol.h | 17
-rw-r--r-- drivers/scsi/elx/efct/efct_xport.c | 1111
-rw-r--r-- drivers/scsi/elx/efct/efct_xport.h | 186
-rw-r--r-- drivers/scsi/elx/include/efc_common.h | 37
-rw-r--r-- drivers/scsi/elx/libefc/efc.h | 52
-rw-r--r-- drivers/scsi/elx/libefc/efc_cmds.c | 782
-rw-r--r-- drivers/scsi/elx/libefc/efc_cmds.h | 35
-rw-r--r-- drivers/scsi/elx/libefc/efc_device.c | 1602
-rw-r--r-- drivers/scsi/elx/libefc/efc_device.h | 72
-rw-r--r-- drivers/scsi/elx/libefc/efc_domain.c | 1088
-rw-r--r-- drivers/scsi/elx/libefc/efc_domain.h | 54
-rw-r--r-- drivers/scsi/elx/libefc/efc_els.c | 1094
-rw-r--r-- drivers/scsi/elx/libefc/efc_els.h | 107
-rw-r--r-- drivers/scsi/elx/libefc/efc_fabric.c | 1563
-rw-r--r-- drivers/scsi/elx/libefc/efc_fabric.h | 116
-rw-r--r-- drivers/scsi/elx/libefc/efc_node.c | 1102
-rw-r--r-- drivers/scsi/elx/libefc/efc_node.h | 191
-rw-r--r-- drivers/scsi/elx/libefc/efc_nport.c | 777
-rw-r--r-- drivers/scsi/elx/libefc/efc_nport.h | 50
-rw-r--r-- drivers/scsi/elx/libefc/efc_sm.c | 54
-rw-r--r-- drivers/scsi/elx/libefc/efc_sm.h | 197
-rw-r--r-- drivers/scsi/elx/libefc/efclib.c | 81
-rw-r--r-- drivers/scsi/elx/libefc/efclib.h | 621
-rw-r--r-- drivers/scsi/elx/libefc_sli/sli4.c | 5159
-rw-r--r-- drivers/scsi/elx/libefc_sli/sli4.h | 4132
-rw-r--r-- drivers/scsi/esas2r/Kconfig | 2
-rw-r--r-- drivers/scsi/esas2r/atioctl.h | 3
-rw-r--r-- drivers/scsi/esas2r/esas2r.h | 8
-rw-r--r-- drivers/scsi/esas2r/esas2r_disc.c | 3
-rw-r--r-- drivers/scsi/esas2r/esas2r_flash.c | 4
-rw-r--r-- drivers/scsi/esas2r/esas2r_init.c | 57
-rw-r--r-- drivers/scsi/esas2r/esas2r_int.c | 8
-rw-r--r-- drivers/scsi/esas2r/esas2r_ioctl.c | 33
-rw-r--r-- drivers/scsi/esas2r/esas2r_log.c | 17
-rw-r--r-- drivers/scsi/esas2r/esas2r_main.c | 23
-rw-r--r-- drivers/scsi/esp_scsi.c | 51
-rw-r--r-- drivers/scsi/esp_scsi.h | 3
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 75
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 76
-rw-r--r-- drivers/scsi/fcoe/fcoe_sysfs.c | 12
-rw-r--r-- drivers/scsi/fcoe/fcoe_transport.c | 13
-rw-r--r-- drivers/scsi/fdomain.c | 82
-rw-r--r-- drivers/scsi/fdomain.h | 2
-rw-r--r-- drivers/scsi/fdomain_isa.c | 8
-rw-r--r-- drivers/scsi/fnic/cq_desc.h | 14
-rw-r--r-- drivers/scsi/fnic/cq_enet_desc.h | 14
-rw-r--r-- drivers/scsi/fnic/cq_exch_desc.h | 14
-rw-r--r-- drivers/scsi/fnic/fcpio.h | 14
-rw-r--r-- drivers/scsi/fnic/fnic.h | 46
-rw-r--r-- drivers/scsi/fnic/fnic_attrs.c | 31
-rw-r--r-- drivers/scsi/fnic/fnic_debugfs.c | 30
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 49
-rw-r--r-- drivers/scsi/fnic/fnic_fip.h | 14
-rw-r--r-- drivers/scsi/fnic/fnic_io.h | 14
-rw-r--r-- drivers/scsi/fnic/fnic_isr.c | 15
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 88
-rw-r--r-- drivers/scsi/fnic/fnic_res.c | 14
-rw-r--r-- drivers/scsi/fnic/fnic_res.h | 14
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 1168
-rw-r--r-- drivers/scsi/fnic/fnic_stats.h | 18
-rw-r--r-- drivers/scsi/fnic/fnic_trace.c | 94
-rw-r--r-- drivers/scsi/fnic/fnic_trace.h | 18
-rw-r--r-- drivers/scsi/fnic/rq_enet_desc.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_cq.c | 14
-rw-r--r-- drivers/scsi/fnic/vnic_cq.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_cq_copy.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 34
-rw-r--r-- drivers/scsi/fnic/vnic_dev.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_devcmd.h | 16
-rw-r--r-- drivers/scsi/fnic/vnic_intr.c | 14
-rw-r--r-- drivers/scsi/fnic/vnic_intr.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_nic.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_resource.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_rq.c | 15
-rw-r--r-- drivers/scsi/fnic/vnic_rq.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_scsi.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_stats.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_wq.c | 18
-rw-r--r-- drivers/scsi/fnic/vnic_wq.h | 14
-rw-r--r-- drivers/scsi/fnic/vnic_wq_copy.c | 23
-rw-r--r-- drivers/scsi/fnic/vnic_wq_copy.h | 14
-rw-r--r-- drivers/scsi/fnic/wq_enet_desc.h | 14
-rw-r--r-- drivers/scsi/g_NCR5380.c | 27
-rw-r--r-- drivers/scsi/gdth.c | 4323
-rw-r--r-- drivers/scsi/gdth.h | 981
-rw-r--r-- drivers/scsi/gdth_ioctl.h | 251
-rw-r--r-- drivers/scsi/gdth_proc.c | 586
-rw-r--r-- drivers/scsi/gdth_proc.h | 18
-rw-r--r-- drivers/scsi/gvp11.c | 111
-rw-r--r-- drivers/scsi/hisi_sas/Kconfig | 8
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 116
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 2557
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 111
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 184
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2163
-rw-r--r-- drivers/scsi/hosts.c | 177
-rw-r--r-- drivers/scsi/hpsa.c | 515
-rw-r--r-- drivers/scsi/hpsa.h | 3
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 81
-rw-r--r-- drivers/scsi/hptiop.c | 33
-rw-r--r-- drivers/scsi/hptiop.h | 8
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 2162
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.h | 281
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 160
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 89
-rw-r--r-- drivers/scsi/imm.c | 126
-rw-r--r-- drivers/scsi/imm.h | 5
-rw-r--r-- drivers/scsi/initio.c | 124
-rw-r--r-- drivers/scsi/initio.h | 34
-rw-r--r-- drivers/scsi/ipr.c | 180
-rw-r--r-- drivers/scsi/ipr.h | 11
-rw-r--r-- drivers/scsi/ips.c | 158
-rw-r--r-- drivers/scsi/ips.h | 20
-rw-r--r-- drivers/scsi/isci/host.c | 45
-rw-r--r-- drivers/scsi/isci/init.c | 31
-rw-r--r-- drivers/scsi/isci/isci.h | 6
-rw-r--r-- drivers/scsi/isci/phy.c | 40
-rw-r--r-- drivers/scsi/isci/phy.h | 1
-rw-r--r-- drivers/scsi/isci/port.c | 73
-rw-r--r-- drivers/scsi/isci/port_config.c | 37
-rw-r--r-- drivers/scsi/isci/remote_device.c | 37
-rw-r--r-- drivers/scsi/isci/remote_node_context.c | 19
-rw-r--r-- drivers/scsi/isci/remote_node_table.c | 64
-rw-r--r-- drivers/scsi/isci/remote_node_table.h | 2
-rw-r--r-- drivers/scsi/isci/request.c | 114
-rw-r--r-- drivers/scsi/isci/request.h | 5
-rw-r--r-- drivers/scsi/isci/sas.h | 2
-rw-r--r-- drivers/scsi/isci/task.c | 50
-rw-r--r-- drivers/scsi/isci/task.h | 8
-rw-r--r-- drivers/scsi/iscsi_boot_sysfs.c | 2
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 178
-rw-r--r-- drivers/scsi/iscsi_tcp.h | 5
-rw-r--r-- drivers/scsi/jazz_esp.c | 18
-rw-r--r-- drivers/scsi/lasi700.c | 6
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 24
-rw-r--r-- drivers/scsi/libfc/fc_elsct.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_encode.h | 951
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 34
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 84
-rw-r--r-- drivers/scsi/libfc/fc_libfc.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 143
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 46
-rw-r--r-- drivers/scsi/libiscsi.c | 1090
-rw-r--r-- drivers/scsi/libiscsi_tcp.c | 96
-rw-r--r-- drivers/scsi/libsas/Kconfig | 2
-rw-r--r-- drivers/scsi/libsas/Makefile | 2
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 181
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 26
-rw-r--r-- drivers/scsi/libsas/sas_event.c | 134
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 117
-rw-r--r-- drivers/scsi/libsas/sas_host_smp.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 82
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 16
-rw-r--r-- drivers/scsi/libsas/sas_phy.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_port.c | 79
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 374
-rw-r--r-- drivers/scsi/libsas/sas_task.c | 18
-rw-r--r-- drivers/scsi/lpfc/Makefile | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 654
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 1747
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 1025
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h | 36
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 138
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 2326
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 641
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.h | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 70
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 5790
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 2097
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 401
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 564
-rw-r--r-- drivers/scsi/lpfc/lpfc_ids.h | 36
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 3923
-rw-r--r-- drivers/scsi/lpfc/lpfc_logmsg.h | 31
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 274
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 37
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 827
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 1430
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.h | 189
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 1251
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.h | 158
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 2251
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.h | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 6040
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 74
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 67
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_vmid.c | 286
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 379
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.h | 6
-rw-r--r-- drivers/scsi/mac53c94.c | 50
-rw-r--r-- drivers/scsi/mac53c94.h | 11
-rw-r--r-- drivers/scsi/mac_esp.c | 14
-rw-r--r-- drivers/scsi/mac_scsi.c | 19
-rw-r--r-- drivers/scsi/megaraid.c | 588
-rw-r--r-- drivers/scsi/megaraid.h | 23
-rw-r--r-- drivers/scsi/megaraid/mbox_defs.h | 2
-rw-r--r-- drivers/scsi/megaraid/mega_common.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 92
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_mm.c | 24
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 62
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 775
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 35
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 389
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 20
-rw-r--r-- drivers/scsi/mesh.c | 61
-rw-r--r-- drivers/scsi/mesh.h | 11
-rw-r--r-- drivers/scsi/mpi3mr/Kconfig | 9
-rw-r--r-- drivers/scsi/mpi3mr/Makefile | 6
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 2414
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_image.h | 275
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_init.h | 117
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 1063
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_pci.h | 16
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_sas.h | 46
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 470
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 1400
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_app.c | 1860
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_debug.h | 197
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 5780
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 5324
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 3291
-rw-r--r-- drivers/scsi/mpt3sas/Kconfig | 10
-rw-r--r-- drivers/scsi/mpt3sas/Makefile | 3
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 3
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 6
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 2200
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 317
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_config.c | 957
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 376
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.h | 18
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_debugfs.c | 157
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2252
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 315
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 38
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h | 94
-rw-r--r-- drivers/scsi/mvme147.c | 17
-rw-r--r-- drivers/scsi/mvsas/mv_defs.h | 5
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 70
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 223
-rw-r--r-- drivers/scsi/mvsas/mv_sas.h | 7
-rw-r--r-- drivers/scsi/mvumi.c | 85
-rw-r--r-- drivers/scsi/mvumi.h | 13
-rw-r--r-- drivers/scsi/myrb.c | 294
-rw-r--r-- drivers/scsi/myrs.c | 245
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 359
-rw-r--r-- drivers/scsi/ncr53c8xx.h | 22
-rw-r--r-- drivers/scsi/nsp32.c | 471
-rw-r--r-- drivers/scsi/nsp32.h | 9
-rw-r--r-- drivers/scsi/pcmcia/Kconfig | 2
-rw-r--r-- drivers/scsi/pcmcia/aha152x_stub.c | 9
-rw-r--r-- drivers/scsi/pcmcia/fdomain_cs.c | 4
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 274
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.h | 19
-rw-r--r-- drivers/scsi/pcmcia/nsp_debug.c | 2
-rw-r--r-- drivers/scsi/pcmcia/qlogic_stub.c | 9
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 57
-rw-r--r-- drivers/scsi/pm8001/Makefile | 7
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 388
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.h | 5
-rw-r--r-- drivers/scsi/pm8001/pm8001_defs.h | 30
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 2277
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.h | 6
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 567
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 798
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 176
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 3006
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.h | 46
-rw-r--r-- drivers/scsi/pm8001/pm80xx_tracepoints.c | 10
-rw-r--r-- drivers/scsi/pm8001/pm80xx_tracepoints.h | 113
-rw-r--r-- drivers/scsi/pmcraid.c | 642
-rw-r--r-- drivers/scsi/pmcraid.h | 41
-rw-r--r-- drivers/scsi/ppa.c | 108
-rw-r--r-- drivers/scsi/ps3rom.c | 18
-rw-r--r-- drivers/scsi/qedf/Kconfig | 2
-rw-r--r-- drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 8
-rw-r--r-- drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 2
-rw-r--r-- drivers/scsi/qedf/qedf.h | 35
-rw-r--r-- drivers/scsi/qedf/qedf_attr.c | 31
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.c | 3
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.h | 1
-rw-r--r-- drivers/scsi/qedf/qedf_debugfs.c | 18
-rw-r--r-- drivers/scsi/qedf/qedf_els.c | 46
-rw-r--r-- drivers/scsi/qedf/qedf_fip.c | 2
-rw-r--r-- drivers/scsi/qedf/qedf_io.c | 187
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 462
-rw-r--r-- drivers/scsi/qedi/Kconfig | 2
-rw-r--r-- drivers/scsi/qedi/qedi.h | 10
-rw-r--r-- drivers/scsi/qedi/qedi_debugfs.c | 4
-rw-r--r-- drivers/scsi/qedi/qedi_fw.c | 396
-rw-r--r-- drivers/scsi/qedi/qedi_fw_api.c | 22
-rw-r--r-- drivers/scsi/qedi/qedi_fw_iscsi.h | 2
-rw-r--r-- drivers/scsi/qedi/qedi_gbl.h | 5
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 231
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.h | 11
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 314
-rw-r--r-- drivers/scsi/qedi/qedi_sysfs.c | 29
-rw-r--r-- drivers/scsi/qla1280.c | 95
-rw-r--r-- drivers/scsi/qla1280.h | 3
-rw-r--r-- drivers/scsi/qla2xxx/Kconfig | 6
-rw-r--r-- drivers/scsi/qla2xxx/Makefile | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 535
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 645
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.h | 40
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 1009
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.h | 498
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 1567
-rw-r--r-- drivers/scsi/qla2xxx/qla_dfs.c | 430
-rw-r--r-- drivers/scsi/qla2xxx/qla_edif.c | 3684
-rw-r--r-- drivers/scsi/qla2xxx/qla_edif.h | 148
-rw-r--r-- drivers/scsi/qla2xxx/qla_edif_bsg.h | 258
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 921
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 189
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 2049
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 1826
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 175
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 588
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 1356
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 838
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 88
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.c | 180
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.h | 43
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 332
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.h | 72
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 298
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.h | 59
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.c | 54
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.h | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 1695
-rw-r--r-- drivers/scsi/qla2xxx/qla_settings.h | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 369
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 567
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 263
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.c | 159
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.h | 11
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 11
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 206
-rw-r--r-- drivers/scsi/qla4xxx/Kconfig | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_83xx.c | 40
-rw-r--r-- drivers/scsi/qla4xxx/ql4_83xx.h | 20
-rw-r--r-- drivers/scsi/qla4xxx/ql4_attr.c | 44
-rw-r--r-- drivers/scsi/qla4xxx/ql4_bsg.c | 7
-rw-r--r-- drivers/scsi/qla4xxx/ql4_bsg.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_dbg.c | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_dbg.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 27
-rw-r--r-- drivers/scsi/qla4xxx/ql4_fw.h | 6
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 7
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 23
-rw-r--r-- drivers/scsi/qla4xxx/ql4_inline.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_iocb.c | 9
-rw-r--r-- drivers/scsi/qla4xxx/ql4_isr.c | 11
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 44
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nvram.c | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nvram.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 134
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.h | 20
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 152
-rw-r--r-- drivers/scsi/qla4xxx/ql4_version.h | 3
-rw-r--r-- drivers/scsi/qlogicfas.c | 6
-rw-r--r-- drivers/scsi/qlogicfas408.c | 151
-rw-r--r-- drivers/scsi/qlogicpti.c | 48
-rw-r--r-- drivers/scsi/scsi.c | 172
-rw-r--r-- drivers/scsi/scsi.h | 46
-rw-r--r-- drivers/scsi/scsi_bsg.c | 103
-rw-r--r-- drivers/scsi/scsi_common.c | 9
-rw-r--r-- drivers/scsi/scsi_debug.c | 3284
-rw-r--r-- drivers/scsi/scsi_debugfs.c | 8
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 6
-rw-r--r-- drivers/scsi/scsi_dh.c | 3
-rw-r--r-- drivers/scsi/scsi_error.c | 443
-rw-r--r-- drivers/scsi/scsi_ioctl.c | 828
-rw-r--r-- drivers/scsi/scsi_lib.c | 1562
-rw-r--r-- drivers/scsi/scsi_logging.c | 43
-rw-r--r-- drivers/scsi/scsi_pm.c | 108
-rw-r--r-- drivers/scsi/scsi_priv.h | 41
-rw-r--r-- drivers/scsi/scsi_proc.c | 4
-rw-r--r-- drivers/scsi/scsi_scan.c | 201
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 185
-rw-r--r-- drivers/scsi/scsi_trace.c | 6
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 541
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 924
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 34
-rw-r--r-- drivers/scsi/scsi_transport_spi.c | 40
-rw-r--r-- drivers/scsi/scsi_transport_srp.c | 21
-rw-r--r-- drivers/scsi/scsicam.c | 187
-rw-r--r-- drivers/scsi/sd.c | 1044
-rw-r--r-- drivers/scsi/sd.h | 83
-rw-r--r-- drivers/scsi/sd_dif.c | 10
-rw-r--r-- drivers/scsi/sd_zbc.c | 715
-rw-r--r-- drivers/scsi/sense_codes.h | 54
-rw-r--r-- drivers/scsi/ses.c | 24
-rw-r--r-- drivers/scsi/sg.c | 292
-rw-r--r-- drivers/scsi/sgiwd93.c | 40
-rw-r--r-- drivers/scsi/sim710.c | 14
-rw-r--r-- drivers/scsi/smartpqi/Kconfig | 14
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 415
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 4854
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sas_transport.c | 50
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.c | 90
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.h | 12
-rw-r--r-- drivers/scsi/sni_53c710.c | 21
-rw-r--r-- drivers/scsi/snic/cq_desc.h | 18
-rw-r--r-- drivers/scsi/snic/cq_enet_desc.h | 18
-rw-r--r-- drivers/scsi/snic/snic.h | 22
-rw-r--r-- drivers/scsi/snic/snic_attrs.c | 37
-rw-r--r-- drivers/scsi/snic/snic_ctl.c | 28
-rw-r--r-- drivers/scsi/snic/snic_debugfs.c | 77
-rw-r--r-- drivers/scsi/snic/snic_disc.c | 20
-rw-r--r-- drivers/scsi/snic/snic_disc.h | 18
-rw-r--r-- drivers/scsi/snic/snic_fwint.h | 20
-rw-r--r-- drivers/scsi/snic/snic_io.c | 18
-rw-r--r-- drivers/scsi/snic/snic_io.h | 18
-rw-r--r-- drivers/scsi/snic/snic_isr.c | 18
-rw-r--r-- drivers/scsi/snic/snic_main.c | 20
-rw-r--r-- drivers/scsi/snic/snic_res.c | 18
-rw-r--r-- drivers/scsi/snic/snic_res.h | 18
-rw-r--r-- drivers/scsi/snic/snic_scsi.c | 73
-rw-r--r-- drivers/scsi/snic/snic_stats.h | 18
-rw-r--r-- drivers/scsi/snic/snic_trc.c | 18
-rw-r--r-- drivers/scsi/snic/snic_trc.h | 21
-rw-r--r-- drivers/scsi/snic/vnic_cq.c | 26
-rw-r--r-- drivers/scsi/snic/vnic_cq.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_cq_fw.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_dev.c | 18
-rw-r--r-- drivers/scsi/snic/vnic_dev.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_devcmd.h | 20
-rw-r--r-- drivers/scsi/snic/vnic_intr.c | 18
-rw-r--r-- drivers/scsi/snic/vnic_intr.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_resource.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_snic.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_stats.h | 18
-rw-r--r-- drivers/scsi/snic/vnic_wq.c | 18
-rw-r--r-- drivers/scsi/snic/vnic_wq.h | 18
-rw-r--r-- drivers/scsi/snic/wq_enet_desc.h | 18
-rw-r--r-- drivers/scsi/sr.c | 411
-rw-r--r-- drivers/scsi/sr.h | 8
-rw-r--r-- drivers/scsi/sr_ioctl.c | 31
-rw-r--r-- drivers/scsi/sr_vendor.c | 12
-rw-r--r-- drivers/scsi/st.c | 237
-rw-r--r-- drivers/scsi/st.h | 5
-rw-r--r-- drivers/scsi/stex.c | 70
-rw-r--r-- drivers/scsi/storvsc_drv.c | 666
-rw-r--r-- drivers/scsi/sun3_scsi.c | 13
-rw-r--r-- drivers/scsi/sun3x_esp.c | 18
-rw-r--r-- drivers/scsi/sun_esp.c | 14
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_fw.c | 8
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 29
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.c | 17
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_nvram.c | 2
-rw-r--r-- drivers/scsi/ufs/Kconfig | 162
-rw-r--r-- drivers/scsi/ufs/Makefile | 14
-rw-r--r-- drivers/scsi/ufs/cdns-pltfrm.c | 341
-rw-r--r-- drivers/scsi/ufs/tc-dwc-g210-pci.c | 176
-rw-r--r-- drivers/scsi/ufs/tc-dwc-g210-pltfrm.c | 110
-rw-r--r-- drivers/scsi/ufs/tc-dwc-g210.c | 317
-rw-r--r-- drivers/scsi/ufs/tc-dwc-g210.h | 16
-rw-r--r-- drivers/scsi/ufs/ti-j721e-ufs.c | 90
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 604
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.h | 117
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 569
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.h | 85
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 1725
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.h | 267
-rw-r--r-- drivers/scsi/ufs/ufs-sysfs.c | 800
-rw-r--r-- drivers/scsi/ufs/ufs-sysfs.h | 17
-rw-r--r-- drivers/scsi/ufs/ufs.h | 556
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.c | 226
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.h | 23
-rw-r--r-- drivers/scsi/ufs/ufs_quirks.h | 103
-rw-r--r-- drivers/scsi/ufs/ufshcd-dwc.c | 151
-rw-r--r-- drivers/scsi/ufs/ufshcd-dwc.h | 23
-rw-r--r-- drivers/scsi/ufs/ufshcd-pci.c | 227
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 456
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.h | 53
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 8587
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 1115
-rw-r--r-- drivers/scsi/ufs/ufshci-dwc.h | 33
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 459
-rw-r--r-- drivers/scsi/ufs/unipro.h | 287
-rw-r--r-- drivers/scsi/virtio_scsi.c | 78
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 31
-rw-r--r-- drivers/scsi/vmw_pvscsi.h | 6
-rw-r--r-- drivers/scsi/wd33c93.c | 224
-rw-r--r-- drivers/scsi/wd33c93.h | 9
-rw-r--r-- drivers/scsi/wd719x.c | 27
-rw-r--r-- drivers/scsi/wd719x.h | 1
-rw-r--r-- drivers/scsi/xen-scsifront.c | 196
-rw-r--r-- drivers/scsi/zalon.c | 5
-rw-r--r-- drivers/scsi/zorro7xx.c | 2
-rw-r--r-- drivers/scsi/zorro_esp.c | 7
663 files changed, 129790 insertions, 75427 deletions
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index e2956741fbd1..5f65cb75f534 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
53c700_d.h
scsi_devinfo_tbl.c
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3337b1e80412..6cb9cca9565b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -128,14 +128,14 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
- u32 set_features, unsigned short current_fw_srl,
- unsigned short current_fw_arch_id,
- unsigned short current_fw_branch,
- unsigned short current_fw_build,
- unsigned short *fw_on_ctlr_srl,
- unsigned short *fw_on_ctlr_arch_id,
- unsigned short *fw_on_ctlr_branch,
- unsigned short *fw_on_ctlr_build,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
@@ -171,7 +171,7 @@ static ssize_t twa_show_stats(struct device *dev,
"Last sector count: %4d\n"
"Max sector count: %4d\n"
"SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
+ "AEN's: %4d\n",
TW_DRIVER_VERSION,
tw_dev->posted_request_count,
tw_dev->max_posted_request_count,
@@ -190,18 +190,20 @@ static ssize_t twa_show_stats(struct device *dev,
/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
.attr = {
- .name = "stats",
+ .name = "stats",
.mode = S_IRUGO,
},
.show = twa_show_stats
};
/* Host attributes initializer */
-static struct device_attribute *twa_host_attrs[] = {
- &twa_host_stats_attr,
+static struct attribute *twa_host_attrs[] = {
+ &twa_host_stats_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(twa_host);
+
/* File operations struct for character device */
static const struct file_operations twa_fops = {
.owner = THIS_MODULE,
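
The hunk above is the first half of this patch's sysfs conversion: the bare struct device_attribute pointer array becomes a NULL-terminated struct attribute list wrapped by ATTRIBUTE_GROUPS(), whose generated twa_host_groups array the host template picks up further down via .shost_groups. A minimal sketch of the pattern, using hypothetical foo_*/stats_show names rather than this driver's hand-rolled attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute; stands in for twa_show_stats(). */
static ssize_t stats_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(stats);	/* defines dev_attr_stats, bound to stats_show() */

static struct attribute *foo_attrs[] = {
	&dev_attr_stats.attr,	/* the embedded .attr, not the wrapper struct */
	NULL,			/* array must be NULL-terminated */
};
ATTRIBUTE_GROUPS(foo);		/* generates foo_group and foo_groups[] */

ATTRIBUTE_GROUPS(foo) is what emits the foo_groups[] pointer array; the equivalent twa_host_groups is consumed by the scsi_host_template hunk later in this file.
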
@@ -242,7 +244,7 @@ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
/* Keep reading the queue in case there are more aen's */
if (twa_aen_read_queue(tw_dev, request_id))
goto out2;
- else {
+ else {
retval = 0;
goto out;
}
@@ -303,10 +305,10 @@ static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
/* Initialize sglist */
memset(&sglist, 0, sizeof(TW_SG_Entry));
- sglist[0].length = TW_SECTOR_SIZE;
- sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+ sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
- if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
+ if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
goto out;
}
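
This hunk (and the matching one in twa_aen_read_queue() just below) is an endianness fix: the SGL descriptor fields are now stored byte-swapped for the controller via cpu_to_le32()/TW_CPU_TO_SGL(), and the alignment check moves to the raw CPU-order DMA address, since masking an already-swapped value gives the wrong answer on big-endian hosts. A reduced sketch of the idiom, with invented names:

#include <asm/byteorder.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Invented descriptor mirroring the __le32-typed SGL fields this patch
 * introduces in 3w-9xxx.h. */
struct example_sge {
	__le32 address;
	__le32 length;
};

static void example_fill_sge(struct example_sge *sge, u32 addr, u32 len)
{
	/* Test alignment on the CPU-order value, before byte-swapping. */
	if (addr & 0x3)
		pr_warn("unaligned SGL address\n");
	sge->address = cpu_to_le32(addr);
	sge->length = cpu_to_le32(len);
}
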
@@ -440,8 +442,8 @@ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
/* Initialize sglist */
memset(&sglist, 0, sizeof(TW_SG_Entry));
- sglist[0].length = TW_SECTOR_SIZE;
- sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+ sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
/* Mark internal command */
tw_dev->srb[request_id] = NULL;
@@ -497,13 +499,12 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
param->parameter_size_bytes = cpu_to_le16(4);
- /* Convert system time in UTC to local time seconds since last
+ /* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
- schedulertime = cpu_to_le32(schedulertime % 604800);
- memcpy(param->data, &schedulertime, sizeof(u32));
+ memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
/* Mark internal command */
tw_dev->srb[request_id] = NULL;
@@ -676,7 +677,9 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+ &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
retval = TW_IOCTL_ERROR_OS_ENOMEM;
goto out2;
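
TW_Ioctl_Buf_Apache ends in a legacy one-byte data_buffer array, which is why the old sizes all subtracted 1. The patch drops that idiom and adds the full header size instead (spending one spare byte), keeping the allocation and the copy_from_user()/copy_to_user() sizes in the following hunks consistent. With a true C99 flexible array member, the same calculation would nowadays be written with struct_size(); a hypothetical sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical layout: fixed header followed by a flexible array member
 * instead of the one-byte data_buffer[1] trick. */
struct example_ioctl_buf {
	u32 buffer_length;
	u8 data_buffer[];
};

static struct example_ioctl_buf *example_alloc(size_t payload)
{
	struct example_ioctl_buf *buf;

	/* struct_size() = sizeof(*buf) + payload, with overflow checking */
	buf = kzalloc(struct_size(buf, data_buffer, payload), GFP_KERNEL);
	return buf;
}
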
@@ -685,7 +688,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
goto out3;
/* See which ioctl we are doing */
@@ -729,7 +732,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
/* Now copy in the command packet response */
memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
-
+
/* Now complete the io */
spin_lock_irqsave(tw_dev->host->host_lock, flags);
tw_dev->posted_request_count--;
@@ -766,7 +769,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
if (tw_dev->aen_clobber) {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
tw_dev->aen_clobber = 0;
- } else
+ } else
tw_ioctl->driver_command.status = 0;
event_index = tw_dev->error_index;
} else {
@@ -867,11 +870,13 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+ cpu_addr, dma_handle);
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
@@ -939,13 +944,13 @@ out:
/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
- u32 status_reg_value, response_que_value;
+ u32 status_reg_value;
int count = 0, retval = 1;
status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
- response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
count++;
}
@@ -1000,19 +1005,13 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
if (print_host)
printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
tw_dev->host->host_no,
- TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
- full_command_packet->header.status_block.error,
- error_str[0] == '\0' ?
- twa_string_lookup(twa_error_table,
- full_command_packet->header.status_block.error) : error_str,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+ error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
full_command_packet->header.err_specific_desc);
else
printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
- TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
- full_command_packet->header.status_block.error,
- error_str[0] == '\0' ?
- twa_string_lookup(twa_error_table,
- full_command_packet->header.status_block.error) : error_str,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+ error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
full_command_packet->header.err_specific_desc);
}
@@ -1067,8 +1066,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
command_packet = &full_command_packet->command.oldcommand;
command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
- command_packet->size = TW_COMMAND_SIZE;
- command_packet->request_id = request_id;
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->request_id = request_id;
command_packet->byte6_offset.block_count = cpu_to_le16(1);
/* Now setup the param */
@@ -1106,14 +1105,14 @@ static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
- u32 set_features, unsigned short current_fw_srl,
- unsigned short current_fw_arch_id,
- unsigned short current_fw_branch,
- unsigned short current_fw_build,
- unsigned short *fw_on_ctlr_srl,
- unsigned short *fw_on_ctlr_arch_id,
- unsigned short *fw_on_ctlr_branch,
- unsigned short *fw_on_ctlr_build,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
u32 *init_connect_result)
{
TW_Command_Full *full_command_packet;
@@ -1124,17 +1123,16 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
full_command_packet = tw_dev->command_packet_virt[request_id];
memset(full_command_packet, 0, sizeof(TW_Command_Full));
full_command_packet->header.header_desc.size_header = 128;
-
+
tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
tw_initconnect->request_id = request_id;
tw_initconnect->message_credits = cpu_to_le16(message_credits);
- tw_initconnect->features = set_features;
/* Turn on 64-bit sgl support if we need to */
- tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+ set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
- tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+ tw_initconnect->features = cpu_to_le32(set_features);
if (set_features & TW_EXTENDED_INIT_CONNECT) {
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
@@ -1142,7 +1140,7 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
- } else
+ } else
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
/* Send command packet to the board */
@@ -1342,19 +1340,21 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
}
/* Report residual bytes for single sgl */
if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
- if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
- scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+ u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
+
+ if (length < scsi_bufflen(cmd))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
}
/* Now complete the io */
if (twa_command_mapped(cmd))
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
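
Two midlayer API updates meet in this hunk: the SCSI status byte is now composed with SAM_STAT_CHECK_CONDITION (the unshifted SAM status value, equal to the old CHECK_CONDITION << 1), and completion goes through the midlayer's scsi_done() helper instead of the per-command cmd->scsi_done pointer that queuecommand() used to stash. A skeletal, not driver-accurate, completion path:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Minimal sketch: complete a command straight from queuecommand(); real
 * drivers, as in the hunk above, complete from their interrupt handler. */
static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *cmd)
{
	cmd->result = DID_OK << 16;	/* host byte OK, status byte GOOD */
	scsi_done(cmd);			/* replaces cmd->scsi_done(cmd) */
	return 0;
}
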
@@ -1390,13 +1390,13 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
newcommand = &full_command_packet->command.newcommand;
newcommand->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+ TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
if (length) {
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
newcommand->sg_list[0].length = cpu_to_le32(length);
}
newcommand->sgl_entries__lunh =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+ TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
} else {
oldcommand = &full_command_packet->command.oldcommand;
oldcommand->request_id = request_id;
@@ -1407,7 +1407,7 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
else
sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
- sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
sgl->length = cpu_to_le32(length);
oldcommand->size += pae;
@@ -1455,7 +1455,7 @@ out:
/* This function will poll the status register for a flag */
static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
- u32 status_reg_value;
+ u32 status_reg_value;
unsigned long before;
int retval = 1;
@@ -1598,7 +1598,7 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
cmd->result = (DID_RESET << 16);
if (twa_command_mapped(cmd))
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
}
@@ -1698,9 +1698,6 @@ out:
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
- TW_Device_Extension *tw_dev;
-
- tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
if (capacity >= 0x200000) {
heads = 255;
@@ -1749,8 +1746,9 @@ out:
} /* End twa_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@@ -1768,9 +1766,6 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
goto out;
}
- /* Save done function into scsi_cmnd struct */
- SCpnt->scsi_done = done;
-
/* Get a free request id */
twa_get_request_id(tw_dev, &request_id);
@@ -1809,14 +1804,11 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
u32 num_sectors = 0x0;
int i, sg_count;
struct scsi_cmnd *srb = NULL;
- struct scatterlist *sglist = NULL, *sg;
+ struct scatterlist *sg;
int retval = 1;
- if (tw_dev->srb[request_id]) {
+ if (tw_dev->srb[request_id])
srb = tw_dev->srb[request_id];
- if (scsi_sglist(srb))
- sglist = scsi_sglist(srb);
- }
/* Initialize command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -1837,10 +1829,10 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (srb) {
command_packet->unit = srb->device->id;
command_packet->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+ TW_REQ_LUN_IN(srb->device->lun, request_id);
} else {
command_packet->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+ TW_REQ_LUN_IN(0, request_id);
command_packet->unit = 0;
}
@@ -1872,19 +1864,19 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
}
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+ command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
}
} else {
/* Internal cdb post */
for (i = 0; i < use_sg; i++) {
- command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
- command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
+ command_packet->sg_list[i].address = sglistarg[i].address;
+ command_packet->sg_list[i].length = sglistarg[i].length;
if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
goto out;
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+ command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
}
if (srb) {
@@ -1998,7 +1990,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .shost_attrs = twa_host_attrs,
+ .shost_groups = twa_host_groups,
.emulated = 1,
.no_write_same = 1,
};
@@ -2014,7 +2006,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
retval = pci_enable_device(pdev);
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
- goto out_disable_device;
+ return -ENODEV;
}
pci_set_master(pdev);
@@ -2109,7 +2101,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
(char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
- le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
+ le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
/* Try to enable MSI */
@@ -2191,10 +2183,10 @@ static void twa_remove(struct pci_dev *pdev)
twa_device_extension_count--;
} /* End twa_remove() */
-#ifdef CONFIG_PM
/* This function is called on PCI suspend */
-static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused twa_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
@@ -2214,32 +2206,19 @@ static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
}
TW_CLEAR_ALL_INTERRUPTS(tw_dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
} /* End twa_suspend() */
/* This function is called on PCI resume */
-static int twa_resume(struct pci_dev *pdev)
+static int __maybe_unused twa_resume(struct device *dev)
{
int retval = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- retval = pci_enable_device(pdev);
- if (retval) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
- return retval;
- }
-
- pci_set_master(pdev);
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2277,11 +2256,9 @@ static int twa_resume(struct pci_dev *pdev)
out_disable_device:
scsi_remove_host(host);
- pci_disable_device(pdev);
return retval;
} /* End twa_resume() */
-#endif
/* PCI Devices supported by this driver */
static struct pci_device_id twa_pci_tbl[] = {
@@ -2297,16 +2274,15 @@ static struct pci_device_id twa_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
+static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
+
/* pci_driver initializer */
static struct pci_driver twa_driver = {
.name = "3w-9xxx",
.id_table = twa_pci_tbl,
.probe = twa_probe,
.remove = twa_remove,
-#ifdef CONFIG_PM
- .suspend = twa_suspend,
- .resume = twa_resume,
-#endif
+ .driver.pm = &twa_pm_ops,
.shutdown = twa_shutdown
};
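
The suspend/resume hunks above convert the driver from legacy PCI power-management callbacks to generic dev_pm_ops: the PCI core now performs pci_save_state()/pci_set_power_state() and re-enables the device on resume, so the driver callbacks shrink to controller-specific quiesce and re-init work, and the CONFIG_PM #ifdefs are replaced by __maybe_unused plus SIMPLE_DEV_PM_OPS(). The resulting shape, reduced to a skeleton with placeholder names:

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* quiesce the controller; no pci_save_state()/pci_disable_device() */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* PCI core has already restored state and re-enabled the device */
	pci_try_set_mwi(pdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name = "example",
	.driver.pm = &example_pm_ops,	/* replaces .suspend/.resume hooks */
};
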
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d88cd3499bd5..0b23b0422e88 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -49,8 +49,8 @@
/* AEN string type */
typedef struct TAG_twa_message_type {
- unsigned int code;
- char* text;
+ unsigned int code;
+ char *text;
} twa_message_type;
/* AEN strings */
@@ -263,9 +263,9 @@ static twa_message_type twa_error_table[] = {
#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080
#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040
#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020
-#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
-#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
-#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
+#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
+#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
+#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
/* Status register bit definitions */
#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
@@ -284,25 +284,25 @@ static twa_message_type twa_error_table[] = {
#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
#define TW_STATUS_EXPECTED_BITS 0x00002000
#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
-#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
/* PCI related defines */
#define TW_PCI_CLEAR_PARITY_ERRORS 0xc100
#define TW_PCI_CLEAR_PCI_ABORT 0x2000
/* Command packet opcodes used by the driver */
-#define TW_OP_INIT_CONNECTION 0x1
-#define TW_OP_GET_PARAM 0x12
-#define TW_OP_SET_PARAM 0x13
-#define TW_OP_EXECUTE_SCSI 0x10
+#define TW_OP_INIT_CONNECTION 0x1
+#define TW_OP_GET_PARAM 0x12
+#define TW_OP_SET_PARAM 0x13
+#define TW_OP_EXECUTE_SCSI 0x10
#define TW_OP_DOWNLOAD_FIRMWARE 0x16
-#define TW_OP_RESET 0x1C
+#define TW_OP_RESET 0x1C
/* Asynchronous Event Notification (AEN) codes used by the driver */
-#define TW_AEN_QUEUE_EMPTY 0x0000
-#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
-#define TW_AEN_SEVERITY_ERROR 0x1
+#define TW_AEN_SEVERITY_ERROR 0x1
#define TW_AEN_SEVERITY_DEBUG 0x4
#define TW_AEN_NOT_RETRIEVED 0x1
#define TW_AEN_RETRIEVED 0x2
@@ -323,9 +323,9 @@ static twa_message_type twa_error_table[] = {
/* Misc defines */
#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
-#define TW_SECTOR_SIZE 512
-#define TW_ALIGNMENT_9000 4 /* 4 bytes */
-#define TW_ALIGNMENT_9000_SGL 0x3
+#define TW_SECTOR_SIZE 512
+#define TW_ALIGNMENT_9000 4 /* 4 bytes */
+#define TW_ALIGNMENT_9000_SGL 0x3
#define TW_MAX_UNITS 16
#define TW_MAX_UNITS_9650SE 32
#define TW_INIT_MESSAGE_CREDITS 0x100
@@ -338,7 +338,7 @@ static twa_message_type twa_error_table[] = {
#define TW_BASE_FW_SRL 24
#define TW_BASE_FW_BRANCH 0
#define TW_BASE_FW_BUILD 1
-#define TW_FW_SRL_LUNS_SUPPORTED 28
+#define TW_FW_SRL_LUNS_SUPPORTED 28
#define TW_Q_LENGTH 256
#define TW_Q_START 0
#define TW_MAX_SLOT 32
@@ -346,19 +346,19 @@ static twa_message_type twa_error_table[] = {
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_RESPONSE_DRAIN 256
#define TW_MAX_AEN_DRAIN 255
-#define TW_IN_RESET 2
+#define TW_IN_RESET 2
#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
-#define TW_MAX_SECTORS 256
-#define TW_AEN_WAIT_TIME 1000
-#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
-#define TW_MAX_CDB_LEN 16
-#define TW_ISR_DONT_COMPLETE 2
-#define TW_ISR_DONT_RESULT 3
-#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
-#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_COMMAND_OFFSET 128 /* 128 bytes */
-#define TW_VERSION_TABLE 0x0402
+#define TW_MAX_SECTORS 256
+#define TW_AEN_WAIT_TIME 1000
+#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
+#define TW_MAX_CDB_LEN 16
+#define TW_ISR_DONT_COMPLETE 2
+#define TW_ISR_DONT_RESULT 3
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
+#define TW_VERSION_TABLE 0x0402
#define TW_TIMEKEEP_TABLE 0x040A
#define TW_INFORMATION_TABLE 0x0403
#define TW_PARAM_FWVER 3
@@ -367,22 +367,22 @@ static twa_message_type twa_error_table[] = {
#define TW_PARAM_BIOSVER_LENGTH 16
#define TW_PARAM_PORTCOUNT 3
#define TW_PARAM_PORTCOUNT_LENGTH 1
-#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */
-#define TW_MAX_SENSE_LENGTH 256
-#define TW_EVENT_SOURCE_AEN 0x1000
-#define TW_EVENT_SOURCE_COMMAND 0x1001
-#define TW_EVENT_SOURCE_PCHIP 0x1002
-#define TW_EVENT_SOURCE_DRIVER 0x1003
+#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */
+#define TW_MAX_SENSE_LENGTH 256
+#define TW_EVENT_SOURCE_AEN 0x1000
+#define TW_EVENT_SOURCE_COMMAND 0x1001
+#define TW_EVENT_SOURCE_PCHIP 0x1002
+#define TW_EVENT_SOURCE_DRIVER 0x1003
#define TW_IOCTL_GET_COMPATIBILITY_INFO 0x101
-#define TW_IOCTL_GET_LAST_EVENT 0x102
-#define TW_IOCTL_GET_FIRST_EVENT 0x103
-#define TW_IOCTL_GET_NEXT_EVENT 0x104
-#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105
-#define TW_IOCTL_GET_LOCK 0x106
-#define TW_IOCTL_RELEASE_LOCK 0x107
-#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108
+#define TW_IOCTL_GET_LAST_EVENT 0x102
+#define TW_IOCTL_GET_FIRST_EVENT 0x103
+#define TW_IOCTL_GET_NEXT_EVENT 0x104
+#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105
+#define TW_IOCTL_GET_LOCK 0x106
+#define TW_IOCTL_RELEASE_LOCK 0x107
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108
#define TW_IOCTL_ERROR_STATUS_NOT_LOCKED 0x1001 // Not locked
-#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked
+#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked
#define TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS 0x1003 // No more events
#define TW_IOCTL_ERROR_STATUS_AEN_CLOBBER 0x1004 // AEN clobber occurred
#define TW_IOCTL_ERROR_OS_EFAULT -EFAULT // Bad address
@@ -397,12 +397,12 @@ static twa_message_type twa_error_table[] = {
#define TW_SENSE_DATA_LENGTH 18
#define TW_STATUS_CHECK_CONDITION 2
#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
-#define TW_ERROR_UNIT_OFFLINE 0x128
+#define TW_ERROR_UNIT_OFFLINE 0x128
#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
-#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6
+#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6
#define TW_DRIVER TW_MESSAGE_SOURCE_LINUX_DRIVER
-#define TW_MESSAGE_SOURCE_LINUX_OS 9
+#define TW_MESSAGE_SOURCE_LINUX_OS 9
#define TW_OS TW_MESSAGE_SOURCE_LINUX_OS
#ifndef PCI_DEVICE_ID_3WARE_9000
#define PCI_DEVICE_ID_3WARE_9000 0x1002
@@ -434,24 +434,38 @@ static twa_message_type twa_error_table[] = {
#define TW_RESID_OUT(x) ((x >> 4) & 0xff)
/* request_id: 12, lun: 4 */
-#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
-#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
+#define TW_REQ_LUN_IN(lun, request_id) \
+ cpu_to_le16(((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((le16_to_cpu(lun) >> 12) & 0xf)
/* Macros */
#define TW_CONTROL_REG_ADDR(x) (x->base_addr)
#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
-#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
-#define TW_COMMAND_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x20)
-#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
-#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
-#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_DISABLE_INTERRUPTS(x) (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_MASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_UNMASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \
+#define TW_COMMAND_QUEUE_REG_ADDR(x) \
+ (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
+#define TW_COMMAND_QUEUE_REG_ADDR_LARGE(x) \
+ ((unsigned char __iomem *)x->base_addr + 0x20)
+#define TW_RESPONSE_QUEUE_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + 0xC)
+#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) \
+ ((unsigned char __iomem *)x->base_addr + 0x30)
+#define TW_CLEAR_ALL_INTERRUPTS(x) \
+ (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_ATTENTION_INTERRUPT(x) \
+ (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_HOST_INTERRUPT(x) \
+ (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_DISABLE_INTERRUPTS(x) \
+ (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) \
+ (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_MASK_COMMAND_INTERRUPT(x) \
+ (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_UNMASK_COMMAND_INTERRUPT(x) \
+ (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \
TW_CONTROL_CLEAR_HOST_INTERRUPT | \
TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
TW_CONTROL_MASK_COMMAND_INTERRUPT | \
@@ -469,70 +483,75 @@ printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
#define TW_APACHE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 72 : 109)
#define TW_ESCALADE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 41 : 62)
#define TW_PADDING_LENGTH (sizeof(dma_addr_t) > 4 ? 8 : 0)
-#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
-#pragma pack(1)
+#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+typedef __le64 twa_addr_t;
+#define TW_CPU_TO_SGL(x) cpu_to_le64(x)
+#else
+typedef __le32 twa_addr_t;
+#define TW_CPU_TO_SGL(x) cpu_to_le32(x)
+#endif
/* Scatter Gather List Entry */
typedef struct TAG_TW_SG_Entry {
- dma_addr_t address;
- u32 length;
-} TW_SG_Entry;
+ twa_addr_t address;
+ __le32 length;
+} __packed TW_SG_Entry;
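
Replacing dma_addr_t/u32 with twa_addr_t/__le32 gives the on-wire structures sparse-checkable endianness types while still tracking the configured dma_addr_t width. A sketch of how one entry would be filled under these definitions (twa_fill_sgl_entry is a hypothetical helper, not in the driver):

/* TW_CPU_TO_SGL() resolves to cpu_to_le32() or cpu_to_le64() to match
 * twa_addr_t, while length is always a 32-bit little-endian value. */
static void twa_fill_sgl_entry(TW_SG_Entry *entry, dma_addr_t addr, u32 len)
{
	entry->address = TW_CPU_TO_SGL(addr);
	entry->length = cpu_to_le32(len);
}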
/* Command Packet */
typedef struct TW_Command {
- unsigned char opcode__sgloffset;
- unsigned char size;
- unsigned char request_id;
- unsigned char unit__hostid;
+ u8 opcode__sgloffset;
+ u8 size;
+ u8 request_id;
+ u8 unit__hostid;
/* Second DWORD */
- unsigned char status;
- unsigned char flags;
+ u8 status;
+ u8 flags;
union {
- unsigned short block_count;
- unsigned short parameter_count;
+ __le16 block_count;
+ __le16 parameter_count;
} byte6_offset;
union {
struct {
- u32 lba;
- TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
- dma_addr_t padding;
+ __le32 lba;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ twa_addr_t padding;
} io;
struct {
- TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
- u32 padding;
- dma_addr_t padding2;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ __le32 padding;
+ twa_addr_t padding2;
} param;
} byte8_offset;
} TW_Command;
/* Command Packet for 9000+ controllers */
typedef struct TAG_TW_Command_Apache {
- unsigned char opcode__reserved;
- unsigned char unit;
- unsigned short request_id__lunl;
- unsigned char status;
- unsigned char sgl_offset;
- unsigned short sgl_entries__lunh;
- unsigned char cdb[16];
- TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
- unsigned char padding[TW_PADDING_LENGTH];
+ u8 opcode__reserved;
+ u8 unit;
+ __le16 request_id__lunl;
+ u8 status;
+ u8 sgl_offset;
+ __le16 sgl_entries__lunh;
+ u8 cdb[16];
+ TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
+ u8 padding[TW_PADDING_LENGTH];
} TW_Command_Apache;
/* New command packet header */
typedef struct TAG_TW_Command_Apache_Header {
unsigned char sense_data[TW_SENSE_DATA_LENGTH];
struct {
- char reserved[4];
- unsigned short error;
- unsigned char padding;
- unsigned char severity__reserved;
+ u8 reserved[4];
+ __le16 error;
+ u8 padding;
+ u8 severity__reserved;
} status_block;
unsigned char err_specific_desc[98];
struct {
- unsigned char size_header;
- unsigned short reserved;
- unsigned char size_sense;
+ u8 size_header;
+ u8 reserved[2];
+ u8 size_sense;
} header_desc;
} TW_Command_Apache_Header;
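
Note the header_desc change from a 16-bit reserved field to u8 reserved[2]: with #pragma pack(1) gone, a u16 after a lone u8 would attract a padding byte and grow the header past its wire size. Every member is now naturally aligned, so the 128-byte layout holds by construction (18 sense + 8 status_block + 98 desc + 4 header_desc). A hypothetical compile-time check, not part of the driver:

static_assert(sizeof(TW_Command_Apache_Header) == 128,
	      "TW_Command_Apache_Header must remain 128 bytes");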
@@ -547,19 +566,19 @@ typedef struct TAG_TW_Command_Full {
/* Initconnection structure */
typedef struct TAG_TW_Initconnect {
- unsigned char opcode__reserved;
- unsigned char size;
- unsigned char request_id;
- unsigned char res2;
- unsigned char status;
- unsigned char flags;
- unsigned short message_credits;
- u32 features;
- unsigned short fw_srl;
- unsigned short fw_arch_id;
- unsigned short fw_branch;
- unsigned short fw_build;
- u32 result;
+ u8 opcode__reserved;
+ u8 size;
+ u8 request_id;
+ u8 res2;
+ u8 status;
+ u8 flags;
+ __le16 message_credits;
+ __le32 features;
+ __le16 fw_srl;
+ __le16 fw_arch_id;
+ __le16 fw_branch;
+ __le16 fw_build;
+ __le32 result;
} TW_Initconnect;
/* Event info structure */
@@ -586,9 +605,9 @@ typedef struct TAG_TW_Ioctl_Driver_Command {
typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
- char padding[488];
+ char padding[488];
TW_Command_Full firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_Ioctl_Buf_Apache;
/* Lock structure for ioctl get/release lock */
@@ -600,11 +619,11 @@ typedef struct TAG_TW_Lock {
/* GetParam descriptor */
typedef struct {
- unsigned short table_id;
- unsigned short parameter_id;
- unsigned short parameter_size_bytes;
- unsigned short actual_parameter_size_bytes;
- unsigned char data[1];
+ __le16 table_id;
+ __le16 parameter_id;
+ __le16 parameter_size_bytes;
+ __le16 actual_parameter_size_bytes;
+ u8 data[];
} TW_Param_Apache, *PTW_Param_Apache;
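
data[1] becomes a C99 flexible array member, so sizeof(TW_Param_Apache) now counts only the fixed 8-byte descriptor and the payload begins exactly at the struct's end. In this driver the descriptor is carved out of preallocated DMA buffers, but a hypothetical heap allocation shows the idiom:

/* Hypothetical allocation sketch; struct_size() (<linux/overflow.h>)
 * computes sizeof(*param) + data_len with overflow checking. */
TW_Param_Apache *param = kzalloc(struct_size(param, data, data_len),
				 GFP_KERNEL);
if (!param)
	return -ENOMEM;
param->parameter_size_bytes = cpu_to_le16(data_len);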
/* Response queue */
@@ -631,13 +650,11 @@ typedef struct TAG_TW_Compatibility_Info
unsigned short fw_on_ctlr_build;
} TW_Compatibility_Info;
-#pragma pack()
-
typedef struct TAG_TW_Device_Extension {
- u32 __iomem *base_addr;
- unsigned long *generic_buffer_virt[TW_Q_LENGTH];
- dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
- TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
+ u32 __iomem *base_addr;
+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
dma_addr_t command_packet_phys[TW_Q_LENGTH];
struct pci_dev *tw_pci_dev;
struct scsi_cmnd *srb[TW_Q_LENGTH];
@@ -647,10 +664,10 @@ typedef struct TAG_TW_Device_Extension {
unsigned char pending_queue[TW_Q_LENGTH];
unsigned char pending_head;
unsigned char pending_tail;
- int state[TW_Q_LENGTH];
+ int state[TW_Q_LENGTH];
unsigned int posted_request_count;
unsigned int max_posted_request_count;
- unsigned int pending_request_count;
+ unsigned int pending_request_count;
unsigned int max_pending_request_count;
unsigned int max_sgl_entries;
unsigned int sgl_entries;
@@ -661,12 +678,12 @@ typedef struct TAG_TW_Device_Extension {
struct Scsi_Host *host;
long flags;
int reset_print;
- TW_Event *event_queue[TW_Q_LENGTH];
- unsigned char error_index;
+ TW_Event *event_queue[TW_Q_LENGTH];
+ unsigned char error_index;
unsigned char event_queue_wrapped;
- unsigned int error_sequence_id;
- int ioctl_sem_lock;
- ktime_t ioctl_time;
+ unsigned int error_sequence_id;
+ int ioctl_sem_lock;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index dda6fa857709..3ebe66151dcb 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -120,7 +120,7 @@ static struct bin_attribute twl_sysfs_aen_read_attr = {
.attr = {
.name = "3ware_aen_read",
.mode = S_IRUSR,
- },
+ },
.size = 0,
.read = twl_sysfs_aen_read
};
@@ -151,7 +151,7 @@ static struct bin_attribute twl_sysfs_compat_info_attr = {
.attr = {
.name = "3ware_compat_info",
.mode = S_IRUSR,
- },
+ },
.size = 0,
.read = twl_sysfs_compat_info
};
@@ -174,7 +174,7 @@ static ssize_t twl_show_stats(struct device *dev,
"Last sector count: %4d\n"
"Max sector count: %4d\n"
"SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
+ "AEN's: %4d\n",
TW_DRIVER_VERSION,
tw_dev->posted_request_count,
tw_dev->max_posted_request_count,
@@ -191,18 +191,20 @@ static ssize_t twl_show_stats(struct device *dev,
/* stats sysfs attribute initializer */
static struct device_attribute twl_host_stats_attr = {
.attr = {
- .name = "3ware_stats",
+ .name = "3ware_stats",
.mode = S_IRUGO,
},
.show = twl_show_stats
};
/* Host attributes initializer */
-static struct device_attribute *twl_host_attrs[] = {
- &twl_host_stats_attr,
+static struct attribute *twl_host_attrs[] = {
+ &twl_host_stats_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(twl_host);
+
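+/* The sysfs hookup moves from an array of struct device_attribute
+ * pointers to a plain struct attribute array; ATTRIBUTE_GROUPS(twl_host)
+ * then generates the group wrappers that .shost_groups consumes in the
+ * host template below. Roughly what the macro expands to (simplified):
+ *
+ *	static const struct attribute_group twl_host_group = {
+ *		.attrs = twl_host_attrs,
+ *	};
+ *	static const struct attribute_group *twl_host_groups[] = {
+ *		&twl_host_group,
+ *		NULL,
+ *	};
+ */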
/* This function will look up an AEN severity string */
static char *twl_aen_severity_lookup(unsigned char severity_code)
{
@@ -295,14 +297,11 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
TW_Command_Apache *command_packet;
int i, sg_count;
struct scsi_cmnd *srb = NULL;
- struct scatterlist *sglist = NULL, *sg;
+ struct scatterlist *sg;
int retval = 1;
- if (tw_dev->srb[request_id]) {
+ if (tw_dev->srb[request_id])
srb = tw_dev->srb[request_id];
- if (scsi_sglist(srb))
- sglist = scsi_sglist(srb);
- }
/* Initialize command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -432,7 +431,7 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
param->parameter_size_bytes = cpu_to_le16(4);
- /* Convert system time in UTC to local time seconds since last
+ /* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
@@ -483,7 +482,7 @@ static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
/* Keep reading the queue in case there are more aen's */
if (twl_aen_read_queue(tw_dev, request_id))
goto out2;
- else {
+ else {
retval = 0;
goto out;
}
@@ -548,7 +547,7 @@ static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int se
msleep(50);
}
retval = 0;
-out:
+out:
return retval;
} /* End twl_poll_response() */
@@ -802,7 +801,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
/* Now copy in the command packet response */
memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
-
+
/* Now complete the io */
spin_lock_irqsave(tw_dev->host->host_lock, flags);
tw_dev->posted_request_count--;
@@ -863,7 +862,6 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
TW_Command_Full *full_command_packet;
unsigned short error;
char *error_str;
- int retval = 1;
header = tw_dev->sense_buffer_virt[i];
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -879,7 +877,7 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
tw_dev->host->host_no,
TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
header->status_block.error,
- error_str,
+ error_str,
header->err_specific_desc);
else
printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
@@ -895,7 +893,7 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
goto out;
}
out:
- return retval;
+ return 1;
} /* End twl_fill_sense() */
/* This function will free up device extension resources */
@@ -937,8 +935,8 @@ static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
command_packet = &full_command_packet->command.oldcommand;
command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
- command_packet->size = TW_COMMAND_SIZE;
- command_packet->request_id = request_id;
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->request_id = request_id;
command_packet->byte6_offset.block_count = cpu_to_le16(1);
/* Now setup the param */
@@ -968,14 +966,14 @@ static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
/* This function will send an initconnection command to controller */
static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
- u32 set_features, unsigned short current_fw_srl,
- unsigned short current_fw_arch_id,
- unsigned short current_fw_branch,
- unsigned short current_fw_build,
- unsigned short *fw_on_ctlr_srl,
- unsigned short *fw_on_ctlr_arch_id,
- unsigned short *fw_on_ctlr_branch,
- unsigned short *fw_on_ctlr_build,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
u32 *init_connect_result)
{
TW_Command_Full *full_command_packet;
@@ -986,7 +984,7 @@ static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
full_command_packet = tw_dev->command_packet_virt[request_id];
memset(full_command_packet, 0, sizeof(TW_Command_Full));
full_command_packet->header.header_desc.size_header = 128;
-
+
tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
tw_initconnect->request_id = request_id;
@@ -1004,7 +1002,7 @@ static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
- } else
+ } else
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
/* Send command packet to the board */
@@ -1211,7 +1209,7 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
if (!error)
cmd->result = (DID_OK << 16);
-
+
/* Report residual bytes for single sgl */
if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
@@ -1220,7 +1218,7 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
/* Now complete the io */
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twl_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
@@ -1245,7 +1243,7 @@ static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value,
reg_value = readl(reg);
before = jiffies;
- while ((reg_value & value) != result) {
+ while ((reg_value & value) != result) {
reg_value = readl(reg);
if (time_after(jiffies, before + HZ * seconds))
goto out;
@@ -1373,7 +1371,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
if (cmd) {
cmd->result = (DID_RESET << 16);
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
}
@@ -1408,9 +1406,6 @@ out:
static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
int heads, sectors;
- TW_Device_Extension *tw_dev;
-
- tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
if (capacity >= 0x200000) {
heads = 255;
@@ -1457,8 +1452,9 @@ out:
} /* End twl_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@@ -1468,9 +1464,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
goto out;
}
- /* Save done function into scsi_cmnd struct */
- SCpnt->scsi_done = done;
-
/* Get a free request id */
twl_get_request_id(tw_dev, &request_id);
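
queuecommand implementations no longer receive a completion callback: the midlayer owns it, and drivers call scsi_done() directly. The local `done` alias above keeps this function's body unchanged. A sketch of the completion path after the conversion (twl_complete_cmd is a hypothetical helper, not in the driver):

static void twl_complete_cmd(struct scsi_cmnd *cmd, int host_byte)
{
	cmd->result = (host_byte << 16);
	scsi_dma_unmap(cmd);
	scsi_done(cmd);		/* replaces cmd->scsi_done(cmd) */
}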
@@ -1524,7 +1517,7 @@ static void twl_shutdown(struct pci_dev *pdev)
tw_dev = (TW_Device_Extension *)host->hostdata;
- if (tw_dev->online)
+ if (tw_dev->online)
__twl_shutdown(tw_dev);
} /* End twl_shutdown() */
@@ -1551,7 +1544,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .shost_attrs = twl_host_attrs,
+ .shost_groups = twl_host_groups,
.emulated = 1,
.no_write_same = 1,
};
@@ -1574,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
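
The 32-bit retry is dropped on the reasoning used in similar kernel-wide cleanups: if a 64-bit coherent mask cannot be satisfied, a 32-bit one cannot be either, so the fallback was dead code. A condensed sketch of the resulting probe-time DMA setup:

retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (retval) {
	TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
	return -ENODEV;	/* the driver unwinds via its error labels */
}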
@@ -1675,7 +1666,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Re-enable interrupts on the card */
TWL_UNMASK_INTERRUPTS(tw_dev);
-
+
/* Finally, scan the host */
scsi_scan_host(host);
@@ -1756,11 +1747,10 @@ static void twl_remove(struct pci_dev *pdev)
twl_device_extension_count--;
} /* End twl_remove() */
-#ifdef CONFIG_PM
/* This function is called on PCI suspend */
-static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused twl_suspend(struct device *dev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct Scsi_Host *host = dev_get_drvdata(dev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
@@ -1779,37 +1769,21 @@ static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
/* Clear doorbell interrupt */
TWL_CLEAR_DB_INTERRUPT(tw_dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
} /* End twl_suspend() */
/* This function is called on PCI resume */
-static int twl_resume(struct pci_dev *pdev)
+static int __maybe_unused twl_resume(struct device *dev)
{
int retval = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- retval = pci_enable_device(pdev);
- if (retval) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
- return retval;
- }
-
- pci_set_master(pdev);
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
@@ -1842,11 +1816,9 @@ static int twl_resume(struct pci_dev *pdev)
out_disable_device:
scsi_remove_host(host);
- pci_disable_device(pdev);
return retval;
} /* End twl_resume() */
-#endif
/* PCI Devices supported by this driver */
static struct pci_device_id twl_pci_tbl[] = {
@@ -1855,16 +1827,15 @@ static struct pci_device_id twl_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
+static SIMPLE_DEV_PM_OPS(twl_pm_ops, twl_suspend, twl_resume);
+
/* pci_driver initializer */
static struct pci_driver twl_driver = {
.name = "3w-sas",
.id_table = twl_pci_tbl,
.probe = twl_probe,
.remove = twl_remove,
-#ifdef CONFIG_PM
- .suspend = twl_suspend,
- .resume = twl_resume,
-#endif
+ .driver.pm = &twl_pm_ops,
.shutdown = twl_shutdown
};
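
The legacy pci_driver .suspend/.resume callbacks (taking pm_message_t and doing their own pci_save_state()/pci_set_power_state()/pci_enable_device() bookkeeping) give way to dev_pm_ops, where the PCI core handles that housekeeping. SIMPLE_DEV_PM_OPS(twl_pm_ops, twl_suspend, twl_resume) declares roughly the following (simplified; the real macro compiles the callbacks out when CONFIG_PM_SLEEP is unset, which is why the functions are marked __maybe_unused):

static const struct dev_pm_ops twl_pm_ops = {
	.suspend  = twl_suspend,
	.resume   = twl_resume,
	.freeze   = twl_suspend,
	.thaw     = twl_resume,
	.poweroff = twl_suspend,
	.restore  = twl_resume,
};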
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index 05e77d84c16d..b0508039a280 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -52,17 +52,17 @@ static char *twl_aen_severity_table[] =
};
/* Liberator register offsets */
-#define TWL_STATUS 0x0 /* Status */
-#define TWL_HIBDB 0x20 /* Inbound doorbell */
-#define TWL_HISTAT 0x30 /* Host interrupt status */
-#define TWL_HIMASK 0x34 /* Host interrupt mask */
+#define TWL_STATUS 0x0 /* Status */
+#define TWL_HIBDB 0x20 /* Inbound doorbell */
+#define TWL_HISTAT 0x30 /* Host interrupt status */
+#define TWL_HIMASK 0x34 /* Host interrupt mask */
#define TWL_HOBDB 0x9C /* Outbound doorbell */
-#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
-#define TWL_SCRPD3 0xBC /* Scratchpad */
-#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
-#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
-#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
-#define TWL_HOBQPH 0xCC /* Host outbound Q high */
+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
+#define TWL_SCRPD3 0xBC /* Scratchpad */
+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
#define TWL_HISTATUS_VALID_INTERRUPT 0xC
#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
@@ -80,12 +80,12 @@ static char *twl_aen_severity_table[] =
#define TW_OP_EXECUTE_SCSI 0x10
/* Asynchronous Event Notification (AEN) codes used by the driver */
-#define TW_AEN_QUEUE_EMPTY 0x0000
-#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
-#define TW_AEN_SEVERITY_ERROR 0x1
-#define TW_AEN_SEVERITY_DEBUG 0x4
-#define TW_AEN_NOT_RETRIEVED 0x1
+#define TW_AEN_SEVERITY_ERROR 0x1
+#define TW_AEN_SEVERITY_DEBUG 0x4
+#define TW_AEN_NOT_RETRIEVED 0x1
/* Command state defines */
#define TW_S_INITIAL 0x1 /* Initial state */
@@ -101,7 +101,7 @@ static char *twl_aen_severity_table[] =
#define TW_CURRENT_DRIVER_BRANCH 0
/* Misc defines */
-#define TW_SECTOR_SIZE 512
+#define TW_SECTOR_SIZE 512
#define TW_MAX_UNITS 32
#define TW_INIT_MESSAGE_CREDITS 0x100
#define TW_INIT_COMMAND_PACKET_SIZE 0x3
@@ -116,15 +116,15 @@ static char *twl_aen_severity_table[] =
#define TW_MAX_RESET_TRIES 2
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_AEN_DRAIN 255
-#define TW_IN_RESET 2
+#define TW_IN_RESET 2
#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
-#define TW_MAX_SECTORS 256
-#define TW_MAX_CDB_LEN 16
-#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
-#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_COMMAND_OFFSET 128 /* 128 bytes */
-#define TW_VERSION_TABLE 0x0402
+#define TW_MAX_SECTORS 256
+#define TW_MAX_CDB_LEN 16
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
+#define TW_VERSION_TABLE 0x0402
#define TW_TIMEKEEP_TABLE 0x040A
#define TW_INFORMATION_TABLE 0x0403
#define TW_PARAM_FWVER 3
@@ -136,15 +136,15 @@ static char *twl_aen_severity_table[] =
#define TW_PARAM_PHY_SUMMARY_TABLE 1
#define TW_PARAM_PHYCOUNT 2
#define TW_PARAM_PHYCOUNT_LENGTH 1
-#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
#define TW_ALLOCATION_LENGTH 128
#define TW_SENSE_DATA_LENGTH 18
#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
-#define TW_ERROR_UNIT_OFFLINE 0x128
+#define TW_ERROR_UNIT_OFFLINE 0x128
#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
-#define TW_DRIVER 6
+#define TW_DRIVER 6
#ifndef PCI_DEVICE_ID_3WARE_9750
#define PCI_DEVICE_ID_3WARE_9750 0x1010
#endif
@@ -167,25 +167,41 @@ static char *twl_aen_severity_table[] =
#define TW_NOTMFA_OUT(x) (x & 0x1)
/* request_id: 12, lun: 4 */
-#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_REQ_LUN_IN(lun, request_id) \
+ (((lun << 12) & 0xf000) | (request_id & 0xfff))
#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
/* Register access macros */
-#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
-#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
-#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
-#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
-#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
-#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
-#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
-#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
-#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
-#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
-#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
-#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
-#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
-#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
-#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
+#define TWL_STATUS_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
+#define TWL_HOBQPL_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
+#define TWL_HOBQPH_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
+#define TWL_HOBDB_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
+#define TWL_HOBDBC_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
+#define TWL_HIMASK_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
+#define TWL_HISTAT_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
+#define TWL_HIBQPH_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
+#define TWL_HIBQPL_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
+#define TWL_HIBDB_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
+#define TWL_SCRPD3_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
+#define TWL_MASK_INTERRUPTS(x) \
+ (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_UNMASK_INTERRUPTS(x) \
+ (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_CLEAR_DB_INTERRUPT(x) \
+ (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
+#define TWL_SOFT_RESET(x) \
+ (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
/* Macros */
#define TW_PRINTK(h,a,b,c) { \
@@ -317,7 +333,7 @@ typedef struct TAG_TW_Ioctl_Driver_Command {
typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
- char padding[488];
+ char padding[488];
TW_Command_Full firmware_command;
char data_buffer[1];
} TW_Ioctl_Buf_Apache;
@@ -352,10 +368,10 @@ typedef struct TAG_TW_Compatibility_Info
#pragma pack()
typedef struct TAG_TW_Device_Extension {
- void __iomem *base_addr;
- unsigned long *generic_buffer_virt[TW_Q_LENGTH];
- dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
- TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
+ void __iomem *base_addr;
+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
dma_addr_t command_packet_phys[TW_Q_LENGTH];
TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
@@ -364,7 +380,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char free_queue[TW_Q_LENGTH];
unsigned char free_head;
unsigned char free_tail;
- int state[TW_Q_LENGTH];
+ int state[TW_Q_LENGTH];
unsigned int posted_request_count;
unsigned int max_posted_request_count;
unsigned int max_sgl_entries;
@@ -375,9 +391,9 @@ typedef struct TAG_TW_Device_Extension {
unsigned int aen_count;
struct Scsi_Host *host;
long flags;
- TW_Event *event_queue[TW_Q_LENGTH];
- unsigned char error_index;
- unsigned int error_sequence_id;
+ TW_Event *event_queue[TW_Q_LENGTH];
+ unsigned char error_index;
+ unsigned int error_sequence_id;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index fb6444d0409c..ffdecb12d654 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,52 +1,52 @@
-/*
+/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
Copyright (C) 1999-2010 3ware Inc.
- Kernel compatibility By: Andre Hedrick <andre@suse.com>
+ Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
-
+
Further tiny build fixes and trivial hoovering Alan Cox
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- NO WARRANTY
- THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- solely responsible for determining the appropriateness of using and
- distributing the Program and assumes all risks associated with its
- exercise of rights under this Agreement, including but not limited to
- the risks and costs of program errors, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- DISCLAIMER OF LIABILITY
- NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Bugs/Comments/Suggestions should be mailed to:
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
aradford@gmail.com
@@ -70,7 +70,7 @@
1.02.00.003 - Fix tw_interrupt() to report error to scsi layer when
controller status is non-zero.
Added handling of request_sense opcode.
- Fix possible null pointer dereference in
+ Fix possible null pointer dereference in
tw_reset_device_extension()
1.02.00.004 - Add support for device id of 3ware 7000 series controllers.
Make tw_setfeature() call with interrupts disabled.
@@ -239,7 +239,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev);
/* This function will check the status register for unexpected bits */
static int tw_check_bits(u32 status_reg_value)
{
- if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) {
+ if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) {
dprintk(KERN_WARNING "3w-xxxx: tw_check_bits(): No expected bits (0x%x).\n", status_reg_value);
return 1;
}
@@ -291,7 +291,7 @@ static int tw_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value, int
}
return 1;
}
-
+
return 0;
} /* End tw_decode_bits() */
@@ -390,7 +390,7 @@ static int tw_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
} else {
tw_dev->pending_tail = tw_dev->pending_tail + 1;
}
- }
+ }
TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
return 1;
}
@@ -403,7 +403,7 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
int i;
TW_Command *command;
- dprintk(KERN_WARNING "3w-xxxx: tw_decode_sense()\n");
+ dprintk(KERN_WARNING "3w-xxxx: tw_decode_sense()\n");
command = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
printk(KERN_WARNING "3w-xxxx: scsi%d: Command failed: status = 0x%x, flags = 0x%x, unit #%d.\n", tw_dev->host->host_no, command->status, command->flags, TW_UNIT_OUT(command->unit__hostid));
@@ -429,7 +429,7 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
/* Additional sense code qualifier */
tw_dev->srb[request_id]->sense_buffer[13] = tw_sense_table[i][3];
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
return TW_ISR_DONT_RESULT; /* Special case for isr to not over-write result */
}
}
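
(DID_OK << 16) | (CHECK_CONDITION << 1) only produced the right result byte because the legacy CHECK_CONDITION constant (0x01) happens to double to the SAM status value; SAM_STAT_CHECK_CONDITION (0x02) states it directly. A standalone check of the equivalence:

#include <assert.h>

/* Legacy constant from the old <scsi/scsi.h>, shown for illustration. */
#define CHECK_CONDITION			0x01
#define SAM_STAT_CHECK_CONDITION	0x02

int main(void)
{
	assert((CHECK_CONDITION << 1) == SAM_STAT_CHECK_CONDITION);
	return 0;
}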
@@ -443,10 +443,10 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
} /* End tw_decode_sense() */
/* This function will report controller error status */
-static int tw_check_errors(TW_Device_Extension *tw_dev)
+static int tw_check_errors(TW_Device_Extension *tw_dev)
{
u32 status_reg_value;
-
+
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
if (TW_STATUS_ERRORS(status_reg_value) || tw_check_bits(status_reg_value)) {
@@ -458,14 +458,14 @@ static int tw_check_errors(TW_Device_Extension *tw_dev)
} /* End tw_check_errors() */
/* This function will empty the response queue */
-static void tw_empty_response_que(TW_Device_Extension *tw_dev)
+static void tw_empty_response_que(TW_Device_Extension *tw_dev)
{
- u32 status_reg_value, response_que_value;
+ u32 status_reg_value;
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
- response_que_value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
}
} /* End tw_empty_response_que() */
@@ -525,20 +525,22 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
/* Create sysfs 'stats' entry */
static struct device_attribute tw_host_stats_attr = {
.attr = {
- .name = "stats",
+ .name = "stats",
.mode = S_IRUGO,
},
.show = tw_show_stats
};
/* Host attributes initializer */
-static struct device_attribute *tw_host_attrs[] = {
- &tw_host_stats_attr,
+static struct attribute *tw_host_attrs[] = {
+ &tw_host_stats_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(tw_host);
+
/* This function will read the aen queue from the isr */
-static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
TW_Command *command_packet;
TW_Param *param;
@@ -604,7 +606,7 @@ static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
} /* End tw_aen_read_queue() */
/* This function will complete an aen request from the isr */
-static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
TW_Param *param;
unsigned short aen;
@@ -628,7 +630,7 @@ static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
if ((tw_aen_string[aen & 0xff][strlen(tw_aen_string[aen & 0xff])-1]) == '#') {
printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s%d.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff], aen >> 8);
} else {
- if (aen != 0x0)
+ if (aen != 0x0)
printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff]);
}
} else {
@@ -746,7 +748,7 @@ static int tw_aen_drain_queue(TW_Device_Extension *tw_dev)
printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Unexpected request id.\n");
return 1;
}
-
+
if (command_packet->status != 0) {
if (command_packet->flags != TW_AEN_TABLE_UNDEFINED) {
/* Bad response */
@@ -908,9 +910,9 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
/* Hardware can only do multiple of 512 byte transfers */
data_buffer_length_adjusted = (data_buffer_length + 511) & ~511;
-
+
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
retval = -ENOMEM;
goto out;
@@ -919,7 +921,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
tw_ioctl = (TW_New_Ioctl *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl) - 1))
+ if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl)))
goto out2;
passthru = (TW_Passthru *)&tw_ioctl->firmware_command;
@@ -964,15 +966,15 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
/* Load the sg list */
switch (TW_SGL_OUT(tw_ioctl->firmware_command.opcode__sgloffset)) {
case 2:
- tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.param.sgl[0].length = data_buffer_length_adjusted;
break;
case 3:
- tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.io.sgl[0].length = data_buffer_length_adjusted;
break;
case 5:
- passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl);
passthru->sg_list[0].length = data_buffer_length_adjusted;
break;
}
@@ -1015,12 +1017,12 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
}
/* Now copy the response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length - 1))
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length))
goto out2;
retval = 0;
out2:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), cpu_addr, dma_handle);
out:
mutex_unlock(&tw_dev->ioctl_lock);
mutex_unlock(&tw_mutex);
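
Every `- 1` dropped in this hunk traces back to TW_New_Ioctl ending in data_buffer[1]: the single declared byte doubled as the first payload byte, so buffer sizes and DMA offsets all carried a one-byte correction. Assuming the matching header change converts it to a flexible array (as done for TW_Ioctl_Buf_Apache above), the arithmetic simplifies to:

/* Sketch of the size math once data_buffer[] is a flexible array: */
size_t total = sizeof(TW_New_Ioctl) + data_buffer_length_adjusted;
dma_addr_t payload = dma_handle + sizeof(TW_New_Ioctl);	/* first data byte */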
@@ -1075,7 +1077,7 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
} /* End tw_free_device_extension() */
/* This function will send an initconnection command to controller */
-static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
+static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
{
unsigned long command_que_value;
TW_Command *command_packet;
@@ -1105,10 +1107,10 @@ static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Bad command packet physical address.\n");
return 1;
}
-
+
/* Send command packet to the board */
outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
-
+
/* Poll for completion */
if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) {
response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
@@ -1130,7 +1132,7 @@ static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
/* Set a value in the features table */
static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
- unsigned char *val)
+ unsigned char *val)
{
TW_Param *param;
TW_Command *command_packet;
@@ -1139,7 +1141,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
unsigned long command_que_value;
unsigned long param_value;
- /* Initialize SetParam command packet */
+ /* Initialize SetParam command packet */
if (tw_dev->command_packet_virtual_address[request_id] == NULL) {
printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet virtual address.\n");
return 1;
@@ -1160,7 +1162,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->srb[request_id]->result = (DID_OK << 16);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
}
command_packet->byte8.param.sgl[0].address = param_value;
command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
@@ -1169,7 +1171,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
command_packet->request_id = request_id;
command_packet->byte6.parameter_count = 1;
- command_que_value = tw_dev->command_packet_physical_address[request_id];
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
if (command_que_value == 0) {
printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
return 1;
@@ -1199,7 +1201,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
} /* End tw_setfeature() */
/* This function will reset a controller */
-static int tw_reset_sequence(TW_Device_Extension *tw_dev)
+static int tw_reset_sequence(TW_Device_Extension *tw_dev)
{
int error = 0;
int tries = 0;
@@ -1298,14 +1300,14 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
/* Abort all requests that are in progress */
for (i=0;i<TW_Q_LENGTH;i++) {
- if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
(tw_dev->state[i] != TW_S_INITIAL) &&
(tw_dev->state[i] != TW_S_COMPLETED)) {
srb = tw_dev->srb[i];
if (srb != NULL) {
srb->result = (DID_RESET << 16);
scsi_dma_unmap(srb);
- srb->scsi_done(srb);
+ scsi_done(srb);
}
}
}
@@ -1339,13 +1341,11 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
/* This function returns unit geometry in cylinders/heads/sectors */
static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int geom[])
+ sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
- TW_Device_Extension *tw_dev;
-
+
dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n");
- tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
heads = 64;
sectors = 32;
@@ -1358,7 +1358,7 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
}
dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam(): heads = %d, sectors = %d, cylinders = %d\n", heads, sectors, cylinders);
- geom[0] = heads;
+ geom[0] = heads;
geom[1] = sectors;
geom[2] = cylinders;
@@ -1366,7 +1366,7 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
} /* End tw_scsi_biosparam() */
/* This is the new scsi eh reset function */
-static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt)
+static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
TW_Device_Extension *tw_dev=NULL;
int retval = FAILED;
@@ -1507,7 +1507,7 @@ static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id)
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->srb[request_id]->result = (DID_OK << 16);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
return 0;
}
@@ -1554,7 +1554,7 @@ static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id)
/* Now try to post the command packet */
tw_post_command_packet(tw_dev, request_id);
-
+
return 0;
} /* End tw_scsiop_mode_sense() */
@@ -1575,16 +1575,16 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
flags = (char *)&(param->data[0]);
memset(request_buffer, 0, sizeof(request_buffer));
- request_buffer[0] = 0xf; /* mode data length */
- request_buffer[1] = 0; /* default medium type */
- request_buffer[2] = 0x10; /* dpo/fua support on */
- request_buffer[3] = 0; /* no block descriptors */
- request_buffer[4] = 0x8; /* caching page */
- request_buffer[5] = 0xa; /* page length */
+ request_buffer[0] = 0xf; /* mode data length */
+ request_buffer[1] = 0; /* default medium type */
+ request_buffer[2] = 0x10; /* dpo/fua support on */
+ request_buffer[3] = 0; /* no block descriptors */
+ request_buffer[4] = 0x8; /* caching page */
+ request_buffer[5] = 0xa; /* page length */
if (*flags & 0x1)
- request_buffer[6] = 0x5; /* WCE on, RCD on */
+ request_buffer[6] = 0x5; /* WCE on, RCD on */
else
- request_buffer[6] = 0x1; /* WCE off, RCD on */
+ request_buffer[6] = 0x1; /* WCE off, RCD on */
tw_transfer_internal(tw_dev, request_id, request_buffer,
sizeof(request_buffer));
@@ -1592,7 +1592,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
} /* End tw_scsiop_mode_sense_complete() */
/* This function handles scsi read_capacity commands */
-static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
+static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
{
TW_Param *param;
TW_Command *command_packet;
@@ -1624,8 +1624,8 @@ static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
}
param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
memset(param, 0, sizeof(TW_Sector));
- param->table_id = TW_UNIT_INFORMATION_TABLE_BASE +
- tw_dev->srb[request_id]->device->id;
+ param->table_id = TW_UNIT_INFORMATION_TABLE_BASE +
+ tw_dev->srb[request_id]->device->id;
param->parameter_id = 4; /* unitcapacity parameter */
param->parameter_size_bytes = 4;
param_value = tw_dev->alignment_physical_address[request_id];
@@ -1633,7 +1633,7 @@ static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad alignment physical address.\n");
return 1;
}
-
+
command_packet->byte8.param.sgl[0].address = param_value;
command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
command_que_value = tw_dev->command_packet_physical_address[request_id];
@@ -1644,7 +1644,7 @@ static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
/* Now try to post the command to the board */
tw_post_command_packet(tw_dev, request_id);
-
+
return 0;
} /* End tw_scsiop_read_capacity() */
@@ -1666,7 +1666,7 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
}
param_data = &(param->data[0]);
- capacity = (param_data[3] << 24) | (param_data[2] << 16) |
+ capacity = (param_data[3] << 24) | (param_data[2] << 16) |
(param_data[1] << 8) | param_data[0];
/* Subtract one sector to fix get last sector ioctl */
@@ -1692,7 +1692,7 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
} /* End tw_scsiop_read_capacity_complete() */
/* This function handles scsi read or write commands */
-static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
+static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
{
TW_Command *command_packet;
unsigned long command_que_value;
@@ -1742,12 +1742,12 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
lba = ((u32)srb->cmnd[2] << 24) | ((u32)srb->cmnd[3] << 16) | ((u32)srb->cmnd[4] << 8) | (u32)srb->cmnd[5];
num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
}
-
+
/* Update sector statistic */
tw_dev->sector_count = num_sectors;
if (tw_dev->sector_count > tw_dev->max_sector_count)
tw_dev->max_sector_count = tw_dev->sector_count;
-
+
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): lba = 0x%x num_sectors = 0x%x\n", lba, num_sectors);
command_packet->byte8.io.lba = lba;
command_packet->byte6.block_count = num_sectors;
@@ -1772,7 +1772,7 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
dprintk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Bad command packet physical address.\n");
return 1;
}
-
+
/* Now try to post the command to the board */
tw_post_command_packet(tw_dev, request_id);
@@ -1798,7 +1798,7 @@ static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id)
/* If we got a request_sense, we probably want a reset, return error */
tw_dev->srb[request_id]->result = (DID_ERROR << 16);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
return 0;
} /* End tw_scsiop_request_sense() */
@@ -1920,8 +1920,9 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r
} /* End tw_scsiop_test_unit_ready_complete() */
/* This is the main scsi queue function to handle scsi opcodes */
-static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
unsigned char *command = SCpnt->cmnd;
int request_id = 0;
int retval = 1;
@@ -1931,9 +1932,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
if (test_bit(TW_IN_RESET, &tw_dev->flags))
return SCSI_MLQUEUE_HOST_BUSY;
- /* Save done function into struct scsi_cmnd */
- SCpnt->scsi_done = done;
-
/* Queue the command and get a request id */
tw_state_request_start(tw_dev, &request_id);
@@ -1941,48 +1939,47 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
tw_dev->srb[request_id] = SCpnt;
switch (*command) {
- case READ_10:
- case READ_6:
- case WRITE_10:
- case WRITE_6:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ/WRITE.\n");
- retval = tw_scsiop_read_write(tw_dev, request_id);
- break;
- case TEST_UNIT_READY:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught TEST_UNIT_READY.\n");
- retval = tw_scsiop_test_unit_ready(tw_dev, request_id);
- break;
- case INQUIRY:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught INQUIRY.\n");
- retval = tw_scsiop_inquiry(tw_dev, request_id);
- break;
- case READ_CAPACITY:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ_CAPACITY.\n");
- retval = tw_scsiop_read_capacity(tw_dev, request_id);
- break;
- case REQUEST_SENSE:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught REQUEST_SENSE.\n");
- retval = tw_scsiop_request_sense(tw_dev, request_id);
- break;
- case MODE_SENSE:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught MODE_SENSE.\n");
- retval = tw_scsiop_mode_sense(tw_dev, request_id);
- break;
- case SYNCHRONIZE_CACHE:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught SYNCHRONIZE_CACHE.\n");
- retval = tw_scsiop_synchronize_cache(tw_dev, request_id);
- break;
- case TW_IOCTL:
- printk(KERN_WARNING "3w-xxxx: SCSI_IOCTL_SEND_COMMAND deprecated, please update your 3ware tools.\n");
- break;
- default:
- printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
- tw_dev->state[request_id] = TW_S_COMPLETED;
- tw_state_request_finish(tw_dev, request_id);
- SCpnt->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
- done(SCpnt);
- retval = 0;
+ case READ_10:
+ case READ_6:
+ case WRITE_10:
+ case WRITE_6:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ/WRITE.\n");
+ retval = tw_scsiop_read_write(tw_dev, request_id);
+ break;
+ case TEST_UNIT_READY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught TEST_UNIT_READY.\n");
+ retval = tw_scsiop_test_unit_ready(tw_dev, request_id);
+ break;
+ case INQUIRY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught INQUIRY.\n");
+ retval = tw_scsiop_inquiry(tw_dev, request_id);
+ break;
+ case READ_CAPACITY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ_CAPACITY.\n");
+ retval = tw_scsiop_read_capacity(tw_dev, request_id);
+ break;
+ case REQUEST_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught REQUEST_SENSE.\n");
+ retval = tw_scsiop_request_sense(tw_dev, request_id);
+ break;
+ case MODE_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught MODE_SENSE.\n");
+ retval = tw_scsiop_mode_sense(tw_dev, request_id);
+ break;
+ case SYNCHRONIZE_CACHE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught SYNCHRONIZE_CACHE.\n");
+ retval = tw_scsiop_synchronize_cache(tw_dev, request_id);
+ break;
+ case TW_IOCTL:
+ printk(KERN_WARNING "3w-xxxx: SCSI_IOCTL_SEND_COMMAND deprecated, please update your 3ware tools.\n");
+ break;
+ default:
+ printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ scsi_build_sense(SCpnt, 1, ILLEGAL_REQUEST, 0x20, 0);
+ done(SCpnt);
+ retval = 0;
}
if (retval) {
tw_dev->state[request_id] = TW_S_COMPLETED;
@@ -1997,7 +1994,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
static DEF_SCSI_QCMD(tw_scsi_queue)
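
DEF_SCSI_QCMD still supplies the host-lock wrapper; only the _lck prototype changed. Roughly what the macro generates here (simplified):

static int tw_scsi_queue(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long irq_flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	rc = tw_scsi_queue_lck(cmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return rc;
}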
/* This function is the interrupt service routine */
-static irqreturn_t tw_interrupt(int irq, void *dev_instance)
+static irqreturn_t tw_interrupt(int irq, void *dev_instance)
{
int request_id;
u32 status_reg_value;
@@ -2073,7 +2070,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
}
}
/* If there are no more pending requests, we mask command interrupt */
- if (tw_dev->pending_request_count == 0)
+ if (tw_dev->pending_request_count == 0)
TW_MASK_COMMAND_INTERRUPT(tw_dev);
}
@@ -2162,19 +2159,19 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
}
/* Now complete the io */
if ((error != TW_ISR_DONT_COMPLETE)) {
scsi_dma_unmap(tw_dev->srb[request_id]);
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ scsi_done(tw_dev->srb[request_id]);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->posted_request_count--;
}
}
-
+
/* Check for valid status after each drain */
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
if (tw_check_bits(status_reg_value)) {
@@ -2244,8 +2241,8 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
- .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .shost_attrs = tw_host_attrs,
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .shost_groups = tw_host_groups,
.emulated = 1,
.no_write_same = 1,
};
@@ -2255,7 +2252,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
- int retval = -ENODEV;
+ int retval;
retval = pci_enable_device(pdev);
if (retval) {
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index bd87fbacfbc7..120a087bdf3c 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,9 +1,9 @@
-/*
+/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
-
+
Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
Copyright (C) 1999-2010 3ware Inc.
@@ -15,39 +15,39 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- NO WARRANTY
- THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- solely responsible for determining the appropriateness of using and
- distributing the Program and assumes all risks associated with its
- exercise of rights under this Agreement, including but not limited to
- the risks and costs of program errors, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- DISCLAIMER OF LIABILITY
- NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Bugs/Comments/Suggestions should be mailed to:
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
aradford@gmail.com
-
+
For more information, goto:
http://www.lsi.com
*/
@@ -99,21 +99,21 @@ static char *tw_aen_string[] = {
static unsigned char tw_sense_table[][4] =
{
/* Codes for newer firmware */
- // ATA Error SCSI Error
- {0x01, 0x03, 0x13, 0x00}, // Address mark not found Address mark not found for data field
- {0x04, 0x0b, 0x00, 0x00}, // Aborted command Aborted command
- {0x10, 0x0b, 0x14, 0x00}, // ID not found Recorded entity not found
- {0x40, 0x03, 0x11, 0x00}, // Uncorrectable ECC error Unrecovered read error
- {0x61, 0x04, 0x00, 0x00}, // Device fault Hardware error
- {0x84, 0x0b, 0x47, 0x00}, // Data CRC error SCSI parity error
- {0xd0, 0x0b, 0x00, 0x00}, // Device busy Aborted command
- {0xd1, 0x0b, 0x00, 0x00}, // Device busy Aborted command
- {0x37, 0x02, 0x04, 0x00}, // Unit offline Not ready
- {0x09, 0x02, 0x04, 0x00}, // Unrecovered disk error Not ready
-
- /* Codes for older firmware */
- // 3ware Error SCSI Error
- {0x51, 0x0b, 0x00, 0x00} // Unspecified Aborted command
+ // ATA Error SCSI Error
+ {0x01, 0x03, 0x13, 0x00}, // Address mark not found Address mark not found for data field
+ {0x04, 0x0b, 0x00, 0x00}, // Aborted command Aborted command
+ {0x10, 0x0b, 0x14, 0x00}, // ID not found Recorded entity not found
+ {0x40, 0x03, 0x11, 0x00}, // Uncorrectable ECC error Unrecovered read error
+ {0x61, 0x04, 0x00, 0x00}, // Device fault Hardware error
+ {0x84, 0x0b, 0x47, 0x00}, // Data CRC error SCSI parity error
+ {0xd0, 0x0b, 0x00, 0x00}, // Device busy Aborted command
+ {0xd1, 0x0b, 0x00, 0x00}, // Device busy Aborted command
+ {0x37, 0x02, 0x04, 0x00}, // Unit offline Not ready
+ {0x09, 0x02, 0x04, 0x00}, // Unrecovered disk error Not ready
+
+ /* Codes for older firmware */
+ // 3ware Error SCSI Error
+ {0x51, 0x0b, 0x00, 0x00} // Unspecified Aborted command
};
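Each row maps an ATA error code to a SCSI sense key and additional sense code (columns 2 and 3); column 4 is unused padding. A hedged sketch of how such a table is consulted — the real translation lives in the driver's tw_decode_sense(), which is not part of this hunk:

	/* Sketch: scan for a matching ATA error code and translate it to a
	 * SCSI sense key/ASC pair (illustrative only). */
	static int example_lookup(unsigned char ata_error,
				  unsigned char *key, unsigned char *asc)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(tw_sense_table); i++) {
			if (tw_sense_table[i][0] == ata_error) {
				*key = tw_sense_table[i][1];	/* sense key */
				*asc = tw_sense_table[i][2];	/* additional sense code */
				return 0;
			}
		}
		return -1;	/* no translation; caller falls back to a default */
	}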
/* Control register bit definitions */
@@ -128,9 +128,9 @@ static unsigned char tw_sense_table[][4] =
#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080
#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040
#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020
-#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
-#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
-#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
+#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
+#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
+#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
/* Status register bit definitions */
@@ -152,8 +152,8 @@ static unsigned char tw_sense_table[][4] =
#define TW_STATUS_CLEARABLE_BITS 0x00D00000
#define TW_STATUS_EXPECTED_BITS 0x00002000
#define TW_STATUS_UNEXPECTED_BITS 0x00F00008
-#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
-#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
+#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
/* RESPONSE QUEUE BIT DEFINITIONS */
#define TW_RESPONSE_ID_MASK 0x00000FF0
@@ -179,33 +179,33 @@ static unsigned char tw_sense_table[][4] =
#define TW_OP_SECTOR_INFO 0x1a
#define TW_OP_AEN_LISTEN 0x1c
#define TW_OP_FLUSH_CACHE 0x0e
-#define TW_CMD_PACKET 0x1d
+#define TW_CMD_PACKET 0x1d
#define TW_CMD_PACKET_WITH_DATA 0x1f
/* Asynchronous Event Notification (AEN) Codes */
#define TW_AEN_QUEUE_EMPTY 0x0000
-#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_SOFT_RESET 0x0001
#define TW_AEN_DEGRADED_MIRROR 0x0002
#define TW_AEN_CONTROLLER_ERROR 0x0003
#define TW_AEN_REBUILD_FAIL 0x0004
#define TW_AEN_REBUILD_DONE 0x0005
-#define TW_AEN_QUEUE_FULL 0x00ff
+#define TW_AEN_QUEUE_FULL 0x00ff
#define TW_AEN_TABLE_UNDEFINED 0x15
#define TW_AEN_APORT_TIMEOUT 0x0009
#define TW_AEN_DRIVE_ERROR 0x000A
-#define TW_AEN_SMART_FAIL 0x000F
-#define TW_AEN_SBUF_FAIL 0x0024
+#define TW_AEN_SMART_FAIL 0x000F
+#define TW_AEN_SBUF_FAIL 0x0024
/* Misc defines */
#define TW_ALIGNMENT_6000 64 /* 64 bytes */
-#define TW_ALIGNMENT_7000 4 /* 4 bytes */
+#define TW_ALIGNMENT_7000 4 /* 4 bytes */
#define TW_MAX_UNITS 16
#define TW_COMMAND_ALIGNMENT_MASK 0x1ff
#define TW_INIT_MESSAGE_CREDITS 0x100
#define TW_INIT_COMMAND_PACKET_SIZE 0x3
-#define TW_POLL_MAX_RETRIES 20000
+#define TW_POLL_MAX_RETRIES 20000
#define TW_MAX_SGL_LENGTH 62
-#define TW_ATA_PASS_SGL_MAX 60
+#define TW_ATA_PASS_SGL_MAX 60
#define TW_Q_LENGTH 256
#define TW_Q_START 0
#define TW_MAX_SLOT 32
@@ -216,20 +216,20 @@ static unsigned char tw_sense_table[][4] =
chrdev ioctl, one for
internal aen post */
#define TW_BLOCK_SIZE 0x200 /* 512-byte blocks */
-#define TW_IOCTL 0x80
-#define TW_UNIT_ONLINE 1
-#define TW_IN_INTR 1
-#define TW_IN_RESET 2
-#define TW_IN_CHRDEV_IOCTL 3
-#define TW_MAX_SECTORS 256
+#define TW_IOCTL 0x80
+#define TW_UNIT_ONLINE 1
+#define TW_IN_INTR 1
+#define TW_IN_RESET 2
+#define TW_IN_CHRDEV_IOCTL 3
+#define TW_MAX_SECTORS 256
#define TW_MAX_IOCTL_SECTORS 512
-#define TW_AEN_WAIT_TIME 1000
-#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
-#define TW_ISR_DONT_COMPLETE 2
-#define TW_ISR_DONT_RESULT 3
-#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
-#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
-#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_AEN_WAIT_TIME 1000
+#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
+#define TW_ISR_DONT_COMPLETE 2
+#define TW_ISR_DONT_RESULT 3
+#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
#define TW_MAX_CDB_LEN 16
/* Bitmask macros to eliminate bitfields */
@@ -250,26 +250,35 @@ static unsigned char tw_sense_table[][4] =
#define TW_STATUS_REG_ADDR(x) (x->base_addr + 0x4)
#define TW_COMMAND_QUEUE_REG_ADDR(x) (x->base_addr + 0x8)
#define TW_RESPONSE_QUEUE_REG_ADDR(x) (x->base_addr + 0xC)
-#define TW_CLEAR_ALL_INTERRUPTS(x) (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_ATTENTION_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_HOST_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_DISABLE_INTERRUPTS(x) (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_MASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_UNMASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \
- TW_CONTROL_CLEAR_HOST_INTERRUPT | \
- TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
- TW_CONTROL_MASK_COMMAND_INTERRUPT | \
- TW_CONTROL_MASK_RESPONSE_INTERRUPT | \
- TW_CONTROL_CLEAR_ERROR_STATUS | \
- TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_STATUS_ERRORS(x) \
- (((x & TW_STATUS_PCI_ABORT) || \
- (x & TW_STATUS_PCI_PARITY_ERROR) || \
- (x & TW_STATUS_QUEUE_ERROR) || \
- (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \
- (x & TW_STATUS_MICROCONTROLLER_READY))
+#define TW_CLEAR_ALL_INTERRUPTS(x) \
+ (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_ATTENTION_INTERRUPT(x) \
+ (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_HOST_INTERRUPT(x) \
+ (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_DISABLE_INTERRUPTS(x) \
+ (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) \
+ (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_MASK_COMMAND_INTERRUPT(x) \
+ (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_UNMASK_COMMAND_INTERRUPT(x) \
+ (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \
+ TW_CONTROL_CLEAR_HOST_INTERRUPT | \
+ TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_MASK_COMMAND_INTERRUPT | \
+ TW_CONTROL_MASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_CLEAR_ERROR_STATUS | \
+ TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_STATUS_ERRORS(x) \
+ (((x & TW_STATUS_PCI_ABORT) || \
+ (x & TW_STATUS_PCI_PARITY_ERROR) || \
+ (x & TW_STATUS_QUEUE_ERROR) || \
+ (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \
+ (x & TW_STATUS_MICROCONTROLLER_READY))
#ifdef TW_DEBUG
#define dprintk(msg...) printk(msg)
@@ -339,7 +348,7 @@ typedef struct TAG_TW_New_Ioctl {
unsigned int data_buffer_length;
unsigned char padding [508];
TW_Command firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_New_Ioctl;
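Replacing data_buffer[1] with a C99 flexible array member removes the phantom trailing byte from sizeof(TW_New_Ioctl) and lets callers size allocations with struct_size(). A minimal sketch, assuming a caller that needs len payload bytes (the function name is illustrative):

	#include <linux/overflow.h>	/* struct_size() */
	#include <linux/slab.h>		/* kzalloc() */

	/* Allocate an ioctl packet with room for 'len' payload bytes;
	 * struct_size() guards the header + payload sum against overflow. */
	static TW_New_Ioctl *example_alloc_ioctl(size_t len)
	{
		TW_New_Ioctl *ioctl;

		ioctl = kzalloc(struct_size(ioctl, data_buffer, len), GFP_KERNEL);
		return ioctl;
	}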
/* GetParam descriptor */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 0068963bb933..e1e4f9d10887 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR (or Symbios) 53c700 and 53c700-66 Driver
*
@@ -116,9 +115,9 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
@@ -164,7 +163,7 @@ STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
-STATIC struct device_attribute *NCR_700_dev_attrs[];
+STATIC const struct attribute_group *NCR_700_dev_groups[];
STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
@@ -269,6 +268,27 @@ NCR_700_get_SXFER(struct scsi_device *SDp)
spi_period(SDp->sdev_target));
}
+static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p)
+{
+ return h->pScript + ((uintptr_t)p - (uintptr_t)h->script);
+}
+
+static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
+ void *addr, size_t size)
+{
+ if (h->noncoherent)
+ dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
+ size, DMA_BIDIRECTIONAL);
+}
+
+static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
+ void *addr, size_t size)
+{
+ if (h->noncoherent)
+ dma_sync_single_for_cpu(h->dev, virt_to_dma(h, addr), size,
+ DMA_BIDIRECTIONAL);
+}
+
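These helpers make cache maintenance conditional: on a coherent allocation they are no-ops, on a noncoherent one they transfer buffer ownership between CPU and device (sync for_device before the chip reads what the CPU wrote, for_cpu before the CPU reads what the chip wrote). A short usage sketch mirroring how the rest of the patch brackets one message exchange:

	/* Sketch of one ownership hand-off (illustrative, driver names). */
	static void example_msg_roundtrip(struct NCR_700_Host_Parameters *hostdata)
	{
		hostdata->msgout[0] = A_REJECT_MSG;
		/* CPU wrote msgout: flush it so the chip sees the bytes */
		dma_sync_to_dev(hostdata, hostdata->msgout, 1);

		/* ... the SCRIPTS engine runs and fills msgin via DMA ... */

		/* invalidate before the CPU reads what the chip wrote */
		dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
	}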
struct Scsi_Host *
NCR_700_detect(struct scsi_host_template *tpnt,
struct NCR_700_Host_Parameters *hostdata, struct device *dev)
@@ -280,12 +300,16 @@ NCR_700_detect(struct scsi_host_template *tpnt,
static int banner = 0;
int j;
- if(tpnt->sdev_attrs == NULL)
- tpnt->sdev_attrs = NCR_700_dev_attrs;
+ if (tpnt->sdev_groups == NULL)
+ tpnt->sdev_groups = NCR_700_dev_groups;
- memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
- GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
- if(memory == NULL) {
+ memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
+ if (!memory) {
+ hostdata->noncoherent = 1;
+ memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ }
+ if (!memory) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
return NULL;
}
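Allocation now prefers coherent memory and falls back to a noncoherent mapping plus explicit syncs only when the platform cannot provide it; the release path later in this patch frees with dma_free_noncoherent() or dma_free_coherent() accordingly. Condensed into a sketch (generic device, illustrative name):

	/* Sketch: coherent first, noncoherent as fallback. */
	static void *example_alloc_script(struct device *dev, size_t size,
					  dma_addr_t *handle, bool *noncoherent)
	{
		void *mem = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);

		*noncoherent = false;
		if (!mem) {
			mem = dma_alloc_noncoherent(dev, size, handle,
						    DMA_BIDIRECTIONAL, GFP_KERNEL);
			*noncoherent = true;	/* must sync, and free differently */
		}
		return mem;	/* NULL if both attempts failed */
	}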
@@ -339,11 +363,11 @@ NCR_700_detect(struct scsi_host_template *tpnt,
for (j = 0; j < PATCHES; j++)
script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
/* now patch up fixed addresses. */
- script_patch_32(hostdata->dev, script, MessageLocation,
+ script_patch_32(hostdata, script, MessageLocation,
pScript + MSGOUT_OFFSET);
- script_patch_32(hostdata->dev, script, StatusAddress,
+ script_patch_32(hostdata, script, StatusAddress,
pScript + STATUS_OFFSET);
- script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
+ script_patch_32(hostdata, script, ReceiveMsgAddress,
pScript + MSGIN_OFFSET);
hostdata->script = script;
@@ -395,8 +419,13 @@ NCR_700_release(struct Scsi_Host *host)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
- dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
- hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
+ if (hostdata->noncoherent)
+ dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
+ hostdata->script, hostdata->pScript,
+ DMA_BIDIRECTIONAL);
+ else
+ dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE,
+ hostdata->script, hostdata->pScript);
return 1;
}
@@ -605,7 +634,7 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
SCp->host_scribble = NULL;
SCp->result = result;
- SCp->scsi_done(SCp);
+ scsi_done(SCp);
} else {
printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
}
@@ -804,8 +833,8 @@ process_extended_message(struct Scsi_Host *host,
shost_printk(KERN_WARNING, host,
"Unexpected SDTR msg\n");
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script,
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script,
MessageCount, 1);
/* SendMsgOut returns, so set up the return
* address */
@@ -817,9 +846,8 @@ process_extended_message(struct Scsi_Host *host,
printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
host->host_no, pun, lun);
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script, MessageCount,
- 1);
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script, MessageCount, 1);
resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
break;
@@ -832,9 +860,8 @@ process_extended_message(struct Scsi_Host *host,
printk("\n");
/* just reject it */
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script, MessageCount,
- 1);
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script, MessageCount, 1);
/* SendMsgOut returns, so set up the return
* address */
resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -917,9 +944,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
printk("\n");
/* just reject it */
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script, MessageCount,
- 1);
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script, MessageCount, 1);
/* SendMsgOut returns, so set up the return
* address */
resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -928,7 +954,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
}
NCR_700_writel(temp, host, TEMP_REG);
/* set us up to receive another message */
- dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+ dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
return resume_offset;
}
@@ -952,10 +978,10 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
NCR_700_set_tag_neg_state(SCp->device,
NCR_700_FINISHED_TAG_NEGOTIATION);
-
- /* check for contingent allegiance contitions */
- if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
- status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
+
+ /* check for contingent allegiance conditions */
+ if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION ||
+ hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) {
struct NCR_700_command_slot *slot =
(struct NCR_700_command_slot *)SCp->host_scribble;
if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
@@ -1008,8 +1034,8 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
slot->SG[1].pAddr = 0;
slot->resume_offset = hostdata->pScript;
- dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2);
+ dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE);
/* queue the command for reissue */
slot->state = NCR_700_SLOT_QUEUED;
@@ -1129,11 +1155,11 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
hostdata->cmd = slot->cmnd;
/* re-patch for this command */
- script_patch_32_abs(hostdata->dev, hostdata->script,
+ script_patch_32_abs(hostdata, hostdata->script,
CommandAddress, slot->pCmd);
- script_patch_16(hostdata->dev, hostdata->script,
+ script_patch_16(hostdata, hostdata->script,
CommandCount, slot->cmnd->cmd_len);
- script_patch_32_abs(hostdata->dev, hostdata->script,
+ script_patch_32_abs(hostdata, hostdata->script,
SGScriptStartAddress,
to32bit(&slot->pSG[0].ins));
@@ -1144,14 +1170,14 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
* should therefore always clear ACK */
NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
host, SXFER_REG);
- dma_cache_sync(hostdata->dev, hostdata->msgin,
- MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
- dma_cache_sync(hostdata->dev, hostdata->msgout,
- MSG_ARRAY_SIZE, DMA_TO_DEVICE);
+ dma_sync_from_dev(hostdata, hostdata->msgin,
+ MSG_ARRAY_SIZE);
+ dma_sync_to_dev(hostdata, hostdata->msgout,
+ MSG_ARRAY_SIZE);
/* I'm just being paranoid here, the command should
* already have been flushed from the cache */
- dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
- slot->cmnd->cmd_len, DMA_TO_DEVICE);
+ dma_sync_to_dev(hostdata, slot->cmnd->cmnd,
+ slot->cmnd->cmd_len);
@@ -1214,8 +1240,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
hostdata->reselection_id = reselection_id;
/* just in case we have a stale simple tag message, clear it */
hostdata->msgin[1] = 0;
- dma_cache_sync(hostdata->dev, hostdata->msgin,
- MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
+ dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
if(hostdata->tag_negotiated & (1<<reselection_id)) {
resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
} else {
@@ -1329,8 +1354,7 @@ process_selection(struct Scsi_Host *host, __u32 dsp)
hostdata->cmd = NULL;
/* clear any stale simple tag message */
hostdata->msgin[1] = 0;
- dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
if(id == 0xff) {
/* Selected as target, Ignore */
@@ -1427,30 +1451,26 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
}
- script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
-
+ script_patch_16(hostdata, hostdata->script, MessageCount, count);
- script_patch_ID(hostdata->dev, hostdata->script,
- Device_ID, 1<<scmd_id(SCp));
+ script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp));
- script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
+ script_patch_32_abs(hostdata, hostdata->script, CommandAddress,
slot->pCmd);
- script_patch_16(hostdata->dev, hostdata->script, CommandCount,
- SCp->cmd_len);
+ script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len);
/* finally plumb the beginning of the SG list into the script
* */
- script_patch_32_abs(hostdata->dev, hostdata->script,
+ script_patch_32_abs(hostdata, hostdata->script,
SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
NCR_700_clear_fifo(SCp->device->host);
if(slot->resume_offset == 0)
slot->resume_offset = hostdata->pScript;
/* now perform all the writebacks and invalidates */
- dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
- DMA_FROM_DEVICE);
- dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
+ dma_sync_to_dev(hostdata, hostdata->msgout, count);
+ dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
+ dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len);
+ dma_sync_from_dev(hostdata, hostdata->status, 1);
/* set the synchronous period/offset */
NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1485,11 +1505,8 @@ NCR_700_intr(int irq, void *dev_id)
__u8 sstat0 = 0, dstat = 0;
__u32 dsp;
struct scsi_cmnd *SCp = hostdata->cmd;
- enum NCR_700_Host_State state;
handled = 1;
- state = hostdata->state;
- SCp = hostdata->cmd;
if(istat & SCSI_INT_PENDING) {
udelay(10);
@@ -1553,7 +1570,7 @@ NCR_700_intr(int irq, void *dev_id)
* deadlock on the
* hostdata->state_lock */
SCp->result = DID_RESET << 16;
- SCp->scsi_done(SCp);
+ scsi_done(SCp);
}
mdelay(25);
NCR_700_chip_setup(host);
@@ -1626,7 +1643,7 @@ NCR_700_intr(int irq, void *dev_id)
slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
slot->SG[i].pAddr = 0;
}
- dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+ dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
/* and pretend we disconnected after
* the command phase */
resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1733,13 +1750,11 @@ NCR_700_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int
-NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
+static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
__u32 move_ins;
- enum dma_data_direction direction;
struct NCR_700_command_slot *slot;
if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
@@ -1775,10 +1790,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
slot->cmnd = SCp;
- SCp->scsi_done = done;
SCp->host_scribble = (unsigned char *)slot;
- SCp->SCp.ptr = NULL;
- SCp->SCp.buffer = NULL;
#ifdef NCR_700_DEBUG
printk("53c700: scsi%d, command ", SCp->device->host->host_no);
@@ -1806,7 +1818,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
SCp->device->simple_tags) {
- slot->tag = SCp->request->tag;
+ slot->tag = scsi_cmd_to_rq(SCp)->tag;
CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
slot->tag, slot);
} else {
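SCp->request gives way to the scsi_cmd_to_rq() accessor, which hides how the midlayer embeds a scsi_cmnd inside a block request. A tiny hedged sketch of the accessor in use:

	/* Sketch: derive the block-layer tag through the accessor instead
	 * of touching SCp->request directly. */
	static int example_tag(struct scsi_cmnd *SCp)
	{
		struct request *rq = scsi_cmd_to_rq(SCp);

		return rq->tag;	/* same value the hunk above stores in slot->tag */
	}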
@@ -1832,7 +1844,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
case REQUEST_SENSE:
/* clear the internal sense magic */
SCp->cmnd[6] = 0;
- /* fall through */
+ fallthrough;
default:
/* OK, get it from the command */
switch(SCp->sc_data_direction) {
@@ -1856,7 +1868,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
}
/* now build the scatter gather list */
- direction = SCp->sc_data_direction;
if(move_ins != 0) {
int i;
int sg_count;
@@ -1878,7 +1889,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
}
slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
slot->SG[i].pAddr = 0;
- dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+ dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
DEBUG((" SETTING %p to %x\n",
(&slot->pSG[i].ins),
slot->SG[i].ins));
@@ -2071,11 +2082,13 @@ static struct device_attribute NCR_700_active_tags_attr = {
.show = NCR_700_show_active_tags,
};
-STATIC struct device_attribute *NCR_700_dev_attrs[] = {
- &NCR_700_active_tags_attr,
+STATIC struct attribute *NCR_700_dev_attrs[] = {
+ &NCR_700_active_tags_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(NCR_700_dev);
+
EXPORT_SYMBOL(NCR_700_detect);
EXPORT_SYMBOL(NCR_700_release);
EXPORT_SYMBOL(NCR_700_intr);
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 05fe439b66af..2df347ca91af 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* Driver for 53c700 and 53c700-66 chips from NCR and Symbios
*
@@ -209,6 +208,7 @@ struct NCR_700_Host_Parameters {
#endif
__u32 chip710:1; /* set if really a 710 not 700 */
__u32 burst_length:4; /* set to 0 to disable 710 bursting */
+ __u32 noncoherent:1; /* needs to use non-coherent DMA */
/* NOTHING BELOW HERE NEEDS ALTERING */
__u32 fast:1; /* if we can alter the SCSI bus clock
@@ -422,33 +422,33 @@ struct NCR_700_Host_Parameters {
#define NCR_710_MIN_XFERP 0
#define NCR_700_MIN_PERIOD 25 /* for SDTR message, 100ns */
-#define script_patch_32(dev, script, symbol, value) \
+#define script_patch_32(h, script, symbol, value) \
{ \
int i; \
dma_addr_t da = value; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
__u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching %s at %d to %pad\n", \
#symbol, A_##symbol##_used[i], &da)); \
} \
}
-#define script_patch_32_abs(dev, script, symbol, value) \
+#define script_patch_32_abs(h, script, symbol, value) \
{ \
int i; \
dma_addr_t da = value; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
(script)[A_##symbol##_used[i]] = bS_to_host(da); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching %s at %d to %pad\n", \
#symbol, A_##symbol##_used[i], &da)); \
} \
}
/* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(dev, script, symbol, value) \
+#define script_patch_ID(h, script, symbol, value) \
{ \
int i; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -456,13 +456,13 @@ struct NCR_700_Host_Parameters {
val &= 0xff00ffff; \
val |= ((value) & 0xff) << 16; \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
#symbol, A_##symbol##_used[i], val)); \
} \
}
-#define script_patch_16(dev, script, symbol, value) \
+#define script_patch_16(h, script, symbol, value) \
{ \
int i; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -470,7 +470,7 @@ struct NCR_700_Host_Parameters {
val &= 0xffff0000; \
val |= ((value) & 0xffff); \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching short field %s at %d to 0x%x\n", \
#symbol, A_##symbol##_used[i], val)); \
} \
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 3170b295a5da..f2abffce2659 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -36,6 +36,7 @@
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/msdos_partition.h>
#include <scsi/scsicam.h>
#include <asm/dma.h>
@@ -561,60 +562,6 @@ done:
/*
- blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
- of I/O Address and Bus Probe Information to be checked for potential BusLogic
- Host Adapters.
-*/
-
-static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
-{
- struct blogic_probeinfo *probeinfo;
- if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
- return;
- probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
- probeinfo->adapter_type = BLOGIC_MULTIMASTER;
- probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
- probeinfo->io_addr = io_addr;
- probeinfo->pci_device = NULL;
-}
-
-
-/*
- blogic_init_probeinfo_isa initializes the list of I/O Address and
- Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
- only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
-*/
-
-static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
-{
- /*
- If BusLogic Driver Options specifications requested that ISA
- Bus Probes be inhibited, do not proceed further.
- */
- if (blogic_probe_options.noprobe_isa)
- return;
- /*
- Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
- */
- if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
- blogic_add_probeaddr_isa(0x330);
- if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
- blogic_add_probeaddr_isa(0x334);
- if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
- blogic_add_probeaddr_isa(0x230);
- if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
- blogic_add_probeaddr_isa(0x234);
- if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
- blogic_add_probeaddr_isa(0x130);
- if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
- blogic_add_probeaddr_isa(0x134);
-}
-
-
-#ifdef CONFIG_PCI
-
-
-/*
blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
of increasing PCI Bus and Device Number.
*/
@@ -666,14 +613,11 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
int nonpr_mmcount = 0, mmcount = 0;
bool force_scan_order = false;
bool force_scan_order_checked = false;
- bool addr_seen[6];
struct pci_dev *pci_device = NULL;
int i;
if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
return 0;
blogic_probeinfo_count++;
- for (i = 0; i < 6; i++)
- addr_seen[i] = false;
/*
Iterate over the MultiMaster PCI Host Adapters. For each
enumerated host adapter, determine whether its ISA Compatible
@@ -743,11 +687,8 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
host_adapter->io_addr = io_addr;
blogic_intreset(host_adapter);
if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
- &adapter_info, sizeof(adapter_info)) ==
- sizeof(adapter_info)) {
- if (adapter_info.isa_port < 6)
- addr_seen[adapter_info.isa_port] = true;
- } else
+ &adapter_info, sizeof(adapter_info)) !=
+ sizeof(adapter_info))
adapter_info.isa_port = BLOGIC_IO_DISABLE;
/*
Issue the Modify I/O Address command to disable the
@@ -835,45 +776,6 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
nonpr_mmcount);
/*
- If no PCI MultiMaster Host Adapter is assigned the Primary
- I/O Address, then the Primary I/O Address must be probed
- explicitly before any PCI host adapters are probed.
- */
- if (!blogic_probe_options.noprobe_isa)
- if (pr_probeinfo->io_addr == 0 &&
- (!blogic_probe_options.limited_isa ||
- blogic_probe_options.probe330)) {
- pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
- pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
- pr_probeinfo->io_addr = 0x330;
- }
- /*
- Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
- omitting the Primary I/O Address which has already been handled.
- */
- if (!blogic_probe_options.noprobe_isa) {
- if (!addr_seen[1] &&
- (!blogic_probe_options.limited_isa ||
- blogic_probe_options.probe334))
- blogic_add_probeaddr_isa(0x334);
- if (!addr_seen[2] &&
- (!blogic_probe_options.limited_isa ||
- blogic_probe_options.probe230))
- blogic_add_probeaddr_isa(0x230);
- if (!addr_seen[3] &&
- (!blogic_probe_options.limited_isa ||
- blogic_probe_options.probe234))
- blogic_add_probeaddr_isa(0x234);
- if (!addr_seen[4] &&
- (!blogic_probe_options.limited_isa ||
- blogic_probe_options.probe130))
- blogic_add_probeaddr_isa(0x130);
- if (!addr_seen[5] &&
- (!blogic_probe_options.limited_isa ||
- blogic_probe_options.probe134))
- blogic_add_probeaddr_isa(0x134);
- }
- /*
Iterate over the older non-compliant MultiMaster PCI Host Adapters,
noting the PCI bus location and assigned IRQ Channel.
*/
@@ -1077,18 +979,10 @@ static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter)
}
}
}
- } else {
- blogic_init_probeinfo_isa(adapter);
}
}
-#else
-#define blogic_init_probeinfo_list(adapter) \
- blogic_init_probeinfo_isa(adapter)
-#endif /* CONFIG_PCI */
-
-
/*
blogic_failure prints a standardized error message, and then returns false.
*/
@@ -1538,14 +1432,6 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
else if (config.irq_ch15)
adapter->irq_ch = 15;
}
- if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) {
- if (config.dma_ch5)
- adapter->dma_ch = 5;
- else if (config.dma_ch6)
- adapter->dma_ch = 6;
- else if (config.dma_ch7)
- adapter->dma_ch = 7;
- }
/*
Determine whether Extended Translation is enabled and save it in
the Host Adapter structure.
@@ -1685,8 +1571,7 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
if (adapter->fw_ver[0] == '5')
adapter->adapter_qdepth = 192;
else if (adapter->fw_ver[0] == '4')
- adapter->adapter_qdepth = (adapter->adapter_bus_type !=
- BLOGIC_ISA_BUS ? 100 : 50);
+ adapter->adapter_qdepth = 100;
else
adapter->adapter_qdepth = 30;
if (strcmp(adapter->fw_ver, "3.31") >= 0) {
@@ -1727,25 +1612,16 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
*/
adapter->bios_addr = ext_setupinfo.bios_addr << 12;
/*
- ISA Host Adapters require Bounce Buffers if there is more than
- 16MB memory.
- */
- if (adapter->adapter_bus_type == BLOGIC_ISA_BUS &&
- (void *) high_memory > (void *) MAX_DMA_ADDRESS)
- adapter->need_bouncebuf = true;
- /*
BusLogic BT-445S Host Adapters prior to board revision E have a
hardware bug whereby when the BIOS is enabled, transfers to/from
the same address range the BIOS occupies modulo 16MB are handled
incorrectly. Only properly functioning BT-445S Host Adapters
- have firmware version 3.37, so require that ISA Bounce Buffers
- be used for the buggy BT-445S models if there is more than 16MB
- memory.
+ have firmware version 3.37.
*/
- if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 &&
- strcmp(adapter->fw_ver, "3.37") < 0 &&
- (void *) high_memory > (void *) MAX_DMA_ADDRESS)
- adapter->need_bouncebuf = true;
+ if (adapter->bios_addr > 0 &&
+ strcmp(adapter->model, "BT-445S") == 0 &&
+ strcmp(adapter->fw_ver, "3.37") < 0)
+ return blogic_failure(adapter, "Too old firmware");
/*
Initialize parameters common to MultiMaster and FlashPoint
Host Adapters.
@@ -1768,14 +1644,9 @@ common:
if (adapter->drvr_opts != NULL &&
adapter->drvr_opts->qdepth[tgt_id] > 0)
qdepth = adapter->drvr_opts->qdepth[tgt_id];
- else if (adapter->need_bouncebuf)
- qdepth = BLOGIC_TAG_DEPTH_BB;
adapter->qdepth[tgt_id] = qdepth;
}
- if (adapter->need_bouncebuf)
- adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB;
- else
- adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
+ adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
if (adapter->drvr_opts != NULL)
adapter->common_qdepth = adapter->drvr_opts->common_qdepth;
if (adapter->common_qdepth > 0 &&
@@ -1838,13 +1709,9 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
- blogic_info(" DMA Channel: ", adapter);
- if (adapter->dma_ch > 0)
- blogic_info("%d, ", adapter, adapter->dma_ch);
- else
- blogic_info("None, ", adapter);
+ blogic_info(" DMA Channel: None, ", adapter);
if (adapter->bios_addr > 0)
- blogic_info("BIOS Address: 0x%lX, ", adapter,
+ blogic_info("BIOS Address: 0x%X, ", adapter,
adapter->bios_addr);
else
blogic_info("BIOS Address: None, ", adapter);
@@ -1995,18 +1862,6 @@ static bool __init blogic_getres(struct blogic_adapter *adapter)
}
adapter->irq_acquired = true;
/*
- Acquire exclusive access to the DMA Channel.
- */
- if (adapter->dma_ch > 0) {
- if (request_dma(adapter->dma_ch, adapter->full_model) < 0) {
- blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch);
- return false;
- }
- set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE);
- enable_dma(adapter->dma_ch);
- adapter->dma_chan_acquired = true;
- }
- /*
Indicate the System Resource Acquisition completed successfully,
*/
return true;
@@ -2026,11 +1881,6 @@ static void blogic_relres(struct blogic_adapter *adapter)
if (adapter->irq_acquired)
free_irq(adapter->irq_ch, adapter);
/*
- Release exclusive access to the DMA Channel.
- */
- if (adapter->dma_chan_acquired)
- free_dma(adapter->dma_ch);
- /*
Release any allocated memory structs not released elsewhere
*/
if (adapter->mbox_space)
@@ -2236,7 +2086,7 @@ static bool __init blogic_inquiry(struct blogic_adapter *adapter)
"INQUIRE INSTALLED DEVICES ID 0 TO 7");
for (tgt_id = 0; tgt_id < 8; tgt_id++)
adapter->tgt_flags[tgt_id].tgt_exists =
- (installed_devs0to7[tgt_id] != 0 ? true : false);
+ installed_devs0to7[tgt_id] != 0;
}
/*
Issue the Inquire Setup Information command.
@@ -2298,7 +2148,6 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
host->this_id = adapter->scsi_id;
host->can_queue = adapter->drvr_qdepth;
host->sg_tablesize = adapter->drvr_sglimit;
- host->unchecked_isa_dma = adapter->need_bouncebuf;
host->cmd_per_lun = adapter->untag_qdepth;
}
@@ -2634,7 +2483,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
case BLOGIC_BAD_CMD_PARAM:
blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
adapter, adapter_status);
- /* fall through */
+ fallthrough;
case BLOGIC_DATA_UNDERRUN:
case BLOGIC_DATA_OVERRUN:
case BLOGIC_NOEXPECT_BUSFREE:
@@ -2666,12 +2515,26 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
return (hoststatus << 16) | tgt_status;
}
+/*
+ * Turn the DMA address from an inbox into a CCB pointer.
+ * This is rather inefficient.
+ */
+static struct blogic_ccb *
+blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox)
+{
+ struct blogic_ccb *ccb;
+
+ for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all)
+ if (inbox->ccb == ccb->dma_handle)
+ break;
+
+ return ccb;
+}
/*
blogic_scan_inbox scans the Incoming Mailboxes saving any
Incoming Mailbox entries for completion processing.
*/
-
static void blogic_scan_inbox(struct blogic_adapter *adapter)
{
/*
@@ -2691,17 +2554,14 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
enum blogic_cmplt_code comp_code;
while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
- /*
- We are only allowed to do this because we limit our
- architectures we run on to machines where bus_to_virt(
- actually works. There *needs* to be a dma_addr_to_virt()
- in the new PCI DMA mapping interface to replace
- bus_to_virt() or else this code is going to become very
- innefficient.
- */
- struct blogic_ccb *ccb =
- (struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
- if (comp_code != BLOGIC_CMD_NOTFOUND) {
+ struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox);
+ if (!ccb) {
+ /*
+ * This should never happen, unless the CCB list is
+ * corrupted in memory.
+ */
+ blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb);
+ } else if (comp_code != BLOGIC_CMD_NOTFOUND) {
if (ccb->status == BLOGIC_CCB_ACTIVE ||
ccb->status == BLOGIC_CCB_RESET) {
/*
@@ -2775,7 +2635,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
command->reset_chain;
command->reset_chain = NULL;
command->result = DID_RESET << 16;
- command->scsi_done(command);
+ scsi_done(command);
command = nxt_cmd;
}
#endif
@@ -2792,7 +2652,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
blogic_dealloc_ccb(ccb, 1);
adapter->active_cmds[tgt_id]--;
command->result = DID_RESET << 16;
- command->scsi_done(command);
+ scsi_done(command);
}
adapter->bdr_pend[tgt_id] = NULL;
} else {
@@ -2864,7 +2724,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Call the SCSI Command Completion Routine.
*/
- command->scsi_done(command);
+ scsi_done(command);
}
}
adapter->processing_ccbs = false;
@@ -3017,9 +2877,9 @@ static int blogic_hostreset(struct scsi_cmnd *SCpnt)
Outgoing Mailbox for execution by the associated Host Adapter.
*/
-static int blogic_qcmd_lck(struct scsi_cmnd *command,
- void (*comp_cb) (struct scsi_cmnd *))
+static int blogic_qcmd_lck(struct scsi_cmnd *command)
{
+ void (*comp_cb)(struct scsi_cmnd *) = scsi_done;
struct blogic_adapter *adapter =
(struct blogic_adapter *) command->device->host->hostdata;
struct blogic_tgt_flags *tgt_flags =
@@ -3077,11 +2937,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
ccb->datalen = count * sizeof(struct blogic_sg_seg);
if (blogic_multimaster_type(adapter))
- ccb->data = (void *)((unsigned int) ccb->dma_handle +
+ ccb->data = (unsigned int) ccb->dma_handle +
((unsigned long) &ccb->sglist -
- (unsigned long) ccb));
+ (unsigned long) ccb);
else
- ccb->data = ccb->sglist;
+ ccb->data = virt_to_32bit_virt(ccb->sglist);
scsi_for_each_sg(command, sg, count, i) {
ccb->sglist[i].segbytes = sg_dma_len(sg);
@@ -3189,7 +3049,6 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
return SCSI_MLQUEUE_HOST_BUSY;
}
ccb->sensedata = sense_buf;
- command->scsi_done = comp_cb;
if (blogic_multimaster_type(adapter)) {
/*
Place the CCB in an Outgoing Mailbox. The higher levels
@@ -3211,7 +3070,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
- command->scsi_done(command);
+ scsi_done(command);
}
}
} else {
@@ -3410,9 +3269,10 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
a partition table entry whose end_head matches one of the
standard BusLogic geometry translations (64/32, 128/32, or 255/63).
*/
- if (*(unsigned short *) (buf + 64) == 0xAA55) {
- struct partition *part1_entry = (struct partition *) buf;
- struct partition *part_entry = part1_entry;
+ if (*(unsigned short *) (buf + 64) == MSDOS_LABEL_MAGIC) {
+ struct msdos_partition *part1_entry =
+ (struct msdos_partition *)buf;
+ struct msdos_partition *part_entry = part1_entry;
int saved_cyl = diskparam->cylinders, part_no;
unsigned char part_end_head = 0, part_end_sector = 0;
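struct partition is replaced by struct msdos_partition from <linux/msdos_partition.h>, and the 0xAA55 literal by MSDOS_LABEL_MAGIC. The surrounding heuristic infers a BIOS geometry from a partition entry's ending head; condensed into a sketch that matches the 64/32, 128/32, and 255/63 translations named in the comment above (illustrative helper, not driver code):

	/* Sketch: map a partition entry's end_head (0-based) to a geometry. */
	static bool example_geometry(const struct msdos_partition *p,
				     int *heads, int *sectors)
	{
		switch (p->end_head) {
		case 63:	*heads = 64;  *sectors = 32; return true;
		case 127:	*heads = 128; *sectors = 32; return true;
		case 254:	*heads = 255; *sectors = 63; return true;
		}
		return false;	/* fall back to the default translation */
	}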
@@ -3576,7 +3436,7 @@ Target Requested Completed Requested Completed Requested Completed\n\
/*
blogic_msg prints Driver Messages.
*/
-
+__printf(2, 4)
static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
struct blogic_adapter *adapter, ...)
{
@@ -3586,7 +3446,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
int len = 0;
va_start(args, adapter);
- len = vsprintf(buf, fmt, args);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (msglevel == BLOGIC_ANNOUNCE_LEVEL) {
static int msglines = 0;
@@ -3601,7 +3461,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
if (buf[0] != '\n' || len > 1)
printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
} else
- printk("%s", buf);
+ pr_cont("%s", buf);
} else {
if (begin) {
if (adapter != NULL && adapter->adapter_initd)
@@ -3609,7 +3469,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
else
printk("%s%s", blogic_msglevelmap[msglevel], buf);
} else
- printk("%s", buf);
+ pr_cont("%s", buf);
}
begin = (buf[len - 1] == '\n');
}
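vsprintf() writes unbounded into the stack buffer; vscnprintf() bounds the write and returns the number of characters actually stored (excluding the NUL), so the buf[len - 1] newline test above keeps working even when output is truncated. A minimal sketch of the bounded pattern:

	/* Sketch: 'written' can never exceed outsz - 1, and the buffer is
	 * always NUL-terminated. */
	static int example_fmt(char *out, size_t outsz, const char *fmt, ...)
	{
		va_list args;
		int written;

		va_start(args, fmt);
		written = vscnprintf(out, outsz, fmt, args);
		va_end(args);
		return written;
	}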
@@ -3652,7 +3512,7 @@ static bool __init blogic_parse(char **str, char *keyword)
selected host adapter.
The BusLogic Driver Probing Options are described in
- <file:Documentation/scsi/BusLogic.txt>.
+ <file:Documentation/scsi/BusLogic.rst>.
*/
static int __init blogic_parseopts(char *options)
@@ -3664,37 +3524,7 @@ static int __init blogic_parseopts(char *options)
memset(drvr_opts, 0, sizeof(struct blogic_drvr_options));
while (*options != '\0' && *options != ';') {
- /* Probing Options. */
- if (blogic_parse(&options, "IO:")) {
- unsigned long io_addr = simple_strtoul(options,
- &options, 0);
- blogic_probe_options.limited_isa = true;
- switch (io_addr) {
- case 0x330:
- blogic_probe_options.probe330 = true;
- break;
- case 0x334:
- blogic_probe_options.probe334 = true;
- break;
- case 0x230:
- blogic_probe_options.probe230 = true;
- break;
- case 0x234:
- blogic_probe_options.probe234 = true;
- break;
- case 0x130:
- blogic_probe_options.probe130 = true;
- break;
- case 0x134:
- blogic_probe_options.probe134 = true;
- break;
- default:
- blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
- return 0;
- }
- } else if (blogic_parse(&options, "NoProbeISA"))
- blogic_probe_options.noprobe_isa = true;
- else if (blogic_parse(&options, "NoProbePCI"))
+ if (blogic_parse(&options, "NoProbePCI"))
blogic_probe_options.noprobe_pci = true;
else if (blogic_parse(&options, "NoProbe"))
blogic_probe_options.noprobe = true;
@@ -3849,7 +3679,6 @@ static struct scsi_host_template blogic_template = {
#if 0
.eh_abort_handler = blogic_abort,
#endif
- .unchecked_isa_dma = 1,
.max_sectors = 128,
};
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 6182cc8a0344..7d1ec10f2430 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -237,18 +237,10 @@ struct blogic_probeinfo {
struct blogic_probe_options {
bool noprobe:1; /* Bit 0 */
- bool noprobe_isa:1; /* Bit 1 */
bool noprobe_pci:1; /* Bit 2 */
bool nosort_pci:1; /* Bit 3 */
bool multimaster_first:1; /* Bit 4 */
bool flashpoint_first:1; /* Bit 5 */
- bool limited_isa:1; /* Bit 6 */
- bool probe330:1; /* Bit 7 */
- bool probe334:1; /* Bit 8 */
- bool probe230:1; /* Bit 9 */
- bool probe234:1; /* Bit 10 */
- bool probe130:1; /* Bit 11 */
- bool probe134:1; /* Bit 12 */
};
/*
@@ -814,7 +806,7 @@ struct blogic_ccb {
unsigned char cdblen; /* Byte 2 */
unsigned char sense_datalen; /* Byte 3 */
u32 datalen; /* Bytes 4-7 */
- void *data; /* Bytes 8-11 */
+ u32 data; /* Bytes 8-11 */
unsigned char:8; /* Byte 12 */
unsigned char:8; /* Byte 13 */
enum blogic_adapter_status adapter_status; /* Byte 14 */
@@ -997,10 +989,8 @@ struct blogic_adapter {
unsigned char bus;
unsigned char dev;
unsigned char irq_ch;
- unsigned char dma_ch;
unsigned char scsi_id;
bool irq_acquired:1;
- bool dma_chan_acquired:1;
bool ext_trans_enable:1;
bool parity:1;
bool reset_enabled:1;
@@ -1013,7 +1003,6 @@ struct blogic_adapter {
bool terminfo_valid:1;
bool low_term:1;
bool high_term:1;
- bool need_bouncebuf:1;
bool strict_rr:1;
bool scam_enabled:1;
bool scam_lev2:1;
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 0f17bd51088a..3d9c56ac8224 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -40,7 +40,7 @@ struct sccb_mgr_info {
u16 si_per_targ_ultra_nego;
u16 si_per_targ_no_disc;
u16 si_per_targ_wide_nego;
- u16 si_flags;
+ u16 si_mflags;
unsigned char si_card_family;
unsigned char si_bustype;
unsigned char si_card_model[3];
@@ -304,40 +304,12 @@ typedef struct SCCBscam_info {
} SCCBSCAM_INFO;
-#define SCSI_REQUEST_SENSE 0x03
-#define SCSI_READ 0x08
-#define SCSI_WRITE 0x0A
-#define SCSI_START_STOP_UNIT 0x1B
-#define SCSI_READ_EXTENDED 0x28
-#define SCSI_WRITE_EXTENDED 0x2A
-#define SCSI_WRITE_AND_VERIFY 0x2E
-
-#define SSGOOD 0x00
-#define SSCHECK 0x02
-#define SSQ_FULL 0x28
-
-#define SMCMD_COMP 0x00
-#define SMEXT 0x01
-#define SMSAVE_DATA_PTR 0x02
-#define SMREST_DATA_PTR 0x03
-#define SMDISC 0x04
-#define SMABORT 0x06
-#define SMREJECT 0x07
-#define SMNO_OP 0x08
-#define SMPARITY 0x09
-#define SMDEV_RESET 0x0C
-#define SMABORT_TAG 0x0D
-#define SMINIT_RECOVERY 0x0F
-#define SMREL_RECOVERY 0x10
#define SMIDENT 0x80
#define DISC_PRIV 0x40
-#define SMSYNC 0x01
-#define SMWDTR 0x03
#define SM8BIT 0x00
#define SM16BIT 0x01
-#define SMIGNORWR 0x23 /* Ignore Wide Residue */
#define SIX_BYTE_CMD 0x06
#define TWELVE_BYTE_CMD 0x0C
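The driver-private SM* message codes are deleted in favor of the standard definitions from <scsi/scsi.h> and <scsi/scsi_proto.h>, and the rest of this diff rewrites each use accordingly. The correspondence those rewrites assume (values per the SCSI messaging spec) can be tabulated as:

	/* Old private name -> standard definition (same value). */
	static const struct {
		unsigned char val;
		const char *old_name, *new_name;
	} sm_msg_map[] = {
		{ 0x00, "SMCMD_COMP",      "COMMAND_COMPLETE" },
		{ 0x01, "SMEXT",           "EXTENDED_MESSAGE" },
		{ 0x02, "SMSAVE_DATA_PTR", "SAVE_POINTERS" },
		{ 0x03, "SMREST_DATA_PTR", "RESTORE_POINTERS" },
		{ 0x04, "SMDISC",          "DISCONNECT" },
		{ 0x06, "SMABORT",         "ABORT_TASK_SET" },
		{ 0x07, "SMREJECT",        "MESSAGE_REJECT" },
		{ 0x08, "SMNO_OP",         "NOP" },
		{ 0x09, "SMPARITY",        "MSG_PARITY_ERROR" },
		{ 0x0c, "SMDEV_RESET",     "TARGET_RESET" },
		{ 0x0d, "SMABORT_TAG",     "ABORT_TASK" },
		{ 0x0f, "SMINIT_RECOVERY", "INITIATE_RECOVERY" },
		{ 0x10, "SMREL_RECOVERY",  "RELEASE_RECOVERY" },
		{ 0x23, "SMIGNORWR",       "IGNORE_WIDE_RESIDUE" },
		{ 0x01, "SMSYNC",          "EXTENDED_SDTR (extended-message code)" },
		{ 0x03, "SMWDTR",          "EXTENDED_WDTR (extended-message code)" },
	};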
@@ -1034,11 +1006,14 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
temp6 >>= 1;
switch (temp & 0x3) {
case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */
- temp6 |= 0x8000; /* Fall through */
+ temp6 |= 0x8000;
+ fallthrough;
case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */
- temp5 |= 0x8000; /* Fall through */
+ temp5 |= 0x8000;
+ fallthrough;
case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */
- temp2 |= 0x8000; /* Fall through */
+ temp2 |= 0x8000;
+ fallthrough;
case AUTO_RATE_00: /* Asynchronous */
break;
}
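The /* Fall through */ comments become the fallthrough pseudo-keyword (mapped to __attribute__((__fallthrough__)) on compilers that support it), which -Wimplicit-fallthrough can then verify. A self-contained sketch of the construct, loosely modeled on the cascading capability bits above:

	/* Sketch: fallthrough marks each intentional case fall-through. */
	static int example_rate_bits(int rate)
	{
		int bits = 0;

		switch (rate) {
		case 20:
			bits |= 0x8000;
			fallthrough;	/* 20 MT/s implies the slower rates too */
		case 10:
			bits |= 0x4000;
			fallthrough;
		case 5:
			bits |= 0x2000;
			break;
		default:
			break;	/* asynchronous: no rate bits */
		}
		return bits;
	}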
@@ -1070,22 +1045,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
ScamFlg =
(unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
- pCardInfo->si_flags = 0x0000;
+ pCardInfo->si_mflags = 0x0000;
if (i & 0x01)
- pCardInfo->si_flags |= SCSI_PARITY_ENA;
+ pCardInfo->si_mflags |= SCSI_PARITY_ENA;
if (!(i & 0x02))
- pCardInfo->si_flags |= SOFT_RESET;
+ pCardInfo->si_mflags |= SOFT_RESET;
if (i & 0x10)
- pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+ pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
if (ScamFlg & SCAM_ENABLED)
- pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+ pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
if (ScamFlg & SCAM_LEVEL2)
- pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+ pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
if (i & 0x04) {
@@ -1101,7 +1076,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
- pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+ pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
pCardInfo->si_card_family = HARPOON_FAMILY;
pCardInfo->si_bustype = BUSTYPE_PCI;
@@ -1137,15 +1112,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
if (pCardInfo->si_card_model[1] == '3') {
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
} else if (pCardInfo->si_card_model[2] == '0') {
temp = RD_HARPOON(ioport + hp_xfer_pad);
WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
WR_HARPOON(ioport + hp_xfer_pad, temp);
} else {
temp = RD_HARPOON(ioport + hp_ee_ctrl);
@@ -1163,9 +1138,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
WR_HARPOON(ioport + hp_ee_ctrl, temp);
WR_HARPOON(ioport + hp_xfer_pad, temp2);
if (!(temp3 & BIT(7)))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
if (!(temp3 & BIT(6)))
- pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
}
ARAM_ACCESS(ioport);
@@ -1272,7 +1247,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
CurrCard->ourId = pCardInfo->si_id;
- i = (unsigned char)pCardInfo->si_flags;
+ i = (unsigned char)pCardInfo->si_mflags;
if (i & SCSI_PARITY_ENA)
WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
@@ -1286,14 +1261,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
j |= SCSI_TERM_ENA_H;
WR_HARPOON(ioport + hp_ee_ctrl, j);
- if (!(pCardInfo->si_flags & SOFT_RESET)) {
+ if (!(pCardInfo->si_mflags & SOFT_RESET)) {
FPT_sresb(ioport, thisCard);
FPT_scini(thisCard, pCardInfo->si_id, 0);
}
- if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+ if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
CurrCard->globalFlags |= F_NO_FILTER;
if (pCurrNvRam) {
@@ -1612,7 +1587,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
unsigned char thisCard;
CALL_BK_FN callback;
- unsigned char TID;
struct sccb *pSaveSCCB;
struct sccb_mgr_tar_info *currTar_Info;
@@ -1649,9 +1623,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
}
else {
-
- TID = p_Sccb->TargID;
-
if (p_Sccb->Sccb_tag) {
MDISABLE_INT(ioport);
if (((struct sccb_card *)pCurrCard)->
@@ -1661,7 +1632,7 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
p_Sccb->Sccb_scsistat =
ABORT_ST;
p_Sccb->Sccb_scsimsg =
- SMABORT_TAG;
+ ABORT_TASK;
if (((struct sccb_card *)
pCurrCard)->currentSCCB ==
@@ -1741,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
- unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1752,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
- if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
@@ -1813,7 +1784,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
FPT_phaseChkFifo(ioport, thisCard);
if (RD_HARPOON(ioport + hp_gp_reg_1) ==
- SMSAVE_DATA_PTR) {
+ SAVE_POINTERS) {
WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
currSCCB->Sccb_XferState |= F_NO_DATA_YET;
@@ -1866,7 +1837,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
FPT_phaseChkFifo(ioport, thisCard);
if (RD_HARPOON(ioport + hp_gp_reg_1) ==
- SMSAVE_DATA_PTR) {
+ SAVE_POINTERS) {
WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
currSCCB->Sccb_XferState |=
F_NO_DATA_YET;
@@ -2259,7 +2230,7 @@ static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB)
WR_HARPOON(port + hp_fiforead, 0);
WR_HARPOON(port + hp_fifowrite, 0);
if (pCurrSCCB != NULL) {
- pCurrSCCB->Sccb_scsimsg = SMPARITY;
+ pCurrSCCB->Sccb_scsimsg = MSG_PARITY_ERROR;
}
message = 0x00;
do {
@@ -2412,7 +2383,7 @@ static void FPT_ssel(u32 port, unsigned char p_card)
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP);
- currSCCB->Sccb_scsimsg = SMDEV_RESET;
+ currSCCB->Sccb_scsimsg = TARGET_RESET;
WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
auto_loaded = 1;
@@ -2759,9 +2730,9 @@ static void FPT_sres(u32 port, unsigned char p_card,
if (message == 0) {
msgRetryCount++;
if (msgRetryCount == 1) {
- FPT_SendMsg(port, SMPARITY);
+ FPT_SendMsg(port, MSG_PARITY_ERROR);
} else {
- FPT_SendMsg(port, SMDEV_RESET);
+ FPT_SendMsg(port, TARGET_RESET);
FPT_sssyncv(port, our_target, NARROW_SCSI,
currTar_Info);
@@ -2861,8 +2832,8 @@ static void FPT_SendMsg(u32 port, unsigned char message)
WR_HARPOON(port + hp_portctrl_0, 0x00);
- if ((message == SMABORT) || (message == SMDEV_RESET) ||
- (message == SMABORT_TAG)) {
+ if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+ (message == ABORT_TASK)) {
while (!
(RDW_HARPOON((port + hp_intstat)) &
(BUS_FREE | PHASE))) {
@@ -2894,7 +2865,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
- if (message == SMREST_DATA_PTR) {
+ if (message == RESTORE_POINTERS) {
if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) {
currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
@@ -2906,7 +2877,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
(AUTO_IMMED + DISCONNECT_START));
}
- else if (message == SMCMD_COMP) {
+ else if (message == COMMAND_COMPLETE) {
if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
currTar_Info->TarStatus &=
@@ -2918,15 +2889,16 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
- else if ((message == SMNO_OP) || (message >= SMIDENT)
- || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY)) {
+ else if ((message == NOP) || (message >= IDENTIFY_BASE) ||
+ (message == INITIATE_RECOVERY) ||
+ (message == RELEASE_RECOVERY)) {
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
- else if (message == SMREJECT) {
+ else if (message == MESSAGE_REJECT) {
if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
(currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
@@ -3027,19 +2999,19 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
}
- else if (message == SMEXT) {
+ else if (message == EXTENDED_MESSAGE) {
ACCEPT_MSG(port);
FPT_shandem(port, p_card, currSCCB);
}
- else if (message == SMIGNORWR) {
+ else if (message == IGNORE_WIDE_RESIDUE) {
ACCEPT_MSG(port); /* ACK the RESIDUE MSG */
message = FPT_sfm(port, currSCCB);
- if (currSCCB->Sccb_scsimsg != SMPARITY)
+ if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
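The renames running through these hunks retire FlashPoint's private SM* message bytes in favor of the standard names. As a reference sketch of the mapping, with the message byte values as defined by SCSI-2 (cf. include/scsi/scsi.h; this is not the driver's own header):

    #define COMMAND_COMPLETE    0x00    /* was SMCMD_COMP */
    #define EXTENDED_MESSAGE    0x01    /* was SMEXT */
    #define SAVE_POINTERS       0x02    /* was SMSAVE_DATA_PTR */
    #define RESTORE_POINTERS    0x03    /* was SMREST_DATA_PTR */
    #define DISCONNECT          0x04    /* was SMDISC */
    #define ABORT_TASK_SET      0x06    /* was SMABORT */
    #define MESSAGE_REJECT      0x07    /* was SMREJECT */
    #define NOP                 0x08    /* was SMNO_OP */
    #define MSG_PARITY_ERROR    0x09    /* was SMPARITY */
    #define TARGET_RESET        0x0c    /* was SMDEV_RESET */
    #define ABORT_TASK          0x0d    /* was SMABORT_TAG */
    #define INITIATE_RECOVERY   0x0f    /* was SMINIT_RECOVERY, SCSI-2 only */
    #define RELEASE_RECOVERY    0x10    /* was SMREL_RECOVERY, SCSI-2 only */
    #define IGNORE_WIDE_RESIDUE 0x23    /* was SMIGNORWR */
    #define IDENTIFY_BASE       0x80    /* was SMIDENT */
    /* Sub-codes carried in byte 2 of an EXTENDED_MESSAGE: */
    #define EXTENDED_SDTR       0x01    /* was SMSYNC */
    #define EXTENDED_WDTR       0x03    /* was SMWDTR */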
@@ -3048,7 +3020,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
else {
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
- currSCCB->Sccb_scsimsg = SMREJECT;
+ currSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
@@ -3074,7 +3046,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
message = FPT_sfm(port, pCurrSCCB);
if (message) {
- if (message == SMSYNC) {
+ if (message == EXTENDED_SDTR) {
if (length == 0x03) {
@@ -3082,10 +3054,10 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
FPT_stsyncn(port, p_card);
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
}
- } else if (message == SMWDTR) {
+ } else if (message == EXTENDED_WDTR) {
if (length == 0x02) {
@@ -3093,7 +3065,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
FPT_stwidn(port, p_card);
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
@@ -3102,20 +3074,20 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
}
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
} else {
- if (pCurrSCCB->Sccb_scsimsg != SMPARITY)
+ if (pCurrSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
} else {
- if (pCurrSCCB->Sccb_scsimsg == SMPARITY)
+ if (pCurrSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
@@ -3149,10 +3121,10 @@ static unsigned char FPT_sisyncn(u32 port, unsigned char p_card,
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
WRW_HARPOON((port + SYNC_MSGS + 0),
- (MPM_OP + AMSG_OUT + SMEXT));
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
WRW_HARPOON((port + SYNC_MSGS + 4),
- (MPM_OP + AMSG_OUT + SMSYNC));
+ (MPM_OP + AMSG_OUT + EXTENDED_SDTR));
if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
@@ -3222,7 +3194,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
sync_msg = FPT_sfm(port, currSCCB);
- if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3232,7 +3204,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
offset = FPT_sfm(port, currSCCB);
- if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3344,9 +3316,11 @@ static void FPT_sisyncr(u32 port, unsigned char sync_pulse,
unsigned char offset)
{
ARAM_ACCESS(port);
- WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
- WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMSYNC));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + EXTENDED_SDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse));
WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset));
@@ -3389,10 +3363,10 @@ static unsigned char FPT_siwidn(u32 port, unsigned char p_card)
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
WRW_HARPOON((port + SYNC_MSGS + 0),
- (MPM_OP + AMSG_OUT + SMEXT));
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
WRW_HARPOON((port + SYNC_MSGS + 4),
- (MPM_OP + AMSG_OUT + SMWDTR));
+ (MPM_OP + AMSG_OUT + EXTENDED_WDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 8),
(MPM_OP + AMSG_OUT + SM16BIT));
@@ -3437,7 +3411,7 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
width = FPT_sfm(port, currSCCB);
- if ((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((width == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3500,9 +3474,11 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
static void FPT_siwidr(u32 port, unsigned char width)
{
ARAM_ACCESS(port);
- WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
- WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMWDTR));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + EXTENDED_WDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width));
WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
@@ -3683,7 +3659,7 @@ static void FPT_ssenss(struct sccb_card *pCurrCard)
}
currSCCB->CdbLength = SIX_BYTE_CMD;
- currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
+ currSCCB->Cdb[0] = REQUEST_SENSE;
currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */
currSCCB->Cdb[2] = 0x00;
currSCCB->Cdb[3] = 0x00;
@@ -3940,13 +3916,9 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
*/
if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
(currTar_Info->TarStatus & TAG_Q_TRYING)) {
- p_sccb->Sccb_idmsg =
- (unsigned char)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
- }
-
- else {
-
- p_sccb->Sccb_idmsg = (unsigned char)SMIDENT | p_sccb->Lun;
+ p_sccb->Sccb_idmsg = IDENTIFY(true, p_sccb->Lun);
+ } else {
+ p_sccb->Sccb_idmsg = IDENTIFY(false, p_sccb->Lun);
}
p_sccb->HostStatus = 0x00;
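FPT_sinits() now builds its identify byte with the generic IDENTIFY() helper instead of open-coding SMIDENT | DISC_PRIV | lun. A sketch of what that helper presumably expands to (cf. include/scsi/scsi.h; 0x40 is the disconnect-privilege bit that DISC_PRIV used to supply):

    #define IDENTIFY_BASE 0x80
    #define IDENTIFY(can_disconnect, lun) \
            (IDENTIFY_BASE | ((can_disconnect) ? 0x40 : 0) | ((lun) & 0x07))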
@@ -3963,7 +3935,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
*/
p_sccb->Sccb_scsistat = BUS_FREE_ST;
p_sccb->SccbStatus = SCCB_IN_PROCESS;
- p_sccb->Sccb_scsimsg = SMNO_OP;
+ p_sccb->Sccb_scsimsg = NOP;
}
@@ -4168,7 +4140,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
message = currSCCB->Sccb_scsimsg;
scsiID = currSCCB->TargID;
- if (message == SMDEV_RESET) {
+ if (message == TARGET_RESET) {
currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
currTar_Info->TarSyncCtrl = 0;
@@ -4204,7 +4176,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
else if (currSCCB->Sccb_scsistat < COMMAND_ST) {
- if (message == SMNO_OP) {
+ if (message == NOP) {
currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
FPT_ssel(port, p_card);
@@ -4212,13 +4184,13 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
}
} else {
- if (message == SMABORT)
+ if (message == ABORT_TASK_SET)
FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
}
} else {
- message = SMABORT;
+ message = ABORT_TASK_SET;
}
WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
@@ -4233,8 +4205,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
WR_HARPOON(port + hp_portctrl_0, 0x00);
- if ((message == SMABORT) || (message == SMDEV_RESET) ||
- (message == SMABORT_TAG)) {
+ if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+ (message == ABORT_TASK)) {
while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
}
@@ -4276,8 +4248,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
else {
- if (message == SMPARITY) {
- currSCCB->Sccb_scsimsg = SMNO_OP;
+ if (message == MSG_PARITY_ERROR) {
+ currSCCB->Sccb_scsimsg = NOP;
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
} else {
@@ -4307,7 +4279,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
}
message = RD_HARPOON(port + hp_scsidata_0);
- if ((message == SMDISC) || (message == SMSAVE_DATA_PTR)) {
+ if ((message == DISCONNECT) || (message == SAVE_POINTERS)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + END_DATA_START));
@@ -4322,7 +4294,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
FPT_sdecm(message, port, p_card);
} else {
- if (currSCCB->Sccb_scsimsg != SMPARITY)
+ if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
@@ -4352,7 +4324,7 @@ static void FPT_phaseIllegal(u32 port, unsigned char p_card)
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
currSCCB->Sccb_scsistat = ABORT_ST;
- currSCCB->Sccb_scsimsg = SMABORT;
+ currSCCB->Sccb_scsimsg = ABORT_TASK_SET;
}
ACCEPT_MSG_ATN(port);
@@ -4531,7 +4503,7 @@ static void FPT_phaseBusFree(u32 port, unsigned char p_card)
*
* Function: Auto Load Default Map
*
- * Description: Load the Automation RAM with the defualt map values.
+ * Description: Load the Automation RAM with the default map values.
*
*---------------------------------------------------------------------*/
static void FPT_autoLoadDefaultMap(u32 p_port)
@@ -4651,9 +4623,9 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0;
- if (status_byte != SSGOOD) {
+ if (status_byte != SAM_STAT_GOOD) {
- if (status_byte == SSQ_FULL) {
+ if (status_byte == SAM_STAT_TASK_SET_FULL) {
if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
@@ -4785,7 +4757,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
}
- if (status_byte == SSCHECK) {
+ if (status_byte == SAM_STAT_CHECK_CONDITION) {
if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) {
if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
TarEEValue & EE_SYNC_MASK) {
@@ -4807,7 +4779,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
currSCCB->SccbStatus = SCCB_ERROR;
currSCCB->TargetStatus = status_byte;
- if (status_byte == SSCHECK) {
+ if (status_byte == SAM_STAT_CHECK_CONDITION) {
FPT_sccbMgrTbl[p_card][currSCCB->TargID].
TarLUN_CA = 1;
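The status-byte checks above switch from the driver's SS* spellings to the SAM status codes used tree-wide. A reference sketch of the values involved (cf. include/scsi/scsi_proto.h, not the FlashPoint header):

    #define SAM_STAT_GOOD            0x00    /* was SSGOOD */
    #define SAM_STAT_CHECK_CONDITION 0x02    /* was SSCHECK */
    #define SAM_STAT_TASK_SET_FULL   0x28    /* was SSQ_FULL */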
@@ -6869,14 +6841,14 @@ static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
if ((p_sccb->
ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN))
&& (p_sccb->HostStatus == SCCB_COMPLETE)
- && (p_sccb->TargetStatus != SSCHECK))
-
- if ((SCSIcmd == SCSI_READ) ||
- (SCSIcmd == SCSI_WRITE) ||
- (SCSIcmd == SCSI_READ_EXTENDED) ||
- (SCSIcmd == SCSI_WRITE_EXTENDED) ||
- (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
- (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ && (p_sccb->TargetStatus != SAM_STAT_CHECK_CONDITION))
+
+ if ((SCSIcmd == READ_6) ||
+ (SCSIcmd == WRITE_6) ||
+ (SCSIcmd == READ_10) ||
+ (SCSIcmd == WRITE_10) ||
+ (SCSIcmd == WRITE_VERIFY) ||
+ (SCSIcmd == START_STOP) ||
(pCurrCard->globalFlags & F_NO_FILTER)
)
p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
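Likewise, the data-underrun filter now tests the shared CDB opcode names rather than FlashPoint's private SCSI_* spellings. For reference, a sketch of the opcode values (per SPC/SBC, cf. include/scsi/scsi_proto.h):

    #define READ_6       0x08    /* was SCSI_READ */
    #define WRITE_6      0x0a    /* was SCSI_WRITE */
    #define START_STOP   0x1b    /* was SCSI_START_STOP_UNIT */
    #define READ_10      0x28    /* was SCSI_READ_EXTENDED */
    #define WRITE_10     0x2a    /* was SCSI_WRITE_EXTENDED */
    #define WRITE_VERIFY 0x2e    /* was SCSI_WRITE_AND_VERIFY */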
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a7881f8eb05e..03e71e3d5e5b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -2,25 +2,30 @@
menu "SCSI device support"
config SCSI_MOD
- tristate
- default y if SCSI=n || SCSI=y
- default m if SCSI=m
+ tristate
+ default y if SCSI=n || SCSI=y
+ default m if SCSI=m
+ depends on BLOCK
config RAID_ATTRS
tristate "RAID Transport Class"
default n
depends on BLOCK
depends on SCSI_MOD
- ---help---
+ help
Provides RAID
+config SCSI_COMMON
+ tristate
+
config SCSI
tristate "SCSI device support"
depends on BLOCK
select SCSI_DMA if HAS_DMA
select SG_POOL
- select BLK_SCSI_REQUEST
- ---help---
+ select SCSI_COMMON
+ select BLK_DEV_BSG_COMMON if BLK_DEV_BSG
+ help
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
the name of your SCSI host adapter (the card inside your computer
@@ -33,7 +38,7 @@ config SCSI
Channel, and FireWire storage.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called scsi_mod.
However, do not compile this as a module if your root file system
@@ -55,7 +60,7 @@ config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
depends on SCSI && PROC_FS
default y
- ---help---
+ help
This option enables support for the various files in
/proc/scsi. In Linux 2.6 this has been superseded by
files in sysfs but many legacy applications rely on this.
@@ -69,7 +74,7 @@ config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
- ---help---
+ help
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
USB storage or the SCSI or parallel port version of
@@ -79,7 +84,7 @@ config BLK_DEV_SD
CD-ROMs.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called sd_mod.
Do not compile this driver as a module if your root file system
@@ -90,21 +95,21 @@ config BLK_DEV_SD
config CHR_DEV_ST
tristate "SCSI tape support"
depends on SCSI
- ---help---
+ help
If you want to use a SCSI tape drive under Linux, say Y and read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, and
- <file:Documentation/scsi/st.txt> in the kernel source. This is NOT
+ <file:Documentation/scsi/st.rst> in the kernel source. This is NOT
for SCSI CD-ROMs.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called st.
+ <file:Documentation/scsi/scsi.rst>. The module will be called st.
config BLK_DEV_SR
tristate "SCSI CDROM support"
depends on SCSI && BLK_DEV
select CDROM
- ---help---
+ help
If you want to use a CD or DVD drive attached to your computer
by SCSI, FireWire, USB or ATAPI, say Y and read the SCSI-HOWTO
and the CDROM-HOWTO at <http://www.tldp.org/docs.html#howto>.
@@ -112,22 +117,13 @@ config BLK_DEV_SR
Make sure to say Y or M to "ISO 9660 CD-ROM file system support".
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called sr_mod.
-config BLK_DEV_SR_VENDOR
- bool "Enable vendor-specific extensions (for SCSI CDROM)"
- depends on BLK_DEV_SR
- help
- This enables the usage of vendor specific SCSI commands. This is
- required to support multisession CDs with old NEC/TOSHIBA cdrom
- drives (and HP Writers). If you have such a drive and get the first
- session only, try saying Y here; everybody else says N.
-
config CHR_DEV_SG
tristate "SCSI generic support"
depends on SCSI
- ---help---
+ help
If you want to use SCSI scanners, synthesizers or CD-writers or just
about anything having "SCSI" in its name other than hard disks,
CD-ROMs or tapes, say Y here. These won't be supported by the kernel
@@ -136,34 +132,46 @@ config CHR_DEV_SG
For scanners, look at SANE (<http://www.sane-project.org/>). For CD
writer software look at Cdrtools
- (<http://cdrecord.berlios.de/private/cdrecord.html>)
+ (<http://cdrtools.sourceforge.net/>)
and for burning a "disk at once": CDRDAO
(<http://cdrdao.sourceforge.net/>). Cdparanoia is a high
quality digital reader of audio CDs (<http://www.xiph.org/paranoia/>).
For other devices, it's possible that you'll have to write the
driver software yourself. Please read the file
- <file:Documentation/scsi/scsi-generic.txt> for more information.
+ <file:Documentation/scsi/scsi-generic.rst> for more information.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called sg.
+ <file:Documentation/scsi/scsi.rst>. The module will be called sg.
If unsure, say N.
+config BLK_DEV_BSG
+ bool "/dev/bsg support (SG v4)"
+ depends on SCSI
+ default y
+ help
+ Saying Y here will enable generic SG (SCSI generic) v4 support
+ for any SCSI device.
+
+ This option is required by UDEV to access device serial numbers, etc.
+
+ If unsure, say Y.
+
config CHR_DEV_SCH
tristate "SCSI media changer support"
depends on SCSI
- ---help---
+ help
This is a driver for SCSI media changers. Most common devices are
tape libraries and MOD/CDROM jukeboxes. *Real* jukeboxes, you
don't need this for those tiny 6-slot cdrom changers. Media
changers are listed as "Type: Medium Changer" in /proc/scsi/scsi.
If you have such hardware and want to use it with linux, say Y
- here. Check <file:Documentation/scsi/scsi-changer.txt> for details.
+ here. Check <file:Documentation/scsi/scsi-changer.rst> for details.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.rst> and
- <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
+ <file:Documentation/scsi/scsi.rst>. The module will be called ch.o.
If unsure, say N.
config SCSI_ENCLOSURE
@@ -187,7 +195,7 @@ config SCSI_CONSTANTS
config SCSI_LOGGING
bool "SCSI logging facility"
depends on SCSI
- ---help---
+ help
This turns on a logging facility that can be used to debug a number
of SCSI related problems.
@@ -320,7 +328,7 @@ source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_HAS_WD93 && SCSI
- help
+ help
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.
@@ -385,14 +393,14 @@ config SCSI_AHA152X
depends on ISA && SCSI
select SCSI_SPI_ATTRS
select CHECK_SIGNATURE
- ---help---
+ help
This is a driver for the AHA-1510, AHA-1520, AHA-1522, and AHA-2825
SCSI host adapters. It also works for the AVA-1505, but the IRQ etc.
must be manually specified in this case.
It is explained in section 3.3 of the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. You might also want to
- read the file <file:Documentation/scsi/aha152x.txt>.
+ read the file <file:Documentation/scsi/aha152x.rst>.
To compile this driver as a module, choose M here: the
module will be called aha152x.
@@ -400,7 +408,7 @@ config SCSI_AHA152X
config SCSI_AHA1542
tristate "Adaptec AHA1542 support"
depends on ISA && SCSI && ISA_DMA_API
- ---help---
+ help
This is support for a SCSI host adapter. It is explained in section
3.4 of the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. Note that Trantor was
@@ -414,7 +422,7 @@ config SCSI_AHA1542
config SCSI_AHA1740
tristate "Adaptec AHA1740 support"
depends on EISA && SCSI
- ---help---
+ help
This is support for a SCSI host adapter. It is explained in section
3.5 of the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. If it doesn't work out
@@ -430,7 +438,7 @@ config SCSI_AACRAID
help
This driver supports a variety of Dell, HP, Adaptec, IBM and
ICP storage products. For a list of supported products, refer
- to <file:Documentation/scsi/aacraid.txt>.
+ to <file:Documentation/scsi/aacraid.rst>.
To compile this driver as a module, choose M here: the module
will be called aacraid.
@@ -451,17 +459,6 @@ config SCSI_MVUMI
To compile this driver as a module, choose M here: the
module will be called mvumi.
-config SCSI_DPT_I2O
- tristate "Adaptec I2O RAID support "
- depends on SCSI && PCI && VIRT_TO_BUS
- help
- This driver supports all of Adaptec's I2O based RAID controllers as
- well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called dpt_i2o.
-
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
depends on SCSI
@@ -491,8 +488,8 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
-source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
@@ -506,13 +503,13 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
- ---help---
+ depends on PCI && SCSI
+ help
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, and the files
- <file:Documentation/scsi/BusLogic.txt> and
- <file:Documentation/scsi/FlashPoint.txt> for more information.
+ <file:Documentation/scsi/BusLogic.rst> and
+ <file:Documentation/scsi/FlashPoint.rst> for more information.
Note that support for FlashPoint is only available for 32-bit
x86 configurations.
@@ -589,20 +586,20 @@ config LIBFC
tristate "LibFC module"
depends on SCSI_FC_ATTRS
select CRC32
- ---help---
+ help
Fibre Channel library module
config LIBFCOE
tristate "LibFCoE module"
depends on LIBFC
- ---help---
+ help
Library for Fibre Channel over Ethernet module
config FCOE
tristate "FCoE module"
depends on PCI
depends on LIBFCOE
- ---help---
+ help
Fibre Channel over Ethernet module
config FCOE_FNIC
@@ -613,7 +610,7 @@ config FCOE_FNIC
This is support for the Cisco PCI-Express FCoE HBA.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called fnic.
config SCSI_SNIC
@@ -623,7 +620,7 @@ config SCSI_SNIC
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
@@ -678,26 +675,12 @@ config SCSI_FDOMAIN_ISA
To compile this driver as a module, choose M here: the
module will be called fdomain_isa.
-config SCSI_GDTH
- tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on PCI && SCSI
- ---help---
- Formerly called GDT SCSI Disk Array Controller Support.
-
- This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI)
- manufactured by Intel Corporation/ICP vortex GmbH. It is documented
- in the kernel source in <file:drivers/scsi/gdth.c> and
- <file:drivers/scsi/gdth.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called gdth.
-
config SCSI_ISCI
tristate "Intel(R) C600 Series Chipset SAS Controller"
depends on PCI && SCSI
depends on X86
select SCSI_SAS_LIBSAS
- ---help---
+ help
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
@@ -705,7 +688,7 @@ config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI ISA card support"
depends on ISA && SCSI && HAS_IOPORT_MAP
select SCSI_SPI_ATTRS
- ---help---
+ help
This is a driver for old ISA card SCSI controllers based on a
NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
Most boards such as the Trantor T130 fit this category, as do
@@ -717,7 +700,7 @@ config SCSI_GENERIC_NCR5380
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
- ---help---
+ help
This is support for the IBM ServeRAID hardware RAID controllers.
See <http://www.developer.ibm.com/welcome/netfinity/serveraid.html>
and <http://www-947.ibm.com/support/entry/portal/docdisplay?brand=5000008&lndocid=SERV-RAID>
@@ -799,7 +782,7 @@ config SCSI_INIA100
config SCSI_PPA
tristate "IOMEGA parallel port (ppa - older drives)"
depends on SCSI && PARPORT_PC
- ---help---
+ help
This driver supports older versions of IOMEGA's parallel port ZIP
drive (a 100 MB removable media device).
@@ -813,7 +796,7 @@ config SCSI_PPA
newer drives)", below.
For more information about this driver and how to use it you should
- read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ read the file <file:Documentation/scsi/ppa.rst>. You should also read
the SCSI-HOWTO, which is available from
<http://www.tldp.org/docs.html#howto>. If you use this driver,
you will still be able to use the parallel port for other tasks,
@@ -826,7 +809,7 @@ config SCSI_PPA
config SCSI_IMM
tristate "IOMEGA parallel port (imm - newer drives)"
depends on SCSI && PARPORT_PC
- ---help---
+ help
This driver supports newer versions of IOMEGA's parallel port ZIP
drive (a 100 MB removable media device).
@@ -840,7 +823,7 @@ config SCSI_IMM
here and Y to "IOMEGA Parallel Port (ppa - older drives)", above.
For more information about this driver and how to use it you should
- read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ read the file <file:Documentation/scsi/ppa.rst>. You should also read
the SCSI-HOWTO, which is available from
<http://www.tldp.org/docs.html#howto>. If you use this driver,
you will still be able to use the parallel port for other tasks,
@@ -853,7 +836,7 @@ config SCSI_IMM
config SCSI_IZIP_EPP16
bool "ppa/imm option - Use slow (but safe) EPP-16"
depends on SCSI_PPA || SCSI_IMM
- ---help---
+ help
EPP (Enhanced Parallel Port) is a standard for parallel ports which
allows them to act as expansion buses that can handle up to 64
peripheral devices.
@@ -905,7 +888,7 @@ config 53C700_LE_ON_BE
config SCSI_STEX
tristate "Promise SuperTrak EX Series support"
depends on PCI && SCSI
- ---help---
+ help
This driver supports Promise SuperTrak EX series storage controllers.
Promise provides Linux RAID configuration utility for these
@@ -923,21 +906,21 @@ config SCSI_SYM53C8XX_2
tristate "SYM53C8XX Version 2 SCSI support"
depends on PCI && SCSI
select SCSI_SPI_ATTRS
- ---help---
+ help
This driver supports the whole NCR53C8XX/SYM53C8XX family of
PCI-SCSI controllers. It also supports the subset of LSI53C10XX
Ultra-160 controllers that are based on the SYM53C8XX SCRIPTS
language. It does not support LSI53C10XX Ultra-320 PCI-X SCSI
controllers; you need to use the Fusion MPT driver for that.
- Please read <file:Documentation/scsi/sym53c8xx_2.txt> for more
+ Please read <file:Documentation/scsi/sym53c8xx_2.rst> for more
information.
config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
int "DMA addressing mode"
depends on SCSI_SYM53C8XX_2
default "1"
- ---help---
+ help
This option only applies to PCI-SCSI chips that are PCI DAC
capable (875A, 895A, 896, 1010-33, 1010-66, 1000).
@@ -989,10 +972,11 @@ config SCSI_SYM53C8XX_MMIO
config SCSI_IPR
tristate "IBM Power Linux RAID adapter support"
depends on PCI && SCSI && ATA
+ select SATA_HOST
select FW_LOADER
select IRQ_POLL
select SGL_ALLOC
- ---help---
+ help
This driver supports the IBM Power Linux family RAID adapters.
This includes IBM pSeries 5712, 5703, 5709, and 570A, as well
as IBM iSeries 5702, 5703, 5709, and 570A.
@@ -1030,7 +1014,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
int "default tagged command queue depth"
depends on SCSI_ZALON
default "8"
- ---help---
+ help
"Tagged command queuing" is a feature of SCSI-2 which improves
performance: the host adapter can send several SCSI commands to a
device's queue even if previous commands haven't finished yet.
@@ -1056,7 +1040,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
int "maximum number of queued commands"
depends on SCSI_ZALON
default "32"
- ---help---
+ help
This option allows you to specify the maximum number of commands
that can be queued to any device, when tagged command queuing is
possible. The default value is 32. Minimum is 2, maximum is 64.
@@ -1073,7 +1057,7 @@ config SCSI_NCR53C8XX_SYNC
int "synchronous transfers frequency in MHz"
depends on SCSI_ZALON
default "20"
- ---help---
+ help
The SCSI Parallel Interface-2 Standard defines 5 classes of transfer
rates: FAST-5, FAST-10, FAST-20, FAST-40 and FAST-80. The numbers
are respectively the maximum data transfer rates in mega-transfers
@@ -1116,7 +1100,7 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
config SCSI_QLOGIC_FAS
tristate "Qlogic FAS SCSI support"
depends on ISA && SCSI
- ---help---
+ help
This is a driver for the ISA, VLB, and PCMCIA versions of the Qlogic
FastSCSI! cards as well as any other card based on the FASXX chip
(including the Control Concepts SCSI/IDE/SIO/PIO/FDC cards).
@@ -1126,7 +1110,7 @@ config SCSI_QLOGIC_FAS
SCSI support"), below.
Information about this driver is contained in
- <file:Documentation/scsi/qlogicfas.txt>. You should also read the
+ <file:Documentation/scsi/qlogicfas.rst>. You should also read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
@@ -1162,26 +1146,30 @@ source "drivers/scsi/qedf/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
+ depends on CPU_FREQ
depends on SCSI_FC_ATTRS
depends on NVME_TARGET_FC || NVME_TARGET_FC=n
depends on NVME_FC || NVME_FC=n
select CRC_T10DIF
- ---help---
+ select IRQ_POLL
+ help
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
config SCSI_LPFC_DEBUG_FS
bool "Emulex LightPulse Fibre Channel debugfs Support"
depends on SCSI_LPFC && DEBUG_FS
- ---help---
+ help
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
+source "drivers/scsi/elx/Kconfig"
+
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on EISA && SCSI
select SCSI_SPI_ATTRS
- ---help---
+ help
This driver is for NCR53c710 based SCSI host adapters.
It currently supports Compaq EISA cards.
@@ -1189,14 +1177,15 @@ config SCSI_SIM710
config SCSI_DC395x
tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
depends on PCI && SCSI
- ---help---
+ select SCSI_SPI_ATTRS
+ help
This driver supports PCI SCSI host adapters based on the ASIC
TRM-S1040 chip, e.g Tekram DC395(U/UW/F) and DC315(U) variants.
This driver works, but is still in experimental status. So better
have a bootable disk and a backup in case of emergency.
- Documentation can be found in <file:Documentation/scsi/dc395x.txt>.
+ Documentation can be found in <file:Documentation/scsi/dc395x.rst>.
To compile this driver as a module, choose M here: the
module will be called dc395x.
@@ -1205,7 +1194,7 @@ config SCSI_AM53C974
tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)"
depends on PCI && SCSI
select SCSI_SPI_ATTRS
- ---help---
+ help
This driver supports PCI SCSI host adapters based on the Am53C974A
chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard
PCscsi/PCnet (Am53/79C974) solutions.
@@ -1232,7 +1221,7 @@ config SCSI_WD719X
tristate "Western Digital WD7193/7197/7296 support"
depends on PCI && SCSI
select EEPROM_93CX6
- ---help---
+ help
This is a driver for Western Digital WD7193, WD7197 and WD7296 PCI
SCSI controllers (based on WD33C296A chip).
@@ -1326,7 +1315,7 @@ config A2091_SCSI
config GVP11_SCSI
tristate "GVP Series II WD33C93A support"
depends on ZORRO && SCSI
- ---help---
+ help
If you have a Great Valley Products Series II SCSI controller,
answer Y. Also say Y if you have a later model of GVP SCSI
controller (such as the GVP A4008 or a Combo board). Otherwise,
@@ -1382,7 +1371,7 @@ config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
select SCSI_SPI_ATTRS
- ---help---
+ help
If you have an Atari with built-in NCR5380 SCSI controller (TT,
Falcon, ...) say Y to get it supported. Of course also, if you have
a compatible SCSI controller (e.g. for Medusa).
@@ -1477,14 +1466,19 @@ config SCSI_SUNESP
module will be called sun_esp.
config ZFCP
- tristate "FCP host bus adapter driver for IBM eServer zSeries"
+ tristate "FCP host bus adapter driver for IBM mainframes"
depends on S390 && QDIO && SCSI
depends on SCSI_FC_ATTRS
help
- If you want to access SCSI devices attached to your IBM eServer
- zSeries by means of Fibre Channel interfaces say Y.
- For details please refer to the documentation provided by IBM at
- <http://oss.software.ibm.com/developerworks/opensource/linux390>
+ If you want to access SCSI devices attached to your IBM mainframe by
+ means of Fibre Channel Protocol host bus adapters say Y.
+
+ Supported HBAs include different models of the FICON Express and FCP
+ Express I/O cards.
+
+ For a more complete list, and for more details about setup and
+ operation refer to the IBM publication "Device Drivers, Features, and
+ Commands", SC33-8411.
This driver is also available as a module. This module will be
called zfcp. If you want to compile it as a module, say M here
@@ -1494,7 +1488,7 @@ config SCSI_PMCRAID
tristate "PMC SIERRA Linux MaxRAID adapter support"
depends on PCI && SCSI && NET
select SGL_ALLOC
- ---help---
+ help
This driver supports the PMC SIERRA MaxRAID adapters.
config SCSI_PM8001
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index c00e3dd57990..f055bfd54a68 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -16,12 +16,11 @@
CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
-CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
-obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
+obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@@ -64,7 +63,6 @@ obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
-obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
@@ -86,6 +84,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
+obj-$(CONFIG_SCSI_EFCT) += elx/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -100,10 +99,9 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
-obj-$(CONFIG_SCSI_UFSHCD) += ufs/
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
-obj-$(CONFIG_SCSI_GDTH) += gdth.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
@@ -168,6 +166,7 @@ scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o
scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
+scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o
hv_storvsc-y := storvsc_drv.o
@@ -183,7 +182,7 @@ CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
-clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
+clean-files := 53c700_d.h 53c700_u.h
$(obj)/53c700.o: $(obj)/53c700_d.h
@@ -192,9 +191,11 @@ $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
quiet_cmd_bflags = GEN $@
cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
-$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h FORCE
$(call if_changed,bflags)
+targets += scsi_devinfo_tbl.c
+
# If you want to play with the firmware, uncomment
# GENERATE_FIRMWARE := 1
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index f2f7e6e76c07..dece7d9eb4d3 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -84,8 +84,7 @@
* On command termination, the done function will be called as
* appropriate.
*
- * SCSI pointers are maintained in the SCp field of SCSI command
- * structures, being initialized after the command is connected
+ * The command data pointer is initialized after the command is connected
* in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
* Note that in violation of the standard, an implicit SAVE POINTERS operation
* is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
@@ -132,7 +131,7 @@
static unsigned int disconnect_mask = ~0;
module_param(disconnect_mask, int, 0444);
-static int do_abort(struct Scsi_Host *);
+static int do_abort(struct Scsi_Host *, unsigned int);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -145,40 +144,38 @@ static void bus_reset_cleanup(struct Scsi_Host *);
static inline void initialize_SCp(struct scsi_cmnd *cmd)
{
- /*
- * Initialize the Scsi Pointer field so that all of the commands in the
- * various queues are valid.
- */
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ ncmd->buffer = scsi_sglist(cmd);
+ ncmd->ptr = sg_virt(ncmd->buffer);
+ ncmd->this_residual = ncmd->buffer->length;
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
+ ncmd->buffer = NULL;
+ ncmd->ptr = NULL;
+ ncmd->this_residual = 0;
}
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
+ ncmd->status = 0;
+ ncmd->message = 0;
}
-static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
{
- struct scatterlist *s = cmd->SCp.buffer;
+ struct scatterlist *s = ncmd->buffer;
- if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
- cmd->SCp.buffer = sg_next(s);
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ if (!ncmd->this_residual && s && !sg_is_last(s)) {
+ ncmd->buffer = sg_next(s);
+ ncmd->ptr = sg_virt(ncmd->buffer);
+ ncmd->this_residual = ncmd->buffer->length;
}
}
static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
{
- int resid = cmd->SCp.this_residual;
- struct scatterlist *s = cmd->SCp.buffer;
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
+ int resid = ncmd->this_residual;
+ struct scatterlist *s = ncmd->buffer;
if (s)
while (!sg_is_last(s)) {
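From here on, the NCR5380 core keeps its per-command state in a driver-private structure instead of the old scsi_cmnd::SCp scratch area. A minimal sketch of the mechanism, assuming NCR5380_to_ncmd() is a thin wrapper over the midlayer's scsi_cmd_priv() and that the host template reserves the space via cmd_size (field names taken from the hunks above; the real definitions live in NCR5380.h):

    /* Hypothetical sketch, not the driver's actual header. */
    struct NCR5380_cmd {
            char *ptr;                      /* current position in the sg buffer */
            int this_residual;              /* bytes left in the current sg entry */
            struct scatterlist *buffer;     /* current sg entry */
            int status;                     /* SAM status from STATUS phase */
            int message;                    /* last MESSAGE IN byte */
            unsigned char phase;            /* bus phase at DMA setup time */
            struct list_head list;          /* issue/autosense queue linkage */
    };

    static struct scsi_host_template ncr5380_template = {
            /* ... */
            .cmd_size = sizeof(struct NCR5380_cmd), /* midlayer allocates this per command */
    };

    static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);      /* private area right behind *cmd */
    }

    static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd)
    {
            return (struct scsi_cmnd *)ncmd - 1;    /* inverse of the above */
    }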
@@ -197,7 +194,7 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
* @reg2: Second 5380 register to poll
* @bit2: Second bitmask to check
* @val2: Second expected value
- * @wait: Time-out in jiffies
+ * @wait: Time-out in jiffies, 0 if sleeping is not allowed
*
* Polls the chip in a reasonably efficient manner waiting for an
* event to occur. After a short quick poll we begin to yield the CPU
@@ -223,7 +220,7 @@ static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
cpu_relax();
} while (n--);
- if (irqs_disabled() || in_interrupt())
+ if (!wait)
return -ETIMEDOUT;
/* Repeatedly sleep for 1 ms until deadline */
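NCR5380_poll_politely() no longer guesses at context via irqs_disabled()/in_interrupt(); callers that cannot sleep must now say so themselves by passing a zero timeout. The resulting calling convention looks roughly like this (sketch only, mirroring the can_sleep callers below):

    /* A zero @wait degrades to a pure busy-poll, which is what callers
     * in interrupt context or under the host lock need; a non-zero
     * @wait permits 1 ms sleeps up to the deadline. */
    rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
                               10 * HZ * can_sleep);
    if (rc < 0)
            return rc;      /* -ETIMEDOUT without ever sleeping if can_sleep == 0 */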
@@ -486,7 +483,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
break;
case 2:
shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n");
- do_abort(instance);
+ do_abort(instance, 1);
break;
case 4:
shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n");
@@ -538,16 +535,16 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if (status_byte(cmd->result) != GOOD) {
+ if (get_status_byte(cmd) != SAM_STAT_GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
} else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_driver_byte(cmd, DRIVER_SENSE);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
}
hostdata->sensing = NULL;
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/**
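complete_cmd() drops the old status_byte()/DRIVER_SENSE macros for the SAM-based accessors; the layout of cmd->result itself is unchanged (SAM status in bits 0-7, host byte in bits 16-23). The accessors assumed here look roughly like this (cf. include/scsi/scsi_cmnd.h):

    static inline u8 get_status_byte(struct scsi_cmnd *cmd)
    {
            return cmd->result & 0xff;      /* SAM status as delivered */
    }

    static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
    {
            cmd->result = (cmd->result & 0xffffff00) | status;
    }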
@@ -564,7 +561,7 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
struct scsi_cmnd *cmd)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
unsigned long flags;
#if (NDEBUG & NDEBUG_NO_WRITE)
@@ -573,18 +570,21 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
case WRITE_10:
shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n");
cmd->result = (DID_ERROR << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
cmd->result = 0;
- if (!NCR5380_acquire_dma_irq(instance))
- return SCSI_MLQUEUE_HOST_BUSY;
-
spin_lock_irqsave(&hostdata->lock, flags);
+ if (!NCR5380_acquire_dma_irq(instance)) {
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
/*
* Insert the cmd into the issue queue. Note that REQUEST SENSE
* commands are added to the head of the queue since any command will
@@ -669,7 +669,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
@@ -722,7 +722,6 @@ static void NCR5380_main(struct work_struct *work)
if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
- maybe_release_dma_irq(instance);
} else {
dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
"main: select failed, returning %p to queue\n", cmd);
@@ -734,8 +733,10 @@ static void NCR5380_main(struct work_struct *work)
NCR5380_information_transfer(instance);
done = 0;
}
- if (!hostdata->connected)
+ if (!hostdata->connected) {
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ maybe_release_dma_irq(instance);
+ }
spin_unlock_irq(&hostdata->lock);
if (!done)
cond_resched();
@@ -753,6 +754,7 @@ static void NCR5380_main(struct work_struct *work)
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
int transferred;
unsigned char **data;
int *count;
@@ -760,7 +762,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
unsigned char p;
if (hostdata->read_overruns) {
- p = hostdata->connected->SCp.phase;
+ p = ncmd->phase;
if (p & SR_IO) {
udelay(10);
if ((NCR5380_read(BUS_AND_STATUS_REG) &
@@ -774,7 +776,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+ if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) {
pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
instance->host_no);
BUG();
@@ -797,8 +799,8 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
- data = (unsigned char **)&hostdata->connected->SCp.ptr;
- count = &hostdata->connected->SCp.this_residual;
+ data = (unsigned char **)&ncmd->ptr;
+ count = &ncmd->this_residual;
*data += transferred;
*count -= transferred;
@@ -818,7 +820,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
if (toPIO > 0) {
dsprintk(NDEBUG_DMA, instance,
"Doing %d byte PIO to 0x%p\n", cnt, *data);
- NCR5380_transfer_pio(instance, &p, &cnt, data);
+ NCR5380_transfer_pio(instance, &p, &cnt, data, 0);
*count -= toPIO - cnt;
}
}
@@ -956,7 +958,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
* hostdata->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
- * If failed (no target) : cmd->scsi_done() will be called, and the
+ * If failed (no target) : scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*/
@@ -1185,7 +1187,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
goto out;
}
if (!hostdata->selecting) {
- do_abort(instance);
+ do_abort(instance, 0);
return false;
}
@@ -1196,7 +1198,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
len = 1;
data = tmp;
phase = PHASE_MSGOUT;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
if (len) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
cmd->result = DID_ERROR << 16;
@@ -1234,7 +1236,8 @@ out:
*
* Inputs : instance - instance of driver, *phase - pointer to
* what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer.
+ * bytes to transfer, **data - pointer to data pointer,
+ * can_sleep - 1 or 0 when sleeping is permitted or not, respectively.
*
* Returns : -1 when different phase is entered without transferring
* maximum number of bytes, 0 if all bytes are transferred or exit
@@ -1253,7 +1256,7 @@ out:
static int NCR5380_transfer_pio(struct Scsi_Host *instance,
unsigned char *phase, int *count,
- unsigned char **data)
+ unsigned char **data, unsigned int can_sleep)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
@@ -1274,7 +1277,8 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+ HZ * can_sleep) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1320,7 +1324,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
}
if (NCR5380_poll_politely(hostdata,
- STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
+ STATUS_REG, SR_REQ, 0, 5 * HZ * can_sleep) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
@@ -1395,11 +1399,12 @@ static void do_reset(struct Scsi_Host *instance)
* do_abort - abort the currently established nexus by going to
* MESSAGE OUT phase and sending an ABORT message.
* @instance: relevant scsi host instance
+ * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively
*
* Returns 0 on success, negative error code on failure.
*/
-static int do_abort(struct Scsi_Host *instance)
+static int do_abort(struct Scsi_Host *instance, unsigned int can_sleep)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *msgptr, phase, tmp;
@@ -1419,7 +1424,8 @@ static int do_abort(struct Scsi_Host *instance)
* the target sees, so we just handshake.
*/
- rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+ 10 * HZ * can_sleep);
if (rc < 0)
goto out;
@@ -1430,7 +1436,8 @@ static int do_abort(struct Scsi_Host *instance)
if (tmp != PHASE_MSGOUT) {
NCR5380_write(INITIATOR_COMMAND_REG,
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0,
+ 3 * HZ * can_sleep);
if (rc < 0)
goto out;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1440,7 +1447,7 @@ static int do_abort(struct Scsi_Host *instance)
msgptr = &tmp;
len = 1;
phase = PHASE_MSGOUT;
- NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+ NCR5380_transfer_pio(instance, &phase, &len, &msgptr, can_sleep);
if (len)
rc = -ENXIO;
@@ -1489,7 +1496,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
return -1;
}
- hostdata->connected->SCp.phase = p;
+ NCR5380_to_ncmd(hostdata->connected)->phase = p;
if (p & SR_IO) {
if (hostdata->read_overruns)
@@ -1619,12 +1626,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
*/
if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ, BASR_DRQ, HZ) < 0) {
+ BASR_DRQ, BASR_DRQ, 0) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
if (NCR5380_poll_politely(hostdata, STATUS_REG,
- SR_REQ, 0, HZ) < 0) {
+ SR_REQ, 0, 0) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
}
@@ -1636,7 +1643,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
*/
if (NCR5380_poll_politely2(hostdata,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
- BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
+ BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
}
@@ -1681,7 +1688,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
#endif
while ((cmd = hostdata->connected)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
tmp = NCR5380_read(STATUS_REG);
/* We only have a valid SCSI phase when REQ is asserted */
@@ -1696,17 +1703,17 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sun3_dma_setup_done != cmd) {
int count;
- advance_sg_buffer(cmd);
+ advance_sg_buffer(ncmd);
count = sun3scsi_dma_xfer_len(hostdata, cmd);
if (count > 0) {
- if (rq_data_dir(cmd->request))
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
- cmd->SCp.ptr, count);
+ ncmd->ptr, count);
else
sun3scsi_dma_recv_setup(hostdata,
- cmd->SCp.ptr, count);
+ ncmd->ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
@@ -1733,7 +1740,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
#if (NDEBUG & NDEBUG_NO_DATAOUT)
shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n");
sink = 1;
- do_abort(instance);
+ do_abort(instance, 0);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
hostdata->connected = NULL;
@@ -1746,11 +1753,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* scatter-gather list, move onto the next one.
*/
- advance_sg_buffer(cmd);
+ advance_sg_buffer(ncmd);
dsprintk(NDEBUG_INFORMATION, instance,
"this residual %d, sg ents %d\n",
- cmd->SCp.this_residual,
- sg_nents(cmd->SCp.buffer));
+ ncmd->this_residual,
+ sg_nents(ncmd->buffer));
/*
* The preferred transfer method is going to be
@@ -1769,7 +1776,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
if (transfersize > 0) {
len = transfersize;
if (NCR5380_transfer_dma(instance, &phase,
- &len, (unsigned char **)&cmd->SCp.ptr)) {
+ &len, (unsigned char **)&ncmd->ptr)) {
/*
* If the watchdog timer fires, all future
* accesses to this device will use the
@@ -1785,12 +1792,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Transfer a small chunk so that the
* irq mode lock is not held too long.
*/
- transfersize = min(cmd->SCp.this_residual,
+ transfersize = min(ncmd->this_residual,
NCR5380_PIO_CHUNK_SIZE);
len = transfersize;
NCR5380_transfer_pio(instance, &phase, &len,
- (unsigned char **)&cmd->SCp.ptr);
- cmd->SCp.this_residual -= transfersize - len;
+ (unsigned char **)&ncmd->ptr,
+ 0);
+ ncmd->this_residual -= transfersize - len;
}
#ifdef CONFIG_SUN3
if (sun3_dma_setup_done == cmd)
@@ -1800,11 +1808,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
case PHASE_MSGIN:
len = 1;
data = &tmp;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- cmd->SCp.Message = tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
+ ncmd->message = tmp;
switch (tmp) {
case ABORT:
+ set_host_byte(cmd, DID_ABORT);
+ fallthrough;
case COMMAND_COMPLETE:
/* Accept message by clearing ACK */
sink = 1;
@@ -1816,17 +1826,15 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
- cmd->result &= ~0xffff;
- cmd->result |= cmd->SCp.Status;
- cmd->result |= cmd->SCp.Message << 8;
+ set_status_byte(cmd, ncmd->status);
set_resid_from_SCp(cmd);
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
- if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
- cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) {
+ if (ncmd->status == SAM_STAT_CHECK_CONDITION ||
+ ncmd->status == SAM_STAT_COMMAND_TERMINATED) {
dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n",
cmd);
list_add_tail(&ncmd->list,
@@ -1841,7 +1849,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
- maybe_release_dma_irq(instance);
return;
case MESSAGE_REJECT:
/* Accept message by clearing ACK */
@@ -1907,7 +1914,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 1);
dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n",
(int)extended_msg[1],
(int)extended_msg[2]);
@@ -1920,7 +1927,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
data = extended_msg + 3;
phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 1);
dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n",
len);
@@ -1943,7 +1950,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
return;
/* Reject message */
- /* Fall through */
+ fallthrough;
default:
/*
* If we get something weird that we aren't expecting,
@@ -1967,13 +1974,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
len = 1;
data = &msgout;
hostdata->last_message = msgout;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
if (msgout == ABORT) {
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
- maybe_release_dma_irq(instance);
return;
}
msgout = NOP;
@@ -1986,13 +1992,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* PSEUDO-DMA architecture we should probably
* use the dma transfer function.
*/
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
break;
case PHASE_STATIN:
len = 1;
data = &tmp;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- cmd->SCp.Status = tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
+ ncmd->status = tmp;
break;
default:
shost_printk(KERN_ERR, instance, "unknown phase\n");
@@ -2050,7 +2056,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
if (NCR5380_poll_politely(hostdata,
- STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+ STATUS_REG, SR_SEL, 0, 0) < 0) {
shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
@@ -2062,12 +2068,12 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
*/
if (NCR5380_poll_politely(hostdata,
- STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+ STATUS_REG, SR_REQ, SR_REQ, 0) < 0) {
if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
/* BUS FREE phase */
return;
shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
- do_abort(instance);
+ do_abort(instance, 0);
return;
}
@@ -2083,10 +2089,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
unsigned char *data = msg;
unsigned char phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
if (len) {
- do_abort(instance);
+ do_abort(instance, 0);
return;
}
}
@@ -2096,7 +2102,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got ");
spi_print_msg(msg);
printk("\n");
- do_abort(instance);
+ do_abort(instance, 0);
return;
}
lun = msg[0] & 0x07;
@@ -2136,7 +2142,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
* Since we have an established nexus that we can't do anything
* with, we must abort it.
*/
- if (do_abort(instance) == 0)
+ if (do_abort(instance, 0) == 0)
hostdata->busy[target] &= ~(1 << lun);
return;
}
@@ -2145,17 +2151,17 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (sun3_dma_setup_done != tmp) {
int count;
- advance_sg_buffer(tmp);
+ advance_sg_buffer(ncmd);
count = sun3scsi_dma_xfer_len(hostdata, tmp);
if (count > 0) {
- if (rq_data_dir(tmp->request))
+ if (tmp->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
- tmp->SCp.ptr, count);
+ ncmd->ptr, count);
else
sun3scsi_dma_recv_setup(hostdata,
- tmp->SCp.ptr, count);
+ ncmd->ptr, count);
sun3_dma_setup_done = tmp;
}
}
@@ -2198,7 +2204,7 @@ static bool list_del_cmd(struct list_head *haystack,
struct scsi_cmnd *needle)
{
if (list_find_cmd(haystack, needle)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(needle);
list_del(&ncmd->list);
return true;
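list_find_cmd()/list_del_cmd() operate on the list_head embedded in the driver-private struct NCR5380_cmd rather than on the scsi_cmnd itself. A self-contained model of that intrusive-list pattern (single-pointer node for brevity; the kernel uses struct list_head and its own container_of()):

#include <stdio.h>
#include <stddef.h>

struct list_node { struct list_node *next; };
struct ncmd { int status; struct list_node list; };

/* Recover the enclosing struct from a pointer to its embedded node. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct ncmd a = { .status = 7 };
	struct list_node *node = &a.list;	/* what the queue stores */

	printf("status via list node: %d\n",
	       container_of(node, struct ncmd, list)->status);
	return 0;
}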
@@ -2254,7 +2260,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ scsi_done(cmd); /* No tag or busy flag to worry about */
goto out;
}
@@ -2283,7 +2289,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
hostdata->dma_len = 0;
- if (do_abort(instance) < 0) {
+ if (do_abort(instance, 0) < 0) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
result = FAILED;
@@ -2309,7 +2315,6 @@ out:
}
queue_work(hostdata->work_q, &hostdata->main_task);
- maybe_release_dma_irq(instance);
spin_unlock_irqrestore(&hostdata->lock, flags);
return result;
@@ -2350,7 +2355,7 @@ static void bus_reset_cleanup(struct Scsi_Host *instance)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,7 +2370,6 @@ static void bus_reset_cleanup(struct Scsi_Host *instance)
hostdata->dma_len = 0;
queue_work(hostdata->work_q, &hostdata->main_task);
- maybe_release_dma_irq(instance);
}
/**
@@ -2394,7 +2398,7 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
scmd->result = DID_RESET << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
INIT_LIST_HEAD(&hostdata->unissued);
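Throughout the abort and reset paths, cmd->scsi_done(cmd) becomes a call to the midlayer's single scsi_done() entry point, so drivers no longer save a completion pointer per command. A minimal model of the shape of that change (stand-in struct, not the real scsi_cmnd):

#include <stdio.h>

struct scsi_cmnd { unsigned int result; };

/* One well-known completion entry point replaces the per-command
 * function pointer the driver previously had to stash and call. */
static void scsi_done(struct scsi_cmnd *cmd)
{
	printf("completed, result=0x%x\n", cmd->result);
}

int main(void)
{
	struct scsi_cmnd cmd = { .result = 0x05 << 16 /* DID_ABORT */ };

	/* was: cmd->scsi_done(cmd), via a saved pointer */
	scsi_done(&cmd);
	return 0;
}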
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 5935fd6d1a05..8dc2be4212dc 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -227,11 +227,15 @@ struct NCR5380_hostdata {
};
struct NCR5380_cmd {
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int phase;
struct list_head list;
};
-#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd))
-
#define NCR5380_PIO_CHUNK_SIZE 256
/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
@@ -242,6 +246,11 @@ static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
}
+static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#ifndef NDEBUG
#define NDEBUG (0)
#endif
@@ -277,7 +286,8 @@ static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
+static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data,
+ unsigned int can_sleep);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
unsigned int, u8, u8,
unsigned int, u8, u8, unsigned long);
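NCR5380_to_ncmd() wraps scsi_cmd_priv(), and NCR5380_to_scmd() steps back by one scsi_cmnd; both assume the midlayer allocates the driver's private area immediately after struct scsi_cmnd, sized by the host template's cmd_size. A standalone model of that layout and the round trip:

#include <stdio.h>
#include <stdlib.h>

struct scsi_cmnd { int result; };
struct NCR5380_cmd { int status, message, phase; };

static struct NCR5380_cmd *to_ncmd(struct scsi_cmnd *cmd)
{
	return (struct NCR5380_cmd *)(cmd + 1);	/* priv follows cmd */
}

static struct scsi_cmnd *to_scmd(struct NCR5380_cmd *ncmd)
{
	return (struct scsi_cmnd *)ncmd - 1;	/* step back over cmd */
}

int main(void)
{
	/* one allocation holds the command plus the driver's priv area */
	struct scsi_cmnd *cmd =
		malloc(sizeof(struct scsi_cmnd) + sizeof(struct NCR5380_cmd));

	to_ncmd(cmd)->status = 2;
	printf("round trip ok: %d\n", to_scmd(to_ncmd(cmd)) == cmd);
	free(cmd);
	return 0;
}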
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 66c514310f3c..d02eb5b213d0 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -269,7 +269,7 @@ static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
}
/**
- * orc_exec_sb - Queue an SCB with the HA
+ * orc_exec_scb - Queue an SCB with the HA
* @host: host adapter the SCB belongs to
* @scb: SCB to queue for execution
*/
@@ -586,7 +586,7 @@ static int orc_reset_scsi_bus(struct orc_host * host)
* orc_device_reset - device reset handler
* @host: host to reset
* @cmd: command causing the reset
- * @target; target device
+ * @target: target device
*
* Reset registers, reset a hanging bus and kill active and disconnected
* commands for target w/o soft reset
@@ -727,7 +727,7 @@ static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
spin_unlock_irqrestore(&(host->allocation_lock), flags);
}
-/**
+/*
* orchid_abort_scb - abort a command
*
* Abort a queued command that has been passed to the firmware layer
@@ -902,22 +902,19 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
}
/**
- * inia100_queue - queue command with host
+ * inia100_queue_lck - queue command with host
* @cmd: Command block
- * @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
-
-static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;
struct orc_host *host; /* Point to Host adapter control block */
host = (struct orc_host *) cmd->device->host->hostdata;
- cmd->scsi_done = done;
/* Get free SCSI control block */
if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1042,7 +1039,7 @@ static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
}
cmd->result = scb->tastat | (scb->hastat << 16);
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd); /* Notify system DONE */
+ scsi_done(cmd); /* Notify system DONE */
orc_release_scb(host, scb); /* Release SCB for current channel */
}
@@ -1088,8 +1085,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
unsigned long port, bios;
int error = -ENODEV;
u32 sz;
- unsigned long biosaddr;
- char *bios_phys;
if (pci_enable_device(pdev))
goto out;
@@ -1139,9 +1134,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
goto out_free_scb_array;
}
- biosaddr = host->BIOScfg;
- biosaddr = (biosaddr << 4);
- bios_phys = phys_to_virt(biosaddr);
if (init_orchid(host)) { /* Initialize orchid chip */
printk("inia100: initial orchid fail!!\n");
goto out_free_escb_array;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 564b35473672..74312400468b 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -9,11 +9,14 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a2091.h"
@@ -21,8 +24,11 @@
struct a2091_hostdata {
struct WD33C93_hostdata wh;
struct a2091_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a2091_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -41,16 +47,33 @@ static irqreturn_t a2091_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -60,8 +83,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
/* the bounce buffer may not be in the first 16M of physmem */
if (addr & A2091_XFER_MASK) {
@@ -72,11 +108,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
- }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -91,13 +123,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
+
/* start DMA */
regs->ST_DMA = 1;
@@ -108,6 +135,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
@@ -137,11 +165,15 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* restore the CONTROL bits (minus the direction flag) */
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
kfree(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
@@ -162,6 +194,7 @@ static struct scsi_host_template a2091_scsi_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
@@ -172,6 +205,11 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wd33c93_regs wdregs;
struct a2091_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) {
+ dev_warn(&z->dev, "cannot use 24 bit DMA\n");
+ return -ENODEV;
+ }
+
if (!request_mem_region(z->resource.start, 256, "wd33c93"))
return -EBUSY;
@@ -192,6 +230,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &z->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
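The a2091 conversion swaps virt_to_bus() plus manual cache_clear()/cache_push() for streaming DMA mappings, which do the cache maintenance implicitly and can report an unmappable buffer. A condensed kernel-style sketch of the pattern; map_for_dma()/unmap_after_dma() are hypothetical names summarising what dma_setup()/dma_stop() do above:

static int map_for_dma(struct device *dev, struct scsi_pointer *sp,
		       enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_single(dev, sp->ptr,
					 sp->this_residual, dir);

	if (dma_mapping_error(dev, addr))
		return 1;		/* caller falls back to PIO */
	sp->dma_handle = addr;		/* saved for the unmap in dma_stop() */
	return 0;
}

static void unmap_after_dma(struct device *dev, struct scsi_pointer *sp,
			    enum dma_data_direction dir)
{
	/* also performs the cache maintenance the old cache_clear()/
	 * cache_push() calls did by hand */
	dma_unmap_single(dev, sp->dma_handle, sp->this_residual, dir);
}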
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index b6a0432f305a..2c5cb1a02e86 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -7,14 +7,18 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a3000.h"
@@ -22,8 +26,11 @@
struct a3000_hostdata {
struct WD33C93_hostdata wh;
struct a3000_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a3000_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -45,37 +52,65 @@ static irqreturn_t a3000_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/*
* if the physical address has the wrong alignment, or if
* physical address is bad, or if it is a write and at the
* end of a physical memory chunk, then allocate a bounce
* buffer
+ * MSch 20220629 - only wrong alignment tested - bounce
+ * buffer returned by kmalloc is guaranteed to be aligned
*/
if (addr & A3000_XFER_MASK) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ WARN_ONCE(1, "Invalid alignment for DMA!");
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
/* can't allocate memory; use PIO */
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
return 1;
}
if (!dir_in) {
/* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -90,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
/* start DMA */
mb(); /* make sure setup is completed */
@@ -110,6 +139,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
@@ -146,12 +176,16 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
mb(); /* make sure CNTR is updated before next IO */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (SCpnt) {
if (wh->dma_dir && SCpnt)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
kfree(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
@@ -176,6 +210,7 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
@@ -187,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wd33c93_regs wdregs;
struct a3000_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -210,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &pdev->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
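The a3000 probe negotiates a 32-bit DMA mask where the Zorro a2091 asked for 24-bit, and both drivers still bounce transfers whose mapped address trips the board's transfer mask, rounding the bounce allocation up to 512 bytes. A standalone illustration of that arithmetic (XFER_MASK is an example value, not the real A2091/A3000 constant):

#include <stdio.h>

#define XFER_MASK 0x00000003u	/* example: engine needs 4-byte alignment */

static unsigned long bounce_len(unsigned long residual)
{
	return (residual + 511) & ~0x1ffUL;	/* round up to 512 */
}

int main(void)
{
	unsigned long addr = 0x1002;	/* misaligned DMA address */

	if (addr & XFER_MASK)
		printf("bounce: alloc %lu bytes for a 1000 byte transfer\n",
		       bounce_len(1000));	/* prints 1024 */
	return 0;
}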
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
deleted file mode 100644
index 78dc863eff4f..000000000000
--- a/drivers/scsi/aacraid/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-o Testing
-o More testing
-o I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 33dbc051bff9..4d4cb47b3846 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -25,7 +25,6 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
-#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
#include <asm/unaligned.h>
@@ -224,6 +223,7 @@ static long aac_build_sghba(struct scsi_cmnd *scsicmd,
int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
int pages, int nseg, int nseg_new);
+static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
@@ -242,7 +242,7 @@ int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode; /* Only Sync. transfer - disabled */
-int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
+static int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
@@ -271,7 +271,7 @@ MODULE_PARM_DESC(msi, "IRQ handling."
" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
- " adapter to have it's kernel up and\n"
+ " adapter to have its kernel up and\n"
"running. This is typically adjusted for large systems that do not"
" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
@@ -290,7 +290,7 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
" blocks (FIB) allocated. Valid values are 512 and down. Default is"
" to use suggestion from Firmware.");
-int acbsize = -1;
+static int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
@@ -321,7 +321,7 @@ int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
-int aac_wwn = 1;
+static int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
"\t0 - Disable\n"
@@ -333,12 +333,12 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
struct fib *fibptr) {
struct scsi_device *device;
- if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
+ if (unlikely(!scsicmd)) {
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
aac_fib_complete(fibptr);
return 0;
}
- scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+ aac_priv(scsicmd)->owner = AAC_OWNER_MIDLEVEL;
device = scsicmd->device;
if (unlikely(!device)) {
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
@@ -350,7 +350,8 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
/**
* aac_get_config_status - check the adapter configuration
- * @common: adapter to query
+ * @dev: aac driver data
+ * @commit_flag: force sending CT_COMMIT_CONFIG
*
* Query config status, and commit the configuration if needed.
*/
@@ -442,7 +443,7 @@ static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
/**
* aac_get_containers - list containers
- * @common: adapter to probe
+ * @dev: aac driver data
*
* Make a list of all containers on this controller
*/
@@ -517,6 +518,17 @@ int aac_get_containers(struct aac_dev *dev)
return status;
}
+static void aac_scsi_done(struct scsi_cmnd *scmd)
+{
+ if (scmd->device->request_queue) {
+ /* SCSI command has been submitted by the SCSI mid-layer. */
+ scsi_done(scmd);
+ } else {
+ /* SCSI command has been submitted by aac_probe_container(). */
+ aac_probe_container_scsi_done(scmd);
+ }
+}
+
static void get_container_name_callback(void *context, struct fib * fibptr)
{
struct aac_get_name_resp * get_name_reply;
@@ -555,13 +567,13 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
}
}
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
-/**
+/*
* aac_get_container_name - get container name, non-blocking.
*/
static int aac_get_container_name(struct scsi_cmnd * scsicmd)
@@ -580,7 +592,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_READ_NAME);
@@ -614,7 +626,7 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
return aac_scsi_cmd(scsicmd);
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -622,14 +634,15 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
{
struct fsa_dev_info *fsa_dev_ptr;
int (*callback)(struct scsi_cmnd *);
- struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+ struct scsi_cmnd *scsicmd = context;
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
int i;
if (!aac_valid_context(scsicmd, fibptr))
return;
- scsicmd->SCp.Status = 0;
+ cmd_priv->status = 0;
fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
@@ -667,12 +680,12 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
}
if ((fsa_dev_ptr->valid & 1) == 0)
fsa_dev_ptr->valid = 0;
- scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+ cmd_priv->status = le32_to_cpu(dresp->count);
}
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
- scsicmd->SCp.ptr = NULL;
+ callback = cmd_priv->callback;
+ cmd_priv->callback = NULL;
(*callback)(scsicmd);
return;
}
@@ -710,7 +723,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
fibptr,
@@ -731,6 +744,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
{
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
struct fib * fibptr;
int status = -ENOMEM;
@@ -749,8 +763,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- scsicmd->SCp.ptr = (char *)callback;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ cmd_priv->callback = callback;
+ cmd_priv->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
fibptr,
@@ -766,7 +780,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
return 0;
if (status < 0) {
- scsicmd->SCp.ptr = NULL;
+ cmd_priv->callback = NULL;
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
}
@@ -785,9 +799,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
}
/**
- * aac_probe_container - query a logical volume
- * @dev: device to query
- * @cid: container identifier
+ * aac_probe_container_callback1 - query a logical volume
+ * @scsicmd: the scsi command block
*
* Queries the controller about the given volume. The volume information
* is updated in the struct fsa_dev_info structure rather than returned.
@@ -798,10 +811,16 @@ static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
return 0;
}
+static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
+{
+ aac_probe_container_callback1(scsi_cmnd);
+}
+
int aac_probe_container(struct aac_dev *dev, int cid)
{
- struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
- struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
+ struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
+ struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
if (!scsicmd || !scsidev) {
@@ -809,8 +828,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
kfree(scsidev);
return -ENOMEM;
}
- scsicmd->list.next = NULL;
- scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
@@ -821,7 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
while (scsicmd->device == scsidev)
schedule();
kfree(scsidev);
- status = scsicmd->SCp.Status;
+ status = cmd_priv->status;
kfree(scsicmd);
return status;
}
@@ -834,7 +851,7 @@ struct scsi_inq {
};
/**
- * InqStrCopy - string merge
+ * inqstrcpy - string merge
* @a: string to copy from
* @b: string to copy to
*
@@ -1033,7 +1050,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
vpdpage83data.type1.productid));
/* Convert to ascii based serial number.
- * The LSB is the the end.
+ * The LSB is the end.
*/
for (i = 0; i < 8; i++) {
u8 temp =
@@ -1088,13 +1105,13 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
}
}
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
-/**
+/*
* aac_get_container_serial - get container serial, non-blocking.
*/
static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
@@ -1114,7 +1131,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
@@ -1187,15 +1204,14 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
if (lba & 0xffffffff00000000LL) {
int cid = scmd_id(cmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- cmd->scsi_done(cmd);
+ aac_scsi_done(cmd);
return 1;
}
return 0;
@@ -1232,8 +1248,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
if (ret < 0)
return ret;
command = ContainerRawIo2;
- fibsize = sizeof(struct aac_raw_io2) +
- ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ fibsize = struct_size(readcmd2, sge,
+ le32_to_cpu(readcmd2->sgeCnt));
} else {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(fib);
@@ -1363,8 +1379,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
if (ret < 0)
return ret;
command = ContainerRawIo2;
- fibsize = sizeof(struct aac_raw_io2) +
- ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ fibsize = struct_size(writecmd2, sge,
+ le32_to_cpu(writecmd2->sgeCnt));
} else {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(fib);
@@ -1502,7 +1518,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
- timeout = cmd->request->timeout/HZ;
+ timeout = scsi_cmd_to_rq(cmd)->timeout / HZ;
if (timeout == 0)
timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
@@ -1801,7 +1817,7 @@ static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
}
/**
- * aac_get_safw_ciss_luns() Process topology change
+ * aac_get_safw_ciss_luns() - Process topology change
* @dev: aac_dev structure
*
* Execute a CISS REPORT PHYS LUNS and process the results into
@@ -1878,11 +1894,6 @@ static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
}
-static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
-{
- return dev->safw_phys_luns->lun[lun].node_ident[8];
-}
-
static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
int bus, int target)
{
@@ -1948,8 +1959,6 @@ free_identify_resp:
/**
* aac_set_safw_attr_all_targets- update current hba map with data from FW
* @dev: aac_dev structure
- * @phys_luns: FW information from report phys luns
- * @rescan: Indicates scan type
*
* Update our hba map with the information gathered from the FW
*/
@@ -2227,10 +2236,10 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (dev->dac_support) {
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) {
if (!dev->in_reset)
dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
- } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) {
dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
dev->dac_support = 0;
} else {
@@ -2362,13 +2371,11 @@ static void io_callback(void *context, struct fib * fibptr)
readreply = (struct aac_read_reply *)fib_data(fibptr);
switch (le32_to_cpu(readreply->status)) {
case ST_OK:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
break;
case ST_NOT_READY:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
@@ -2376,8 +2383,7 @@ static void io_callback(void *context, struct fib * fibptr)
SCSI_SENSE_BUFFERSIZE));
break;
case ST_MEDERR:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
@@ -2389,8 +2395,7 @@ static void io_callback(void *context, struct fib * fibptr)
printk(KERN_WARNING "io_callback: io failed, status = %d\n",
le32_to_cpu(readreply->status));
#endif
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2401,7 +2406,7 @@ static void io_callback(void *context, struct fib * fibptr)
}
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
static int aac_read(struct scsi_cmnd * scsicmd)
@@ -2465,15 +2470,14 @@ static int aac_read(struct scsi_cmnd * scsicmd)
if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
cid = scmd_id(scsicmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -2485,7 +2489,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
/*
@@ -2498,8 +2502,8 @@ static int aac_read(struct scsi_cmnd * scsicmd)
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
- scsicmd->scsi_done(scsicmd);
+ scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
+ aac_scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
return 0;
@@ -2557,15 +2561,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
cid = scmd_id(scsicmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -2577,7 +2580,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
/*
@@ -2590,8 +2593,8 @@ static int aac_write(struct scsi_cmnd * scsicmd)
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
- scsicmd->scsi_done(scsicmd);
+ scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
+ aac_scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
@@ -2601,9 +2604,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
static void synchronize_callback(void *context, struct fib *fibptr)
{
struct aac_synchronize_reply *synchronizereply;
- struct scsi_cmnd *cmd;
-
- cmd = context;
+ struct scsi_cmnd *cmd = context;
if (!aac_valid_context(cmd, fibptr))
return;
@@ -2615,8 +2616,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
synchronizereply = fib_data(fibptr);
if (le32_to_cpu(synchronizereply->status) == CT_OK)
- cmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ cmd->result = DID_OK << 16 | SAM_STAT_GOOD;
else {
struct scsi_device *sdev = cmd->device;
struct aac_dev *dev = fibptr->dev;
@@ -2624,8 +2624,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
le32_to_cpu(synchronizereply->status));
- cmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2636,7 +2635,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- cmd->scsi_done(cmd);
+ aac_scsi_done(cmd);
}
static int aac_synchronize(struct scsi_cmnd *scsicmd)
@@ -2644,77 +2643,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
int status;
struct fib *cmd_fibcontext;
struct aac_synchronize *synchronizecmd;
- struct scsi_cmnd *cmd;
struct scsi_device *sdev = scsicmd->device;
- int active = 0;
struct aac_dev *aac;
- u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
- (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
- u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
- unsigned long flags;
-
- /*
- * Wait for all outstanding queued commands to complete to this
- * specific target (block).
- */
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(cmd, &sdev->cmd_list, list)
- if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
- u64 cmnd_lba;
- u32 cmnd_count;
-
- if (cmd->cmnd[0] == WRITE_6) {
- cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
- (cmd->cmnd[2] << 8) |
- cmd->cmnd[3];
- cmnd_count = cmd->cmnd[4];
- if (cmnd_count == 0)
- cmnd_count = 256;
- } else if (cmd->cmnd[0] == WRITE_16) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
- ((u64)cmd->cmnd[3] << 48) |
- ((u64)cmd->cmnd[4] << 40) |
- ((u64)cmd->cmnd[5] << 32) |
- ((u64)cmd->cmnd[6] << 24) |
- (cmd->cmnd[7] << 16) |
- (cmd->cmnd[8] << 8) |
- cmd->cmnd[9];
- cmnd_count = (cmd->cmnd[10] << 24) |
- (cmd->cmnd[11] << 16) |
- (cmd->cmnd[12] << 8) |
- cmd->cmnd[13];
- } else if (cmd->cmnd[0] == WRITE_12) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
- (cmd->cmnd[3] << 16) |
- (cmd->cmnd[4] << 8) |
- cmd->cmnd[5];
- cmnd_count = (cmd->cmnd[6] << 24) |
- (cmd->cmnd[7] << 16) |
- (cmd->cmnd[8] << 8) |
- cmd->cmnd[9];
- } else if (cmd->cmnd[0] == WRITE_10) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
- (cmd->cmnd[3] << 16) |
- (cmd->cmnd[4] << 8) |
- cmd->cmnd[5];
- cmnd_count = (cmd->cmnd[7] << 8) |
- cmd->cmnd[8];
- } else
- continue;
- if (((cmnd_lba + cmnd_count) < lba) ||
- (count && ((lba + count) < cmnd_lba)))
- continue;
- ++active;
- break;
- }
-
- spin_unlock_irqrestore(&sdev->list_lock, flags);
-
- /*
- * Yield the processor (requeue for later)
- */
- if (active)
- return SCSI_MLQUEUE_DEVICE_BUSY;
aac = (struct aac_dev *)sdev->host->hostdata;
if (aac->in_reset)
@@ -2723,8 +2653,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
/*
* Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext = aac_fib_alloc(aac)))
- return SCSI_MLQUEUE_HOST_BUSY;
+ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
aac_fib_init(cmd_fibcontext);
@@ -2734,7 +2663,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
synchronizecmd->count =
cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
/*
* Now send the Fib to the adapter
@@ -2769,11 +2698,11 @@ static void aac_start_stop_callback(void *context, struct fib *fibptr)
BUG_ON(fibptr == NULL);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
static int aac_start_stop(struct scsi_cmnd *scsicmd)
@@ -2786,9 +2715,8 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
if (!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)) {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -2811,7 +2739,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
pmcmd->cid = cpu_to_le32(sdev_id(sdev));
pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
/*
* Now send the Fib to the adapter
@@ -2879,7 +2807,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
!(dev->raw_io_64) ||
((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
- /* fall through */
+ fallthrough;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
@@ -2918,7 +2846,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(scsicmd->cmnd[0] != TEST_UNIT_READY))
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
@@ -2947,14 +2875,13 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case SYNCHRONIZE_CACHE:
if (((aac_cache & 6) == 6) && dev->cache_protected) {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
/* Issue FIB to tell Firmware to flush its cache */
if ((aac_cache & 6) != 2)
return aac_synchronize(scsicmd);
- /* fall through */
+ fallthrough;
case INQUIRY:
{
struct inquiry_data inq_data;
@@ -2977,9 +2904,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
@@ -2990,9 +2915,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
@@ -3001,14 +2924,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
} else {
/* vpd page not implemented */
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 7, 2);
@@ -3034,8 +2953,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
if (dev->in_reset)
@@ -3084,8 +3002,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
@@ -3111,8 +3028,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
@@ -3191,8 +3107,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
case MODE_SENSE_10:
@@ -3269,8 +3184,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(char *)&mpd10,
mode_buf_length);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
case REQUEST_SENSE:
@@ -3279,8 +3193,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
sizeof(struct sense_data));
memset(&dev->fsa_dev[cid].sense_data, 0,
sizeof(struct sense_data));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
case ALLOW_MEDIUM_REMOVAL:
@@ -3290,16 +3203,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
else
fsa_dev_ptr[cid].locked = 0;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
/*
* These commands are all No-Ops
*/
case TEST_UNIT_READY:
if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
NOT_READY, SENCODE_BECOMING_READY,
ASENCODE_BECOMING_READY, 0, 0);
@@ -3310,28 +3221,25 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
SCSI_SENSE_BUFFERSIZE));
break;
}
- /* fall through */
+ fallthrough;
case RESERVE:
case RELEASE:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
case START_STOP:
return aac_start_stop(scsicmd);
- /* FALLTHRU */
default:
/*
* Unhandled commands
*/
dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
scsicmd->cmnd[0]));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
@@ -3343,7 +3251,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_done_ret:
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -3459,15 +3367,12 @@ int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
}
/**
- *
* aac_srb_callback
* @context: the context set in the fib - here it is scsi cmd
* @fibptr: pointer to the fib
*
* Handles the completion of a scsi command to a non-dasd device
- *
*/
-
static void aac_srb_callback(void *context, struct fib * fibptr)
{
struct aac_srb_reply *srbreply;
@@ -3515,9 +3420,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
le32_to_cpu(srbreply->status));
len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
SCSI_SENSE_BUFFERSIZE);
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
memcpy(scsicmd->sense_buffer,
srbreply->sense_data, len);
}
@@ -3529,7 +3432,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case SRB_STATUS_ERROR_RECOVERY:
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
case SRB_STATUS_DATA_OVERRUN:
switch (scsicmd->cmnd[0]) {
@@ -3546,60 +3449,52 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
pr_warn("aacraid: SCSI CMD underflow\n");
else
pr_warn("aacraid: SCSI CMD Data Overrun\n");
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case INQUIRY:
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
default:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
}
break;
case SRB_STATUS_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ scsicmd->result = DID_ABORT << 16;
break;
case SRB_STATUS_ABORT_FAILED:
/*
* Not sure about this one - but assuming the
* hba was trying to abort for some reason
*/
- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case SRB_STATUS_PARITY_ERROR:
- scsicmd->result = DID_PARITY << 16
- | MSG_PARITY_ERROR << 8;
+ scsicmd->result = DID_PARITY << 16;
break;
case SRB_STATUS_NO_DEVICE:
case SRB_STATUS_INVALID_PATH_ID:
case SRB_STATUS_INVALID_TARGET_ID:
case SRB_STATUS_INVALID_LUN:
case SRB_STATUS_SELECTION_TIMEOUT:
- scsicmd->result = DID_NO_CONNECT << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
case SRB_STATUS_COMMAND_TIMEOUT:
case SRB_STATUS_TIMEOUT:
- scsicmd->result = DID_TIME_OUT << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_TIME_OUT << 16;
break;
case SRB_STATUS_BUSY:
- scsicmd->result = DID_BUS_BUSY << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_BUS_BUSY << 16;
break;
case SRB_STATUS_BUS_RESET:
- scsicmd->result = DID_RESET << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_RESET << 16;
break;
case SRB_STATUS_MESSAGE_REJECTED:
- scsicmd->result = DID_ERROR << 16
- | MESSAGE_REJECT << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case SRB_STATUS_REQUEST_FLUSHED:
case SRB_STATUS_ERROR:
@@ -3635,19 +3530,14 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
|| (scsicmd->cmnd[0] == ATA_16)) {
if (scsicmd->cmnd[2] & (0x01 << 5)) {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
+ scsicmd->result = DID_OK << 16;
} else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
+ scsicmd->result = DID_ERROR << 16;
}
} else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
+ scsicmd->result = DID_ERROR << 16;
}
+ break;
}
if (le32_to_cpu(srbreply->scsi_status)
== SAM_STAT_CHECK_CONDITION) {
@@ -3670,7 +3560,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
aac_fib_complete(fibptr);
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
static void hba_resp_task_complete(struct aac_dev *dev,
@@ -3683,7 +3573,7 @@ static void hba_resp_task_complete(struct aac_dev *dev,
switch (err->status) {
case SAM_STAT_GOOD:
- scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_OK << 16;
break;
case SAM_STAT_CHECK_CONDITION:
{
@@ -3694,19 +3584,19 @@ static void hba_resp_task_complete(struct aac_dev *dev,
if (len)
memcpy(scsicmd->sense_buffer,
err->sense_response_buf, len);
- scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_OK << 16;
break;
}
case SAM_STAT_BUSY:
- scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_BUS_BUSY << 16;
break;
case SAM_STAT_TASK_ABORTED:
- scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
+ scsicmd->result |= DID_ABORT << 16;
break;
case SAM_STAT_RESERVATION_CONFLICT:
case SAM_STAT_TASK_SET_FULL:
default:
- scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_ERROR << 16;
break;
}
}
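The SRB and HBA completion paths above drop the ABORT << 8, MESSAGE_REJECT << 8 and COMMAND_COMPLETE << 8 message bytes because the midlayer stopped interpreting that field. A standalone sketch of what changes in the encoded result (constant values as in scsi.h, usage illustrative):

#include <stdio.h>

enum { DID_ABORT = 0x05 };
enum { ABORT = 0x06 };	/* old message-byte value, bits 8-15 */

int main(void)
{
	unsigned int old_res = DID_ABORT << 16 | ABORT << 8;
	unsigned int new_res = DID_ABORT << 16;

	/* the fields that still matter decode identically */
	printf("old 0x%06x -> new 0x%06x, host byte %u either way\n",
	       old_res, new_res, new_res >> 16 & 0xff);
	return 0;
}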
@@ -3726,39 +3616,36 @@ static void hba_resp_task_failure(struct aac_dev *dev,
dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
}
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
}
case HBA_RESP_STAT_IO_ERROR:
case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_BUSY;
break;
case HBA_RESP_STAT_IO_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ scsicmd->result = DID_ABORT << 16;
break;
case HBA_RESP_STAT_INVALID_DEVICE:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
case HBA_RESP_STAT_UNDERRUN:
/* UNDERRUN is OK */
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
case HBA_RESP_STAT_OVERRUN:
default:
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
}
}
/**
- *
* aac_hba_callback
* @context: the context set in the fib - here it is scsi cmd
* @fibptr: pointer to the fib
*
* Handles the completion of a native HBA scsi command
- *
*/
void aac_hba_callback(void *context, struct fib *fibptr)
{
@@ -3781,7 +3668,7 @@ void aac_hba_callback(void *context, struct fib *fibptr)
if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
/* fast response */
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
goto out;
}
@@ -3793,17 +3680,17 @@ void aac_hba_callback(void *context, struct fib *fibptr)
hba_resp_task_failure(dev, scsicmd, err);
break;
case HBA_RESP_SVCRES_TMF_REJECTED:
- scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case HBA_RESP_SVCRES_TMF_LUN_INVALID:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
case HBA_RESP_SVCRES_TMF_COMPLETE:
case HBA_RESP_SVCRES_TMF_SUCCEEDED:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
default:
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
}
@@ -3811,20 +3698,18 @@ out:
aac_fib_complete(fibptr);
if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
- scsicmd->SCp.sent_command = 1;
+ aac_priv(scsicmd)->sent_command = 1;
else
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
}
/**
- *
* aac_send_srb_fib
* @scsicmd: the scsi command block
*
* This routine will form a FIB and fill in the aac_srb from the
* scsicmd passed in.
*/
-
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
{
struct fib* cmd_fibcontext;
@@ -3835,7 +3720,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -3843,7 +3728,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
/*
@@ -3860,7 +3745,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
}
/**
- *
* aac_send_hba_fib
* @scsicmd: the scsi command block
*
@@ -3877,7 +3761,7 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > AAC_MAX_LUN - 1) {
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
+ aac_scsi_done(scsicmd);
return 0;
}
@@ -3888,7 +3772,7 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
if (!cmd_fibcontext)
return -1;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_hba(cmd_fibcontext, scsicmd);
/*
@@ -4127,7 +4011,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
if (aac_convert_sgl == 0)
return 0;
- sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ sge = kmalloc_array(nseg_new, sizeof(*sge), GFP_ATOMIC);
if (sge == NULL)
return -ENOMEM;
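Aside: kmalloc_array() guards the element-count multiplication against
overflow, and sizeof(*sge) keeps the element size tied to the pointer's
type if it ever changes. A minimal sketch of the idiom (helper name is
hypothetical, element type is the driver's own):

    #include <linux/slab.h>

    static struct sge_ieee1212 *alloc_sges(int nseg)
    {
        /* returns NULL if nseg * sizeof(*sge) would overflow */
        struct sge_ieee1212 *sge =
                kmalloc_array(nseg, sizeof(*sge), GFP_ATOMIC);

        return sge;     /* caller checks for NULL and kfree()s it */
    }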
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e3e4ecbea726..5e115e8b2ba4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -29,6 +29,7 @@
#include <linux/completion.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -120,7 +121,7 @@ enum {
#define SA_AIF_PDEV_CHANGE (1<<4)
#define SA_AIF_LDEV_CHANGE (1<<5)
#define SA_AIF_BPSTAT_CHANGE (1<<30)
-#define SA_AIF_BPCFG_CHANGE (1<<31)
+#define SA_AIF_BPCFG_CHANGE (1U<<31)
#define HBA_MAX_SG_EMBEDDED 28
#define HBA_MAX_SG_SEPARATE 90
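Aside: the 1U matters because shifting a signed 1 into bit 31 of a
32-bit int is undefined behaviour; the unsigned literal keeps the shift
well-defined. Sketch:

    /* 1 << 31 would shift into the sign bit of an int (UB);
     * an unsigned literal yields a plain 0x80000000. */
    #define EXAMPLE_TOP_BIT (1U << 31)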
@@ -1929,7 +1930,7 @@ struct aac_raw_io2 {
u8 bpComplete; /* reserved for F/W use */
u8 sgeFirstIndex; /* reserved for F/W use */
u8 unused[4];
- struct sge_ieee1212 sge[1];
+ struct sge_ieee1212 sge[];
};
#define CT_FLUSH_CACHE 129
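Aside: with a C99 flexible array member, sizeof(struct aac_raw_io2) no
longer counts a phantom first element, and allocations can use the
overflow-checked struct_size() helper. A sketch assuming nseg trailing
elements are needed (helper name hypothetical):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static struct aac_raw_io2 *alloc_rio2(int nseg)
    {
        struct aac_raw_io2 *rio2;

        /* sizeof(*rio2) + nseg * sizeof(rio2->sge[0]), overflow-checked */
        rio2 = kzalloc(struct_size(rio2, sge, nseg), GFP_ATOMIC);
        return rio2;
    }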
@@ -2673,11 +2674,24 @@ static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
-/* SCp.phase values */
-#define AAC_OWNER_MIDLEVEL 0x101
-#define AAC_OWNER_LOWLEVEL 0x102
-#define AAC_OWNER_ERROR_HANDLER 0x103
-#define AAC_OWNER_FIRMWARE 0x106
+enum aac_cmd_owner {
+ AAC_OWNER_MIDLEVEL = 0x101,
+ AAC_OWNER_LOWLEVEL = 0x102,
+ AAC_OWNER_ERROR_HANDLER = 0x103,
+ AAC_OWNER_FIRMWARE = 0x106,
+};
+
+struct aac_cmd_priv {
+ int (*callback)(struct scsi_cmnd *);
+ int status;
+ enum aac_cmd_owner owner;
+ bool sent_command;
+};
+
+static inline struct aac_cmd_priv *aac_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
void aac_safw_rescan_worker(struct work_struct *work);
void aac_src_reinit_aif_worker(struct work_struct *work);
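Aside: the state that used to live in scsi_cmnd::SCp now sits in
driver-private per-command data. The midlayer reserves the space when
the host template declares cmd_size, and scsi_cmd_priv() returns it. A
minimal sketch of the mechanism (the demo_* names are hypothetical):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct demo_cmd_priv {
        int owner;
    };

    static int demo_queuecommand(struct Scsi_Host *shost,
                                 struct scsi_cmnd *cmd)
    {
        /* points at the cmd_size bytes allocated after the scsi_cmnd */
        struct demo_cmd_priv *priv = scsi_cmd_priv(cmd);

        priv->owner = 0x101;
        return 0;
    }

    static struct scsi_host_template demo_template = {
        .queuecommand = demo_queuecommand,
        .cmd_size     = sizeof(struct demo_cmd_priv),
    };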
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index ffe41bc111fc..e7cc927ed952 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -25,6 +25,7 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/uaccess.h>
@@ -32,6 +33,8 @@
#include "aacraid.h"
+# define AAC_DEBUG_PREAMBLE KERN_INFO
+# define AAC_DEBUG_POSTAMBLE
/**
* ioctl_send_fib - send a FIB from userspace
* @dev: adapter is being processed
@@ -40,9 +43,6 @@
* This routine sends a fib to the adapter on behalf of a user level
* program.
*/
-# define AAC_DEBUG_PREAMBLE KERN_INFO
-# define AAC_DEBUG_POSTAMBLE
-
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
struct hw_fib * kfib;
@@ -158,11 +158,12 @@ cleanup:
/**
* open_getadapter_fib - Get the next fib
+ * @dev: adapter is being processed
+ * @arg: arguments to the open call
*
* This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
-
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct aac_fib_context * fibctx;
@@ -226,6 +227,12 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
return status;
}
+struct compat_fib_ioctl {
+ u32 fibctx;
+ s32 wait;
+ compat_uptr_t fib;
+};
+
/**
* next_getadapter_fib - get the next fib
* @dev: adapter to use
@@ -234,7 +241,6 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
* This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
-
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct fib_ioctl f;
@@ -244,8 +250,19 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
struct list_head * entry;
unsigned long flags;
- if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
- return -EFAULT;
+ if (in_compat_syscall()) {
+ struct compat_fib_ioctl cf;
+
+ if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
+ return -EFAULT;
+
+ f.fibctx = cf.fibctx;
+ f.wait = cf.wait;
+ f.fib = compat_ptr(cf.fib);
+ } else {
+ if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
+ return -EFAULT;
+ }
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
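Aside: the in_compat_syscall() branch replaces the old
compat_alloc_user_space() shim. The 32-bit layout differs only in the
pointer field, so it can be unpacked directly in the kernel. Sketch of
the general pattern (the demo_* structures are hypothetical):

    #include <linux/compat.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct demo_ioctl {             /* native layout */
        u32 handle;
        void __user *buf;           /* 8 bytes on a 64-bit kernel */
    };

    struct compat_demo_ioctl {      /* 32-bit user-space layout */
        u32 handle;
        compat_uptr_t buf;          /* always 4 bytes */
    };

    static int demo_copy_arg(void __user *arg, struct demo_ioctl *f)
    {
        if (in_compat_syscall()) {
            struct compat_demo_ioctl cf;

            if (copy_from_user(&cf, arg, sizeof(cf)))
                return -EFAULT;
            f->handle = cf.handle;
            f->buf = compat_ptr(cf.buf);    /* widen the pointer */
            return 0;
        }
        return copy_from_user(f, arg, sizeof(*f)) ? -EFAULT : 0;
    }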
@@ -455,11 +472,10 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
/**
- *
- * aac_send_raw_scb
- *
+ * aac_send_raw_srb()
+ * @dev: adapter is being processed
+ * @arg: arguments to the send call
*/
-
static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
struct fib* srbfib;
@@ -513,15 +529,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
- if (!user_srbcmd) {
- dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
- rcode = -ENOMEM;
- goto cleanup;
- }
- if(copy_from_user(user_srbcmd, user_srb,fibsize)){
- dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
- rcode = -EFAULT;
+ user_srbcmd = memdup_user(user_srb, fibsize);
+ if (IS_ERR(user_srbcmd)) {
+ rcode = PTR_ERR(user_srbcmd);
+ user_srbcmd = NULL;
goto cleanup;
}
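Aside: memdup_user() collapses the kmalloc()/copy_from_user() pair and
returns an ERR_PTR() on failure, never NULL - which is why the error
path above resets the pointer before jumping to the shared cleanup.
Sketch (helper name hypothetical):

    #include <linux/err.h>
    #include <linux/string.h>

    static int copy_srb_from_user(const void __user *usrb, size_t len,
                                  void **out)
    {
        void *srb = memdup_user(usrb, len);

        if (IS_ERR(srb))
            return PTR_ERR(srb);    /* -EFAULT or -ENOMEM */
        *out = srb;                 /* kfree() when done */
        return 0;
    }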
@@ -677,8 +688,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, sg_count[i],
- data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
+ data_dir);
hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
hbacmd->sge[i].addr_lo = cpu_to_le32(
(u32)(addr & 0xffffffff));
@@ -739,8 +750,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -795,8 +806,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -851,7 +862,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ usg->sg[i].count,
+ data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
@@ -890,8 +903,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
byte_count += sg_count[i];
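Aside: the legacy pci_map_single() wrappers are gone upstream; the
generic DMA API takes the struct device embedded in the pci_dev. A
sketch of the equivalent call including the mapping-error check
(helper name hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int demo_map(struct pci_dev *pdev, void *p, size_t len,
                        enum dma_data_direction dir, dma_addr_t *out)
    {
        dma_addr_t addr = dma_map_single(&pdev->dev, p, len, dir);

        if (dma_mapping_error(&pdev->dev, addr))
            return -ENOMEM;
        /* later: dma_unmap_single(&pdev->dev, addr, len, dir) */
        *out = addr;
        return 0;
    }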
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index f75878d773cf..bd99c5492b7d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -272,36 +272,35 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data)
+{
+ int *active = data;
+
+ if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE)
+ *active = *active + 1;
+ return true;
+}
static void aac_wait_for_io_completion(struct aac_dev *aac)
{
- unsigned long flagv = 0;
- int i = 0;
+ int i = 0, active;
for (i = 60; i; --i) {
- struct scsi_device *dev;
- struct scsi_cmnd *command;
- int active = 0;
-
- __shost_for_each_device(dev, aac->scsi_host_ptr) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
- }
+ active = 0;
+ scsi_host_busy_iter(aac->scsi_host_ptr,
+ wait_for_io_iter, &active);
/*
* We can exit if all the commands are complete
*/
if (active == 0)
break;
+ dev_info(&aac->pdev->dev,
+ "Wait for %d commands to complete\n", active);
ssleep(1);
}
+ if (active)
+ dev_err(&aac->pdev->dev,
+ "%d outstanding commands during shutdown\n", active);
}
/**
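Aside: scsi_host_busy_iter() visits only commands the block layer has
actually started, which is what lets the cmd_list walk and its locking
go away. A counting sketch of the callback contract, matching the
two-argument iterator used above (the demo_* names are hypothetical):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static bool demo_count_iter(struct scsi_cmnd *cmd, void *data)
    {
        (*(int *)data)++;
        return true;            /* returning false stops the walk */
    }

    static int demo_count_busy(struct Scsi_Host *shost)
    {
        int busy = 0;

        scsi_host_busy_iter(shost, demo_count_iter, &busy);
        return busy;
    }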
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5a8a999606ea..deb32c9f4b3e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -214,6 +214,7 @@ int aac_fib_setup(struct aac_dev * dev)
/**
* aac_fib_alloc_tag-allocate a fib using tags
* @dev: Adapter to allocate the fib for
+ * @scmd: SCSI command
*
* Allocate a fib from the adapter fib pool using tags
* from the blk layer.
@@ -223,7 +224,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
struct fib *fibptr;
- fibptr = &dev->fibs[scmd->request->tag];
+ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
/*
* Null out fields that depend on being zero at the start of
* each I/O
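Aside: scsi_cmd_to_rq() is the accessor that replaced the removed
scsi_cmnd::request pointer; blk-mq assigns one tag per in-flight
command, which is what makes it safe to index the preallocated fib
array with it. Sketch (helper name hypothetical):

    #include <scsi/scsi_cmnd.h>

    static int demo_cmd_tag(struct scsi_cmnd *scmd)
    {
        return scsi_cmd_to_rq(scmd)->tag;
    }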
@@ -322,7 +323,7 @@ void aac_fib_init(struct fib *fibptr)
}
/**
- * fib_deallocate - deallocate a fib
+ * fib_dealloc - deallocate a fib
* @fibptr: fib to deallocate
*
* Will deallocate and return to the free pool the FIB pointed to by the
@@ -405,8 +406,8 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
* aac_queue_get - get the next free QE
* @dev: Adapter
* @index: Returned index
- * @priority: Priority of fib
- * @fib: Fib to associate with the queue entry
+ * @qid: Queue number
+ * @hw_fib: Fib to associate with the queue entry
* @wait: Wait if queue full
* @fibptr: Driver fib object to go with fib
* @nonotify: Don't notify the adapter
@@ -729,7 +730,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
+ } else
return -EINVAL;
@@ -934,7 +935,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
/**
* aac_fib_complete - fib completion handler
- * @fib: FIB to complete
+ * @fibptr: FIB to complete
*
* Will do all necessary work to complete a FIB.
*/
@@ -1049,6 +1050,7 @@ static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
}
}
+#define AIF_SNIFF_TIMEOUT (500*HZ)
/**
* aac_handle_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
@@ -1057,8 +1059,6 @@ static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
* This routine handles a driver notify fib from the adapter and
* dispatches it to the appropriate routine for handling.
*/
-
-#define AIF_SNIFF_TIMEOUT (500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
@@ -1431,7 +1431,7 @@ retry_next:
"enclosure services event");
scsi_device_set_state(device, SDEV_RUNNING);
}
- /* FALLTHRU */
+ fallthrough;
case CHANGE:
if ((channel == CONTAINER_CHANNEL)
&& (!dev->fsa_dev[container].valid)) {
@@ -1448,6 +1448,7 @@ retry_next:
break;
}
scsi_rescan_device(&device->sdev_gendev);
+ break;
default:
break;
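Aside: the fallthrough pseudo-keyword (linux/compiler_attributes.h)
replaces comment annotations so -Wimplicit-fallthrough can be enforced,
and the added break keeps the rescan case from spilling into default.
Sketch with hypothetical case values:

    static void demo_handle(int event)
    {
        switch (event) {
        case 1:
            /* ...enclosure handling... */
            fallthrough;    /* deliberately also runs the CHANGE path */
        case 2:
            /* ...rescan... */
            break;          /* without this, control reaches default */
        default:
            break;
        }
    }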
@@ -1476,10 +1477,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
int retval;
- struct Scsi_Host *host;
- struct scsi_device *dev;
- struct scsi_cmnd *command;
- struct scsi_cmnd *command_list;
+ struct Scsi_Host *host = aac->scsi_host_ptr;
int jafo = 0;
int bled;
u64 dmamask;
@@ -1495,8 +1493,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* - The card is dead, or will be very shortly ;-/ so no new
* commands are completing in the interrupt service.
*/
- host = aac->scsi_host_ptr;
- scsi_block_requests(host);
aac_adapter_disable_int(aac);
if (aac->thread && aac->thread->pid != current->pid) {
spin_unlock_irq(host->host_lock);
@@ -1556,6 +1552,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac_fib_map_free(aac);
dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
+ aac_adapter_ioremap(aac, 0);
aac->comm_addr = NULL;
aac->comm_phys = 0;
kfree(aac->queues);
@@ -1566,15 +1563,15 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
dmamask = DMA_BIT_MASK(32);
quirks = aac_get_driver_ident(index)->quirks;
if (quirks & AAC_QUIRK_31BIT)
- retval = pci_set_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_mask(&aac->pdev->dev, dmamask);
else if (!(quirks & AAC_QUIRK_SRC))
- retval = pci_set_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_mask(&aac->pdev->dev, dmamask);
else
- retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
if (quirks & AAC_QUIRK_31BIT && !retval) {
dmamask = DMA_BIT_MASK(31);
- retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
}
if (retval)
@@ -1607,39 +1604,11 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* This is where the assumption that the Adapter is quiesced
* is important.
*/
- command_list = NULL;
- __shost_for_each_device(dev, host) {
- unsigned long flags;
- spin_lock_irqsave(&dev->list_lock, flags);
- list_for_each_entry(command, &dev->cmd_list, list)
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- command->SCp.buffer = (struct scatterlist *)command_list;
- command_list = command;
- }
- spin_unlock_irqrestore(&dev->list_lock, flags);
- }
- while ((command = command_list)) {
- command_list = (struct scsi_cmnd *)command->SCp.buffer;
- command->SCp.buffer = NULL;
- command->result = DID_OK << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_TASK_SET_FULL;
- command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
- command->scsi_done(command);
- }
- /*
- * Any Device that was already marked offline needs to be marked
- * running
- */
- __shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev))
- scsi_device_set_state(dev, SDEV_RUNNING);
- }
- retval = 0;
+ scsi_host_complete_all_commands(host, DID_RESET);
+ retval = 0;
out:
aac->in_reset = 0;
- scsi_unblock_requests(host);
/*
* Issue bus rescan to catch any configuration that might have
@@ -1659,8 +1628,8 @@ out:
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
unsigned long flagv = 0;
- int retval;
- struct Scsi_Host * host;
+ int retval, unblock_retval;
+ struct Scsi_Host *host = aac->scsi_host_ptr;
int bled;
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1678,8 +1647,7 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* target (block maximum 60 seconds). Although not necessary,
* it does make us a good storage citizen.
*/
- host = aac->scsi_host_ptr;
- scsi_block_requests(host);
+ scsi_host_block(host);
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1690,6 +1658,9 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
retval = _aac_reset_adapter(aac, bled, reset_type);
spin_unlock_irqrestore(host->host_lock, flagv);
+ unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
+ if (!retval)
+ retval = unblock_retval;
if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
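Aside: scsi_host_block() moves every scsi_device to SDEV_BLOCK so the
midlayer itself stops dispatching during the reset, and
scsi_host_unblock() restores the requested state afterwards. A sketch
of the pairing with the same error propagation as above (the demo_*
names are hypothetical):

    static int demo_reset(struct Scsi_Host *shost, int (*do_reset)(void))
    {
        int ret, unblock_ret;

        scsi_host_block(shost);         /* every sdev -> SDEV_BLOCK */
        ret = do_reset();               /* adapter-specific work */
        unblock_ret = scsi_host_unblock(shost, SDEV_RUNNING);

        return ret ? ret : unblock_ret;
    }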
@@ -1979,7 +1950,7 @@ void aac_src_reinit_aif_worker(struct work_struct *work)
}
/**
- * aac_handle_sa_aif Handle a message from the firmware
+ * aac_handle_sa_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
* @fibptr: Pointer to fibptr from adapter
*
@@ -2382,7 +2353,7 @@ fib_free_out:
goto out;
}
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
struct tm cur_tm;
char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
@@ -2411,7 +2382,7 @@ out:
return ret;
}
-int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
int ret = -ENOMEM;
struct fib *fibptr;
@@ -2447,7 +2418,7 @@ out:
/**
* aac_command_thread - command processing thread
- * @dev: Adapter to monitor
+ * @data: Adapter to monitor
*
* Waits on the commandready event in its queue. When the event gets set
* it will pull FIBs off its queue. It will continue to pull FIBs off
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index a557aa629827..fbe334c59f37 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -99,10 +99,11 @@ unsigned int aac_response_normal(struct aac_queue * q)
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
- if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
- else
+ } else {
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+ }
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
@@ -229,7 +230,6 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
struct fib *fibctx;
struct aac_dev *dev;
struct aac_aifcmd *cmd;
- int status;
fibctx = (struct fib *)context;
BUG_ON(fibptr == NULL);
@@ -249,7 +249,7 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
cmd = (struct aac_aifcmd *) fib_data(fibctx);
cmd->command = cpu_to_le32(AifReqEvent);
- status = aac_fib_send(AifRequest,
+ aac_fib_send(AifRequest,
fibctx,
sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
FsaNormal,
@@ -258,7 +258,7 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
}
-/**
+/*
* aac_intr_normal - Handle command replies
* @dev: Device
* @index: completion reference
@@ -403,12 +403,13 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
if (hwfib->header.XferState &
cpu_to_le32(NoResponseExpected | Async)) {
if (hwfib->header.XferState & cpu_to_le32(
- NoResponseExpected))
+ NoResponseExpected)) {
FIB_COUNTER_INCREMENT(
aac_config.NoResponseRecved);
- else
+ } else {
FIB_COUNTER_INCREMENT(
aac_config.AsyncRecved);
+ }
start_callback = 1;
} else {
unsigned long flagv;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index ee6bc2f9b80a..5ba5c18b77b4 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -33,6 +33,7 @@
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/msdos_partition.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -229,8 +230,8 @@ static struct aac_driver_ident aac_drivers[] = {
/**
* aac_queuecommand - queue a SCSI command
+ * @shost: Scsi host to queue command on
* @cmd: SCSI command to queue
- * @done: Function to call on command completion
*
* Queues a command for execution by the associated Host Adapter.
*
@@ -240,10 +241,9 @@ static struct aac_driver_ident aac_drivers[] = {
static int aac_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
- int r = 0;
- cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
- r = (aac_scsi_cmd(cmd) ? FAILED : 0);
- return r;
+ aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
+
+ return aac_scsi_cmd(cmd) ? FAILED : 0;
}
/**
@@ -328,9 +328,9 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
buf = scsi_bios_ptable(bdev);
if (!buf)
return 0;
- if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
- struct partition *first = (struct partition * )buf;
- struct partition *entry = first;
+ if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) {
+ struct msdos_partition *first = (struct msdos_partition *)buf;
+ struct msdos_partition *entry = first;
int saved_cylinders = param->cylinders;
int num;
unsigned char end_head, end_sec;
@@ -362,9 +362,10 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
if (num < 4 && end_sec == param->sectors) {
- if (param->cylinders != saved_cylinders)
+ if (param->cylinders != saved_cylinders) {
dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
param->heads, param->sectors, num));
+ }
} else if (end_head > 0 || end_sec > 0) {
dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
end_head + 1, end_sec, num));
@@ -603,12 +604,14 @@ static struct device_attribute aac_unique_id_attr = {
-static struct device_attribute *aac_dev_attrs[] = {
- &aac_raid_level_attr,
- &aac_unique_id_attr,
+static struct attribute *aac_dev_attrs[] = {
+ &aac_raid_level_attr.attr,
+ &aac_unique_id_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(aac_dev);
+
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
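Aside: ATTRIBUTE_GROUPS(aac_dev) generates the group wrappers that the
new sdev_groups template field expects; it expands to roughly:

    static const struct attribute_group aac_dev_group = {
        .attrs = aac_dev_attrs,
    };

    static const struct attribute_group *aac_dev_groups[] = {
        &aac_dev_group,
        NULL,
    };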
@@ -622,58 +625,61 @@ static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
return aac_do_ioctl(dev, cmd, arg);
}
-static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+struct fib_count_data {
+ int mlcnt;
+ int llcnt;
+ int ehcnt;
+ int fwcnt;
+ int krlcnt;
+};
+
+static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data)
{
+ struct fib_count_data *fib_count = data;
- unsigned long flags;
- struct scsi_device *sdev = NULL;
+ switch (aac_priv(scmnd)->owner) {
+ case AAC_OWNER_FIRMWARE:
+ fib_count->fwcnt++;
+ break;
+ case AAC_OWNER_ERROR_HANDLER:
+ fib_count->ehcnt++;
+ break;
+ case AAC_OWNER_LOWLEVEL:
+ fib_count->llcnt++;
+ break;
+ case AAC_OWNER_MIDLEVEL:
+ fib_count->mlcnt++;
+ break;
+ default:
+ fib_count->krlcnt++;
+ break;
+ }
+ return true;
+}
+
+/* Called during SCSI EH, so we don't need to block requests */
+static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+{
struct Scsi_Host *shost = aac->scsi_host_ptr;
- struct scsi_cmnd *scmnd = NULL;
struct device *ctrl_dev;
+ struct fib_count_data fcnt = { };
- int mlcnt = 0;
- int llcnt = 0;
- int ehcnt = 0;
- int fwcnt = 0;
- int krlcnt = 0;
-
- __shost_for_each_device(sdev, shost) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(scmnd, &sdev->cmd_list, list) {
- switch (scmnd->SCp.phase) {
- case AAC_OWNER_FIRMWARE:
- fwcnt++;
- break;
- case AAC_OWNER_ERROR_HANDLER:
- ehcnt++;
- break;
- case AAC_OWNER_LOWLEVEL:
- llcnt++;
- break;
- case AAC_OWNER_MIDLEVEL:
- mlcnt++;
- break;
- default:
- krlcnt++;
- break;
- }
- }
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
+ scsi_host_busy_iter(shost, fib_count_iter, &fcnt);
ctrl_dev = &aac->pdev->dev;
- dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
- dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
- dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
- dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
- dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt);
+ dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt);
+ dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt);
+ dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt);
- return mlcnt + llcnt + ehcnt + fwcnt;
+ return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt;
}
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
+ struct aac_cmd_priv *cmd_priv = aac_priv(cmd);
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
@@ -726,15 +732,19 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*tmf);
- cmd->SCp.sent_command = 0;
+ cmd_priv->sent_command = 0;
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
(fib_callback) aac_hba_callback,
(void *) cmd);
-
+ if (status != -EINPROGRESS) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
- if (cmd->SCp.sent_command) {
+ if (cmd_priv->sent_command) {
ret = SUCCESS;
break;
}
@@ -757,7 +767,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
!(aac->raw_io_64) ||
((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
- /* fall through */
+ fallthrough;
case INQUIRY:
case READ_CAPACITY:
/*
@@ -774,7 +784,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->callback_data == cmd)) {
fib->flags |=
FIB_CONTEXT_FLAG_TIMED_OUT;
- cmd->SCp.phase =
+ cmd_priv->owner =
AAC_OWNER_ERROR_HANDLER;
ret = SUCCESS;
}
@@ -801,7 +811,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(command->device == cmd->device)) {
fib->flags |=
FIB_CONTEXT_FLAG_TIMED_OUT;
- command->SCp.phase =
+ aac_priv(command)->owner =
AAC_OWNER_ERROR_HANDLER;
if (command == cmd)
ret = SUCCESS;
@@ -854,10 +864,10 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*rst);
- return HBA_IU_TYPE_SATA_REQ;
+ return HBA_IU_TYPE_SATA_REQ;
}
-void aac_tmf_callback(void *context, struct fib *fibptr)
+static void aac_tmf_callback(void *context, struct fib *fibptr)
{
struct aac_hba_resp *err =
&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
@@ -910,11 +920,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host device reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -929,7 +939,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
status = aac_hba_send(command, fib,
(fib_callback) aac_tmf_callback,
(void *) info);
-
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state == 0) {
@@ -968,11 +983,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host target reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -989,6 +1004,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
(fib_callback) aac_tmf_callback,
(void *) info);
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
+
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state <= 0) {
@@ -1036,12 +1058,12 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
- cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER;
}
}
}
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
/*
* Check the health of the controller
@@ -1059,7 +1081,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
* @scsi_cmd: SCSI command block causing the reset
*
*/
-int aac_eh_host_reset(struct scsi_cmnd *cmd)
+static int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
@@ -1140,7 +1162,6 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
/**
* aac_cfg_ioctl - AAC configuration request
- * @inode: inode of device
* @file: file handle
* @cmd: ioctl command code
* @arg: argument
@@ -1163,63 +1184,6 @@ static long aac_cfg_ioctl(struct file *file,
return aac_do_ioctl(aac, cmd, (void __user *)arg);
}
-#ifdef CONFIG_COMPAT
-static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
-{
- long ret;
- switch (cmd) {
- case FSACTL_MINIPORT_REV_CHECK:
- case FSACTL_SENDFIB:
- case FSACTL_OPEN_GET_ADAPTER_FIB:
- case FSACTL_CLOSE_GET_ADAPTER_FIB:
- case FSACTL_SEND_RAW_SRB:
- case FSACTL_GET_PCI_INFO:
- case FSACTL_QUERY_DISK:
- case FSACTL_DELETE_DISK:
- case FSACTL_FORCE_DELETE_DISK:
- case FSACTL_GET_CONTAINERS:
- case FSACTL_SEND_LARGE_FIB:
- ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
- break;
-
- case FSACTL_GET_NEXT_ADAPTER_FIB: {
- struct fib_ioctl __user *f;
-
- f = compat_alloc_user_space(sizeof(*f));
- ret = 0;
- if (clear_user(f, sizeof(*f)))
- ret = -EFAULT;
- if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
- ret = -EFAULT;
- if (!ret)
- ret = aac_do_ioctl(dev, cmd, f);
- break;
- }
-
- default:
- ret = -ENOIOCTLCMD;
- break;
- }
- return ret;
-}
-
-static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg)
-{
- struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
-}
-
-static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- return aac_compat_do_ioctl(file->private_data, cmd, arg);
-}
-#endif
-
static ssize_t aac_show_model(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -1269,20 +1233,21 @@ static ssize_t aac_show_flags(struct device *cdev,
if (nblank(dprintk(x)))
len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
- len += snprintf(buf + len, PAGE_SIZE - len,
- "AAC_DETAILED_STATUS_INFO\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "AAC_DETAILED_STATUS_INFO\n");
#endif
if (dev->raw_io_interface && dev->raw_io_64)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "SAI_READ_CAPACITY_16\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SAI_READ_CAPACITY_16\n");
if (dev->jbod)
- len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_JBOD\n");
if (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "SUPPORTED_POWER_MANAGEMENT\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_POWER_MANAGEMENT\n");
if (dev->msi)
- len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
return len;
}
@@ -1479,21 +1444,23 @@ static struct device_attribute aac_reset = {
.show = aac_show_reset_adapter,
};
-static struct device_attribute *aac_attrs[] = {
- &aac_model,
- &aac_vendor,
- &aac_flags,
- &aac_kernel_version,
- &aac_monitor_version,
- &aac_bios_version,
- &aac_lld_version,
- &aac_serial_number,
- &aac_max_channel,
- &aac_max_id,
- &aac_reset,
+static struct attribute *aac_host_attrs[] = {
+ &aac_model.attr,
+ &aac_vendor.attr,
+ &aac_flags.attr,
+ &aac_kernel_version.attr,
+ &aac_monitor_version.attr,
+ &aac_bios_version.attr,
+ &aac_lld_version.attr,
+ &aac_serial_number.attr,
+ &aac_max_channel.attr,
+ &aac_max_id.attr,
+ &aac_reset.attr,
NULL
};
+ATTRIBUTE_GROUPS(aac_host);
+
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
return aac_show_serial_number(device, &aac_serial_number, buf);
@@ -1503,7 +1470,7 @@ static const struct file_operations aac_cfg_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = aac_compat_cfg_ioctl,
+ .compat_ioctl = aac_cfg_ioctl,
#endif
.open = aac_cfg_open,
.llseek = noop_llseek,
@@ -1516,14 +1483,14 @@ static struct scsi_host_template aac_driver_template = {
.info = aac_info,
.ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = aac_compat_ioctl,
+ .compat_ioctl = aac_ioctl,
#endif
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
- .shost_attrs = aac_attrs,
+ .shost_groups = aac_host_groups,
.slave_configure = aac_slave_configure,
.change_queue_depth = aac_change_queue_depth,
- .sdev_attrs = aac_dev_attrs,
+ .sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
.eh_device_reset_handler = aac_eh_dev_reset,
.eh_target_reset_handler = aac_eh_target_reset,
@@ -1540,6 +1507,7 @@ static struct scsi_host_template aac_driver_template = {
#endif
.emulated = 1,
.no_write_same = 1,
+ .cmd_size = sizeof(struct aac_cmd_priv),
};
static void __aac_shutdown(struct aac_dev * aac)
@@ -1612,7 +1580,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost;
struct aac_dev *aac;
struct list_head *insert = &aac_devices;
- int error = -ENODEV;
+ int error;
int unique_id = 0;
u64 dmamask;
int mask_bits = 0;
@@ -1637,10 +1605,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = pci_enable_device(pdev);
if (error)
goto out;
- error = -ENODEV;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (error) {
dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
goto out_disable_pdev;
@@ -1659,7 +1626,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mask_bits = 32;
}
- error = pci_set_consistent_dma_mask(pdev, dmamask);
+ error = dma_set_coherent_mask(&pdev->dev, dmamask);
if (error) {
dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
, mask_bits);
@@ -1669,13 +1636,14 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
- if (!shost)
+ if (!shost) {
+ error = -ENOMEM;
goto out_disable_pdev;
+ }
shost->irq = pdev->irq;
shost->unique_id = unique_id;
shost->max_cmd_len = 16;
- shost->use_cmd_list = 1;
if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
aac_init_char();
@@ -1695,8 +1663,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
sizeof(struct fib),
GFP_KERNEL);
- if (!aac->fibs)
+ if (!aac->fibs) {
+ error = -ENOMEM;
goto out_free_host;
+ }
+
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
@@ -1887,42 +1858,25 @@ error_iounmap:
}
-#if (defined(CONFIG_PM))
-static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused aac_suspend(struct device *dev)
{
-
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- scsi_block_requests(shost);
+ scsi_host_block(shost);
aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
- pci_set_drvdata(pdev, shost);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int aac_resume(struct pci_dev *pdev)
+static int __maybe_unused aac_resume(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- int r;
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- r = pci_enable_device(pdev);
-
- if (r)
- goto fail_device;
-
- pci_set_master(pdev);
if (aac_acquire_resources(aac))
goto fail_device;
/*
@@ -1930,22 +1884,21 @@ static int aac_resume(struct pci_dev *pdev)
* aac_send_shutdown() to block ioctls from upperlayer
*/
aac->adapter_shutdown = 0;
- scsi_unblock_requests(shost);
+ scsi_host_unblock(shost, SDEV_RUNNING);
return 0;
fail_device:
printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
scsi_host_put(shost);
- pci_disable_device(pdev);
return -ENODEV;
}
-#endif
static void aac_shutdown(struct pci_dev *dev)
{
struct Scsi_Host *shost = pci_get_drvdata(dev);
- scsi_block_requests(shost);
+
+ scsi_host_block(shost);
__aac_shutdown((struct aac_dev *)shost->hostdata);
}
@@ -1977,28 +1930,8 @@ static void aac_remove_one(struct pci_dev *pdev)
}
}
-static void aac_flush_ios(struct aac_dev *aac)
-{
- int i;
- struct scsi_cmnd *cmd;
-
- for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
- cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
- if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
- scsi_dma_unmap(cmd);
-
- if (aac->handle_pci_error)
- cmd->result = DID_NO_CONNECT << 16;
- else
- cmd->result = DID_RESET << 16;
-
- cmd->scsi_done(cmd);
- }
- }
-}
-
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
- enum pci_channel_state error)
+ pci_channel_state_t error)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = shost_priv(shost);
@@ -2011,9 +1944,9 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_frozen:
aac->handle_pci_error = 1;
- scsi_block_requests(aac->scsi_host_ptr);
+ scsi_host_block(shost);
aac_cancel_rescan_worker(aac);
- aac_flush_ios(aac);
+ scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
aac_release_resources(aac);
pci_disable_pcie_error_reporting(pdev);
@@ -2023,7 +1956,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
aac->handle_pci_error = 1;
- aac_flush_ios(aac);
+ scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2064,7 +1997,6 @@ fail_device:
static void aac_pci_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct scsi_device *sdev = NULL;
struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
if (aac_adapter_ioremap(aac, aac->base_size)) {
@@ -2091,10 +2023,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
aac->adapter_shutdown = 0;
aac->handle_pci_error = 0;
- shost_for_each_device(sdev, shost)
- if (sdev->sdev_state == SDEV_OFFLINE)
- sdev->sdev_state = SDEV_RUNNING;
- scsi_unblock_requests(aac->scsi_host_ptr);
+ scsi_host_unblock(shost, SDEV_RUNNING);
aac_scan_host(aac);
pci_save_state(pdev);
@@ -2108,15 +2037,14 @@ static struct pci_error_handlers aac_pci_err_handler = {
.resume = aac_pci_resume,
};
+static SIMPLE_DEV_PM_OPS(aac_pm_ops, aac_suspend, aac_resume);
+
static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
.remove = aac_remove_one,
-#if (defined(CONFIG_PM))
- .suspend = aac_suspend,
- .resume = aac_resume,
-#endif
+ .driver.pm = &aac_pm_ops,
.shutdown = aac_shutdown,
.err_handler = &aac_pci_err_handler,
};
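Aside: SIMPLE_DEV_PM_OPS() wires the same suspend/resume pair into all
of the system-sleep hooks, and the PCI core now performs the
save-state/power-state handling that the removed open-coded paths did.
The macro expands to roughly:

    static const struct dev_pm_ops aac_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(aac_suspend, aac_resume)
    };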
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index b5d6b24d6dbd..4745a99fba8c 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -24,6 +24,7 @@
/**
* aac_nark_ioremap
+ * @dev: device to ioremap
* @size: mapping resize request
*
*/
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 5f2cede4d477..8ebc67e541af 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -57,6 +57,7 @@ static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
/**
* aac_rkt_ioremap
+ * @dev: device to ioremap
* @size: mapping resize request
*
*/
@@ -77,8 +78,8 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
* aac_rkt_init - initialize an i960 based AAC card
* @dev: device to configure
*
- * Allocate and set up resources for the i960 based AAC variants. The
- * device_interface in the commregion will be allocated and linked
+ * Allocate and set up resources for the i960 based AAC variants. The
+ * device_interface in the commregion will be allocated and linked
* to the comm region.
*/
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 3dea348bd25d..e06ff83b69ce 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -144,7 +144,16 @@ static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
- * @ret: adapter status
+ * @p2: second parameter
+ * @p3: third parameter
+ * @p4: fourth parameter
+ * @p5: fifth parameter
+ * @p6: sixth parameter
+ * @status: adapter status
+ * @r1: first return value
+ * @r2: second return value
+ * @r3: third return value
+ * @r4: fourth return value
*
* This routine will send a synchronous command to the adapter and wait
* for its completion.
@@ -443,6 +452,7 @@ static int aac_rx_deliver_message(struct fib * fib)
/**
* aac_rx_ioremap
+ * @dev: adapter
* @size: mapping resize request
*
*/
@@ -522,7 +532,7 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm)
}
/**
- * aac_rx_init - initialize an i960 based AAC card
+ * _aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure
*
* Allocate and set up resources for the i960 based AAC variants. The
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index aa5d7638cade..c9a1dad2f563 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -135,13 +135,21 @@ static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
+ * @p2: second parameter
+ * @p3: third parameter
+ * @p4: fourth parameter
+ * @p5: fifth parameter
+ * @p6: sixth parameter
* @ret: adapter status
+ * @r1: first return value
+ * @r2: second return value
+ * @r3: third return value
+ * @r4: fourth return value
*
- * This routine will send a synchronous command to the adapter and wait
+ * This routine will send a synchronous command to the adapter and wait
* for its completion.
*/
-
-static int sa_sync_cmd(struct aac_dev *dev, u32 command,
+static int sa_sync_cmd(struct aac_dev *dev, u32 command,
u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
@@ -283,6 +291,7 @@ static int aac_sa_check_health(struct aac_dev *dev)
/**
* aac_sa_ioremap
+ * @dev: device to ioremap
* @size: mapping resize request
*
*/
@@ -300,8 +309,8 @@ static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
* aac_sa_init - initialize an ARM based AAC card
* @dev: device to configure
*
- * Allocate and set up resources for the ARM based AAC variants. The
- * device_interface in the commregion will be allocated and linked
+ * Allocate and set up resources for the ARM based AAC variants. The
+ * device_interface in the commregion will be allocated and linked
* to the comm region.
*/
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 787ec9baebb0..11ef58204e96 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -191,7 +191,16 @@ static void aac_src_enable_interrupt_message(struct aac_dev *dev)
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
- * @ret: adapter status
+ * @p2: second parameter
+ * @p3: third parameter
+ * @p4: fourth parameter
+ * @p5: fifth parameter
+ * @p6: sixth parameter
+ * @status: adapter status
+ * @r1: first return value
+ * @r2: second return value
+ * @r3: third return value
+ * @r4: fourth return value
*
* This routine will send a synchronous command to the adapter and wait
* for its completion.
@@ -602,6 +611,7 @@ static int aac_src_deliver_message(struct fib *fib)
/**
* aac_src_ioremap
+ * @dev: device to ioremap
* @size: mapping resize request
*
*/
@@ -632,6 +642,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
/**
* aac_srcv_ioremap
+ * @dev: device to ioremap
* @size: mapping resize request
*
*/
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index a242a62caaa1..f301aec044bb 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -84,8 +84,6 @@ typedef unsigned char uchar;
#define ASC_CS_TYPE unsigned short
-#define ASC_IS_ISA (0x0001)
-#define ASC_IS_ISAPNP (0x0081)
#define ASC_IS_EISA (0x0002)
#define ASC_IS_PCI (0x0004)
#define ASC_IS_PCI_ULTRA (0x0104)
@@ -101,11 +99,6 @@ typedef unsigned char uchar;
#define ASC_CHIP_MIN_VER_PCI (0x09)
#define ASC_CHIP_MAX_VER_PCI (0x0F)
#define ASC_CHIP_VER_PCI_BIT (0x08)
-#define ASC_CHIP_MIN_VER_ISA (0x11)
-#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
-#define ASC_CHIP_MAX_VER_ISA (0x27)
-#define ASC_CHIP_VER_ISA_BIT (0x30)
-#define ASC_CHIP_VER_ISAPNP_BIT (0x20)
#define ASC_CHIP_VER_ASYN_BUG (0x21)
#define ASC_CHIP_VER_PCI 0x08
#define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02)
@@ -116,7 +109,6 @@ typedef unsigned char uchar;
#define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3)
#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL)
#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL)
-#define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL)
#define ASC_SCSI_ID_BITS 3
#define ASC_SCSI_TIX_TYPE uchar
@@ -194,7 +186,6 @@ typedef unsigned char uchar;
#define ASC_FLAG_SRB_LINEAR_ADDR 0x08
#define ASC_FLAG_WIN16 0x10
#define ASC_FLAG_WIN32 0x20
-#define ASC_FLAG_ISA_OVER_16MB 0x40
#define ASC_FLAG_DOS_VM_CALLBACK 0x80
#define ASC_TAG_FLAG_EXTRA_BYTES 0x10
#define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04
@@ -316,7 +307,7 @@ typedef struct asc_sg_head {
ushort queue_cnt;
ushort entry_to_copy;
ushort res;
- ASC_SG_LIST sg_list[0];
+ ASC_SG_LIST sg_list[];
} ASC_SG_HEAD;
typedef struct asc_scsi_q {
@@ -464,8 +455,6 @@ typedef struct asc_dvc_cfg {
ASC_SCSI_BIT_ID_TYPE disc_enable;
ASC_SCSI_BIT_ID_TYPE sdtr_enable;
uchar chip_scsi_id;
- uchar isa_dma_speed;
- uchar isa_dma_channel;
uchar chip_version;
ushort mcode_date;
ushort mcode_version;
@@ -572,10 +561,8 @@ typedef struct asc_cap_info_array {
#define ASC_EEP_MAX_RETRY 20
/*
- * These macros keep the chip SCSI id and ISA DMA speed
- * bitfields in board order. C bitfields aren't portable
- * between big and little-endian platforms so they are
- * not used.
+ * These macros keep the chip SCSI id bitfields in board order. C bitfields
+ * aren't portable between big and little-endian platforms so they are not used.
*/
#define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f)
@@ -1812,7 +1799,7 @@ typedef struct adv_req {
* Field naming convention:
*
* *_able indicates both whether a feature should be enabled or disabled
- * and whether a device isi capable of the feature. At initialization
+ * and whether a device is capable of the feature. At initialization
* this field may be set, but later if a device is found to be incapable
* of the feature, the field is cleared.
*/
@@ -2085,12 +2072,6 @@ do { \
#define ASC_BUSY 0
#define ASC_ERROR (-1)
-/* struct scsi_cmnd function return codes */
-#define STATUS_BYTE(byte) (byte)
-#define MSG_BYTE(byte) ((byte) << 8)
-#define HOST_BYTE(byte) ((byte) << 16)
-#define DRIVER_BYTE(byte) ((byte) << 24)
-
#define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1)
#ifndef ADVANSYS_STATS
#define ASC_STATS_ADD(shost, counter, count)
@@ -2296,6 +2277,15 @@ struct asc_board {
dvc_var.adv_dvc_var)
#define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev)
+struct advansys_cmd {
+ dma_addr_t dma_handle;
+};
+
+static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#ifdef ADVANSYS_DEBUG
static int asc_dbglvl = 3;
@@ -2346,9 +2336,8 @@ static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h)
printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n",
h->disc_enable, h->sdtr_enable);
- printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, "
- "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed,
- h->isa_dma_channel, h->chip_version);
+ printk(" chip_scsi_id %d, chip_version %d,\n",
+ h->chip_scsi_id, h->chip_version);
printk(" mcode_date 0x%x, mcode_version %d\n",
h->mcode_date, h->mcode_version);
@@ -2421,8 +2410,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
printk(" dma_channel %d, this_id %d, can_queue %d,\n",
s->dma_channel, s->this_id, s->can_queue);
- printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n",
- s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma);
+ printk(" cmd_per_lun %d, sg_tablesize %d\n",
+ s->cmd_per_lun, s->sg_tablesize);
if (ASC_NARROW_BOARD(boardp)) {
asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var);
@@ -2638,42 +2627,28 @@ static const char *advansys_info(struct Scsi_Host *shost)
if (ASC_NARROW_BOARD(boardp)) {
asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
ASC_DBG(1, "begin\n");
- if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
- if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) ==
- ASC_IS_ISAPNP) {
- busname = "ISA PnP";
+
+ if (asc_dvc_varp->bus_type & ASC_IS_VL) {
+ busname = "VL";
+ } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
+ busname = "EISA";
+ } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
+ == ASC_IS_PCI_ULTRA) {
+ busname = "PCI Ultra";
} else {
- busname = "ISA";
+ busname = "PCI";
}
- sprintf(info,
- "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X",
- ASC_VERSION, busname,
- (ulong)shost->io_port,
- (ulong)shost->io_port + ASC_IOADR_GAP - 1,
- boardp->irq, shost->dma_channel);
} else {
- if (asc_dvc_varp->bus_type & ASC_IS_VL) {
- busname = "VL";
- } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
- busname = "EISA";
- } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
- if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
- == ASC_IS_PCI_ULTRA) {
- busname = "PCI Ultra";
- } else {
- busname = "PCI";
- }
- } else {
- busname = "?";
- shost_printk(KERN_ERR, shost, "unknown bus "
- "type %d\n", asc_dvc_varp->bus_type);
- }
- sprintf(info,
- "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
- ASC_VERSION, busname, (ulong)shost->io_port,
- (ulong)shost->io_port + ASC_IOADR_GAP - 1,
- boardp->irq);
+ busname = "?";
+ shost_printk(KERN_ERR, shost, "unknown bus "
+ "type %d\n", asc_dvc_varp->bus_type);
}
+ sprintf(info,
+ "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
+ ASC_VERSION, busname, (ulong)shost->io_port,
+ (ulong)shost->io_port + ASC_IOADR_GAP - 1,
+ boardp->irq);
} else {
/*
* Wide Adapter Information
@@ -2876,15 +2851,10 @@ static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- ASC_DVC_VAR *asc_dvc_varp;
ASCEEP_CONFIG *ep;
int i;
-#ifdef CONFIG_ISA
- int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
-#endif /* CONFIG_ISA */
uchar serialstr[13];
- asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
ep = &boardp->eep_config.asc_eep;
seq_printf(m,
@@ -2932,14 +2902,6 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
seq_printf(m, " %c",
(ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
seq_putc(m, '\n');
-
-#ifdef CONFIG_ISA
- if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
- seq_printf(m,
- " Host ISA DMA speed: %d MB/S\n",
- isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
- }
-#endif /* CONFIG_ISA */
}
/*
@@ -3171,7 +3133,6 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- int chip_scsi_id;
seq_printf(m,
"\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
@@ -3188,21 +3149,11 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->sg_tablesize, shost->cmd_per_lun);
seq_printf(m,
- " unchecked_isa_dma %d\n",
- shost->unchecked_isa_dma);
-
- seq_printf(m,
" flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
boardp->flags, shost->last_reset, jiffies,
boardp->asc_n_io_port);
seq_printf(m, " io_port 0x%lx\n", shost->io_port);
-
- if (ASC_NARROW_BOARD(boardp)) {
- chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
- } else {
- chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
- }
}
/*
@@ -3366,8 +3317,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
- (unsigned long)v->iop_base,
+ " iop_base 0x%p, cable_detect: %X, err_code %u\n",
+ v->iop_base,
AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT,
v->err_code);
@@ -3650,7 +3601,7 @@ static void asc_scsi_done(struct scsi_cmnd *scp)
{
scsi_dma_unmap(scp);
ASC_STATS(scp->device->host, done);
- scp->scsi_done(scp);
+ scsi_done(scp);
}
static void AscSetBank(PortAddr iop_base, uchar bank)
@@ -5993,10 +5944,10 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
/*
* 'done_status' contains the command's ending status.
*/
+ scp->result = 0;
switch (scsiqp->done_status) {
case QD_NO_ERROR:
ASC_DBG(2, "QD_NO_ERROR\n");
- scp->result = 0;
/*
* Check for an underrun condition.
@@ -6017,47 +5968,32 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
ASC_DBG(2, "QD_WITH_ERROR\n");
switch (scsiqp->host_status) {
case QHSTA_NO_ERROR:
+ set_status_byte(scp, scsiqp->scsi_status);
if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- /*
- * Note: The 'status_byte()' macro used by
- * target drivers defined in scsi.h shifts the
- * status byte returned by host drivers right
- * by 1 bit. This is why target drivers also
- * use right shifted status byte definitions.
- * For instance target drivers use
- * CHECK_CONDITION, defined to 0x1, instead of
- * the SCSI defined check condition value of
- * 0x2. Host drivers are supposed to return
- * the status byte as it is defined by SCSI.
- */
- scp->result = DRIVER_BYTE(DRIVER_SENSE) |
- STATUS_BYTE(scsiqp->scsi_status);
- } else {
- scp->result = STATUS_BYTE(scsiqp->scsi_status);
}
break;
default:
/* Some other QHSTA error occurred. */
ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
- scp->result = HOST_BYTE(DID_BAD_TARGET);
+ set_host_byte(scp, DID_BAD_TARGET);
break;
}
break;
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
- scp->result =
- HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
+ set_status_byte(scp, scsiqp->scsi_status);
+ set_host_byte(scp, DID_ABORT);
break;
default:
ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
- scp->result =
- HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
+ set_status_byte(scp, scsiqp->scsi_status);
+ set_host_byte(scp, DID_ERROR);
break;
}
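Aside: set_host_byte() and set_status_byte() update the individual
fields of scp->result in place of the open-coded shifts, and zeroing
result first gives a known starting value. A sketch assuming the same
semantics (helper name hypothetical):

    #include <scsi/scsi_cmnd.h>

    static void demo_finish(struct scsi_cmnd *scp, u8 host, u8 status)
    {
        scp->result = 0;
        set_status_byte(scp, status);   /* bits 0-7   */
        set_host_byte(scp, host);       /* bits 16-23 */
    }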
@@ -6111,7 +6047,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
uchar int_stat;
- ushort target_bit;
ADV_CARR_T *free_carrp;
__le32 irq_next_vpa;
ADV_SCSI_REQ_Q *scsiq;
@@ -6198,8 +6133,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
asc_dvc->carr_freelist = free_carrp;
asc_dvc->carr_pending_cnt--;
- target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id);
-
/*
* Clear request microcode control flag.
*/
@@ -6757,15 +6690,15 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_STATS(boardp->shost, callback);
- dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
+ dma_unmap_single(boardp->dev, advansys_cmd(scp)->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/*
* 'qdonep' contains the command's ending status.
*/
+ scp->result = 0;
switch (qdonep->d3.done_stat) {
case QD_NO_ERROR:
ASC_DBG(2, "QD_NO_ERROR\n");
- scp->result = 0;
/*
* Check for an underrun condition.
@@ -6785,51 +6718,32 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_DBG(2, "QD_WITH_ERROR\n");
switch (qdonep->d3.host_stat) {
case QHSTA_NO_ERROR:
+ set_status_byte(scp, qdonep->d3.scsi_stat);
if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- /*
- * Note: The 'status_byte()' macro used by
- * target drivers defined in scsi.h shifts the
- * status byte returned by host drivers right
- * by 1 bit. This is why target drivers also
- * use right shifted status byte definitions.
- * For instance target drivers use
- * CHECK_CONDITION, defined to 0x1, instead of
- * the SCSI defined check condition value of
- * 0x2. Host drivers are supposed to return
- * the status byte as it is defined by SCSI.
- */
- scp->result = DRIVER_BYTE(DRIVER_SENSE) |
- STATUS_BYTE(qdonep->d3.scsi_stat);
- } else {
- scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
}
break;
default:
/* QHSTA error occurred */
ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat);
- scp->result = HOST_BYTE(DID_BAD_TARGET);
+ set_host_byte(scp, DID_BAD_TARGET);
break;
}
break;
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
- scp->result =
- HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.
- scsi_msg) |
- STATUS_BYTE(qdonep->d3.scsi_stat);
+ set_status_byte(scp, qdonep->d3.scsi_stat);
+ set_host_byte(scp, DID_ABORT);
break;
default:
ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
- scp->result =
- HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.
- scsi_msg) |
- STATUS_BYTE(qdonep->d3.scsi_stat);
+ set_status_byte(scp, qdonep->d3.scsi_stat);
+ set_host_byte(scp, DID_ERROR);
break;
}
@@ -7494,15 +7408,15 @@ static int advansys_slave_configure(struct scsi_device *sdev)
static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
struct asc_board *board = shost_priv(scp->device->host);
+ struct advansys_cmd *acmd = advansys_cmd(scp);
- scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) {
+ acmd->dma_handle = dma_map_single(board->dev, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(board->dev, acmd->dma_handle)) {
ASC_DBG(1, "failed to map sense buffer\n");
return 0;
}
- return cpu_to_le32(scp->SCp.dma_handle);
+ return cpu_to_le32(acmd->dma_handle);
}
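This follows the usual streaming-DMA contract: map once, validate with dma_mapping_error() before handing the address to hardware, and unmap with the same length and direction on completion (asc_isr_callback above does the unmap). Condensed, with illustrative names:

/* Sketch: streaming DMA for a per-command buffer (hypothetical names). */
static dma_addr_t sketch_map_sense(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, handle))
		return 0;	/* caller treats 0 as "mapping failed" */
	return handle;
	/* completion: dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE); */
}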
static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
@@ -7518,7 +7432,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
* Set the srb_tag to the command tag + 1, as
* srb_tag '0' is used internally by the chip.
*/
- srb_tag = scp->request->tag + 1;
+ srb_tag = scsi_cmd_to_rq(scp)->tag + 1;
asc_scsi_q->q2.srb_tag = srb_tag;
/*
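scsi_cmd_to_rq() replaces direct scp->request dereferences. Under blk-mq the scsi_cmnd travels as the request's driver payload, so the request is recoverable from the command; a sketch of the idea (the real helper lives in <scsi/scsi_cmnd.h>):

/* Sketch: a blk-mq request and its scsi_cmnd payload are co-allocated. */
static inline struct request *sketch_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}

The +1 offset on the tag is unchanged, since srb_tag 0 stays reserved for the chip as the comment above notes.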
@@ -7568,15 +7482,15 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
"sg_tablesize %d\n", use_sg,
scp->device->host->sg_tablesize);
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
return ASC_ERROR;
}
- asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) +
- use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC);
+ asc_sg_head = kzalloc(struct_size(asc_sg_head, sg_list, use_sg),
+ GFP_ATOMIC);
if (!asc_sg_head) {
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_SOFT_ERROR);
+ set_host_byte(scp, DID_SOFT_ERROR);
return ASC_ERROR;
}
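struct_size() from <linux/overflow.h> computes the size of a structure plus n trailing flexible-array elements, saturating rather than silently wrapping on overflow; that is what replaces the open-coded sizeof arithmetic removed above. With a hypothetical struct mirroring asc_sg_head's shape:

#include <linux/overflow.h>

struct sg_head_sketch {
	unsigned short entry_cnt;
	struct asc_sg_list sg_list[];	/* flexible array member */
};

static size_t sketch_sg_head_bytes(struct sg_head_sketch *h, int use_sg)
{
	/* ~ offsetof(struct sg_head_sketch, sg_list) +
	 *   use_sg * sizeof(h->sg_list[0]), but SIZE_MAX on overflow. */
	return struct_size(h, sg_list, use_sg);
}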
@@ -7732,7 +7646,7 @@ static int
adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
adv_req_t **adv_reqpp)
{
- u32 srb_tag = scp->request->tag;
+ u32 srb_tag = scsi_cmd_to_rq(scp)->tag;
adv_req_t *reqp;
ADV_SCSI_REQ_Q *scsiqp;
int ret;
@@ -7819,7 +7733,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
"ADV_MAX_SG_LIST %d\n", use_sg,
scp->device->host->sg_tablesize);
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
reqp->cmndp = NULL;
scp->host_scribble = NULL;
@@ -7831,7 +7745,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg);
if (ret != ADV_SUCCESS) {
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
reqp->cmndp = NULL;
scp->host_scribble = NULL;
@@ -8528,13 +8442,13 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, "
"err_code 0x%x\n", err_code);
ASC_STATS(scp->device->host, exe_error);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
break;
default:
scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, "
"err_code 0x%x\n", err_code);
ASC_STATS(scp->device->host, exe_unknown);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
break;
}
@@ -8548,14 +8462,12 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
* This function always returns 0. Command return status is saved
* in the 'scp' result field.
*/
-static int
-advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+static int advansys_queuecommand_lck(struct scsi_cmnd *scp)
{
struct Scsi_Host *shost = scp->device->host;
int asc_res, result = 0;
ASC_STATS(shost, queuecommand);
- scp->scsi_done = done;
asc_res = asc_execute_scsi_cmnd(scp);
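With completion now routed through the midlayer's scsi_done(), the done callback no longer travels inside the command, so the _lck variant drops its second argument. The DEF_SCSI_QCMD() wrapper this driver uses supplies the host-lock variant of ->queuecommand; a sketch of the shape it generates, assuming the classic lock-around-call pattern:

/* Sketch of the wrapper DEF_SCSI_QCMD() expands to, roughly. */
static int sketch_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, flags);
	rc = advansys_queuecommand_lck(cmd);	/* completes via scsi_done(cmd) */
	spin_unlock_irqrestore(shost->host_lock, flags);
	return rc;
}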
@@ -8609,12 +8521,6 @@ static unsigned short AscGetChipBiosAddress(PortAddr iop_base,
}
cfg_lsw = AscGetChipCfgLsw(iop_base);
-
- /*
- * ISA PnP uses the top bit as the 32K BIOS flag
- */
- if (bus_type == ASC_IS_ISAPNP)
- cfg_lsw &= 0x7FFF;
bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
return bios_addr;
}
@@ -8657,19 +8563,6 @@ static unsigned char AscGetChipVersion(PortAddr iop_base,
return AscGetChipVerNo(iop_base);
}
-#ifdef CONFIG_ISA
-static void AscEnableIsaDma(uchar dma_channel)
-{
- if (dma_channel < 4) {
- outp(0x000B, (ushort)(0xC0 | dma_channel));
- outp(0x000A, dma_channel);
- } else if (dma_channel < 8) {
- outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
- outp(0x00D4, (ushort)(dma_channel - 4));
- }
-}
-#endif /* CONFIG_ISA */
-
static int AscStopQueueExe(PortAddr iop_base)
{
int count = 0;
@@ -8690,65 +8583,11 @@ static int AscStopQueueExe(PortAddr iop_base)
static unsigned int AscGetMaxDmaCount(ushort bus_type)
{
- if (bus_type & ASC_IS_ISA)
- return ASC_MAX_ISA_DMA_COUNT;
- else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
+ if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
return ASC_MAX_VL_DMA_COUNT;
return ASC_MAX_PCI_DMA_COUNT;
}
-#ifdef CONFIG_ISA
-static ushort AscGetIsaDmaChannel(PortAddr iop_base)
-{
- ushort channel;
-
- channel = AscGetChipCfgLsw(iop_base) & 0x0003;
- if (channel == 0x03)
- return (0);
- else if (channel == 0x00)
- return (7);
- return (channel + 4);
-}
-
-static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
-{
- ushort cfg_lsw;
- uchar value;
-
- if ((dma_channel >= 5) && (dma_channel <= 7)) {
- if (dma_channel == 7)
- value = 0x00;
- else
- value = dma_channel - 4;
- cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
- cfg_lsw |= value;
- AscSetChipCfgLsw(iop_base, cfg_lsw);
- return (AscGetIsaDmaChannel(iop_base));
- }
- return 0;
-}
-
-static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
-{
- uchar speed_value;
-
- AscSetBank(iop_base, 1);
- speed_value = AscReadChipDmaSpeed(iop_base);
- speed_value &= 0x07;
- AscSetBank(iop_base, 0);
- return speed_value;
-}
-
-static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
-{
- speed_value &= 0x07;
- AscSetBank(iop_base, 1);
- AscWriteChipDmaSpeed(iop_base, speed_value);
- AscSetBank(iop_base, 0);
- return AscGetIsaDmaSpeed(iop_base);
-}
-#endif /* CONFIG_ISA */
-
static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
{
int i;
@@ -8758,7 +8597,7 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
iop_base = asc_dvc->iop_base;
asc_dvc->err_code = 0;
if ((asc_dvc->bus_type &
- (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
+ (ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
}
AscSetChipControl(iop_base, CC_HALT);
@@ -8813,17 +8652,6 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
(SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
}
- asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
-#ifdef CONFIG_ISA
- if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
- if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
- AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
- asc_dvc->bus_type = ASC_IS_ISAPNP;
- }
- asc_dvc->cfg->isa_dma_channel =
- (uchar)AscGetIsaDmaChannel(iop_base);
- }
-#endif /* CONFIG_ISA */
for (i = 0; i <= ASC_MAX_TID; i++) {
asc_dvc->cur_dvc_qng[i] = 0;
asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
@@ -9187,7 +9015,6 @@ static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr;
asc_dvc->cfg->disc_enable = eep_config->disc_enable;
asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
- asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config);
asc_dvc->start_motor = eep_config->start_motor;
asc_dvc->dvc_cntl = eep_config->cntl;
asc_dvc->no_scam = eep_config->no_scam;
@@ -9360,22 +9187,10 @@ static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
}
} else
#endif /* CONFIG_PCI */
- if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
- if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
- == ASC_CHIP_VER_ASYN_BUG) {
- asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
- }
- }
if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
asc_dvc->cfg->chip_scsi_id) {
asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
}
-#ifdef CONFIG_ISA
- if (asc_dvc->bus_type & ASC_IS_ISA) {
- AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
- AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
- }
-#endif /* CONFIG_ISA */
asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
@@ -10798,12 +10613,7 @@ static struct scsi_host_template advansys_template = {
.eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
.slave_configure = advansys_slave_configure,
- /*
- * Because the driver may control an ISA adapter 'unchecked_isa_dma'
- * must be set. The flag will be cleared in advansys_board_found
- * for non-ISA adapters.
- */
- .unchecked_isa_dma = true,
+ .cmd_size = sizeof(struct advansys_cmd),
};
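Setting .cmd_size tells the midlayer to allocate that many extra bytes behind every scsi_cmnd, reachable through scsi_cmd_priv(); that is what the advansys_cmd() accessor seen earlier is expected to wrap. A minimal sketch, assuming the private area only carries the sense-buffer mapping:

/* Sketch: per-command private data requested via .cmd_size. */
struct advansys_cmd {
	dma_addr_t dma_handle;	/* sense-buffer DMA mapping */
};

static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);	/* area just past the scsi_cmnd */
}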
static int advansys_wide_init_chip(struct Scsi_Host *shost)
@@ -10969,29 +10779,21 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
*/
switch (asc_dvc_varp->bus_type) {
#ifdef CONFIG_ISA
- case ASC_IS_ISA:
- shost->unchecked_isa_dma = true;
- share_irq = 0;
- break;
case ASC_IS_VL:
- shost->unchecked_isa_dma = false;
share_irq = 0;
break;
case ASC_IS_EISA:
- shost->unchecked_isa_dma = false;
share_irq = IRQF_SHARED;
break;
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
case ASC_IS_PCI:
- shost->unchecked_isa_dma = false;
share_irq = IRQF_SHARED;
break;
#endif /* CONFIG_PCI */
default:
shost_printk(KERN_ERR, shost, "unknown adapter type: "
"%d\n", asc_dvc_varp->bus_type);
- shost->unchecked_isa_dma = false;
share_irq = 0;
break;
}
@@ -11010,7 +10812,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
* For Wide boards set PCI information before calling
* AdvInitGetConfig().
*/
- shost->unchecked_isa_dma = false;
share_irq = IRQF_SHARED;
ASC_DBG(2, "AdvInitGetConfig()\n");
@@ -11046,7 +10847,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
- ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed);
+ ASC_EEP_SET_DMA_SPD(ep, ASC_DEF_ISA_DMA_SPEED);
ep->start_motor = asc_dvc_varp->start_motor;
ep->cntl = asc_dvc_varp->dvc_cntl;
ep->no_scam = asc_dvc_varp->no_scam;
@@ -11274,22 +11075,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
/* Register DMA Channel for Narrow boards. */
shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */
-#ifdef CONFIG_ISA
- if (ASC_NARROW_BOARD(boardp)) {
- /* Register DMA channel for ISA bus. */
- if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
- shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
- ret = request_dma(shost->dma_channel, DRV_NAME);
- if (ret) {
- shost_printk(KERN_ERR, shost, "request_dma() "
- "%d failed %d\n",
- shost->dma_channel, ret);
- goto err_unmap;
- }
- AscEnableIsaDma(shost->dma_channel);
- }
- }
-#endif /* CONFIG_ISA */
/* Register IRQ Number. */
ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost);
@@ -11308,7 +11093,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
"failed with %d\n", boardp->irq, ret);
}
- goto err_free_dma;
+ goto err_unmap;
}
/*
@@ -11360,11 +11145,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
advansys_wide_free_mem(boardp);
err_free_irq:
free_irq(boardp->irq, shost);
- err_free_dma:
-#ifdef CONFIG_ISA
- if (shost->dma_channel != NO_ISA_DMA)
- free_dma(shost->dma_channel);
-#endif
err_unmap:
if (boardp->ioremap_addr)
iounmap(boardp->ioremap_addr);
@@ -11385,12 +11165,7 @@ static int advansys_release(struct Scsi_Host *shost)
ASC_DBG(1, "begin\n");
scsi_remove_host(shost);
free_irq(board->irq, shost);
-#ifdef CONFIG_ISA
- if (shost->dma_channel != NO_ISA_DMA) {
- ASC_DBG(1, "free_dma()\n");
- free_dma(shost->dma_channel);
- }
-#endif
+
if (ASC_NARROW_BOARD(board)) {
dma_unmap_single(board->dev,
board->dvc_var.asc_dvc_var.overrun_dma,
@@ -11412,80 +11187,13 @@ static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = {
0x0210, 0x0230, 0x0250, 0x0330
};
-/*
- * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw. It decodes as:
- * 00: 10
- * 01: 11
- * 10: 12
- * 11: 15
- */
-static unsigned int advansys_isa_irq_no(PortAddr iop_base)
-{
- unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
- unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
- if (chip_irq == 13)
- chip_irq = 15;
- return chip_irq;
-}
-
-static int advansys_isa_probe(struct device *dev, unsigned int id)
-{
- int err = -ENODEV;
- PortAddr iop_base = _asc_def_iop_base[id];
- struct Scsi_Host *shost;
- struct asc_board *board;
-
- if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) {
- ASC_DBG(1, "I/O port 0x%x busy\n", iop_base);
- return -ENODEV;
- }
- ASC_DBG(1, "probing I/O port 0x%x\n", iop_base);
- if (!AscFindSignature(iop_base))
- goto release_region;
- if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT))
- goto release_region;
-
- err = -ENOMEM;
- shost = scsi_host_alloc(&advansys_template, sizeof(*board));
- if (!shost)
- goto release_region;
-
- board = shost_priv(shost);
- board->irq = advansys_isa_irq_no(iop_base);
- board->dev = dev;
- board->shost = shost;
-
- err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
- if (err)
- goto free_host;
-
- dev_set_drvdata(dev, shost);
- return 0;
-
- free_host:
- scsi_host_put(shost);
- release_region:
- release_region(iop_base, ASC_IOADR_GAP);
- return err;
-}
-
-static int advansys_isa_remove(struct device *dev, unsigned int id)
+static void advansys_vlb_remove(struct device *dev, unsigned int id)
{
int ioport = _asc_def_iop_base[id];
advansys_release(dev_get_drvdata(dev));
release_region(ioport, ASC_IOADR_GAP);
- return 0;
}
-static struct isa_driver advansys_isa_driver = {
- .probe = advansys_isa_probe,
- .remove = advansys_isa_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- },
-};
-
/*
* The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as:
* 000: invalid
@@ -11554,7 +11262,7 @@ static int advansys_vlb_probe(struct device *dev, unsigned int id)
static struct isa_driver advansys_vlb_driver = {
.probe = advansys_vlb_probe,
- .remove = advansys_isa_remove,
+ .remove = advansys_vlb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "advansys_vlb",
@@ -11804,15 +11512,10 @@ static int __init advansys_init(void)
{
int error;
- error = isa_register_driver(&advansys_isa_driver,
- ASC_IOADR_TABLE_MAX_IX);
- if (error)
- goto fail;
-
error = isa_register_driver(&advansys_vlb_driver,
ASC_IOADR_TABLE_MAX_IX);
if (error)
- goto unregister_isa;
+ goto fail;
error = eisa_driver_register(&advansys_eisa_driver);
if (error)
@@ -11828,8 +11531,6 @@ static int __init advansys_init(void)
eisa_driver_unregister(&advansys_eisa_driver);
unregister_vlb:
isa_unregister_driver(&advansys_vlb_driver);
- unregister_isa:
- isa_unregister_driver(&advansys_isa_driver);
fail:
return error;
}
@@ -11839,7 +11540,6 @@ static void __exit advansys_exit(void)
pci_unregister_driver(&advansys_pci_driver);
eisa_driver_unregister(&advansys_eisa_driver);
isa_unregister_driver(&advansys_vlb_driver);
- isa_unregister_driver(&advansys_isa_driver);
}
module_init(advansys_init);
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index eb466c2e1839..caeebfb67149 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -220,7 +220,7 @@
*
**************************************************************************
- see Documentation/scsi/aha152x.txt for configuration details
+ see Documentation/scsi/aha152x.rst for configuration details
**************************************************************************/
@@ -243,13 +243,16 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_spi.h>
-#include <scsi/scsi_eh.h>
+#include <scsi/scsicam.h>
#include "aha152x.h"
static LIST_HEAD(aha152x_host_list);
@@ -313,6 +316,21 @@ enum {
check_condition = 0x0800, /* requesting sense after CHECK CONDITION */
};
+struct aha152x_cmd_priv {
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int sent_command;
+ int phase;
+};
+
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
MODULE_AUTHOR("Jürgen Fischer");
MODULE_DESCRIPTION(AHA152X_REVID);
MODULE_LICENSE("GPL");
@@ -619,7 +637,8 @@ static struct {
static irqreturn_t intr(int irq, void *dev_id);
static void reset_ports(struct Scsi_Host *shpnt);
static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
-static void done(struct Scsi_Host *shpnt, int error);
+static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
+ unsigned char host_byte);
/* diagnostics */
static void show_command(struct scsi_cmnd * ptr);
@@ -875,14 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
{
if(CURRENT_SC) {
- CURRENT_SC->SCp.phase |= 1 << 16;
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
+ acp->phase |= 1 << 16;
- if(CURRENT_SC->SCp.phase & selecting) {
+ if (acp->phase & selecting) {
SETPORT(SSTAT1, SELTO);
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
SETPORT(SIMODE1, ENSELTIMO);
} else {
- SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
}
} else if(STATE==seldi) {
@@ -904,20 +925,18 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
* Queue a command and setup interrupts for a free bus.
*/
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
- struct completion *complete,
- int phase, void (*done)(struct scsi_cmnd *))
+ struct completion *complete, int phase)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- SCpnt->scsi_done = done;
- SCpnt->SCp.phase = not_issued | phase;
- SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.have_data_in = 0;
- SCpnt->SCp.sent_command = 0;
+ acp->phase = not_issued | phase;
+ acp->status = 0x1; /* Illegal status by SCSI standard */
+ acp->message = 0;
+ acp->sent_command = 0;
- if(SCpnt->SCp.phase & (resetting|check_condition)) {
+ if (acp->phase & (resetting | check_condition)) {
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
return FAILED;
@@ -940,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
+ acp->ptr = NULL;
+ acp->this_residual = 0;
scsi_set_resid(SCpnt, 0);
- SCpnt->SCp.buffer = NULL;
+ acp->buffer = NULL;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ acp->buffer = scsi_sglist(SCpnt);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
DO_LOCK(flags);
@@ -976,10 +995,9 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
* queue a command
*
*/
-static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int aha152x_queue_lck(struct scsi_cmnd *SCpnt)
{
- return aha152x_internal_queue(SCpnt, NULL, 0, done);
+ return aha152x_internal_queue(SCpnt, NULL, 0);
}
static DEF_SCSI_QCMD(aha152x_queue)
@@ -997,6 +1015,14 @@ static void reset_done(struct scsi_cmnd *SCpnt)
}
}
+static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
+{
+ if (aha152x_priv(SCpnt)->phase & resetting)
+ reset_done(SCpnt);
+ else
+ scsi_done(SCpnt);
+}
+
/*
* Abort a command
*
@@ -1063,7 +1089,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
SCpnt->cmd_len = 0;
- aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
+ aha152x_internal_queue(SCpnt, &done, resetting);
timeleft = wait_for_completion_timeout(&done, 100*HZ);
if (!timeleft) {
@@ -1077,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
DO_LOCK(flags);
- if(SCpnt->SCp.phase & resetted) {
+ if (aha152x_priv(SCpnt)->phase & resetted) {
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
@@ -1249,7 +1275,7 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
"aha152x: unable to verify geometry for disk with >1GB.\n"
" Using default translation. Please verify yourself.\n"
" Perhaps you need to enable extended translation in the driver.\n"
- " See Documentation/scsi/aha152x.txt for details.\n");
+ " See Documentation/scsi/aha152x.rst for details.\n");
}
} else {
info_array[0] = info[0];
@@ -1271,7 +1297,8 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
* Internal done function
*
*/
-static void done(struct Scsi_Host *shpnt, int error)
+static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
+ unsigned char host_byte)
{
if (CURRENT_SC) {
if(DONE_SC)
@@ -1281,7 +1308,8 @@ static void done(struct Scsi_Host *shpnt, int error)
DONE_SC = CURRENT_SC;
CURRENT_SC = NULL;
- DONE_SC->result = error;
+ set_status_byte(DONE_SC, status_byte);
+ set_host_byte(DONE_SC, host_byte);
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
}
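Passing the two bytes separately removes the hand-packed (status | message << 8 | DID_xxx << 16) encodings at the call sites below; each byte lands in its own lane of DONE_SC->result via the setters. For reference, the legacy packed form relates to the new arguments roughly as:

/* Sketch: legacy packed result in terms of the new done() arguments. */
static inline int sketch_packed(unsigned char status_byte,
				unsigned char host_byte)
{
	return status_byte | (host_byte << 16);	/* message byte no longer carried */
}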
@@ -1369,32 +1397,34 @@ static void busfree_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRBUSFREE);
if(CURRENT_SC) {
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
#if defined(AHA152X_STAT)
action++;
#endif
- CURRENT_SC->SCp.phase &= ~syncneg;
+ acp->phase &= ~syncneg;
- if(CURRENT_SC->SCp.phase & completed) {
+ if (acp->phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
+ done(shpnt, acp->status, DID_OK);
- } else if(CURRENT_SC->SCp.phase & aborted) {
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16));
+ } else if (acp->phase & aborted) {
+ done(shpnt, acp->status, DID_ABORT);
- } else if(CURRENT_SC->SCp.phase & resetted) {
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16));
+ } else if (acp->phase & resetted) {
+ done(shpnt, acp->status, DID_RESET);
- } else if(CURRENT_SC->SCp.phase & disconnected) {
+ } else if (acp->phase & disconnected) {
/* target sent DISCONNECT */
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
- CURRENT_SC->SCp.phase |= 1 << 16;
+ acp->phase |= 1 << 16;
CURRENT_SC = NULL;
} else {
- done(shpnt, DID_ERROR << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ERROR);
}
#if defined(AHA152X_STAT)
} else {
@@ -1409,23 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
action++;
#endif
- if(DONE_SC->SCp.phase & check_condition) {
+ if (aha152x_priv(DONE_SC)->phase & check_condition) {
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
struct aha152x_scdata *sc = SCDATA(cmd);
scsi_eh_restore_cmnd(cmd, &sc->ses);
- cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
+ aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- } else if(DONE_SC->SCp.Status==SAM_STAT_CHECK_CONDITION) {
+ } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
- if(!(DONE_SC->SCp.phase & not_issued)) {
+ if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
struct aha152x_scdata *sc;
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1436,12 +1466,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
DO_UNLOCK(flags);
- aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
+ aha152x_internal_queue(ptr, NULL, check_condition);
DO_LOCK(flags);
}
}
- if(DONE_SC && DONE_SC->scsi_done) {
+ if (DONE_SC) {
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1450,13 +1480,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if(ptr->scsi_done != reset_done) {
+ if (!(aha152x_priv(ptr)->phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
DO_UNLOCK(flags);
- ptr->scsi_done(ptr);
+ aha152x_scsi_done(ptr);
DO_LOCK(flags);
}
@@ -1473,10 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
DO_UNLOCK(flags);
if(CURRENT_SC) {
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
#if defined(AHA152X_STAT)
action++;
#endif
- CURRENT_SC->SCp.phase |= selecting;
+ acp->phase |= selecting;
/* clear selection timeout */
SETPORT(SSTAT1, SELTO);
@@ -1504,18 +1536,20 @@ static void busfree_run(struct Scsi_Host *shpnt)
*/
static void seldo_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
SETPORT(SCSISIG, 0);
SETPORT(SSTAT1, CLRBUSFREE);
SETPORT(SSTAT1, CLRPHASECHG);
- CURRENT_SC->SCp.phase &= ~(selecting|not_issued);
+ acp->phase &= ~(selecting | not_issued);
SETPORT(SCSISEQ, 0);
if (TESTLO(SSTAT0, SELDO)) {
scmd_printk(KERN_ERR, CURRENT_SC,
"aha152x: passing bus free condition\n");
- done(shpnt, DID_NO_CONNECT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
return;
}
@@ -1523,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
- if (CURRENT_SC->SCp.phase & aborting) {
+ if (acp->phase & aborting) {
ADDMSGO(ABORT);
- } else if (CURRENT_SC->SCp.phase & resetting) {
+ } else if (acp->phase & resetting) {
ADDMSGO(BUS_DEVICE_RESET);
} else if (SYNCNEG==0 && SYNCHRONOUS) {
- CURRENT_SC->SCp.phase |= syncneg;
+ acp->phase |= syncneg;
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
SYNCNEG=1; /* negotiation in progress */
}
@@ -1543,21 +1577,24 @@ static void seldo_run(struct Scsi_Host *shpnt)
*/
static void selto_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp;
+
SETPORT(SCSISEQ, 0);
SETPORT(SSTAT1, CLRSELTIMO);
if (!CURRENT_SC)
return;
- CURRENT_SC->SCp.phase &= ~selecting;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->phase &= ~selecting;
- if (CURRENT_SC->SCp.phase & aborted)
- done(shpnt, DID_ABORT << 16);
+ if (acp->phase & aborted)
+ done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
- done(shpnt, DID_BUS_BUSY << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
else
/* ARBITRATION won, but SELECTION failed */
- done(shpnt, DID_NO_CONNECT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
}
/*
@@ -1579,7 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRPHASECHG);
if(CURRENT_SC) {
- if(!(CURRENT_SC->SCp.phase & not_issued))
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
+ if (!(acp->phase & not_issued))
scmd_printk(KERN_ERR, CURRENT_SC,
"command should not have been issued yet\n");
@@ -1636,6 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
static void msgi_run(struct Scsi_Host *shpnt)
{
for(;;) {
+ struct aha152x_cmd_priv *acp;
int sstat1 = GETPORT(SSTAT1);
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1673,8 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- CURRENT_SC->SCp.Message = MSGI(0);
- CURRENT_SC->SCp.phase &= ~disconnected;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
+ acp->phase &= ~disconnected;
MSGILEN=0;
@@ -1682,7 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- CURRENT_SC->SCp.Message = MSGI(0);
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
switch (MSGI(0)) {
case DISCONNECT:
@@ -1690,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
scmd_printk(KERN_WARNING, CURRENT_SC,
"target was not allowed to disconnect\n");
- CURRENT_SC->SCp.phase |= disconnected;
+ acp->phase |= disconnected;
break;
case COMMAND_COMPLETE:
- CURRENT_SC->SCp.phase |= completed;
+ acp->phase |= completed;
break;
case MESSAGE_REJECT:
@@ -1825,7 +1867,8 @@ static void msgi_end(struct Scsi_Host *shpnt)
static void msgo_init(struct Scsi_Host *shpnt)
{
if(MSGOLEN==0) {
- if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) {
+ if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+ SYNCNEG == 2 && SYNCRATE == 0) {
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
} else {
scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1842,6 +1885,8 @@ static void msgo_init(struct Scsi_Host *shpnt)
*/
static void msgo_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
while(MSGO_I<MSGOLEN) {
if (TESTLO(SSTAT0, SPIORDY))
return;
@@ -1853,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
if (MSGO(MSGO_I) & IDENTIFY_BASE)
- CURRENT_SC->SCp.phase |= identified;
+ acp->phase |= identified;
if (MSGO(MSGO_I)==ABORT)
- CURRENT_SC->SCp.phase |= aborted;
+ acp->phase |= aborted;
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
- CURRENT_SC->SCp.phase |= resetted;
+ acp->phase |= resetted;
SETPORT(SCSIDAT, MSGO(MSGO_I++));
}
@@ -1888,10 +1933,10 @@ static void msgo_end(struct Scsi_Host *shpnt)
*/
static void cmd_init(struct Scsi_Host *shpnt)
{
- if (CURRENT_SC->SCp.sent_command) {
+ if (aha152x_priv(CURRENT_SC)->sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
- done(shpnt, DID_ERROR << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ERROR);
return;
}
@@ -1919,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
"command sent incompletely (%d/%d)\n",
CMD_I, CURRENT_SC->cmd_len);
else
- CURRENT_SC->SCp.sent_command++;
+ aha152x_priv(CURRENT_SC)->sent_command++;
}
/*
@@ -1931,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SPIORDY))
return;
- CURRENT_SC->SCp.Status = GETPORT(SCSIDAT);
+ aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
}
@@ -1955,6 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
static void datai_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp;
unsigned long the_time;
int fifodata, data_count;
@@ -1992,35 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
fifodata = GETPORT(FIFOSTAT);
}
- if(CURRENT_SC->SCp.this_residual>0) {
- while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) {
- data_count = fifodata > CURRENT_SC->SCp.this_residual ?
- CURRENT_SC->SCp.this_residual :
- fifodata;
+ acp = aha152x_priv(CURRENT_SC);
+ if (acp->this_residual > 0) {
+ while (fifodata > 0 && acp->this_residual > 0) {
+ data_count = fifodata > acp->this_residual ?
+ acp->this_residual : fifodata;
fifodata -= data_count;
if (data_count & 1) {
SETPORT(DMACNTRL0, ENDMA|_8BIT);
- *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
- CURRENT_SC->SCp.this_residual--;
+ *acp->ptr++ = GETPORT(DATAPORT);
+ acp->this_residual--;
DATA_LEN++;
SETPORT(DMACNTRL0, ENDMA);
}
if (data_count > 1) {
data_count >>= 1;
- insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
- CURRENT_SC->SCp.ptr += 2 * data_count;
- CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ insw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
DATA_LEN += 2 * data_count;
}
- if (CURRENT_SC->SCp.this_residual == 0 &&
- !sg_is_last(CURRENT_SC->SCp.buffer)) {
+ if (acp->this_residual == 0 &&
+ !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
}
} else if (fifodata > 0) {
@@ -2030,8 +2076,7 @@ static void datai_run(struct Scsi_Host *shpnt)
fifodata, GETPORT(FIFOSTAT));
SETPORT(DMACNTRL0, ENDMA|_8BIT);
while(fifodata>0) {
- int data;
- data=GETPORT(DATAPORT);
+ GETPORT(DATAPORT);
fifodata--;
DATA_LEN++;
}
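The PIO loops drain this_residual from the current scatterlist element and then step forward with sg_next() unless sg_is_last(); datai_run above and datao_run below share that advance step, isolated here as a sketch:

/* Sketch: advance the per-command PIO cursor to the next sg element. */
static void sketch_advance_sg(struct aha152x_cmd_priv *acp)
{
	if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
		acp->buffer = sg_next(acp->buffer);
		acp->ptr = SG_ADDRESS(acp->buffer);	/* driver's address macro */
		acp->this_residual = acp->buffer->length;
	}
}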
@@ -2089,14 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
static void datao_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
unsigned long the_time;
int data_count;
/* until phase changes or all data sent */
- while(TESTLO(DMASTAT, INTSTAT) && CURRENT_SC->SCp.this_residual>0) {
+ while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
data_count = 128;
- if(data_count > CURRENT_SC->SCp.this_residual)
- data_count=CURRENT_SC->SCp.this_residual;
+ if (data_count > acp->this_residual)
+ data_count = acp->this_residual;
if(TESTLO(DMASTAT, DFIFOEMP)) {
scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2107,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
if(data_count & 1) {
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
- SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
- CURRENT_SC->SCp.this_residual--;
+ SETPORT(DATAPORT, *acp->ptr++);
+ acp->this_residual--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
if(data_count > 1) {
data_count >>= 1;
- outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
- CURRENT_SC->SCp.ptr += 2 * data_count;
- CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ outsw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if (CURRENT_SC->SCp.this_residual == 0 &&
- !sg_is_last(CURRENT_SC->SCp.buffer)) {
+ if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
the_time=jiffies + 100*HZ;
@@ -2142,6 +2187,8 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
if(TESTLO(DMASTAT, DFIFOEMP)) {
u32 datao_cnt = GETSTCNT();
int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
@@ -2159,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
sg = sg_next(sg);
}
- CURRENT_SC->SCp.buffer = sg;
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
- done;
+ acp->buffer = sg;
+ acp->ptr = SG_ADDRESS(acp->buffer) + done;
+ acp->this_residual = acp->buffer->length - done;
}
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2190,7 +2236,8 @@ static int update_state(struct Scsi_Host *shpnt)
SETPORT(SSTAT1,SCSIRSTI);
} else if (stat0 & SELDI && PREVSTATE == busfree) {
STATE=seldi;
- } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) {
+ } else if (stat0 & SELDO && CURRENT_SC &&
+ (aha152x_priv(CURRENT_SC)->phase & selecting)) {
STATE=seldo;
} else if(stat1 & SELTO) {
STATE=selto;
@@ -2232,7 +2279,7 @@ static int update_state(struct Scsi_Host *shpnt)
static void parerr_run(struct Scsi_Host *shpnt)
{
scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n");
- done(shpnt, DID_PARITY << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_PARITY);
}
/*
@@ -2255,15 +2302,15 @@ static void rsti_run(struct Scsi_Host *shpnt)
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
- ptr->result = DID_RESET << 16;
- ptr->scsi_done(ptr);
+ set_host_byte(ptr, DID_RESET);
+ aha152x_scsi_done(ptr);
}
ptr = next;
}
if(CURRENT_SC && !CURRENT_SC->device->soft_reset)
- done(shpnt, DID_RESET << 16 );
+ done(shpnt, SAM_STAT_GOOD, DID_RESET);
}
@@ -2322,7 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(SXFRCTL0, CH1);
SETPORT(DMACNTRL0, 0);
if(CURRENT_SC)
- CURRENT_SC->SCp.phase &= ~spiordy;
+ aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
}
/*
@@ -2344,7 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(DMACNTRL0, 0);
SETPORT(SXFRCTL0, CH1|SPIOEN);
if(CURRENT_SC)
- CURRENT_SC->SCp.phase |= spiordy;
+ aha152x_priv(CURRENT_SC)->phase |= spiordy;
}
/*
@@ -2434,21 +2481,23 @@ static void disp_enintr(struct Scsi_Host *shpnt)
*/
static void show_command(struct scsi_cmnd *ptr)
{
+ const int phase = aha152x_priv(ptr)->phase;
+
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
"request_bufflen=%d; resid=%d; "
"phase |%s%s%s%s%s%s%s%s%s; next=0x%p",
scsi_bufflen(ptr), scsi_get_resid(ptr),
- (ptr->SCp.phase & not_issued) ? "not issued|" : "",
- (ptr->SCp.phase & selecting) ? "selecting|" : "",
- (ptr->SCp.phase & identified) ? "identified|" : "",
- (ptr->SCp.phase & disconnected) ? "disconnected|" : "",
- (ptr->SCp.phase & completed) ? "completed|" : "",
- (ptr->SCp.phase & spiordy) ? "spiordy|" : "",
- (ptr->SCp.phase & syncneg) ? "syncneg|" : "",
- (ptr->SCp.phase & aborted) ? "aborted|" : "",
- (ptr->SCp.phase & resetted) ? "resetted|" : "",
- (SCDATA(ptr)) ? SCNEXT(ptr) : NULL);
+ phase & not_issued ? "not issued|" : "",
+ phase & selecting ? "selecting|" : "",
+ phase & identified ? "identified|" : "",
+ phase & disconnected ? "disconnected|" : "",
+ phase & completed ? "completed|" : "",
+ phase & spiordy ? "spiordy|" : "",
+ phase & syncneg ? "syncneg|" : "",
+ phase & aborted ? "aborted|" : "",
+ phase & resetted ? "resetted|" : "",
+ SCDATA(ptr) ? SCNEXT(ptr) : NULL);
}
/*
@@ -2480,6 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+ const int phase = acp->phase;
int i;
seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2489,24 +2540,24 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
seq_printf(m, "0x%02x ", ptr->cmnd[i]);
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
- scsi_get_resid(ptr), ptr->SCp.this_residual,
- sg_nents(ptr->SCp.buffer) - 1);
+ scsi_get_resid(ptr), acp->this_residual,
+ sg_nents(acp->buffer) - 1);
- if (ptr->SCp.phase & not_issued)
+ if (phase & not_issued)
seq_puts(m, "not issued|");
- if (ptr->SCp.phase & selecting)
+ if (phase & selecting)
seq_puts(m, "selecting|");
- if (ptr->SCp.phase & disconnected)
+ if (phase & disconnected)
seq_puts(m, "disconnected|");
- if (ptr->SCp.phase & aborted)
+ if (phase & aborted)
seq_puts(m, "aborted|");
- if (ptr->SCp.phase & identified)
+ if (phase & identified)
seq_puts(m, "identified|");
- if (ptr->SCp.phase & completed)
+ if (phase & completed)
seq_puts(m, "completed|");
- if (ptr->SCp.phase & spiordy)
+ if (phase & spiordy)
seq_puts(m, "spiordy|");
- if (ptr->SCp.phase & syncneg)
+ if (phase & syncneg)
seq_puts(m, "syncneg|");
seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
}
@@ -2911,6 +2962,7 @@ static struct scsi_host_template aha152x_driver_template = {
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
.slave_alloc = aha152x_adjust_queue,
+ .cmd_size = sizeof(struct aha152x_cmd_priv),
};
#if !defined(AHA152X_PCMCIA)
@@ -3368,13 +3420,11 @@ static int __init aha152x_setup(char *str)
setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
- if (ints[0] > 8) { /*}*/
+ if (ints[0] > 8)
printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
"[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
- } else {
+ else
setup_count++;
- return 0;
- }
return 1;
}
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index dc5667afeb27..552ca95157da 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -65,9 +65,12 @@ struct aha1542_hostdata {
dma_addr_t ccb_handle;
};
+#define AHA1542_MAX_SECTORS 16
+
struct aha1542_cmd {
- struct chain *chain;
- dma_addr_t chain_handle;
+ /* bounce buffer */
+ void *data_buffer;
+ dma_addr_t data_buffer_handle;
};
static inline void aha1542_intr_reset(u16 base)
@@ -119,8 +122,10 @@ static int aha1542_out(unsigned int base, u8 *buf, int len)
return 0;
}
-/* Only used at boot time, so we do not need to worry about latency as much
- here */
+/*
+ * Only used at boot time, so we do not need to worry about latency as much
+ * here
+ */
static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout)
{
@@ -142,35 +147,43 @@ static int makecode(unsigned hosterr, unsigned scsierr)
break;
case 0x11: /* Selection time out-The initiator selection or target
- reselection was not complete within the SCSI Time out period */
+ * reselection was not complete within the SCSI Time out period
+ */
hosterr = DID_TIME_OUT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
- than was allocated by the Data Length field or the sum of the
- Scatter / Gather Data Length fields. */
+ * than was allocated by the Data Length field or the sum of the
+ * Scatter / Gather Data Length fields.
+ */
case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was
- invalid. This usually indicates a software failure. */
+ * invalid. This usually indicates a software failure.
+ */
case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid.
- This usually indicates a software failure. */
+ * This usually indicates a software failure.
+ */
case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set
- of linked CCB's does not specify the same logical unit number as
- the first. */
+ * of linked CCB's does not specify the same logical unit number as
+ * the first.
+ */
case 0x18: /* Invalid Target Direction received from Host-The direction of a
- Target Mode CCB was invalid. */
+ * Target Mode CCB was invalid.
+ */
case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was
- received to service data transfer between the same target LUN
- and initiator SCSI ID in the same direction. */
+ * received to service data transfer between the same target LUN
+ * and initiator SCSI ID in the same direction.
+ */
case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
- length segment or invalid segment list boundaries was received.
- A CCB parameter was invalid. */
+ * length segment or invalid segment list boundaries was received.
+ * A CCB parameter was invalid.
+ */
#ifdef DEBUG
printk("Aha1542: %x %x\n", hosterr, scsierr);
#endif
@@ -178,9 +191,10 @@ static int makecode(unsigned hosterr, unsigned scsierr)
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
- phase sequence was requested by the target. The host adapter
- will generate a SCSI Reset Condition, notifying the host with
- a SCRD interrupt */
+ * phase sequence was requested by the target. The host adapter
+ * will generate a SCSI Reset Condition, notifying the host with
+ * a SCRD interrupt
+ */
hosterr = DID_RESET;
break;
default:
@@ -192,7 +206,6 @@ static int makecode(unsigned hosterr, unsigned scsierr)
static int aha1542_test_port(struct Scsi_Host *sh)
{
- u8 inquiry_result[4];
int i;
/* Quick and dirty test for presence of the card. */
@@ -216,15 +229,17 @@ static int aha1542_test_port(struct Scsi_Host *sh)
if (inb(INTRFLAGS(sh->io_port)) & INTRMASK)
return 0;
- /* Perform a host adapter inquiry instead so we do not need to set
- up the mailboxes ahead of time */
+ /*
+ * Perform a host adapter inquiry instead so we do not need to set
+ * up the mailboxes ahead of time
+ */
aha1542_outb(sh->io_port, CMD_INQUIRY);
for (i = 0; i < 4; i++) {
if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
return 0;
- inquiry_result[i] = inb(DATA(sh->io_port));
+ (void)inb(DATA(sh->io_port));
}
/* Reading port should reset DF */
@@ -244,15 +259,19 @@ static int aha1542_test_port(struct Scsi_Host *sh)
static void aha1542_free_cmd(struct scsi_cmnd *cmd)
{
struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
- struct device *dev = cmd->device->host->dma_dev;
- size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
- if (acmd->chain) {
- dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
- kfree(acmd->chain);
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ void *buf = acmd->data_buffer;
+ struct req_iterator iter;
+ struct bio_vec bv;
+
+ rq_for_each_segment(bv, rq, iter) {
+ memcpy_to_bvec(&bv, buf);
+ buf += bv.bv_len;
+ }
}
- acmd->chain = NULL;
scsi_dma_unmap(cmd);
}
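With the chain scatter/gather descriptors gone, reads complete by copying the coherent bounce buffer back into the request's bio segments; writes do the mirror-image copy in queuecommand further down. The copy-back, as a standalone sketch:

/* Sketch: copy a linear bounce buffer back into a request's segments. */
static void sketch_bounce_to_rq(struct request *rq, void *bounce)
{
	struct req_iterator iter;
	struct bio_vec bv;

	rq_for_each_segment(bv, rq, iter) {
		memcpy_to_bvec(&bv, bounce);	/* handles highmem pages */
		bounce += bv.bv_len;
	}
}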
@@ -260,7 +279,6 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
struct Scsi_Host *sh = dev_id;
struct aha1542_hostdata *aha1542 = shost_priv(sh);
- void (*my_done)(struct scsi_cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
int number_serviced;
unsigned long flags;
@@ -284,7 +302,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
if (flag & SCRD)
printk("SCRD ");
printk("status %02x\n", inb(STATUS(sh->io_port)));
- };
+ }
#endif
number_serviced = 0;
@@ -292,10 +310,12 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
while (1) {
flag = inb(INTRFLAGS(sh->io_port));
- /* Check for unusual interrupts. If any of these happen, we should
- probably do something special, but for now just printing a message
- is sufficient. A SCSI reset detected is something that we really
- need to deal with in some way. */
+ /*
+ * Check for unusual interrupts. If any of these happen, we should
+ * probably do something special, but for now just printing a message
+ * is sufficient. A SCSI reset detected is something that we really
+ * need to deal with in some way.
+ */
if (flag & ~MBIF) {
if (flag & MBOA)
printk("MBOF ");
@@ -324,7 +344,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
if (!number_serviced)
shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
return IRQ_HANDLED;
- };
+ }
mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
mbistatus = mb[mbi].status;
@@ -346,18 +366,19 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
tmp_cmd = aha1542->int_cmds[mbo];
- if (!tmp_cmd || !tmp_cmd->scsi_done) {
+ if (!tmp_cmd) {
spin_unlock_irqrestore(sh->host_lock, flags);
shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
ccb[mbo].hastat, ccb[mbo].idlun, mbo);
return IRQ_HANDLED;
}
- my_done = tmp_cmd->scsi_done;
aha1542_free_cmd(tmp_cmd);
- /* Fetch the sense data, and tuck it away, in the required slot. The
- Adaptec automatically fetches it, and there is no guarantee that
- we will still have it in the cdb when we come back */
+ /*
+ * Fetch the sense data, and tuck it away, in the required slot. The
+ * Adaptec automatically fetches it, and there is no guarantee that
+ * we will still have it in the cdb when we come back
+ */
if (ccb[mbo].tarstat == 2)
memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
SCSI_SENSE_BUFFERSIZE);
@@ -383,10 +404,11 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
#endif
tmp_cmd->result = errstatus;
aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
- far as queuecommand is concerned */
- my_done(tmp_cmd);
+ * far as queuecommand is concerned
+ */
+ scsi_done(tmp_cmd);
number_serviced++;
- };
+ }
}
static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
@@ -398,14 +420,14 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
u8 lun = cmd->device->lun;
unsigned long flags;
int bufflen = scsi_bufflen(cmd);
- int mbo, sg_count;
+ int mbo;
struct mailbox *mb = aha1542->mb;
struct ccb *ccb = aha1542->ccb;
if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
cmd->result = 0;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
#ifdef DEBUG
@@ -420,21 +442,23 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
}
#endif
- sg_count = scsi_dma_map(cmd);
- if (sg_count) {
- size_t len = sg_count * sizeof(struct chain);
-
- acmd->chain = kmalloc(len, GFP_DMA);
- if (!acmd->chain)
- goto out_unmap;
- acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
- goto out_free_chain;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ void *buf = acmd->data_buffer;
+ struct req_iterator iter;
+ struct bio_vec bv;
+
+ rq_for_each_segment(bv, rq, iter) {
+ memcpy_from_bvec(buf, &bv);
+ buf += bv.bv_len;
+ }
}
- /* Use the outgoing mailboxes in a round-robin fashion, because this
- is how the host adapter will scan for them */
+ /*
+ * Use the outgoing mailboxes in a round-robin fashion, because this
+ * is how the host adapter will scan for them
+ */
spin_lock_irqsave(sh->host_lock, flags);
mbo = aha1542->aha1542_last_mbo_used + 1;
@@ -453,12 +477,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
panic("Unable to find empty mailbox for aha1542.\n");
aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from
- screwing with this cdb. */
+ * screwing with this cdb.
+ */
aha1542->aha1542_last_mbo_used = mbo;
#ifdef DEBUG
- shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
+ shost_printk(KERN_DEBUG, sh, "Sending command (%d)...", mbo);
#endif
/* This gets trashed for some reason */
@@ -475,27 +500,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
direction = 16;
memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);
-
- if (bufflen) {
- struct scatterlist *sg;
- int i;
-
- ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
- scsi_for_each_sg(cmd, sg, sg_count, i) {
- any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
- any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
- };
- any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
- any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
-#ifdef DEBUG
- shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
- print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
-#endif
- } else {
- ccb[mbo].op = 0; /* SCSI Initiator Command */
- any2scsi(ccb[mbo].datalen, 0);
+ ccb[mbo].op = 0; /* SCSI Initiator Command */
+ any2scsi(ccb[mbo].datalen, bufflen);
+ if (bufflen)
+ any2scsi(ccb[mbo].dataptr, acmd->data_buffer_handle);
+ else
any2scsi(ccb[mbo].dataptr, 0);
- };
ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */
ccb[mbo].rsalen = 16;
ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
@@ -510,12 +520,6 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
spin_unlock_irqrestore(sh->host_lock, flags);
return 0;
-out_free_chain:
- kfree(acmd->chain);
- acmd->chain = NULL;
-out_unmap:
- scsi_dma_unmap(cmd);
- return SCSI_MLQUEUE_HOST_BUSY;
}
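The CCB's dataptr and datalen fields are 3-byte, MSB-first quantities (see the aha1542.h hunk below), packed from 32-bit values by any2scsi(). A sketch of that packing, assuming plain truncation to 24 bits:

/* Sketch: pack a 32-bit value into a 3-byte MSB-first CCB field. */
static inline void sketch_any2scsi(u8 *p, u32 v)
{
	p[0] = v >> 16;	/* MSB */
	p[1] = v >> 8;
	p[2] = v;	/* LSB */
}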
/* Initialize mailboxes */
@@ -530,7 +534,7 @@ static void setup_mailboxes(struct Scsi_Host *sh)
any2scsi(aha1542->mb[i].ccbptr,
aha1542->ccb_handle + i * sizeof(struct ccb));
aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
- };
+ }
aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
any2scsi(mb_cmd + 2, aha1542->mb_handle);
if (aha1542_out(sh->io_port, mb_cmd, 5))
@@ -545,7 +549,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
i = inb(STATUS(sh->io_port));
if (i & DF) {
i = inb(DATA(sh->io_port));
- };
+ }
aha1542_outb(sh->io_port, CMD_RETCONF);
aha1542_in(sh->io_port, inquiry_result, 3, 0);
if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -565,14 +569,16 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
sh->dma_channel = 0;
break;
case 0:
- /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
- Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
+ /*
+ * This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
+ * Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this.
+ */
sh->dma_channel = 0xFF;
break;
default:
shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
return -1;
- };
+ }
switch (inquiry_result[1]) {
case 0x40:
sh->irq = 15;
@@ -595,13 +601,15 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
default:
shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
return -1;
- };
+ }
sh->this_id = inquiry_result[2] & 7;
return 0;
}
-/* This function should only be called for 1542C boards - we can detect
- the special firmware settings and unlock the board */
+/*
+ * This function should only be called for 1542C boards - we can detect
+ * the special firmware settings and unlock the board
+ */
static int aha1542_mbenable(struct Scsi_Host *sh)
{
@@ -628,7 +636,7 @@ static int aha1542_mbenable(struct Scsi_Host *sh)
if (aha1542_out(sh->io_port, mbenable_cmd, 3))
goto fail;
- };
+ }
while (0) {
fail:
shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
@@ -646,7 +654,7 @@ static int aha1542_query(struct Scsi_Host *sh)
i = inb(STATUS(sh->io_port));
if (i & DF) {
i = inb(DATA(sh->io_port));
- };
+ }
aha1542_outb(sh->io_port, CMD_INQUIRY);
aha1542_in(sh->io_port, inquiry_result, 4, 0);
if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -655,19 +663,22 @@ static int aha1542_query(struct Scsi_Host *sh)
aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */
- /* For an AHA1740 series board, we ignore the board since there is a
- hardware bug which can lead to wrong blocks being returned if the board
- is operating in the 1542 emulation mode. Since there is an extended mode
- driver, we simply ignore the board and let the 1740 driver pick it up.
+ /*
+ * For an AHA1740 series board, we ignore the board since there is a
+ * hardware bug which can lead to wrong blocks being returned if the board
+ * is operating in the 1542 emulation mode. Since there is an extended mode
+ * driver, we simply ignore the board and let the 1740 driver pick it up.
*/
if (inquiry_result[0] == 0x43) {
shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
return 1;
- };
+ }
- /* Always call this - boards that do not support extended bios translation
- will ignore the command, and we will set the proper default */
+ /*
+ * Always call this - boards that do not support extended bios translation
+ * will ignore the command, and we will set the proper default
+ */
aha1542->bios_translation = aha1542_mbenable(sh);
@@ -877,8 +888,9 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)
panic("Unable to find empty mailbox for aha1542.\n");
aha1542->int_cmds[mbo] = cmd; /* This will effectively
- prevent someone else from
- screwing with this cdb. */
+ * prevent someone else from
+ * screwing with this cdb.
+ */
aha1542->aha1542_last_mbo_used = mbo;
@@ -894,9 +906,9 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)
ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
ccb[mbo].commlinkid = 0;
- /*
- * Now tell the 1542 to flush all pending commands for this
- * target
+ /*
+ * Now tell the 1542 to flush all pending commands for this
+ * target
*/
aha1542_outb(sh->io_port, CMD_START_SCSI);
spin_unlock_irqrestore(sh->host_lock, flags);
@@ -915,7 +927,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
int i;
spin_lock_irqsave(sh->host_lock, flags);
- /*
+ /*
* This does a scsi reset for all devices on the bus.
* In principle, we could also reset the 1542 - should
* we do this? Try this first, and we can add that later
@@ -939,7 +951,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
/*
* Now try to pick up the pieces. For all pending commands,
* free any internal data structures, and basically clear things
- * out. We do not try and restart any commands or anything -
+ * out. We do not try and restart any commands or anything -
* the strategy handler takes care of that crap.
*/
shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no);
@@ -998,6 +1010,27 @@ static int aha1542_biosparam(struct scsi_device *sdev,
}
MODULE_LICENSE("GPL");
+static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+
+ acmd->data_buffer = dma_alloc_coherent(shost->dma_dev,
+ SECTOR_SIZE * AHA1542_MAX_SECTORS,
+ &acmd->data_buffer_handle, GFP_KERNEL);
+ if (!acmd->data_buffer)
+ return -ENOMEM;
+ return 0;
+}
+
+static int aha1542_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+
+ dma_free_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS,
+ acmd->data_buffer, acmd->data_buffer_handle);
+ return 0;
+}
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.proc_name = "aha1542",
@@ -1008,10 +1041,12 @@ static struct scsi_host_template driver_template = {
.eh_bus_reset_handler = aha1542_bus_reset,
.eh_host_reset_handler = aha1542_host_reset,
.bios_param = aha1542_biosparam,
- .can_queue = AHA1542_MAILBOXES,
+ .init_cmd_priv = aha1542_init_cmd_priv,
+ .exit_cmd_priv = aha1542_exit_cmd_priv,
+ .can_queue = AHA1542_MAILBOXES,
.this_id = 7,
- .sg_tablesize = 16,
- .unchecked_isa_dma = 1,
+ .max_sectors = AHA1542_MAX_SECTORS,
+ .sg_tablesize = SG_ALL,
};
static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
@@ -1025,12 +1060,11 @@ static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int aha1542_isa_remove(struct device *pdev,
+static void aha1542_isa_remove(struct device *pdev,
unsigned int ndev)
{
aha1542_release(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
- return 0;
}
static struct isa_driver aha1542_isa_driver = {
@@ -1063,8 +1097,10 @@ static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *i
io[indx] = pnp_port_start(pdev, 0);
- /* The card can be queried for its DMA, we have
- the DMA set up that is enough */
+ /*
+ * The card could be queried for its DMA channel, but we already
+ * have the DMA set up, and that is enough.
+ */
dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]);
}
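A minimal sketch of the per-command private-data hook the aha1542 hunks above rely on, assuming the host template also sets .cmd_size = sizeof(struct aha1542_cmd) elsewhere in the patch (not visible in these hunks) so the midlayer reserves a private area behind each scsi_cmnd; the names here are illustrative, not from the patch:

#include <scsi/scsi_cmnd.h>

struct example_cmd_priv {
	void *bounce;			/* DMA-coherent bounce buffer */
	dma_addr_t bounce_dma;		/* its bus address for the adapter */
};

static void example_use(struct scsi_cmnd *cmd)
{
	/* scsi_cmd_priv() returns the per-command area the midlayer
	 * allocated; init_cmd_priv() populated it at host setup time. */
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	/* Program the adapter with priv->bounce_dma and stage data
	 * through priv->bounce; coherent memory needs no dma_sync_*(). */
	memset(priv->bounce, 0, 512);
}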
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
index f5b0d210fb3c..92a5f9896ace 100644
--- a/drivers/scsi/aha1542.h
+++ b/drivers/scsi/aha1542.h
@@ -78,23 +78,28 @@ static inline void any2scsi(u8 *p, u32 v)
#define MAX_CDB 12
#define MAX_SENSE 14
-struct ccb { /* Command Control Block 5.3 */
- u8 op; /* Command Control Block Operation Code */
- u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */
- /* Outbound data transfer, length is checked*/
- /* Inbound data transfer, length is checked */
- /* Logical Unit Number */
+/* Command Control Block (CCB), 5.3 */
+struct ccb {
+ u8 op; /* Command Control Block Operation Code: */
+ /* 0x00: SCSI Initiator CCB, 0x01: SCSI Target CCB, */
+ /* 0x02: SCSI Initiator CCB with Scatter/Gather, */
+ /* 0x81: SCSI Bus Device Reset CCB */
+ u8 idlun; /* Address and Direction Control: */
+ /* Bits 7-5: op=0, 2: Target ID, op=1: Initiator ID */
+ /* Bit 4: Outbound data transfer, length is checked */
+ /* Bit 3: Inbound data transfer, length is checked */
+ /* Bits 2-0: Logical Unit Number */
u8 cdblen; /* SCSI Command Length */
- u8 rsalen; /* Request Sense Allocation Length/Disable */
- u8 datalen[3]; /* Data Length (msb, .., lsb) */
- u8 dataptr[3]; /* Data Pointer */
- u8 linkptr[3]; /* Link Pointer */
+ u8 rsalen; /* Request Sense Allocation Length/Disable Auto Sense */
+ u8 datalen[3]; /* Data Length (MSB, ..., LSB) */
+ u8 dataptr[3]; /* Data Pointer (MSB, ..., LSB) */
+ u8 linkptr[3]; /* Link Pointer (MSB, ..., LSB) */
u8 commlinkid; /* Command Linking Identifier */
- u8 hastat; /* Host Adapter Status (HASTAT) */
- u8 tarstat; /* Target Device Status */
+ u8 hastat; /* Host Adapter Status (HASTAT) */
+ u8 tarstat; /* Target Device Status (TARSTAT) */
u8 reserved[2];
- u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */
- /* REQUEST SENSE */
+ u8 cdb[MAX_CDB + MAX_SENSE]; /* SCSI Command Descriptor Block */
+ /* followed by the Auto Sense data */
};
#define AHA1542_REGION_SIZE 4
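The datalen[3]/dataptr[3]/linkptr[3] fields above are 24-bit big-endian (MSB..LSB) values; a sketch of the packing this implies, assuming the any2scsi() helper named in the hunk header stores values the same way:

static inline void pack24_be(u8 *p, u32 v)
{
	/* MSB first, matching the CCB field comments above */
	p[0] = v >> 16;
	p[1] = v >> 8;
	p[2] = v;
}

static inline u32 unpack24_be(const u8 *p)
{
	return ((u32)p[0] << 16) | ((u32)p[1] << 8) | p[2];
}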
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index da4150c17781..134255751819 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -55,8 +55,12 @@
#include <asm/dma.h>
#include <asm/io.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "aha1740.h"
/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
@@ -152,6 +156,7 @@ static int aha1740_makecode(unchar *sense, unchar *status)
retval=DID_ERROR; /* It's an Overrun */
/* If not overrun, assume underrun and
* ignore it! */
+ break;
case 0x00: /* No info, assume no error, should
* not occur */
break;
@@ -266,8 +271,11 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
guarantee that we will still have it in the
cdb when we come back */
if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
- memcpy(SCtmp->sense_buffer, ecbptr->sense,
- SCSI_SENSE_BUFFERSIZE);
+ memcpy_and_pad(SCtmp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ ecbptr->sense,
+ sizeof(ecbptr->sense),
+ 0);
errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
} else
errstatus = 0;
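memcpy_and_pad(dst, dst_len, src, src_len, pad) copies min(dst_len, src_len) bytes and fills any remaining destination bytes with pad, so the replacement above can neither over-read the adapter's smaller sense area nor leave stale bytes in the midlayer buffer. Open-coded, the call behaves like this sketch:

static void sense_copy(u8 *dst, size_t dlen, const u8 *src, size_t slen)
{
	size_t n = dlen < slen ? dlen : slen;

	memcpy(dst, src, n);		/* copy what the source has */
	memset(dst + n, 0, dlen - n);	/* zero-fill the tail */
}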
@@ -311,9 +319,9 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
unchar target = scmd_id(SCpnt);
@@ -592,7 +600,6 @@ static int aha1740_probe (struct device *dev)
DMA_BIDIRECTIONAL);
if (!host->ecb_dma_addr) {
printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
- scsi_host_put (shpnt);
goto err_host_put;
}
diff --git a/drivers/scsi/aic7xxx/.gitignore b/drivers/scsi/aic7xxx/.gitignore
index b8ee24d5748a..9aa780221718 100644
--- a/drivers/scsi/aic7xxx/.gitignore
+++ b/drivers/scsi/aic7xxx/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
aic79xx_reg.h
aic79xx_reg_print.c
aic79xx_seq.h
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 16743fb9eead..a47dbd500e9a 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -15,7 +15,7 @@ config AIC79XX_CMDS_PER_DEVICE
int "Maximum number of TCQ commands per device"
depends on SCSI_AIC79XX
default "32"
- ---help---
+ help
Specify the number of commands you would like to allocate per SCSI
device when Tagged Command Queueing (TCQ) is enabled on that device.
@@ -32,13 +32,13 @@ config AIC79XX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic79xx.txt for details.
+ "tag_info" option. See Documentation/scsi/aic79xx.rst for details.
config AIC79XX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
depends on SCSI_AIC79XX
default "5000"
- ---help---
+ help
The number of milliseconds to delay after an initial bus reset.
The bus settle delay following all error recovery actions is
dictated by the SCSI layer and is not affected by this value.
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 3546b8cc401f..0cfd92ce750a 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -7,7 +7,7 @@ config SCSI_AIC7XXX
tristate "Adaptec AIC7xxx Fast -> U160 support"
depends on (PCI || EISA) && SCSI
select SCSI_SPI_ATTRS
- ---help---
+ help
This driver supports all of Adaptec's Fast through Ultra 160 PCI
based SCSI controllers as well as the aic7770 based EISA and VLB
SCSI controllers (the 274x and 284x series). For AAA and ARO based
@@ -20,7 +20,7 @@ config AIC7XXX_CMDS_PER_DEVICE
int "Maximum number of TCQ commands per device"
depends on SCSI_AIC7XXX
default "32"
- ---help---
+ help
Specify the number of commands you would like to allocate per SCSI
device when Tagged Command Queueing (TCQ) is enabled on that device.
@@ -37,13 +37,13 @@ config AIC7XXX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic7xxx.txt for details.
+ "tag_info" option. See Documentation/scsi/aic7xxx.rst for details.
config AIC7XXX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
depends on SCSI_AIC7XXX
default "5000"
- ---help---
+ help
The number of milliseconds to delay after an initial bus reset.
The bus settle delay following all error recovery actions is
dictated by the SCSI layer and is not affected by this value.
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 9a515551641c..59e9321815c8 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -211,7 +211,7 @@ typedef enum {
*/
typedef enum {
AHD_FENONE = 0x00000,
- AHD_WIDE = 0x00001,/* Wide Channel */
+ AHD_WIDE = 0x00001,/* Wide Channel */
AHD_AIC79XXB_SLOWCRC = 0x00002,/* SLOWCRC bit should be set */
AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */
AHD_TARGETMODE = 0x01000,/* Has tested target mode support */
@@ -433,7 +433,7 @@ union initiator_data {
* Target mode version of the shared data SCB segment.
*/
struct target_data {
- uint32_t spare[2];
+ uint32_t spare[2];
uint8_t scsi_status; /* SCSI status to give to initiator */
uint8_t target_phases; /* Bitmap of phases to execute */
uint8_t data_phase; /* Data-In or Data-Out */
@@ -608,9 +608,9 @@ struct scb {
struct ahd_softc *ahd_softc;
scb_flag flags;
struct scb_platform_data *platform_data;
- struct map_node *hscb_map;
- struct map_node *sg_map;
- struct map_node *sense_map;
+ struct map_node *hscb_map;
+ struct map_node *sg_map;
+ struct map_node *sense_map;
void *sg_list;
uint8_t *sense_data;
dma_addr_t sg_list_busaddr;
@@ -674,7 +674,7 @@ struct scb_data {
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
uint8_t identify; /* Identify message */
- uint8_t bytes[22]; /*
+ uint8_t bytes[22]; /*
* Bytes contains any additional message
* bytes terminated by 0xFF. The remainder
* is the cdb to execute.
@@ -712,7 +712,7 @@ struct ahd_tmode_event {
* structure here so we can store arrays of them, etc. in OS neutral
* data structures.
*/
-#ifdef AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
struct ahd_tmode_lstate {
struct cam_path *path;
struct ccb_hdr_slist accept_tios;
@@ -807,11 +807,11 @@ struct ahd_tmode_tstate {
/***************************** Lookup Tables **********************************/
/*
* Phase -> name and message out response
- * to parity errors in each phase table.
+ * to parity errors in each phase table.
*/
struct ahd_phase_table_entry {
- uint8_t phase;
- uint8_t mesg_out; /* Message response to parity errors */
+ uint8_t phase;
+ uint8_t mesg_out; /* Message response to parity errors */
const char *phasemsg;
};
@@ -844,7 +844,7 @@ struct seeprom_config {
#define CFBS_ENABLED 0x04
#define CFBS_DISABLED_SCAN 0x08
#define CFENABLEDV 0x0010 /* Perform Domain Validation */
-#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
+#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
#define CFSPARITY 0x0040 /* SCSI parity */
#define CFEXTEND 0x0080 /* extended translation enabled */
#define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */
@@ -858,7 +858,7 @@ struct seeprom_config {
/*
* Host Adapter Control Bits
*/
- uint16_t adapter_control; /* word 17 */
+ uint16_t adapter_control; /* word 17 */
#define CFAUTOTERM 0x0001 /* Perform Auto termination */
#define CFSTERM 0x0002 /* SCSI low byte termination */
#define CFWSTERM 0x0004 /* SCSI high byte termination */
@@ -867,7 +867,7 @@ struct seeprom_config {
#define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */
#define CFSTPWLEVEL 0x0040 /* Termination level control */
#define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */
-#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
+#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
#define CFCLUSTERENB 0x8000 /* Cluster Enable */
/*
@@ -881,7 +881,7 @@ struct seeprom_config {
/*
* Maximum targets
*/
- uint16_t max_targets; /* word 19 */
+ uint16_t max_targets; /* word 19 */
#define CFMAXTARG 0x00ff /* maximum targets */
#define CFBOOTLUN 0x0f00 /* Lun to boot from */
#define CFBOOTID 0xf000 /* Target to boot from */
@@ -941,7 +941,7 @@ struct vpd_config {
#define FLX_ROMSTAT_EE_2MBx8 0x2
#define FLX_ROMSTAT_EE_4MBx8 0x3
#define FLX_ROMSTAT_EE_16MBx8 0x4
-#define CURSENSE_ENB 0x1
+#define CURSENSE_ENB 0x1
#define FLXADDR_FLEXSTAT 0x2
#define FLX_FSTAT_BUSY 0x1
#define FLXADDR_CURRENT_STAT 0x4
@@ -1051,8 +1051,8 @@ struct ahd_completion
};
struct ahd_softc {
- bus_space_tag_t tags[2];
- bus_space_handle_t bshs[2];
+ bus_space_tag_t tags[2];
+ bus_space_handle_t bshs[2];
struct scb_data scb_data;
struct hardware_scb *next_queued_hscb;
@@ -1175,7 +1175,7 @@ struct ahd_softc {
uint8_t tqinfifonext;
/*
- * Cached verson of the hs_mailbox so we can avoid
+ * Cached version of the hs_mailbox so we can avoid
* pausing the sequencer during mailbox updates.
*/
uint8_t hs_mailbox;
@@ -1243,7 +1243,7 @@ struct ahd_softc {
u_int int_coalescing_threshold;
u_int int_coalescing_stop_threshold;
- uint16_t user_discenable;/* Disconnection allowed */
+ uint16_t user_discenable;/* Disconnection allowed */
uint16_t user_tagenable;/* Tagged Queuing allowed */
};
@@ -1330,10 +1330,8 @@ const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
int ahd_pci_config(struct ahd_softc *,
const struct ahd_pci_identity *);
int ahd_pci_test_register_access(struct ahd_softc *);
-#ifdef CONFIG_PM
-void ahd_pci_suspend(struct ahd_softc *);
-void ahd_pci_resume(struct ahd_softc *);
-#endif
+void __maybe_unused ahd_pci_suspend(struct ahd_softc *);
+void __maybe_unused ahd_pci_resume(struct ahd_softc *);
/************************** SCB and SCB queue management **********************/
void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
@@ -1344,10 +1342,8 @@ struct ahd_softc *ahd_alloc(void *platform_arg, char *name);
int ahd_softc_init(struct ahd_softc *);
void ahd_controller_info(struct ahd_softc *ahd, char *buf);
int ahd_init(struct ahd_softc *ahd);
-#ifdef CONFIG_PM
-int ahd_suspend(struct ahd_softc *ahd);
-void ahd_resume(struct ahd_softc *ahd);
-#endif
+int __maybe_unused ahd_suspend(struct ahd_softc *ahd);
+void __maybe_unused ahd_resume(struct ahd_softc *ahd);
int ahd_default_config(struct ahd_softc *ahd);
int ahd_parse_vpddata(struct ahd_softc *ahd,
struct vpd_config *vpd);
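The suspend/resume declarations above trade #ifdef CONFIG_PM guards for the __maybe_unused attribute, which simply silences the unused-symbol warning when no PM code ends up referencing the definitions; non-PM builds keep seeing the prototypes without any preprocessor clutter:

/* Before: visible only in CONFIG_PM builds; callers need matching guards */
#ifdef CONFIG_PM
int ahd_suspend(struct ahd_softc *ahd);
#endif

/* After: always declared; no warning if the definition goes unreferenced */
int __maybe_unused ahd_suspend(struct ahd_softc *ahd);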
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 7e5044bf05c0..3e3100dbfda3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -57,7 +57,7 @@ static const char *const ahd_chip_names[] =
* Hardware error codes.
*/
struct ahd_hard_error_entry {
- uint8_t errno;
+ uint8_t errno;
const char *errmesg;
};
@@ -73,16 +73,16 @@ static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
static const struct ahd_phase_table_entry ahd_phase_table[] =
{
- { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
- { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
- { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
- { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
- { P_COMMAND, MSG_NOOP, "in Command phase" },
- { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
- { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
+ { P_DATAOUT, NOP, "in Data-out phase" },
+ { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" },
+ { P_DATAOUT_DT, NOP, "in DT Data-out phase" },
+ { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" },
+ { P_COMMAND, NOP, "in Command phase" },
+ { P_MESGOUT, NOP, "in Message-out phase" },
+ { P_STATUS, INITIATOR_ERROR, "in Status phase" },
{ P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
- { P_BUSFREE, MSG_NOOP, "while idle" },
- { 0, MSG_NOOP, "in unknown phase" }
+ { P_BUSFREE, NOP, "while idle" },
+ { 0, NOP, "in unknown phase" }
};
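The table above also shows the constant rename running through this file: the driver-local MSG_* aliases give way to the standard SPI message names the SCSI headers already define. Collected from the hunks in this file:

/* driver-local alias        ->  standard SPI message constant */
/* MSG_NOOP                  ->  NOP                           */
/* MSG_INITIATOR_DET_ERR     ->  INITIATOR_ERROR               */
/* MSG_MESSAGE_REJECT        ->  MESSAGE_REJECT                */
/* MSG_BUS_DEV_RESET         ->  TARGET_RESET                  */
/* MSG_ABORT_TAG             ->  ABORT_TASK                    */
/* MSG_ABORT                 ->  ABORT_TASK_SET                */
/* MSG_EXTENDED              ->  EXTENDED_MESSAGE              */
/* MSG_SIMPLE_TASK           ->  SIMPLE_QUEUE_TAG              */
/* MSG_ORDERED_TASK          ->  ORDERED_QUEUE_TAG             */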
/*
@@ -113,7 +113,7 @@ static void ahd_free_tstate(struct ahd_softc *ahd,
u_int scsi_id, char channel, int force);
#endif
static void ahd_devlimited_syncrate(struct ahd_softc *ahd,
- struct ahd_initiator_tinfo *,
+ struct ahd_initiator_tinfo *,
u_int *period,
u_int *ppr_options,
role_t role);
@@ -170,7 +170,7 @@ static void ahd_setup_target_msgin(struct ahd_softc *ahd,
static u_int ahd_sglist_size(struct ahd_softc *ahd);
static u_int ahd_sglist_allocsize(struct ahd_softc *ahd);
static bus_dmamap_callback_t
- ahd_dmamap_cb;
+ ahd_dmamap_cb;
static void ahd_initialize_hscbs(struct ahd_softc *ahd);
static int ahd_init_scbdata(struct ahd_softc *ahd);
static void ahd_fini_scbdata(struct ahd_softc *ahd);
@@ -268,7 +268,7 @@ static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
static void ahd_handle_hwerrint(struct ahd_softc *ahd);
static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
static void ahd_handle_scsiint(struct ahd_softc *ahd,
- u_int intstat);
+ u_int intstat);
/************************ Sequencer Execution Control *************************/
void
@@ -1126,7 +1126,7 @@ ahd_restart(struct ahd_softc *ahd)
/* No more pending messages */
ahd_clear_msg_state(ahd);
ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */
- ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */
+ ahd_outb(ahd, MSG_OUT, NOP); /* No message to send */
ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
ahd_outb(ahd, SEQINTCTL, 0);
ahd_outb(ahd, LASTPHASE, P_BUSFREE);
@@ -1203,7 +1203,7 @@ ahd_flush_qoutfifo(struct ahd_softc *ahd)
while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
u_int fifo_mode;
u_int i;
-
+
scbid = ahd_inw(ahd, GSFIFO);
scb = ahd_lookup_scb(ahd, scbid);
if (scb == NULL) {
@@ -1326,7 +1326,7 @@ rescan_fifos:
while (!SCBID_IS_NULL(scbid)) {
uint8_t *hscb_ptr;
u_int i;
-
+
ahd_set_scbptr(ahd, scbid);
next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
scb = ahd_lookup_scb(ahd, scbid);
@@ -1735,10 +1735,8 @@ ahd_dump_sglist(struct scb *scb)
sg_list = (struct ahd_dma64_seg*)scb->sg_list;
for (i = 0; i < scb->sg_count; i++) {
uint64_t addr;
- uint32_t len;
addr = ahd_le64toh(sg_list[i].addr);
- len = ahd_le32toh(sg_list[i].len);
printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
i,
(uint32_t)((addr >> 32) & 0xFFFFFFFF),
@@ -1906,9 +1904,6 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
struct ahd_devinfo devinfo;
struct scb *scb;
- struct ahd_initiator_tinfo *targ_info;
- struct ahd_tmode_tstate *tstate;
- struct ahd_transinfo *tinfo;
u_int scbid;
/*
@@ -1936,12 +1931,6 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
SCB_GET_LUN(scb),
SCB_GET_CHANNEL(ahd, scb),
ROLE_INITIATOR);
- targ_info = ahd_fetch_transinfo(ahd,
- devinfo.channel,
- devinfo.our_scsiid,
- devinfo.target,
- &tstate);
- tinfo = &targ_info->curr;
ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_ACTIVE, /*paused*/TRUE);
ahd_set_syncrate(ahd, &devinfo, /*period*/0,
@@ -2002,7 +1991,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
struct scb *scb;
u_int scb_index;
-
+
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
@@ -2018,7 +2007,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
*/
ahd_assert_atn(ahd);
ahd_outb(ahd, MSG_OUT, HOST_MSG);
- ahd->msgout_buf[0] = MSG_ABORT_TASK;
+ ahd->msgout_buf[0] = ABORT_TASK;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -2105,8 +2094,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
ahd->msg_type =
MSG_TYPE_TARGET_MSGOUT;
ahd->msgin_index = 0;
- }
- else
+ } else
ahd_setup_target_msgin(ahd,
&devinfo,
scb);
@@ -2147,7 +2135,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
ahd_dump_card_state(ahd);
- ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
+ ahd->msgout_buf[0] = TARGET_RESET;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -2285,7 +2273,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
switch (scb->hscb->task_management) {
case SIU_TASKMGMT_ABORT_TASK:
tag = SCB_GET_TAG(scb);
- /* fall through */
+ fallthrough;
case SIU_TASKMGMT_ABORT_TASK_SET:
case SIU_TASKMGMT_CLEAR_TASK_SET:
lun = scb->hscb->lun;
@@ -2296,7 +2284,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
break;
case SIU_TASKMGMT_LUN_RESET:
lun = scb->hscb->lun;
- /* fall through */
+ fallthrough;
case SIU_TASKMGMT_TARGET_RESET:
{
struct ahd_devinfo devinfo;
@@ -2349,9 +2337,9 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
;
ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
- SCB_GET_CHANNEL(ahd, scb),
- SCB_GET_LUN(scb), SCB_GET_TAG(scb),
- ROLE_INITIATOR, /*status*/0,
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+ ROLE_INITIATOR, /*status*/0,
SEARCH_REMOVE);
}
break;
@@ -2669,7 +2657,6 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
struct scb *scb;
u_int scbid;
u_int lqistat1;
- u_int lqistat2;
u_int msg_out;
u_int curphase;
u_int lastphase;
@@ -2680,7 +2667,7 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
scb = NULL;
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
- lqistat2 = ahd_inb(ahd, LQISTAT2);
+ ahd_inb(ahd, LQISTAT2);
if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
&& (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
u_int lqistate;
@@ -2704,16 +2691,16 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
lastphase = ahd_inb(ahd, LASTPHASE);
curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
perrdiag = ahd_inb(ahd, PERRDIAG);
- msg_out = MSG_INITIATOR_DET_ERR;
+ msg_out = INITIATOR_ERROR;
ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
-
+
/*
* Try to find the SCB associated with this error.
*/
silent = FALSE;
if (lqistat1 == 0
|| (lqistat1 & LQICRCI_NLQ) != 0) {
- if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
+ if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
ahd_set_active_fifo(ahd);
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid);
@@ -2830,20 +2817,20 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
ahd_lookup_phase_entry(curphase)->phasemsg);
ahd_inb(ahd, SCSIDAT);
}
-
+
if (curphase == P_MESGIN)
msg_out = MSG_PARITY_ERROR;
}
/*
- * We've set the hardware to assert ATN if we
+ * We've set the hardware to assert ATN if we
* get a parity error on "in" phases, so all we
* need to do is stuff the message buffer with
* the appropriate message. "In" phases have set
- * mesg_out to something other than MSG_NOP.
+ * mesg_out to something other than NOP.
*/
ahd->send_msg_perror = msg_out;
- if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
+ if (scb != NULL && msg_out == INITIATOR_ERROR)
scb->flags |= SCB_TRANSMISSION_ERROR;
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
@@ -3063,8 +3050,8 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
u_int tag;
tag = SCB_LIST_NULL;
- if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
- || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK, TRUE)
+ || ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK_SET, TRUE)) {
int found;
int sent_msg;
@@ -3079,9 +3066,9 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
ahd_print_path(ahd, scb);
printk("SCB %d - Abort%s Completed.\n",
SCB_GET_TAG(scb),
- sent_msg == MSG_ABORT_TAG ? "" : " Tag");
+ sent_msg == ABORT_TASK ? "" : " Tag");
- if (sent_msg == MSG_ABORT_TAG)
+ if (sent_msg == ABORT_TASK)
tag = SCB_GET_TAG(scb);
if ((scb->flags & SCB_EXTERNAL_RESET) != 0) {
@@ -3106,25 +3093,12 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printk("found == 0x%x\n", found);
printerror = 0;
} else if (ahd_sent_msg(ahd, AHDMSG_1B,
- MSG_BUS_DEV_RESET, TRUE)) {
-#ifdef __FreeBSD__
- /*
- * Don't mark the user's request for this BDR
- * as completing with CAM_BDR_SENT. CAM3
- * specifies CAM_REQ_CMP.
- */
- if (scb != NULL
- && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
- && ahd_match_scb(ahd, scb, target, 'A',
- CAM_LUN_WILDCARD, SCB_LIST_NULL,
- ROLE_INITIATOR))
- ahd_set_transaction_status(scb, CAM_REQ_CMP);
-#endif
+ TARGET_RESET, TRUE)) {
ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
CAM_BDR_SENT, "Bus Device Reset",
/*verbose_level*/0);
printerror = 0;
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, FALSE)
&& ppr_busfree == 0) {
struct ahd_initiator_tinfo *tinfo;
struct ahd_tmode_tstate *tstate;
@@ -3177,7 +3151,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
}
printerror = 0;
}
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, FALSE)
&& ppr_busfree == 0) {
/*
* Negotiation Rejected. Go-narrow and
@@ -3202,7 +3176,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
ahd_qinfifo_requeue_tail(ahd, scb);
}
printerror = 0;
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, FALSE)
&& ppr_busfree == 0) {
/*
* Negotiation Rejected. Go-async and
@@ -3230,7 +3204,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printerror = 0;
} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
&& ahd_sent_msg(ahd, AHDMSG_1B,
- MSG_INITIATOR_DET_ERR, TRUE)) {
+ INITIATOR_ERROR, TRUE)) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
@@ -3239,7 +3213,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printerror = 0;
} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
&& ahd_sent_msg(ahd, AHDMSG_1B,
- MSG_MESSAGE_REJECT, TRUE)) {
+ MESSAGE_REJECT, TRUE)) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
@@ -3393,7 +3367,7 @@ proto_violation_reset:
ahd_outb(ahd, MSG_OUT, HOST_MSG);
if (scb == NULL) {
ahd_print_devinfo(ahd, &devinfo);
- ahd->msgout_buf[0] = MSG_ABORT_TASK;
+ ahd->msgout_buf[0] = ABORT_TASK;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -3471,7 +3445,6 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
cs = ahd->critical_sections;
for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
-
if (cs->begin < seqaddr && cs->end >= seqaddr)
break;
}
@@ -3497,8 +3470,8 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
if (stepping == FALSE) {
first_instr = seqaddr;
- ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
- simode0 = ahd_inb(ahd, SIMODE0);
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ simode0 = ahd_inb(ahd, SIMODE0);
simode3 = ahd_inb(ahd, SIMODE3);
lqimode0 = ahd_inb(ahd, LQIMODE0);
lqimode1 = ahd_inb(ahd, LQIMODE1);
@@ -3540,7 +3513,7 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
ahd_outb(ahd, LQOMODE1, lqomode1);
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
- ahd_outb(ahd, SIMODE1, simode1);
+ ahd_outb(ahd, SIMODE1, simode1);
/*
* SCSIINT seems to glitch occasionally when
* the interrupt masks are restored. Clear SCSIINT
@@ -3578,7 +3551,7 @@ ahd_clear_intstat(struct ahd_softc *ahd)
ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
- |CLRIOERR|CLROVERRUN);
+ |CLRIOERR|CLROVERRUN);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
}
@@ -3675,8 +3648,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
return;
tstate = ahd->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahd->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -3715,7 +3687,7 @@ ahd_devlimited_syncrate(struct ahd_softc *ahd,
*/
if (role == ROLE_TARGET)
transinfo = &tinfo->user;
- else
+ else
transinfo = &tinfo->goal;
*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
@@ -3746,7 +3718,7 @@ ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
&& *period > AHD_SYNCRATE_MIN_DT)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
-
+
if (*period > AHD_SYNCRATE_MIN)
*period = 0;
@@ -3817,7 +3789,7 @@ ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case MSG_EXT_WDTR_BUS_8_BIT:
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
@@ -4109,7 +4081,7 @@ ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
ahd_outb(ahd, NEGOADDR, devinfo->target);
period = tinfo->period;
offset = tinfo->offset;
- memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
+ memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
con_opts = 0;
@@ -4232,13 +4204,11 @@ ahd_update_pending_scbs(struct ahd_softc *ahd)
pending_scb_count = 0;
LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
struct ahd_devinfo devinfo;
- struct ahd_initiator_tinfo *tinfo;
struct ahd_tmode_tstate *tstate;
ahd_scb_devinfo(ahd, &devinfo, pending_scb);
- tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
- devinfo.our_scsiid,
- devinfo.target, &tstate);
+ ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid,
+ devinfo.target, &tstate);
if ((tstate->auto_negotiate & devinfo.target_mask) == 0
&& (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
@@ -4419,7 +4389,7 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
} else if (scb == NULL) {
printk("%s: WARNING. No pending message for "
"I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
- ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
+ ahd->msgout_buf[ahd->msgout_index++] = NOP;
ahd->msgout_len++;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
return;
@@ -4445,7 +4415,7 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
}
if (scb->flags & SCB_DEVICE_RESET) {
- ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahd->msgout_buf[ahd->msgout_index++] = TARGET_RESET;
ahd->msgout_len++;
ahd_print_path(ahd, scb);
printk("Bus Device Reset Message Sent\n");
@@ -4460,9 +4430,9 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
} else if ((scb->flags & SCB_ABORT) != 0) {
if ((scb->hscb->control & TAG_ENB) != 0) {
- ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
+ ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK;
} else {
- ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
+ ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK_SET;
}
ahd->msgout_len++;
ahd_print_path(ahd, scb);
@@ -4694,7 +4664,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
*/
ahd_outb(ahd, CLRSINT1, CLRATNO);
}
- ahd_outb(ahd, MSG_OUT, MSG_NOOP);
+ ahd_outb(ahd, MSG_OUT, NOP);
ahd_outb(ahd, SEQ_FLAGS2,
ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
ahd_restore_modes(ahd, saved_modes);
@@ -4775,7 +4745,7 @@ reswitch:
* with a busfree.
*/
if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
- && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
+ && ahd->send_msg_perror == INITIATOR_ERROR)
ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
@@ -4877,7 +4847,7 @@ reswitch:
#endif
ahd_assert_atn(ahd);
}
- } else
+ } else
ahd->msgin_index++;
if (message_done == MSGLOOP_TERMINATED) {
@@ -4980,7 +4950,7 @@ reswitch:
*/
return;
}
-
+
ahd->msgin_index++;
/*
@@ -5053,7 +5023,7 @@ ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
index = 0;
while (index < ahd->msgout_len) {
- if (ahd->msgout_buf[index] == MSG_EXTENDED) {
+ if (ahd->msgout_buf[index] == EXTENDED_MESSAGE) {
u_int end_index;
end_index = index + 1 + ahd->msgout_buf[index + 1];
@@ -5067,8 +5037,8 @@ ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
found = TRUE;
}
index = end_index;
- } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
- && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+ } else if (ahd->msgout_buf[index] >= SIMPLE_QUEUE_TAG
+ && ahd->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) {
/* Skip tag type and tag id or residue param*/
index += 2;
@@ -5119,36 +5089,36 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
* extended message type.
*/
switch (ahd->msgin_buf[0]) {
- case MSG_DISCONNECT:
- case MSG_SAVEDATAPOINTER:
- case MSG_CMDCOMPLETE:
- case MSG_RESTOREPOINTERS:
- case MSG_IGN_WIDE_RESIDUE:
+ case DISCONNECT:
+ case SAVE_POINTERS:
+ case COMMAND_COMPLETE:
+ case RESTORE_POINTERS:
+ case IGNORE_WIDE_RESIDUE:
/*
* End our message loop as these are messages
* the sequencer handles on its own.
*/
done = MSGLOOP_TERMINATED;
break;
- case MSG_MESSAGE_REJECT:
+ case MESSAGE_REJECT:
response = ahd_handle_msg_reject(ahd, devinfo);
- /* FALLTHROUGH */
- case MSG_NOOP:
+ fallthrough;
+ case NOP:
done = MSGLOOP_MSGCOMPLETE;
break;
- case MSG_EXTENDED:
+ case EXTENDED_MESSAGE:
{
/* Wait for enough of the message to begin validation */
if (ahd->msgin_index < 2)
break;
switch (ahd->msgin_buf[2]) {
- case MSG_EXT_SDTR:
+ case EXTENDED_SDTR:
{
u_int period;
u_int ppr_options;
u_int offset;
u_int saved_offset;
-
+
if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
reject = TRUE;
break;
@@ -5190,7 +5160,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
* and didn't have to fall down to async
* transfers.
*/
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, TRUE)) {
/* We started it */
if (saved_offset != offset) {
/* Went too low - force async */
@@ -5217,7 +5187,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_WDTR:
+ case EXTENDED_WDTR:
{
u_int bus_width;
u_int saved_width;
@@ -5251,7 +5221,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
saved_width, bus_width);
}
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, TRUE)) {
/*
* Don't send a WDTR back to the
* target, since we asked first.
@@ -5313,7 +5283,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_PPR:
+ case EXTENDED_PPR:
{
u_int period;
u_int offset;
@@ -5368,7 +5338,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_validate_offset(ahd, tinfo, period, &offset,
bus_width, devinfo->role);
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, TRUE)) {
/*
* If we are unable to do any of the
* requested options (we went too low),
@@ -5431,7 +5401,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
}
#ifdef AHD_TARGET_MODE
- case MSG_BUS_DEV_RESET:
+ case TARGET_RESET:
ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
CAM_BDR_SENT,
"Bus Device Reset Received",
@@ -5439,9 +5409,9 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_restart(ahd);
done = MSGLOOP_TERMINATED;
break;
- case MSG_ABORT_TAG:
- case MSG_ABORT:
- case MSG_CLEAR_QUEUE:
+ case ABORT_TASK:
+ case ABORT_TASK_SET:
+ case CLEAR_TASK_SET:
{
int tag;
@@ -5451,7 +5421,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
}
tag = SCB_LIST_NULL;
- if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
+ if (ahd->msgin_buf[0] == ABORT_TASK)
tag = ahd_inb(ahd, INITIATOR_TAG);
ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
devinfo->lun, tag, ROLE_TARGET,
@@ -5475,15 +5445,15 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
}
#endif
- case MSG_QAS_REQUEST:
+ case QAS_REQUEST:
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
printk("%s: QAS request. SCSISIGI == 0x%x\n",
ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
#endif
ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
- /* FALLTHROUGH */
- case MSG_TERM_IO_PROC:
+ fallthrough;
+ case TERMINATE_IO_PROC:
default:
reject = TRUE;
break;
@@ -5495,7 +5465,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
*/
ahd->msgout_index = 0;
ahd->msgout_len = 1;
- ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
+ ahd->msgout_buf[0] = MESSAGE_REJECT;
done = MSGLOOP_MSGCOMPLETE;
response = TRUE;
}
@@ -5534,8 +5504,8 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
/* Might be necessary */
last_msg = ahd_inb(ahd, LAST_MSG);
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/TRUE)
&& tinfo->goal.period <= AHD_SYNCRATE_PACED) {
/*
* Target may not like our SPI-4 PPR Options.
@@ -5572,7 +5542,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_build_transfer_msg(ahd, devinfo);
ahd->msgout_index = 0;
response = 1;
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) {
/* note 8bit xfers */
printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
@@ -5597,7 +5567,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd->msgout_index = 0;
response = 1;
}
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) {
/* note asynch xfers and clear flag */
ahd_set_syncrate(ahd, devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0,
@@ -5607,13 +5577,13 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
"Using asynchronous transfers\n",
ahd_name(ahd), devinfo->channel,
devinfo->target, devinfo->lun);
- } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) {
int tag_type;
int mask;
- tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+ tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG);
- if (tag_type == MSG_SIMPLE_TASK) {
+ if (tag_type == SIMPLE_QUEUE_TAG) {
printk("(%s:%c:%d:%d): refuses tagged commands. "
"Performing non-tagged I/O\n", ahd_name(ahd),
devinfo->channel, devinfo->target, devinfo->lun);
@@ -5623,7 +5593,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
printk("(%s:%c:%d:%d): refuses %s tagged commands. "
"Performing simple queue tagged I/O only\n",
ahd_name(ahd), devinfo->channel, devinfo->target,
- devinfo->lun, tag_type == MSG_ORDERED_TASK
+ devinfo->lun, tag_type == ORDERED_QUEUE_TAG
? "ordered" : "head of queue");
ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
mask = ~0x03;
@@ -5635,9 +5605,9 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
*/
ahd_outb(ahd, SCB_CONTROL,
ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
- scb->hscb->control &= mask;
+ scb->hscb->control &= mask;
ahd_set_transaction_tag(scb, /*enabled*/FALSE,
- /*type*/MSG_SIMPLE_TASK);
+ /*type*/SIMPLE_QUEUE_TAG);
ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
ahd_assert_atn(ahd);
ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
@@ -5844,7 +5814,7 @@ ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);
-
+
scb_index = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scb_index);
@@ -5952,7 +5922,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
continue;
ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
- MSG_BUS_DEV_RESET, /*arg*/0);
+ TARGET_RESET, /*arg*/0);
ahd_send_lstate_events(ahd, lstate);
}
}
@@ -5966,7 +5936,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
/*ppr_options*/0, AHD_TRANS_CUR,
/*paused*/TRUE);
-
+
if (status != CAM_SEL_TIMEOUT)
ahd_send_async(ahd, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR);
@@ -5982,11 +5952,11 @@ ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
struct scb *scb)
{
- /*
+ /*
* To facilitate adding multiple messages together,
* each routine should increment the index and len
* variables instead of setting them explicitly.
- */
+ */
ahd->msgout_index = 0;
ahd->msgout_len = 0;
@@ -6067,22 +6037,16 @@ ahd_alloc(void *platform_arg, char *name)
{
struct ahd_softc *ahd;
-#ifndef __FreeBSD__
- ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
+ ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC);
if (!ahd) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
-#else
- ahd = device_get_softc((device_t)platform_arg);
-#endif
- memset(ahd, 0, sizeof(*ahd));
+
ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
if (ahd->seep_config == NULL) {
-#ifndef __FreeBSD__
kfree(ahd);
-#endif
kfree(name);
return (NULL);
}
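kzalloc() folds the allocation and the zeroing into one call, which is why the separate memset() disappears above:

/* open-coded */
ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
if (ahd)
	memset(ahd, 0, sizeof(*ahd));

/* equivalent single call */
ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC);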
@@ -6125,7 +6089,7 @@ ahd_softc_init(struct ahd_softc *ahd)
{
ahd->unpause = 0;
- ahd->pause = PAUSE;
+ ahd->pause = PAUSE;
return (0);
}
@@ -6138,8 +6102,7 @@ ahd_set_unit(struct ahd_softc *ahd, int unit)
void
ahd_set_name(struct ahd_softc *ahd, char *name)
{
- if (ahd->name != NULL)
- kfree(ahd->name);
+ kfree(ahd->name);
ahd->name = name;
}
@@ -6152,19 +6115,20 @@ ahd_free(struct ahd_softc *ahd)
default:
case 5:
ahd_shutdown(ahd);
- /* FALLTHROUGH */
+ fallthrough;
case 4:
ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
ahd->shared_data_map.dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
ahd->shared_data_map.dmamap);
ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
ahd->shared_data_map.dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
+ break;
case 1:
break;
case 0:
@@ -6200,15 +6164,10 @@ ahd_free(struct ahd_softc *ahd)
kfree(ahd->black_hole);
}
#endif
- if (ahd->name != NULL)
- kfree(ahd->name);
- if (ahd->seep_config != NULL)
- kfree(ahd->seep_config);
- if (ahd->saved_stack != NULL)
- kfree(ahd->saved_stack);
-#ifndef __FreeBSD__
+ kfree(ahd->name);
+ kfree(ahd->seep_config);
+ kfree(ahd->saved_stack);
kfree(ahd);
-#endif
return;
}
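kfree(NULL) is defined to be a no-op, so the NULL checks removed above (and in ahd_set_name earlier) were redundant:

if (ahd->name != NULL)	/* the guard buys nothing ... */
	kfree(ahd->name);

kfree(ahd->name);	/* ... this is equivalent */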
@@ -6243,7 +6202,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
u_int sxfrctl1;
int wait;
uint32_t cmd;
-
+
/*
* Preserve the value of the SXFRCTL1 register for all channels.
* It contains settings that affect termination and we don't want
@@ -6483,7 +6442,7 @@ ahd_init_scbdata(struct ahd_softc *ahd)
/*
* Note that we were successful
*/
- return (0);
+ return (0);
error_exit:
@@ -6553,7 +6512,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
}
ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
}
- /* fall through */
+ fallthrough;
case 6:
{
struct map_node *sg_map;
@@ -6568,7 +6527,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
}
ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
}
- /* fall through */
+ fallthrough;
case 5:
{
struct map_node *hscb_map;
@@ -6582,8 +6541,8 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
kfree(hscb_map);
}
ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
- /* FALLTHROUGH */
}
+ fallthrough;
case 4:
case 3:
case 2:
@@ -7001,7 +6960,7 @@ ahd_controller_info(struct ahd_softc *ahd, char *buf)
static const char *channel_strings[] = {
"Primary Low",
"Primary High",
- "Secondary Low",
+ "Secondary Low",
"Secondary High"
};
@@ -7211,7 +7170,7 @@ ahd_init(struct ahd_softc *ahd)
case FLX_CSTAT_OVER:
case FLX_CSTAT_UNDER:
warn_user++;
- /* fall through */
+ fallthrough;
case FLX_CSTAT_INVALID:
case FLX_CSTAT_OKAY:
if (warn_user == 0 && bootverbose == 0)
@@ -7273,7 +7232,7 @@ ahd_chip_init(struct ahd_softc *ahd)
} else {
sxfrctl1 |= ahd->seltime;
}
-
+
ahd_outb(ahd, SXFRCTL0, DFON);
ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
@@ -7529,7 +7488,7 @@ ahd_chip_init(struct ahd_softc *ahd)
ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
-
+
/* Tell the sequencer of our initial queue positions */
ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
@@ -7906,11 +7865,9 @@ ahd_pause_and_flushwork(struct ahd_softc *ahd)
ahd->flags &= ~AHD_ALL_INTERRUPTS;
}
-#ifdef CONFIG_PM
-int
+int __maybe_unused
ahd_suspend(struct ahd_softc *ahd)
{
-
ahd_pause_and_flushwork(ahd);
if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
@@ -7921,15 +7878,13 @@ ahd_suspend(struct ahd_softc *ahd)
return (0);
}
-void
+void __maybe_unused
ahd_resume(struct ahd_softc *ahd)
{
-
ahd_reset(ahd, /*reinit*/TRUE);
- ahd_intr_enable(ahd, TRUE);
+ ahd_intr_enable(ahd, TRUE);
ahd_restart(ahd);
}
-#endif
/************************** Busy Target Table *********************************/
/*
@@ -7968,7 +7923,7 @@ ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
u_int scbid;
u_int scb_offset;
u_int saved_scbptr;
-
+
scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
scbid = ahd_inw_scbram(ahd, scb_offset);
ahd_set_scbptr(ahd, saved_scbptr);
@@ -7980,7 +7935,7 @@ ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
{
u_int scb_offset;
u_int saved_scbptr;
-
+
scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
ahd_outw(ahd, scb_offset, scbid);
ahd_set_scbptr(ahd, saved_scbptr);
@@ -8033,7 +7988,7 @@ ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
target = SCB_GET_TARGET(ahd, scb);
lun = SCB_GET_LUN(scb);
channel = SCB_GET_CHANNEL(ahd, scb);
-
+
ahd_search_qinfifo(ahd, target, channel, lun,
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
@@ -8074,7 +8029,7 @@ ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
} else {
prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
- ahd_sync_scb(ahd, prev_scb,
+ ahd_sync_scb(ahd, prev_scb,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
@@ -8215,12 +8170,12 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in qinfifo\n");
ahd_done_with_status(ahd, scb, status);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_REMOVE:
break;
case SEARCH_PRINT:
printk(" 0x%x", ahd->qinfifo[qinpos]);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_COUNT:
ahd_qinfifo_requeue(ahd, prev_scb, scb);
prev_scb = scb;
@@ -8311,7 +8266,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB pending MK_MSG\n");
ahd_done_with_status(ahd, mk_msg_scb, status);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_REMOVE:
{
u_int tail_offset;
@@ -8335,7 +8290,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
}
case SEARCH_PRINT:
printk(" 0x%x", SCB_GET_TAG(scb));
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_COUNT:
break;
}
@@ -8374,7 +8329,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
int lun, u_int tag, role_t role, uint32_t status,
- ahd_search_action action, u_int *list_head,
+ ahd_search_action action, u_int *list_head,
u_int *list_tail, u_int tid)
{
struct scb *scb;
@@ -8416,7 +8371,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in Waiting List\n");
ahd_done_with_status(ahd, scb, status);
- /* fall through */
+ fallthrough;
case SEARCH_REMOVE:
ahd_rem_wscb(ahd, scbid, prev, next, tid);
*list_tail = prev;
@@ -8425,7 +8380,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
break;
case SEARCH_PRINT:
printk("0x%x ", scbid);
- /* fall through */
+ fallthrough;
case SEARCH_COUNT:
prev = scbid;
break;
@@ -8832,7 +8787,7 @@ ahd_stat_timer(struct timer_list *t)
struct ahd_softc *ahd = from_timer(ahd, t, stat_timer);
u_long s;
int enint_coal;
-
+
ahd_lock(ahd, &s);
enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
@@ -8877,7 +8832,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
* operations are on data structures that the sequencer
* is not touching once the queue is frozen.
*/
- hscb = scb->hscb;
+ hscb = scb->hscb;
if (ahd_is_paused(ahd)) {
paused = 1;
@@ -8951,11 +8906,12 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
break;
case SIU_PFC_ILLEGAL_REQUEST:
printk("Illegal request\n");
+ break;
default:
break;
}
}
- if (siu->status == SCSI_STATUS_OK)
+ if (siu->status == SAM_STAT_GOOD)
ahd_set_transaction_status(scb,
CAM_REQ_CMP_ERR);
}
@@ -8969,8 +8925,8 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
ahd_done(ahd, scb);
break;
}
- case SCSI_STATUS_CMD_TERMINATED:
- case SCSI_STATUS_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
{
struct ahd_devinfo devinfo;
struct ahd_dma_seg *sg;
@@ -9060,10 +9016,10 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
ahd_queue_scb(ahd, scb);
break;
}
- case SCSI_STATUS_OK:
+ case SAM_STAT_GOOD:
printk("%s: Interrupted for status of 0???\n",
ahd_name(ahd));
- /* FALLTHROUGH */
+ fallthrough;
default:
ahd_done(ahd, scb);
break;
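The status renames in this hunk follow the same pattern as the message constants: driver-local SCSI_STATUS_* values are replaced by the standard SAM status codes defined by the SCSI headers. Collected from the hunks in this series:

/* driver-local name            ->  standard SAM status          */
/* SCSI_STATUS_OK               ->  SAM_STAT_GOOD                */
/* SCSI_STATUS_CHECK_COND       ->  SAM_STAT_CHECK_CONDITION     */
/* SCSI_STATUS_CMD_TERMINATED   ->  SAM_STAT_COMMAND_TERMINATED  */
/* SCSI_STATUS_QUEUE_FULL       ->  SAM_STAT_TASK_SET_FULL       */
/* SCSI_STATUS_BUSY             ->  SAM_STAT_BUSY                */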
@@ -9150,7 +9106,7 @@ ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
/*
* Remainder of the SG where the transfer
- * stopped.
+ * stopped.
*/
resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
@@ -9202,7 +9158,7 @@ ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
- (lstate->event_r_idx - lstate->event_w_idx);
if (event_type == EVENT_TYPE_BUS_RESET
- || event_type == MSG_BUS_DEV_RESET) {
+ || event_type == TARGET_RESET) {
/*
* Any earlier events are irrelevant, so reset our buffer.
* This has the effect of allowing us to deal with reset
@@ -9333,7 +9289,7 @@ ahd_loadseq(struct ahd_softc *ahd)
/*
* Setup downloadable constant table.
- *
+ *
* The computation for the S/G prefetch variables is
* a bit complicated. We would like to always fetch
* in terms of cachelined sized increments. However,
@@ -9422,7 +9378,7 @@ ahd_loadseq(struct ahd_softc *ahd)
if (begin_set[cs_count] == TRUE
&& end_set[cs_count] == FALSE) {
cs_table[cs_count].end = downloaded;
- end_set[cs_count] = TRUE;
+ end_set[cs_count] = TRUE;
cs_count++;
}
continue;
@@ -9442,10 +9398,9 @@ ahd_loadseq(struct ahd_softc *ahd)
if (cs_count != 0) {
cs_count *= sizeof(struct cs);
- ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
if (ahd->critical_sections == NULL)
panic("ahd_loadseq: Could not malloc");
- memcpy(ahd->critical_sections, cs_table, cs_count);
}
ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
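kmemdup() is allocate-plus-copy in one call, the same simplification kzalloc() gave the softc allocation earlier:

/* open-coded duplicate */
dst = kmalloc(len, GFP_ATOMIC);
if (dst)
	memcpy(dst, src, len);

/* equivalent single call */
dst = kmemdup(src, len, GFP_ATOMIC);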
@@ -9552,7 +9507,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
fmt3_ins = &instr.format3;
fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
}
- /* fall through */
+ fallthrough;
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
@@ -9563,7 +9518,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
}
fmt1_ins->parity = 0;
- /* fall through */
+ fallthrough;
case AIC_OP_ROL:
{
int i, count;
@@ -9658,7 +9613,7 @@ ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
printed_mask == 0 ? ":(" : "|",
table[entry].name);
printed_mask |= table[entry].mask;
-
+
break;
}
if (entry >= num_entries)
@@ -9695,7 +9650,7 @@ ahd_dump_card_state(struct ahd_softc *ahd)
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
"%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
- ahd_name(ahd),
+ ahd_name(ahd),
ahd_inw(ahd, CURADDR),
ahd_build_mode_state(ahd, ahd->saved_src_mode,
ahd->saved_dst_mode));
@@ -9811,7 +9766,6 @@ ahd_dump_card_state(struct ahd_softc *ahd)
}
printk("\n");
-
printk("Sequencer DMA-Up and Complete list: ");
scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
i = 0;
@@ -9989,7 +9943,7 @@ ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
ahd_outb(ahd, SEEADR, cur_addr);
ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
-
+
error = ahd_wait_seeprom(ahd);
if (error)
break;
@@ -10044,7 +9998,7 @@ ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
ahd_outw(ahd, SEEDAT, *buf++);
ahd_outb(ahd, SEEADR, cur_addr);
ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
-
+
retval = ahd_wait_seeprom(ahd);
if (retval)
break;
@@ -10149,7 +10103,7 @@ ahd_acquire_seeprom(struct ahd_softc *ahd)
error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
if (error != 0
- || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
+ || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
return (0);
return (1);
#endif
@@ -10291,7 +10245,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
our_id = ahd->our_id;
if (ccb->ccb_h.target_id != our_id) {
if ((ahd->features & AHD_MULTI_TID) != 0
- && (ahd->flags & AHD_INITIATORROLE) != 0) {
+ && (ahd->flags & AHD_INITIATORROLE) != 0) {
/*
* Only allow additional targets if
* the initiator role is disabled.
@@ -10478,7 +10432,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
}
ahd_lock(ahd, &s);
-
+
ccb->ccb_h.status = CAM_REQ_CMP;
LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
struct ccb_hdr *ccbh;
@@ -10742,7 +10696,7 @@ ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
printk("Reserved or VU command code type encountered\n");
break;
}
-
+
memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
atio->ccb_h.status |= CAM_CDB_RECVD;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 57992519384e..f2f3405cdec5 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -194,7 +194,7 @@ struct ahd_linux_iocell_opts
#define AIC79XX_PRECOMP_INDEX 0
#define AIC79XX_SLEWRATE_INDEX 1
#define AIC79XX_AMPLITUDE_INDEX 2
-static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static struct ahd_linux_iocell_opts aic79xx_iocell_info[] __ro_after_init =
{
AIC79XX_DEFAULT_IOOPTS,
AIC79XX_DEFAULT_IOOPTS,
@@ -572,8 +572,7 @@ ahd_linux_info(struct Scsi_Host *host)
/*
* Queue an SCB to the controller.
*/
-static int
-ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+static int ahd_linux_queue_lck(struct scsi_cmnd *cmd)
{
struct ahd_softc *ahd;
struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
@@ -581,7 +580,6 @@ ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
- cmd->scsi_done = scsi_done;
cmd->result = CAM_REQ_INPROG << 16;
rtn = ahd_linux_run_command(ahd, dev, cmd);
@@ -700,9 +698,6 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
static int
ahd_linux_slave_configure(struct scsi_device *sdev)
{
- struct ahd_softc *ahd;
-
- ahd = *((struct ahd_softc **)sdev->host->hostdata);
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -723,24 +718,17 @@ static int
ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
- uint8_t *bh;
int heads;
int sectors;
int cylinders;
- int ret;
int extended;
struct ahd_softc *ahd;
ahd = *((struct ahd_softc **)sdev->host->hostdata);
- bh = scsi_bios_ptable(bdev);
- if (bh) {
- ret = scsi_partsize(bh, capacity,
- &geom[2], &geom[0], &geom[1]);
- kfree(bh);
- if (ret != -1)
- return (ret);
- }
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
heads = 64;
sectors = 32;
cylinders = aic_sector_div(capacity, heads, sectors);
@@ -767,11 +755,7 @@ ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
static int
ahd_linux_abort(struct scsi_cmnd *cmd)
{
- int error;
-
- error = ahd_linux_queue_abort_cmd(cmd);
-
- return error;
+ return ahd_linux_queue_abort_cmd(cmd);
}
/*
@@ -785,16 +769,13 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
struct scb *reset_scb;
u_int cdb_byte;
int retval = SUCCESS;
- int paused;
- int wait;
struct ahd_initiator_tinfo *tinfo;
struct ahd_tmode_tstate *tstate;
unsigned long flags;
DECLARE_COMPLETION_ONSTACK(done);
reset_scb = NULL;
- paused = FALSE;
- wait = FALSE;
+
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
scmd_printk(KERN_INFO, cmd,
@@ -965,8 +946,8 @@ int
ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
int flags, bus_dmamap_t *mapp)
{
- *vaddr = pci_alloc_consistent(ahd->dev_softc,
- dmat->maxsize, mapp);
+ *vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp,
+ GFP_ATOMIC);
if (*vaddr == NULL)
return (ENOMEM);
return(0);
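The legacy PCI DMA wrappers are thin shims over the generic DMA API, so the conversion here (and in the free path in the next hunk) is mechanical; the one visible difference is that the GFP context becomes explicit:

/* legacy wrappers, implicitly atomic */
buf = pci_alloc_consistent(pdev, size, &dma_handle);
pci_free_consistent(pdev, size, buf, dma_handle);

/* generic DMA API equivalents */
buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_ATOMIC);
dma_free_coherent(&pdev->dev, size, buf, dma_handle);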
@@ -976,8 +957,7 @@ void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
void* vaddr, bus_dmamap_t map)
{
- pci_free_consistent(ahd->dev_softc, dmat->maxsize,
- vaddr, map);
+ dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
}
int
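`pci_alloc_consistent()` was a thin wrapper that forwarded to `dma_alloc_coherent()` with a hard-coded `GFP_ATOMIC`, and it has since been removed from the kernel; the conversion keeps behavior identical while naming the underlying `struct device` and the allocation context explicitly. The generic pattern, sketched with a hypothetical size:

    dma_addr_t handle;
    void *buf;

    buf = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL); /* GFP_ATOMIC if sleeping is unsafe */
    if (!buf)
            return -ENOMEM;
    /* ... device DMAs through 'handle' while the CPU uses 'buf' ... */
    dma_free_coherent(&pdev->dev, size, buf, handle);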
@@ -1616,10 +1596,10 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
&& (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
- hscb->control |= MSG_ORDERED_TASK;
+ hscb->control |= ORDERED_QUEUE_TAG;
dev->commands_since_idle_or_otag = 0;
} else {
- hscb->control |= MSG_SIMPLE_TASK;
+ hscb->control |= SIMPLE_QUEUE_TAG;
}
}
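The tag-message renames swap the driver's private spellings for the kernel-wide SPI message codes; the byte values put on the wire are unchanged:

    /* from <scsi/scsi.h>, values fixed by the SCSI-2 spec */
    #define SIMPLE_QUEUE_TAG   0x20   /* was MSG_SIMPLE_TASK in this driver */
    #define HEAD_OF_QUEUE_TAG  0x21
    #define ORDERED_QUEUE_TAG  0x22   /* was MSG_ORDERED_TASK in this driver */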
@@ -1800,10 +1780,12 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
*/
cmd->sense_buffer[0] = 0;
if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
+#ifdef AHD_REPORT_UNDERFLOWS
uint32_t amount_xferred;
amount_xferred =
ahd_get_transfer_length(scb) - ahd_get_residual(scb);
+#endif
if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MISC) != 0) {
@@ -1846,7 +1828,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
if (dev->openings == 1
&& ahd_get_transaction_status(scb) == CAM_REQ_CMP
- && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+ && ahd_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL)
dev->tag_success_count++;
/*
* Some devices deal with temporary internal resource
@@ -1903,8 +1885,8 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
switch (ahd_get_scsi_status(scb)) {
default:
break;
- case SCSI_STATUS_CHECK_COND:
- case SCSI_STATUS_CMD_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
{
struct scsi_cmnd *cmd;
@@ -1940,7 +1922,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
memcpy(cmd->sense_buffer,
ahd_get_sense_buf(ahd, scb)
+ sense_offset, sense_size);
- cmd->result |= (DRIVER_SENSE << 24);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
#ifdef AHD_DEBUG
if (ahd_debug & AHD_SHOW_SENSE) {
@@ -1959,7 +1941,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
}
break;
}
- case SCSI_STATUS_QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
/*
* By the time the core driver has returned this
* command, all other commands that were queued
@@ -2005,7 +1987,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
dev->last_queuefull_same_count = 0;
}
ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
- ahd_set_scsi_status(scb, SCSI_STATUS_OK);
+ ahd_set_scsi_status(scb, SAM_STAT_GOOD);
ahd_platform_set_tags(ahd, sdev, &devinfo,
(dev->flags & AHD_DEV_Q_BASIC)
? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
@@ -2019,7 +2001,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
ahd_platform_set_tags(ahd, sdev, &devinfo,
(dev->flags & AHD_DEV_Q_BASIC)
? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
- ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
+ ahd_set_scsi_status(scb, SAM_STAT_BUSY);
}
}
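Likewise for the status-byte renames: the driver-local `SCSI_STATUS_*` constants give way to the SAM status codes from <scsi/scsi_proto.h>, with identical encodings:

    SAM_STAT_GOOD                0x00   /* was SCSI_STATUS_OK */
    SAM_STAT_CHECK_CONDITION     0x02   /* was SCSI_STATUS_CHECK_COND */
    SAM_STAT_BUSY                0x08   /* was SCSI_STATUS_BUSY */
    SAM_STAT_COMMAND_TERMINATED  0x22   /* obsolete in SAM-3, kept for old targets */
    SAM_STAT_TASK_SET_FULL       0x28   /* was SCSI_STATUS_QUEUE_FULL */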
@@ -2030,6 +2012,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
int new_status = DID_OK;
int do_fallback = 0;
int scsi_status;
+ struct scsi_sense_data *sense;
/*
* Map CAM error codes into Linux Error codes. We
@@ -2046,25 +2029,19 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
break;
case CAM_AUTOSENSE_FAIL:
new_status = DID_ERROR;
- /* Fallthrough */
+ fallthrough;
case CAM_SCSI_STATUS_ERROR:
scsi_status = ahd_cmd_get_scsi_status(cmd);
switch(scsi_status) {
- case SCSI_STATUS_CMD_TERMINATED:
- case SCSI_STATUS_CHECK_COND:
- if ((cmd->result >> 24) != DRIVER_SENSE) {
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
+ sense = (struct scsi_sense_data *)
+ cmd->sense_buffer;
+ if (sense->extra_len >= 5 &&
+ (sense->add_sense_code == 0x47
+ || sense->add_sense_code == 0x48))
do_fallback = 1;
- } else {
- struct scsi_sense_data *sense;
-
- sense = (struct scsi_sense_data *)
- cmd->sense_buffer;
- if (sense->extra_len >= 5 &&
- (sense->add_sense_code == 0x47
- || sense->add_sense_code == 0x48))
- do_fallback = 1;
- }
break;
default:
break;
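Two tree-wide conversions meet in this hunk. `fallthrough;` (a pseudo-keyword from <linux/compiler_attributes.h> that expands to `__attribute__((__fallthrough__))` where the compiler supports it) replaces the comment annotation so `-Wimplicit-fallthrough` stays enforceable. And with the `DRIVER_SENSE` driver-byte removed from the kernel, valid sense data is signalled purely through the status byte, so the check collapses to reading the sense buffer directly; the producing side pairs with `set_status_byte()`, roughly:

    memcpy(cmd->sense_buffer, hw_sense, SCSI_SENSE_BUFFERSIZE);
    set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);  /* midlayer now treats the sense as valid */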
@@ -2128,7 +2105,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
ahd_cmd_set_transaction_status(cmd, new_status);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static void
@@ -2152,9 +2129,8 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
u_int saved_scbptr;
u_int active_scbptr;
u_int last_phase;
- u_int saved_scsiid;
u_int cdb_byte;
- int retval;
+ int retval = SUCCESS;
int was_paused;
int paused;
int wait;
@@ -2192,8 +2168,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
* so we must not still own the command.
*/
scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
- retval = SUCCESS;
- goto no_cmd;
+ goto done;
}
/*
@@ -2206,7 +2181,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
if (pending_scb == NULL) {
scmd_printk(KERN_INFO, cmd, "Command not found\n");
- goto no_cmd;
+ goto done;
}
if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
@@ -2214,7 +2189,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
* We can't queue two recovery actions using the same SCB
*/
retval = FAILED;
- goto done;
+ goto done;
}
/*
@@ -2229,7 +2204,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
if ((pending_scb->flags & SCB_ACTIVE) == 0) {
scmd_printk(KERN_INFO, cmd, "Command already completed\n");
- goto no_cmd;
+ goto done;
}
printk("%s: At time of recovery, card was %spaused\n",
@@ -2246,7 +2221,6 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
ahd_name(ahd), cmd->device->channel,
cmd->device->id, (u8)cmd->device->lun);
- retval = SUCCESS;
goto done;
}
@@ -2268,7 +2242,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
* passed in command. That command is currently active on the
* bus or is in the disconnected state.
*/
- saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+ ahd_inb(ahd, SAVED_SCSIID);
if (last_phase != P_BUSFREE
&& SCB_GET_TAG(pending_scb) == active_scbptr) {
@@ -2343,17 +2317,10 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
} else {
scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
retval = FAILED;
- goto done;
}
-no_cmd:
- /*
- * Our assumption is that if we don't have the command, no
- * recovery action was required, so we return success. Again,
- * the semantics of the mid-layer recovery engine are not
- * well defined, so this may change in time.
- */
- retval = SUCCESS;
+
+ ahd_restore_modes(ahd, saved_modes);
done:
if (paused)
ahd_unpause(ahd);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 8a8b7ae7aed3..793fe19993a9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -196,7 +196,7 @@ int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* XXX
* ahd_dmamap_sync is only used on buffers allocated with
- * the pci_alloc_consistent() API. Although I'm not sure how
+ * the dma_alloc_coherent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?
@@ -242,7 +242,7 @@ struct ahd_linux_device {
int active;
/*
- * The currently allowed number of
+ * The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
@@ -256,7 +256,7 @@ struct ahd_linux_device {
* device's queue is halted.
*/
u_int qfrozen;
-
+
/*
* Cumulative command counter.
*/
@@ -340,11 +340,11 @@ struct ahd_platform_data {
/*
* Fields accessed from interrupt context.
*/
- struct scsi_target *starget[AHD_NUM_TARGETS];
+ struct scsi_target *starget[AHD_NUM_TARGETS];
spinlock_t spin_lock;
struct completion *eh_done;
- struct Scsi_Host *host; /* pointer to scsi host */
+ struct Scsi_Host *host; /* pointer to scsi host */
#define AHD_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
/****************************** PCI-X definitions *****************************/
#define PCIXR_COMMAND 0x96
@@ -497,29 +495,6 @@ int ahd_proc_write_seeprom(struct Scsi_Host *, char *, int);
int ahd_linux_show_info(struct seq_file *,struct Scsi_Host *);
/*********************** Transaction Access Wrappers **************************/
-static inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
-static inline void ahd_set_transaction_status(struct scb *, uint32_t);
-static inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
-static inline void ahd_set_scsi_status(struct scb *, uint32_t);
-static inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahd_get_transaction_status(struct scb *);
-static inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahd_get_scsi_status(struct scb *);
-static inline void ahd_set_transaction_tag(struct scb *, int, u_int);
-static inline u_long ahd_get_transfer_length(struct scb *);
-static inline int ahd_get_transfer_dir(struct scb *);
-static inline void ahd_set_residual(struct scb *, u_long);
-static inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
-static inline u_long ahd_get_residual(struct scb *);
-static inline u_long ahd_get_sense_residual(struct scb *);
-static inline int ahd_perform_autosense(struct scb *);
-static inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
- struct scb *);
-static inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
- struct ahd_devinfo *);
-static inline void ahd_platform_scb_free(struct ahd_softc *ahd,
- struct scb *scb);
-static inline void ahd_freeze_scb(struct scb *scb);
static inline
void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
@@ -655,9 +630,9 @@ static inline void
ahd_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
- scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
- scb->platform_data->dev->qfrozen++;
- }
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
}
void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8b891a05d9e7..b92e2e3c358a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -45,8 +45,8 @@
/* Define the macro locally since it's different for different class of chips.
*/
-#define ID(x) \
- ID2C(x), \
+#define ID(x) \
+ ID2C(x), \
ID2C(IDIROC(x))
static const struct pci_device_id ahd_linux_pci_id_table[] = {
@@ -74,11 +74,10 @@ static const struct pci_device_id ahd_linux_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
-#ifdef CONFIG_PM
-static int
-ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int __maybe_unused
+ahd_linux_pci_dev_suspend(struct device *dev)
{
- struct ahd_softc *ahd = pci_get_drvdata(pdev);
+ struct ahd_softc *ahd = dev_get_drvdata(dev);
int rc;
if ((rc = ahd_suspend(ahd)))
@@ -86,39 +85,20 @@ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
ahd_pci_suspend(ahd);
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- if (mesg.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
-
return rc;
}
-static int
-ahd_linux_pci_dev_resume(struct pci_dev *pdev)
+static int __maybe_unused
+ahd_linux_pci_dev_resume(struct device *dev)
{
- struct ahd_softc *ahd = pci_get_drvdata(pdev);
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if ((rc = pci_enable_device(pdev))) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to enable device after resume (%d)\n", rc);
- return rc;
- }
-
- pci_set_master(pdev);
+ struct ahd_softc *ahd = dev_get_drvdata(dev);
ahd_pci_resume(ahd);
ahd_resume(ahd);
- return rc;
+ return 0;
}
-#endif
static void
ahd_linux_pci_dev_remove(struct pci_dev *pdev)
@@ -224,13 +204,14 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return (0);
}
+static SIMPLE_DEV_PM_OPS(ahd_linux_pci_dev_pm_ops,
+ ahd_linux_pci_dev_suspend,
+ ahd_linux_pci_dev_resume);
+
static struct pci_driver aic79xx_pci_driver = {
.name = "aic79xx",
.probe = ahd_linux_pci_dev_probe,
-#ifdef CONFIG_PM
- .suspend = ahd_linux_pci_dev_suspend,
- .resume = ahd_linux_pci_dev_resume,
-#endif
+ .driver.pm = &ahd_linux_pci_dev_pm_ops,
.remove = ahd_linux_pci_dev_remove,
.id_table = ahd_linux_pci_id_table
};
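`SIMPLE_DEV_PM_OPS()` (from <linux/pm.h>) emits a `struct dev_pm_ops` whose system-sleep slots are populated only under `CONFIG_PM_SLEEP`; marking the callbacks `__maybe_unused` then replaces the `#ifdef CONFIG_PM` fencing, and the PCI core takes over the removed `pci_save_state()`/`pci_set_power_state()`/`pci_enable_device()` choreography. The resulting shape, with hypothetical names:

    static int __maybe_unused example_suspend(struct device *dev) { /* quiesce HW */ return 0; }
    static int __maybe_unused example_resume(struct device *dev)  { /* re-init HW */ return 0; }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

    static struct pci_driver example_driver = {
            .driver.pm = &example_pm_ops,
            /* probe/remove/id_table as usual */
    };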
@@ -386,7 +367,7 @@ ahd_pci_map_int(struct ahd_softc *ahd)
IRQF_SHARED, "aic79xx", ahd);
if (!error)
ahd->platform_data->irq = ahd->dev_softc->irq;
-
+
return (-error);
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 8397ae93f7dd..5fad41b1ab58 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahd_compose_id(device,
vendor,
subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
* Record if this is an HP board.
*/
subvendor = ahd_pci_read_config(ahd->dev_softc,
- PCIR_SUBVEND_0, /*bytes*/2);
+ PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
if (subvendor == SUBID_HP)
ahd->flags |= AHD_HP_BOARD;
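`PCI_SUBSYSTEM_VENDOR_ID` (offset 0x2c) and `PCI_SUBSYSTEM_ID` (offset 0x2e) from <linux/pci_regs.h> are exactly the config-space offsets the driver-local `PCIR_SUBVEND_0`/`PCIR_SUBDEV_0` macros encoded, so this is purely a move to the shared definitions:

    u16 subvendor;

    pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor); /* reads offset 0x2c */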
@@ -377,8 +377,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
return ahd_pci_map_int(ahd);
}
-#ifdef CONFIG_PM
-void
+void __maybe_unused
ahd_pci_suspend(struct ahd_softc *ahd)
{
/*
@@ -394,7 +393,7 @@ ahd_pci_suspend(struct ahd_softc *ahd)
}
-void
+void __maybe_unused
ahd_pci_resume(struct ahd_softc *ahd)
{
ahd_pci_write_config(ahd->dev_softc, DEVCONFIG,
@@ -404,7 +403,6 @@ ahd_pci_resume(struct ahd_softc *ahd)
ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME,
ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1);
}
-#endif
/*
* Perform some simple tests that should catch situations where
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index add2da581d66..746d0ca2a657 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -100,17 +100,17 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
seq_puts(m, "Renegotiation Pending\n");
return;
}
- speed = 3300;
- freq = 0;
+ speed = 3300;
+ freq = 0;
if (tinfo->offset != 0) {
freq = ahd_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);
- mb = speed / 1000;
- if (mb > 0)
+ mb = speed / 1000;
+ if (mb > 0)
seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
- else
+ else
seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
@@ -242,7 +242,8 @@ ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
u_int start_addr;
if (ahd->seep_config == NULL) {
- ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
+ ahd->seep_config = kmalloc(sizeof(*ahd->seep_config),
+ GFP_ATOMIC);
if (ahd->seep_config == NULL) {
printk("aic79xx: Unable to allocate serial "
"eeprom buffer. Write failing\n");
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 88b90f9806c9..9bc755a0a2d3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -896,8 +896,6 @@ union ahc_bus_softc {
typedef void (*ahc_bus_intr_t)(struct ahc_softc *);
typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *);
-typedef int (*ahc_bus_suspend_t)(struct ahc_softc *);
-typedef int (*ahc_bus_resume_t)(struct ahc_softc *);
typedef void ahc_callback_t (void *);
struct ahc_softc {
@@ -1134,9 +1132,7 @@ const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
int ahc_pci_config(struct ahc_softc *,
const struct ahc_pci_identity *);
int ahc_pci_test_register_access(struct ahc_softc *);
-#ifdef CONFIG_PM
-void ahc_pci_resume(struct ahc_softc *ahc);
-#endif
+void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc);
/*************************** EISA/VL Front End ********************************/
struct aic7770_identity *aic7770_find_device(uint32_t);
@@ -1160,10 +1156,8 @@ int ahc_chip_init(struct ahc_softc *ahc);
int ahc_init(struct ahc_softc *ahc);
void ahc_intr_enable(struct ahc_softc *ahc, int enable);
void ahc_pause_and_flushwork(struct ahc_softc *ahc);
-#ifdef CONFIG_PM
-int ahc_suspend(struct ahc_softc *ahc);
-int ahc_resume(struct ahc_softc *ahc);
-#endif
+int __maybe_unused ahc_suspend(struct ahc_softc *ahc);
+int __maybe_unused ahc_resume(struct ahc_softc *ahc);
void ahc_set_unit(struct ahc_softc *, int);
void ahc_set_name(struct ahc_softc *, char *);
void ahc_free(struct ahc_softc *ahc);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index cc9e41967ce4..11ddffbcc2f3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -73,8 +73,8 @@
* add other 93Cx6 functions.
*/
struct seeprom_cmd {
- uint8_t len;
- uint8_t bits[11];
+ uint8_t len;
+ uint8_t bits[11];
};
/* Short opcodes for the c46 */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 4190a025381a..a396f048a031 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -66,7 +66,7 @@ static const char *const ahc_chip_names[] = {
* Hardware error codes.
*/
struct ahc_hard_error_entry {
- uint8_t errno;
+ uint8_t errno;
const char *errmesg;
};
@@ -84,16 +84,16 @@ static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
static const struct ahc_phase_table_entry ahc_phase_table[] =
{
- { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
- { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
- { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
- { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
- { P_COMMAND, MSG_NOOP, "in Command phase" },
- { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
- { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
+ { P_DATAOUT, NOP, "in Data-out phase" },
+ { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" },
+ { P_DATAOUT_DT, NOP, "in DT Data-out phase" },
+ { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" },
+ { P_COMMAND, NOP, "in Command phase" },
+ { P_MESGOUT, NOP, "in Message-out phase" },
+ { P_STATUS, INITIATOR_ERROR, "in Status phase" },
{ P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
- { P_BUSFREE, MSG_NOOP, "while idle" },
- { 0, MSG_NOOP, "in unknown phase" }
+ { P_BUSFREE, NOP, "while idle" },
+ { 0, NOP, "in unknown phase" }
};
/*
@@ -142,7 +142,7 @@ static void ahc_free_tstate(struct ahc_softc *ahc,
#endif
static const struct ahc_syncrate*
ahc_devlimited_syncrate(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *,
+ struct ahc_initiator_tinfo *,
u_int *period,
u_int *ppr_options,
role_t role);
@@ -195,7 +195,7 @@ static void ahc_setup_target_msgin(struct ahc_softc *ahc,
struct scb *scb);
#endif
-static bus_dmamap_callback_t ahc_dmamap_cb;
+static bus_dmamap_callback_t ahc_dmamap_cb;
static void ahc_build_free_scb_list(struct ahc_softc *ahc);
static int ahc_init_scbdata(struct ahc_softc *ahc);
static void ahc_fini_scbdata(struct ahc_softc *ahc);
@@ -493,7 +493,7 @@ ahc_inq(struct ahc_softc *ahc, u_int port)
return ((ahc_inb(ahc, port))
| (ahc_inb(ahc, port+1) << 8)
| (ahc_inb(ahc, port+2) << 16)
- | (ahc_inb(ahc, port+3) << 24)
+ | (((uint64_t)ahc_inb(ahc, port+3)) << 24)
| (((uint64_t)ahc_inb(ahc, port+4)) << 32)
| (((uint64_t)ahc_inb(ahc, port+5)) << 40)
| (((uint64_t)ahc_inb(ahc, port+6)) << 48)
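The new cast on the `port+3` byte fixes a sign-extension hazard: `ahc_inb()` yields a byte that integer promotion turns into a signed `int`, so shifting a value with bit 7 set left by 24 lands in the sign bit, and widening that to the function's 64-bit result smears ones across the top half. A minimal illustration of the failure mode on common ABIs:

    uint8_t b = 0x80;
    uint64_t wrong = b << 24;            /* 0xffffffff80000000 after sign extension */
    uint64_t right = (uint64_t)b << 24;  /* 0x0000000080000000 as intended */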
@@ -815,7 +815,7 @@ ahc_restart(struct ahc_softc *ahc)
ahc_clear_msg_state(ahc);
ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */
- ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */
+ ahc_outb(ahc, MSG_OUT, NOP); /* No message to send */
ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
ahc_outb(ahc, LASTPHASE, P_BUSFREE);
ahc_outb(ahc, SAVED_SCSIID, 0xFF);
@@ -978,7 +978,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
struct scb *scb;
struct ahc_devinfo devinfo;
-
+
ahc_fetch_devinfo(ahc, &devinfo);
/*
@@ -1022,7 +1022,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
goto unpause;
}
- hscb = scb->hscb;
+ hscb = scb->hscb;
/* Don't want to clobber the original sense code */
if ((scb->flags & SCB_SENSE) != 0) {
@@ -1041,12 +1041,12 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc_freeze_scb(scb);
ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
switch (hscb->shared_data.status.scsi_status) {
- case SCSI_STATUS_OK:
+ case SAM_STAT_GOOD:
printk("%s: Interrupted for status of 0???\n",
ahc_name(ahc));
break;
- case SCSI_STATUS_CMD_TERMINATED:
- case SCSI_STATUS_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
{
struct ahc_dma_seg *sg;
struct scsi_sense *sc;
@@ -1071,7 +1071,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
&tstate);
tinfo = &targ_info->curr;
sg = scb->sg_list;
- sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
+ sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
/*
* Save off the residual if there is one.
*/
@@ -1117,8 +1117,8 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
* errors will be reported before any data
* phases occur.
*/
- if (ahc_get_residual(scb)
- == ahc_get_transfer_length(scb)) {
+ if (ahc_get_residual(scb)
+ == ahc_get_transfer_length(scb)) {
ahc_update_neg_request(ahc, &devinfo,
tstate, targ_info,
AHC_NEG_IF_NON_ASYNC);
@@ -1129,7 +1129,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
scb->flags |= SCB_AUTO_NEGOTIATE;
}
hscb->cdb_len = sizeof(*sc);
- hscb->dataptr = sg->addr;
+ hscb->dataptr = sg->addr;
hscb->datacnt = sg->len;
hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
hscb->sgptr = ahc_htole32(hscb->sgptr);
@@ -1179,7 +1179,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
ahc_dump_card_state(ahc);
- ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
+ ahc->msgout_buf[0] = TARGET_RESET;
ahc->msgout_len = 1;
ahc->msgout_index = 0;
ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -1187,13 +1187,13 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc_assert_atn(ahc);
break;
}
- case SEND_REJECT:
+ case SEND_REJECT:
{
u_int rejbyte = ahc_inb(ahc, ACCUM);
printk("%s:%c:%d: Warning - unknown message received from "
- "target (0x%x). Rejecting\n",
+ "target (0x%x). Rejecting\n",
ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
- break;
+ break;
}
case PROTO_VIOLATION:
{
@@ -1286,8 +1286,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc->msg_type =
MSG_TYPE_TARGET_MSGOUT;
ahc->msgin_index = 0;
- }
- else
+ } else
ahc_setup_target_msgin(ahc,
&devinfo,
scb);
@@ -1359,7 +1358,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
if (scb != NULL)
ahc_set_transaction_status(scb,
CAM_UNCOR_PARITY);
- ahc_reset_channel(ahc, devinfo.channel,
+ ahc_reset_channel(ahc, devinfo.channel,
/*init reset*/TRUE);
}
} else {
@@ -1391,7 +1390,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
printk("data overrun detected %s."
" Tag == 0x%x.\n",
ahc_phase_table[i].phasemsg,
- scb->hscb->tag);
+ scb->hscb->tag);
ahc_print_path(ahc, scb);
printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
@@ -1402,7 +1401,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
printk("sg[%d] - Addr 0x%x%x : Length %d\n",
i,
(ahc_le32toh(scb->sg_list[i].len) >> 24
- & SG_HIGH_ADDR_BITS),
+ & SG_HIGH_ADDR_BITS),
ahc_le32toh(scb->sg_list[i].addr),
ahc_le32toh(scb->sg_list[i].len)
& AHC_SG_LEN_MASK);
@@ -1549,7 +1548,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
if (status == 0 && status0 == 0) {
if ((ahc->features & AHC_TWIN) != 0) {
/* Try the other channel */
- ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
status = ahc_inb(ahc, SSTAT1)
& (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
intr_channel = (cur_channel == 'A') ? 'B' : 'A';
@@ -1595,7 +1594,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
printk("%s: Someone reset channel %c\n",
ahc_name(ahc), intr_channel);
if (intr_channel != cur_channel)
- ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
} else if ((status & SCSIPERR) != 0) {
/*
@@ -1684,17 +1683,17 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
* data direction, so ignore the value
* in the phase table.
*/
- mesg_out = MSG_INITIATOR_DET_ERR;
+ mesg_out = INITIATOR_ERROR;
}
/*
- * We've set the hardware to assert ATN if we
- * get a parity error on "in" phases, so all we
+ * We've set the hardware to assert ATN if we
+ * get a parity error on "in" phases, so all we
* need to do is stuff the message buffer with
* the appropriate message. "In" phases have set
* mesg_out to something other than MSG_NOP.
*/
- if (mesg_out != MSG_NOOP) {
+ if (mesg_out != NOP) {
if (ahc->msg_type != MSG_TYPE_NONE)
ahc->send_msg_perror = TRUE;
else
@@ -1818,10 +1817,10 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
u_int tag;
tag = SCB_LIST_NULL;
- if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
- || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK, TRUE)
+ || ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK_SET, TRUE)) {
if (ahc->msgout_buf[ahc->msgout_index - 1]
- == MSG_ABORT_TAG)
+ == ABORT_TASK)
tag = scb->hscb->tag;
ahc_print_path(ahc, scb);
printk("SCB %d - Abort%s Completed.\n",
@@ -1833,22 +1832,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
CAM_REQ_ABORTED);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_1B,
- MSG_BUS_DEV_RESET, TRUE)) {
-#ifdef __FreeBSD__
- /*
- * Don't mark the user's request for this BDR
- * as completing with CAM_BDR_SENT. CAM3
- * specifies CAM_REQ_CMP.
- */
- if (scb != NULL
- && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
- && ahc_match_scb(ahc, scb, target, channel,
- CAM_LUN_WILDCARD,
- SCB_LIST_NULL,
- ROLE_INITIATOR)) {
- ahc_set_transaction_status(scb, CAM_REQ_CMP);
- }
-#endif
+ TARGET_RESET, TRUE)) {
ahc_compile_devinfo(&devinfo,
initiator_role_id,
target,
@@ -1861,7 +1845,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
/*verbose_level*/0);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
- MSG_EXT_PPR, FALSE)) {
+ EXTENDED_PPR, FALSE)) {
struct ahc_initiator_tinfo *tinfo;
struct ahc_tmode_tstate *tstate;
@@ -1880,7 +1864,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
ahc_qinfifo_requeue_tail(ahc, scb);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
- MSG_EXT_WDTR, FALSE)) {
+ EXTENDED_WDTR, FALSE)) {
/*
* Negotiation Rejected. Go-narrow and
* retry command.
@@ -1892,7 +1876,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
ahc_qinfifo_requeue_tail(ahc, scb);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
- MSG_EXT_SDTR, FALSE)) {
+ EXTENDED_SDTR, FALSE)) {
/*
* Negotiation Rejected. Go-async and
* retry command.
@@ -2001,7 +1985,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
| (ahc_inb(ahc, SEQADDR1) << 8);
/*
- * Seqaddr represents the next instruction to execute,
+ * Seqaddr represents the next instruction to execute,
* so we are really executing the instruction just
* before it.
*/
@@ -2009,7 +1993,6 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
seqaddr -= 1;
cs = ahc->critical_sections;
for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
-
if (cs->begin < seqaddr && cs->end >= seqaddr)
break;
}
@@ -2079,7 +2062,7 @@ ahc_clear_intstat(struct ahc_softc *ahc)
CLRREQINIT);
ahc_flush_device_writes(ahc);
ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
- ahc_flush_device_writes(ahc);
+ ahc_flush_device_writes(ahc);
ahc_outb(ahc, CLRINT, CLRSCSIINT);
ahc_flush_device_writes(ahc);
}
@@ -2116,7 +2099,7 @@ ahc_print_scb(struct scb *scb)
printk("sg[%d] - Addr 0x%x%x : Length %d\n",
i,
(ahc_le32toh(scb->sg_list[i].len) >> 24
- & SG_HIGH_ADDR_BITS),
+ & SG_HIGH_ADDR_BITS),
ahc_le32toh(scb->sg_list[i].addr),
ahc_le32toh(scb->sg_list[i].len));
}
@@ -2193,8 +2176,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
if (channel == 'B')
scsi_id += 8;
tstate = ahc->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahc->enabled_targets[scsi_id] = NULL;
}
#endif
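`kfree(NULL)` is specified to be a no-op, so the NULL guard adds nothing; the same simplification repeats below in `ahc_set_name()`, `ahc_free()`, and `ahc_fini_scbdata()`:

    kfree(ptr);      /* safe even when ptr is NULL */
    ptr = NULL;      /* keeps a stale pointer from being freed twice */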
@@ -2239,7 +2221,7 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
*/
if (role == ROLE_TARGET)
transinfo = &tinfo->user;
- else
+ else
transinfo = &tinfo->goal;
*ppr_options &= transinfo->ppr_options;
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
@@ -2420,7 +2402,7 @@ ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case MSG_EXT_WDTR_BUS_8_BIT:
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
@@ -2671,9 +2653,9 @@ ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
{
struct scsi_device *sdev = cmd->device;
- ahc_platform_set_tags(ahc, sdev, devinfo, alg);
- ahc_send_async(ahc, devinfo->channel, devinfo->target,
- devinfo->lun, AC_TRANSFER_NEG);
+ ahc_platform_set_tags(ahc, sdev, devinfo, alg);
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ devinfo->lun, AC_TRANSFER_NEG);
}
/*
@@ -2772,9 +2754,9 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
role = ROLE_INITIATOR;
if (role == ROLE_TARGET
- && (ahc->features & AHC_MULTI_TID) != 0
- && (ahc_inb(ahc, SEQ_FLAGS)
- & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
+ && (ahc->features & AHC_MULTI_TID) != 0
+ && (ahc_inb(ahc, SEQ_FLAGS)
+ & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
/* We were selected, so pull our id from TARGIDIN */
our_id = ahc_inb(ahc, TARGIDIN) & OID;
} else if ((ahc->features & AHC_ULTRA2) != 0)
@@ -2896,7 +2878,7 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
}
if (scb->flags & SCB_DEVICE_RESET) {
- ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahc->msgout_buf[ahc->msgout_index++] = TARGET_RESET;
ahc->msgout_len++;
ahc_print_path(ahc, scb);
printk("Bus Device Reset Message Sent\n");
@@ -2910,9 +2892,9 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
} else if ((scb->flags & SCB_ABORT) != 0) {
if ((scb->hscb->control & TAG_ENB) != 0)
- ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
+ ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK;
else
- ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
+ ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK_SET;
ahc->msgout_len++;
ahc_print_path(ahc, scb);
printk("Abort%s Message Sent\n",
@@ -3122,7 +3104,7 @@ ahc_clear_msg_state(struct ahc_softc *ahc)
*/
ahc_outb(ahc, CLRSINT1, CLRATNO);
}
- ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+ ahc_outb(ahc, MSG_OUT, NOP);
ahc_outb(ahc, SEQ_FLAGS2,
ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}
@@ -3208,7 +3190,7 @@ proto_violation_reset:
ahc_outb(ahc, MSG_OUT, HOST_MSG);
if (scb == NULL) {
ahc_print_devinfo(ahc, &devinfo);
- ahc->msgout_buf[0] = MSG_ABORT_TASK;
+ ahc->msgout_buf[0] = ABORT_TASK;
ahc->msgout_len = 1;
ahc->msgout_index = 0;
ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -3382,7 +3364,7 @@ reswitch:
#endif
ahc_assert_atn(ahc);
}
- } else
+ } else
ahc->msgin_index++;
if (message_done == MSGLOOP_TERMINATED) {
@@ -3475,7 +3457,7 @@ reswitch:
*/
return;
}
-
+
ahc->msgin_index++;
/*
@@ -3536,7 +3518,7 @@ ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
index = 0;
while (index < ahc->msgout_len) {
- if (ahc->msgout_buf[index] == MSG_EXTENDED) {
+ if (ahc->msgout_buf[index] == EXTENDED_MESSAGE) {
u_int end_index;
end_index = index + 1 + ahc->msgout_buf[index + 1];
@@ -3550,8 +3532,8 @@ ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
found = TRUE;
}
index = end_index;
- } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
- && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+ } else if (ahc->msgout_buf[index] >= SIMPLE_QUEUE_TAG
+ && ahc->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) {
/* Skip tag type and tag id or residue param*/
index += 2;
@@ -3602,37 +3584,37 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
* extended message type.
*/
switch (ahc->msgin_buf[0]) {
- case MSG_DISCONNECT:
- case MSG_SAVEDATAPOINTER:
- case MSG_CMDCOMPLETE:
- case MSG_RESTOREPOINTERS:
- case MSG_IGN_WIDE_RESIDUE:
+ case DISCONNECT:
+ case SAVE_POINTERS:
+ case COMMAND_COMPLETE:
+ case RESTORE_POINTERS:
+ case IGNORE_WIDE_RESIDUE:
/*
* End our message loop as these are messages
* the sequencer handles on its own.
*/
done = MSGLOOP_TERMINATED;
break;
- case MSG_MESSAGE_REJECT:
+ case MESSAGE_REJECT:
response = ahc_handle_msg_reject(ahc, devinfo);
- /* FALLTHROUGH */
- case MSG_NOOP:
+ fallthrough;
+ case NOP:
done = MSGLOOP_MSGCOMPLETE;
break;
- case MSG_EXTENDED:
+ case EXTENDED_MESSAGE:
{
/* Wait for enough of the message to begin validation */
if (ahc->msgin_index < 2)
break;
switch (ahc->msgin_buf[2]) {
- case MSG_EXT_SDTR:
+ case EXTENDED_SDTR:
{
const struct ahc_syncrate *syncrate;
u_int period;
u_int ppr_options;
u_int offset;
u_int saved_offset;
-
+
if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
reject = TRUE;
break;
@@ -3666,7 +3648,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc->msgin_buf[3], saved_offset,
period, offset);
}
- ahc_set_syncrate(ahc, devinfo,
+ ahc_set_syncrate(ahc, devinfo,
syncrate, period,
offset, ppr_options,
AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
@@ -3677,7 +3659,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
* and didn't have to fall down to async
* transfers.
*/
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, TRUE)) {
/* We started it */
if (saved_offset != offset) {
/* Went too low - force async */
@@ -3704,7 +3686,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_WDTR:
+ case EXTENDED_WDTR:
{
u_int bus_width;
u_int saved_width;
@@ -3738,7 +3720,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
saved_width, bus_width);
}
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, TRUE)) {
/*
* Don't send a WDTR back to the
* target, since we asked first.
@@ -3800,7 +3782,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_PPR:
+ case EXTENDED_PPR:
{
const struct ahc_syncrate *syncrate;
u_int period;
@@ -3860,7 +3842,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
&offset, bus_width,
devinfo->role);
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, TRUE)) {
/*
* If we are unable to do any of the
* requested options (we went too low),
@@ -3924,7 +3906,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
}
#ifdef AHC_TARGET_MODE
- case MSG_BUS_DEV_RESET:
+ case TARGET_RESET:
ahc_handle_devreset(ahc, devinfo,
CAM_BDR_SENT,
"Bus Device Reset Received",
@@ -3932,9 +3914,9 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc_restart(ahc);
done = MSGLOOP_TERMINATED;
break;
- case MSG_ABORT_TAG:
- case MSG_ABORT:
- case MSG_CLEAR_QUEUE:
+ case ABORT_TASK:
+ case ABORT_TASK_SET:
+ case CLEAR_QUEUE_TASK_SET:
{
int tag;
@@ -3944,7 +3926,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
}
tag = SCB_LIST_NULL;
- if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
+ if (ahc->msgin_buf[0] == ABORT_TASK)
tag = ahc_inb(ahc, INITIATOR_TAG);
ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
devinfo->lun, tag, ROLE_TARGET,
@@ -3968,7 +3950,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
}
#endif
- case MSG_TERM_IO_PROC:
+ case TERMINATE_IO_PROC:
default:
reject = TRUE;
break;
@@ -3980,7 +3962,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
*/
ahc->msgout_index = 0;
ahc->msgout_len = 1;
- ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
+ ahc->msgout_buf[0] = MESSAGE_REJECT;
done = MSGLOOP_MSGCOMPLETE;
response = TRUE;
}
@@ -4019,7 +4001,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
/* Might be necessary */
last_msg = ahc_inb(ahc, LAST_MSG);
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) {
/*
* Target does not support the PPR message.
* Attempt to negotiate SPI-2 style.
@@ -4038,7 +4020,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc_build_transfer_msg(ahc, devinfo);
ahc->msgout_index = 0;
response = 1;
- } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) {
/* note 8bit xfers */
printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
@@ -4063,7 +4045,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc->msgout_index = 0;
response = 1;
}
- } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) {
/* note asynch xfers and clear flag */
ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
/*offset*/0, /*ppr_options*/0,
@@ -4073,13 +4055,13 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
"Using asynchronous transfers\n",
ahc_name(ahc), devinfo->channel,
devinfo->target, devinfo->lun);
- } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) {
int tag_type;
int mask;
- tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+ tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG);
- if (tag_type == MSG_SIMPLE_TASK) {
+ if (tag_type == SIMPLE_QUEUE_TAG) {
printk("(%s:%c:%d:%d): refuses tagged commands. "
"Performing non-tagged I/O\n", ahc_name(ahc),
devinfo->channel, devinfo->target, devinfo->lun);
@@ -4089,7 +4071,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
printk("(%s:%c:%d:%d): refuses %s tagged commands. "
"Performing simple queue tagged I/O only\n",
ahc_name(ahc), devinfo->channel, devinfo->target,
- devinfo->lun, tag_type == MSG_ORDERED_TASK
+ devinfo->lun, tag_type == ORDERED_QUEUE_TAG
? "ordered" : "head of queue");
ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC);
mask = ~0x03;
@@ -4101,9 +4083,9 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
*/
ahc_outb(ahc, SCB_CONTROL,
ahc_inb(ahc, SCB_CONTROL) & mask);
- scb->hscb->control &= mask;
+ scb->hscb->control &= mask;
ahc_set_transaction_tag(scb, /*enabled*/FALSE,
- /*type*/MSG_SIMPLE_TASK);
+ /*type*/SIMPLE_QUEUE_TAG);
ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
ahc_assert_atn(ahc);
@@ -4340,7 +4322,7 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
continue;
ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
- MSG_BUS_DEV_RESET, /*arg*/0);
+ TARGET_RESET, /*arg*/0);
ahc_send_lstate_events(ahc, lstate);
}
}
@@ -4354,7 +4336,7 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
/*period*/0, /*offset*/0, /*ppr_options*/0,
AHC_TRANS_CUR, /*paused*/TRUE);
-
+
if (status != CAM_SEL_TIMEOUT)
ahc_send_async(ahc, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR);
@@ -4371,11 +4353,11 @@ ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
struct scb *scb)
{
- /*
+ /*
* To facilitate adding multiple messages together,
* each routine should increment the index and len
* variables instead of setting them explicitly.
- */
+ */
ahc->msgout_index = 0;
ahc->msgout_len = 0;
@@ -4399,22 +4381,16 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
-#ifndef __FreeBSD__
- ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
+ ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
-#else
- ahc = device_get_softc((device_t)platform_arg);
-#endif
- memset(ahc, 0, sizeof(*ahc));
+
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
-#ifndef __FreeBSD__
kfree(ahc);
-#endif
kfree(name);
return (NULL);
}
@@ -4454,7 +4430,7 @@ ahc_softc_init(struct ahc_softc *ahc)
ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
else
ahc->unpause = 0;
- ahc->pause = ahc->unpause | PAUSE;
+ ahc->pause = ahc->unpause | PAUSE;
/* XXX The shared scb data stuff should be deprecated */
if (ahc->scb_data == NULL) {
ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
@@ -4474,8 +4450,7 @@ ahc_set_unit(struct ahc_softc *ahc, int unit)
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
- if (ahc->name != NULL)
- kfree(ahc->name);
+ kfree(ahc->name);
ahc->name = name;
}
@@ -4488,19 +4463,20 @@ ahc_free(struct ahc_softc *ahc)
default:
case 5:
ahc_shutdown(ahc);
- /* FALLTHROUGH */
+ fallthrough;
case 4:
ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
ahc->shared_data_dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
ahc->shared_data_dmamap);
ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
ahc->shared_data_dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
+ fallthrough;
case 1:
break;
case 0:
@@ -4536,13 +4512,9 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->black_hole);
}
#endif
- if (ahc->name != NULL)
- kfree(ahc->name);
- if (ahc->seep_config != NULL)
- kfree(ahc->seep_config);
-#ifndef __FreeBSD__
+ kfree(ahc->name);
+ kfree(ahc->seep_config);
kfree(ahc);
-#endif
return;
}
@@ -4580,7 +4552,7 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
u_int sxfrctl1_a, sxfrctl1_b;
int error;
int wait;
-
+
/*
* Preserve the value of the SXFRCTL1 register for all channels.
* It contains settings that affect termination and we don't want
@@ -4669,7 +4641,7 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
*/
error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
- else
+ else
ahc_dumpseq(ahc);
#endif
@@ -4734,7 +4706,7 @@ ahc_build_free_scb_list(struct ahc_softc *ahc)
/* Set the next pointer */
if ((ahc->flags & AHC_PAGESCBS) != 0)
ahc_outb(ahc, SCB_NEXT, i+1);
- else
+ else
ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
/* Make the tag number, SCSIID, and lun invalid */
@@ -4887,7 +4859,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
/*
* Note that we were successful
*/
- return (0);
+ return (0);
error_exit:
@@ -4920,38 +4892,37 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
}
ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
}
- /* fall through */
+ fallthrough;
case 6:
ahc_dmamap_unload(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
- /* fall through */
+ fallthrough;
case 5:
ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
scb_data->sense_dmamap);
ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
- /* fall through */
+ fallthrough;
case 4:
ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
- /* fall through */
+ fallthrough;
case 3:
ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
- /* fall through */
+ fallthrough;
case 2:
ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
scb_data->hscb_dmamap);
ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
- /* fall through */
+ fallthrough;
case 1:
ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
break;
case 0:
break;
}
- if (scb_data->scbarray != NULL)
- kfree(scb_data->scbarray);
+ kfree(scb_data->scbarray);
}
static void
@@ -5031,7 +5002,7 @@ ahc_controller_info(struct ahc_softc *ahc, char *buf)
len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
buf += len;
if ((ahc->features & AHC_TWIN) != 0)
- len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
+ len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
"B SCSI Id=%d, primary %c, ",
ahc->our_id, ahc->our_id_b,
(ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
@@ -5167,7 +5138,7 @@ ahc_chip_init(struct ahc_softc *ahc)
ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
-
+
if ((ahc->features & AHC_HS_MAILBOX) != 0)
ahc_outb(ahc, HS_MAILBOX, 0);
@@ -5197,7 +5168,7 @@ ahc_chip_init(struct ahc_softc *ahc)
ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
/* Message out buffer starts empty */
- ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+ ahc_outb(ahc, MSG_OUT, NOP);
/*
* Setup the allowed SCSI Sequences based on operational mode.
@@ -5298,7 +5269,7 @@ ahc_init(struct ahc_softc *ahc)
*/
if ((ahc->flags & AHC_USEDEFAULTS) != 0)
ahc->our_id = ahc->our_id_b = 7;
-
+
/*
* Default to allowing initiator operations.
*/
@@ -5316,7 +5287,7 @@ ahc_init(struct ahc_softc *ahc)
* DMA tag for our command fifos and other data in system memory
* the card's sequencer must be able to access. For initiator
* roles, we need to allocate space for the qinfifo and qoutfifo.
- * The qinfifo and qoutfifo are composed of 256 1 byte elements.
+ * The qinfifo and qoutfifo are composed of 256 1 byte elements.
* When providing for the target mode role, we must additionally
* provide space for the incoming target command fifo and an extra
* byte to deal with a dma bug in some chip versions.
@@ -5425,7 +5396,7 @@ ahc_init(struct ahc_softc *ahc)
&& (ahc->flags & AHC_INITIATORROLE) != 0)
ahc->flags |= AHC_RESET_BUS_A;
- ultraenb = 0;
+ ultraenb = 0;
tagenable = ALL_TARGETS_MASK;
/* Grab the disconnection disable table and invert it for our needs */
@@ -5521,9 +5492,9 @@ ahc_init(struct ahc_softc *ahc)
&& (ultraenb & mask) != 0) {
/* Treat 10MHz as a non-ultra speed */
scsirate &= ~SXFR;
- ultraenb &= ~mask;
+ ultraenb &= ~mask;
}
- tinfo->user.period =
+ tinfo->user.period =
ahc_find_period(ahc, scsirate,
(ultraenb & mask)
? AHC_SYNCRATE_ULTRA
@@ -5618,8 +5589,7 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
ahc->flags &= ~AHC_ALL_INTERRUPTS;
}
-#ifdef CONFIG_PM
-int
+int __maybe_unused
ahc_suspend(struct ahc_softc *ahc)
{
@@ -5645,16 +5615,15 @@ ahc_suspend(struct ahc_softc *ahc)
return (0);
}
-int
+int __maybe_unused
ahc_resume(struct ahc_softc *ahc)
{
ahc_reset(ahc, /*reinit*/TRUE);
- ahc_intr_enable(ahc, TRUE);
+ ahc_intr_enable(ahc, TRUE);
ahc_restart(ahc);
return (0);
}
-#endif
/************************** Busy Target Table *********************************/
/*
* Return the untagged transaction id for a given target/channel lun.
@@ -5668,7 +5637,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
@@ -5688,7 +5657,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
@@ -5706,7 +5675,7 @@ ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
@@ -5764,7 +5733,7 @@ ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
target = SCB_GET_TARGET(ahc, scb);
lun = SCB_GET_LUN(scb);
channel = SCB_GET_CHANNEL(ahc, scb);
-
+
ahc_search_qinfifo(ahc, target, channel, lun,
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
@@ -5802,7 +5771,7 @@ ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
} else {
prev_scb->hscb->next = scb->hscb->tag;
- ahc_sync_scb(ahc, prev_scb,
+ ahc_sync_scb(ahc, prev_scb,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
@@ -5895,9 +5864,8 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in qinfifo\n");
ahc_done(ahc, scb);
-
- /* FALLTHROUGH */
}
+ fallthrough;
case SEARCH_REMOVE:
break;
case SEARCH_COUNT:
@@ -6009,7 +5977,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
printk("Inactive SCB in Waiting List\n");
ahc_done(ahc, scb);
}
- /* fall through */
+ fallthrough;
case SEARCH_REMOVE:
next = ahc_rem_wscb(ahc, next, prev);
break;
@@ -6019,7 +5987,6 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
break;
}
} else {
-
prev = next;
next = ahc_inb(ahc, SCB_NEXT);
}
@@ -6265,7 +6232,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
/* update the waiting list */
if (prev == SCB_LIST_NULL) {
/* First in the list */
- ahc_outb(ahc, WAITING_SCBH, next);
+ ahc_outb(ahc, WAITING_SCBH, next);
/*
* Ensure we aren't attempting to perform
@@ -6274,7 +6241,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
} else {
/*
- * Select the scb that pointed to us
+ * Select the scb that pointed to us
* and update its next pointer.
*/
ahc_outb(ahc, SCBPTR, prev);
@@ -6668,7 +6635,7 @@ ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
/*
* Remainder of the SG where the transfer
- * stopped.
+ * stopped.
*/
resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
@@ -6720,7 +6687,7 @@ ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
- (lstate->event_r_idx - lstate->event_w_idx);
if (event_type == EVENT_TYPE_BUS_RESET
- || event_type == MSG_BUS_DEV_RESET) {
+ || event_type == TARGET_RESET) {
/*
* Any earlier events are irrelevant, so reset our buffer.
* This has the effect of allowing us to deal with reset
@@ -6887,7 +6854,7 @@ ahc_loadseq(struct ahc_softc *ahc)
if (begin_set[cs_count] == TRUE
&& end_set[cs_count] == FALSE) {
cs_table[cs_count].end = downloaded;
- end_set[cs_count] = TRUE;
+ end_set[cs_count] = TRUE;
cs_count++;
}
continue;
@@ -6907,10 +6874,9 @@ ahc_loadseq(struct ahc_softc *ahc)
if (cs_count != 0) {
cs_count *= sizeof(struct cs);
- ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
if (ahc->critical_sections == NULL)
panic("ahc_loadseq: Could not malloc");
- memcpy(ahc->critical_sections, cs_table, cs_count);
}
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
@@ -7015,7 +6981,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
address -= address_offset;
fmt3_ins->address = address;
}
- /* fall through */
+ fallthrough;
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
@@ -7041,7 +7007,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
fmt1_ins->opcode = AIC_OP_AND;
fmt1_ins->immediate = 0xff;
}
- /* fall through */
+ fallthrough;
case AIC_OP_ROL:
if ((ahc->features & AHC_ULTRA2) != 0) {
int i, count;
@@ -7116,7 +7082,6 @@ ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
printed_mask == 0 ? ":(" : "|",
table[entry].name);
printed_mask |= table[entry].mask;
-
break;
}
if (entry >= num_entries)
@@ -7230,7 +7195,7 @@ ahc_dump_card_state(struct ahc_softc *ahc)
scb_index = ahc_inb(ahc, SCB_NEXT);
}
printk("\n");
-
+
ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
printk("QOUTFIFO entries: ");
qoutpos = ahc->qoutfifonext;
@@ -7407,7 +7372,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
if ((ahc->features & AHC_MULTIROLE) != 0) {
if ((ahc->features & AHC_MULTI_TID) != 0
- && (ahc->flags & AHC_INITIATORROLE) != 0) {
+ && (ahc->flags & AHC_INITIATORROLE) != 0) {
/*
* Only allow additional targets if
* the initiator role is disabled.
@@ -7558,7 +7523,6 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
targid_mask |= target_mask;
ahc_outb(ahc, TARGID, targid_mask);
ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
-
ahc_update_scsiid(ahc, targid_mask);
} else {
u_int our_id;
@@ -7623,7 +7587,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
}
ahc_lock(ahc, &s);
-
+
ccb->ccb_h.status = CAM_REQ_CMP;
LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
struct ccb_hdr *ccbh;
@@ -7682,7 +7646,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
targid_mask &= ~target_mask;
ahc_outb(ahc, TARGID, targid_mask);
ahc_outb(ahc, TARGID+1,
- (targid_mask >> 8));
+ (targid_mask >> 8));
ahc_update_scsiid(ahc, targid_mask);
}
}
@@ -7811,7 +7775,7 @@ ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
} else {
if (!paused)
- ahc_pause(ahc);
+ ahc_pause(ahc);
ahc_outb(ahc, KERNEL_TQINPOS,
ahc->tqinfifonext & HOST_TQINPOS);
if (!paused)
@@ -7910,7 +7874,7 @@ ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
printk("Reserved or VU command code type encountered\n");
break;
}
-
+
memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
atio->ccb_h.status |= CAM_CDB_RECVD;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d5c4a0d23706..d3b1082654d5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1,3 +1,4 @@
+
/*
* Adaptec AIC7xxx device driver for Linux.
*
@@ -452,7 +453,7 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
- struct ahc_dma_seg *sg,
+ struct ahc_dma_seg *sg,
dma_addr_t addr, bus_size_t len);
static void
@@ -517,8 +518,7 @@ ahc_linux_info(struct Scsi_Host *host)
/*
* Queue an SCB to the controller.
*/
-static int
-ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+static int ahc_linux_queue_lck(struct scsi_cmnd *cmd)
{
struct ahc_softc *ahc;
struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
@@ -529,7 +529,6 @@ ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd
ahc_lock(ahc, &flags);
if (ahc->platform_data->qfrozen == 0) {
- cmd->scsi_done = scsi_done;
cmd->result = CAM_REQ_INPROG << 16;
rtn = ahc_linux_run_command(ahc, dev, cmd);
}
@@ -564,8 +563,6 @@ ahc_linux_target_alloc(struct scsi_target *starget)
struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
unsigned short scsirate;
struct ahc_devinfo devinfo;
- struct ahc_initiator_tinfo *tinfo;
- struct ahc_tmode_tstate *tstate;
char channel = starget->channel + 'A';
unsigned int our_id = ahc->our_id;
unsigned int target_offset;
@@ -573,7 +570,7 @@ ahc_linux_target_alloc(struct scsi_target *starget)
target_offset = starget->id;
if (starget->channel != 0)
target_offset += 8;
-
+
if (starget->channel)
our_id = ahc->our_id_b;
@@ -599,22 +596,19 @@ ahc_linux_target_alloc(struct scsi_target *starget)
ultra = 0;
flags &= ~CFXFER;
}
-
+
if ((ahc->features & AHC_ULTRA2) != 0) {
scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0);
} else {
scsirate = (flags & CFXFER) << 4;
- maxsync = ultra ? AHC_SYNCRATE_ULTRA :
+ maxsync = ultra ? AHC_SYNCRATE_ULTRA :
AHC_SYNCRATE_FAST;
}
spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
if (!(flags & CFSYNCH))
spi_max_offset(starget) = 0;
- spi_min_period(starget) =
+ spi_min_period(starget) =
ahc_find_period(ahc, scsirate, maxsync);
-
- tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id,
- starget->id, &tstate);
}
ahc_compile_devinfo(&devinfo, our_id, starget->id,
CAM_LUN_WILDCARD, channel,
@@ -662,7 +656,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
* a tagged queuing capable device.
*/
dev->maxtags = 0;
-
+
spi_period(starget) = 0;
return 0;
@@ -671,10 +665,6 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
static int
ahc_linux_slave_configure(struct scsi_device *sdev)
{
- struct ahc_softc *ahc;
-
- ahc = *((struct ahc_softc **)sdev->host->hostdata);
-
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -695,11 +685,9 @@ static int
ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
- uint8_t *bh;
int heads;
int sectors;
int cylinders;
- int ret;
int extended;
struct ahc_softc *ahc;
u_int channel;
@@ -707,14 +695,9 @@ ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
ahc = *((struct ahc_softc **)sdev->host->hostdata);
channel = sdev_channel(sdev);
- bh = scsi_bios_ptable(bdev);
- if (bh) {
- ret = scsi_partsize(bh, capacity,
- &geom[2], &geom[0], &geom[1]);
- kfree(bh);
- if (ret != -1)
- return (ret);
- }
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
heads = 64;
sectors = 32;
cylinders = aic_sector_div(capacity, heads, sectors);
@@ -746,7 +729,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
int error;
error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
- if (error != 0)
+ if (error != SUCCESS)
printk("aic7xxx_abort returns 0x%x\n", error);
return (error);
}
@@ -760,7 +743,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
int error;
error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
- if (error != 0)
+ if (error != SUCCESS)
printk("aic7xxx_dev_reset returns 0x%x\n", error);
return (error);
}
@@ -1235,8 +1218,8 @@ ahc_platform_free(struct ahc_softc *ahc)
starget = ahc->platform_data->starget[i];
if (starget != NULL) {
ahc->platform_data->starget[i] = NULL;
- }
- }
+ }
+ }
if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
free_irq(ahc->platform_data->irq, ahc);
@@ -1283,7 +1266,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
default:
case AHC_QUEUE_NONE:
now_queuing = 0;
- break;
+ break;
case AHC_QUEUE_BASIC:
now_queuing = AHC_DEV_Q_BASIC;
break;
@@ -1484,10 +1467,10 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
hscb->scsioffset = tinfo->curr.offset;
if ((tstate->ultraenb & mask) != 0)
hscb->control |= ULTRAENB;
-
+
if ((ahc->user_discenable & mask) != 0)
hscb->control |= DISCENB;
-
+
if ((tstate->auto_negotiate & mask) != 0) {
scb->flags |= SCB_AUTO_NEGOTIATE;
scb->hscb->control |= MK_MESSAGE;
@@ -1496,10 +1479,10 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
&& (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
- hscb->control |= MSG_ORDERED_TASK;
+ hscb->control |= ORDERED_QUEUE_TAG;
dev->commands_since_idle_or_otag = 0;
} else {
- hscb->control |= MSG_SIMPLE_TASK;
+ hscb->control |= SIMPLE_QUEUE_TAG;
}
}
@@ -1547,7 +1530,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
*/
scb->hscb->sgptr =
ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
+
/*
* Copy the first SG into the "current"
* data pointer area.
@@ -1567,7 +1550,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
dev->commands_issued++;
if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
dev->commands_since_idle_or_otag++;
-
+
scb->flags |= SCB_ACTIVE;
if (untagged_q) {
TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
@@ -1588,7 +1571,7 @@ ahc_linux_isr(int irq, void *dev_id)
int ours;
ahc = (struct ahc_softc *) dev_id;
- ahc_lock(ahc, &flags);
+ ahc_lock(ahc, &flags);
ours = ahc_intr(ahc);
ahc_unlock(ahc, &flags);
return IRQ_RETVAL(ours);
@@ -1608,7 +1591,6 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
case AC_TRANSFER_NEG:
{
struct scsi_target *starget;
- struct ahc_linux_target *targ;
struct ahc_initiator_tinfo *tinfo;
struct ahc_tmode_tstate *tstate;
int target_offset;
@@ -1642,7 +1624,6 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
starget = ahc->platform_data->starget[target_offset];
if (starget == NULL)
break;
- targ = scsi_transport_target_data(starget);
target_ppr_options =
(spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
@@ -1665,22 +1646,22 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
spi_display_xfer_agreement(starget);
break;
}
- case AC_SENT_BDR:
+ case AC_SENT_BDR:
{
WARN_ON(lun != CAM_LUN_WILDCARD);
scsi_report_device_reset(ahc->platform_data->host,
channel - 'A', target);
break;
}
- case AC_BUS_RESET:
+ case AC_BUS_RESET:
if (ahc->platform_data->host != NULL) {
scsi_report_bus_reset(ahc->platform_data->host,
channel - 'A');
}
- break;
- default:
- panic("ahc_send_async: Unexpected async event");
- }
+ break;
+ default:
+ panic("ahc_send_async: Unexpected async event");
+ }
}
/*
@@ -1729,10 +1710,12 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
*/
cmd->sense_buffer[0] = 0;
if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
+#ifdef AHC_REPORT_UNDERFLOWS
uint32_t amount_xferred;
amount_xferred =
ahc_get_transfer_length(scb) - ahc_get_residual(scb);
+#endif
if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHC_DEBUG
if ((ahc_debug & AHC_SHOW_MISC) != 0) {
@@ -1775,7 +1758,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
if (dev->openings == 1
&& ahc_get_transaction_status(scb) == CAM_REQ_CMP
- && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+ && ahc_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL)
dev->tag_success_count++;
/*
* Some devices deal with temporary internal resource
@@ -1818,7 +1801,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
sdev->sdev_target->id, sdev->lun,
sdev->sdev_target->channel == 0 ? 'A' : 'B',
ROLE_INITIATOR);
-
+
/*
* We don't currently trust the mid-layer to
* properly deal with queue full or busy. So,
@@ -1832,8 +1815,8 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
switch (ahc_get_scsi_status(scb)) {
default:
break;
- case SCSI_STATUS_CHECK_COND:
- case SCSI_STATUS_CMD_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
{
struct scsi_cmnd *cmd;
@@ -1853,7 +1836,6 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
if (sense_size < SCSI_SENSE_BUFFERSIZE)
memset(&cmd->sense_buffer[sense_size], 0,
SCSI_SENSE_BUFFERSIZE - sense_size);
- cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
if (ahc_debug & AHC_SHOW_SENSE) {
int i;
@@ -1871,7 +1853,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
}
break;
}
- case SCSI_STATUS_QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
{
/*
* By the time the core driver has returned this
@@ -1915,7 +1897,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
dev->last_queuefull_same_count = 0;
}
ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
- ahc_set_scsi_status(scb, SCSI_STATUS_OK);
+ ahc_set_scsi_status(scb, SAM_STAT_GOOD);
ahc_platform_set_tags(ahc, sdev, &devinfo,
(dev->flags & AHC_DEV_Q_BASIC)
? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
@@ -1926,7 +1908,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
* as if the target returned BUSY SCSI status.
*/
dev->openings = 1;
- ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
+ ahc_set_scsi_status(scb, SAM_STAT_BUSY);
ahc_platform_set_tags(ahc, sdev, &devinfo,
(dev->flags & AHC_DEV_Q_BASIC)
? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
@@ -2002,7 +1984,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
ahc_cmd_set_transaction_status(cmd, new_status);
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static void
@@ -2124,7 +2106,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
/* Any SCB for this device will do for a target reset */
LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
- if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
+ if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR))
@@ -2345,7 +2327,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2377,7 +2359,8 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
ppr_options &= MSG_EXT_PPR_QAS_REQ;
}
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2389,7 +2372,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2402,7 +2385,8 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
if (offset != 0) {
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
period = tinfo->goal.period;
ppr_options = tinfo->goal.ppr_options;
}
@@ -2417,7 +2401,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2438,7 +2422,8 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2455,7 +2440,7 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2471,7 +2456,8 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2483,7 +2469,7 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2499,7 +2485,8 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2515,7 +2502,7 @@ static void ahc_linux_get_signalling(struct Scsi_Host *shost)
if (!(ahc->features & AHC_ULTRA2)) {
/* non-LVD chipset, may not have SBLKCTL reg */
- spi_signalling(shost) =
+ spi_signalling(shost) =
ahc->features & AHC_HVD ?
SPI_SIGNAL_HVD :
SPI_SIGNAL_SE;
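
A note on the queuecommand hunks above: dropping the scsi_done function pointer mirrors the SCSI midlayer moving command completion from a per-command cmd->scsi_done callback to the global scsi_done() helper. A minimal sketch of the resulting pattern, using a hypothetical my_adapter_queue() rather than the aic7xxx internals:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int my_adapter_queue(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/*
	 * Previously the midlayer stashed a callback in cmd->scsi_done
	 * and drivers invoked cmd->scsi_done(cmd) on completion; now the
	 * driver simply calls scsi_done(cmd) and the field is gone.
	 */
	cmd->result = DID_OK << 16;
	scsi_done(cmd);		/* complete immediately, for illustration */
	return 0;
}
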
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index f8489078f003..51d9f4de0734 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -209,7 +209,7 @@ int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* XXX
* ahc_dmamap_sync is only used on buffers allocated with
- * the pci_alloc_consistent() API. Although I'm not sure how
+ * the dma_alloc_coherent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?
@@ -258,7 +258,7 @@ struct ahc_linux_device {
int active;
/*
- * The currently allowed number of
+ * The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
@@ -272,7 +272,7 @@ struct ahc_linux_device {
* device's queue is halted.
*/
u_int qfrozen;
-
+
/*
* Cumulative command counter.
*/
@@ -351,16 +351,16 @@ struct ahc_platform_data {
/*
* Fields accessed from interrupt context.
*/
- struct scsi_target *starget[AHC_NUM_TARGETS];
+ struct scsi_target *starget[AHC_NUM_TARGETS];
spinlock_t spin_lock;
u_int qfrozen;
struct completion *eh_done;
- struct Scsi_Host *host; /* pointer to scsi host */
+ struct Scsi_Host *host; /* pointer to scsi host */
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- resource_size_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
void ahc_delay(long);
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
typedef enum
{
@@ -515,29 +513,6 @@ int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *);
/*************************** Domain Validation ********************************/
/*********************** Transaction Access Wrappers *************************/
-static inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
-static inline void ahc_set_transaction_status(struct scb *, uint32_t);
-static inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
-static inline void ahc_set_scsi_status(struct scb *, uint32_t);
-static inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahc_get_transaction_status(struct scb *);
-static inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahc_get_scsi_status(struct scb *);
-static inline void ahc_set_transaction_tag(struct scb *, int, u_int);
-static inline u_long ahc_get_transfer_length(struct scb *);
-static inline int ahc_get_transfer_dir(struct scb *);
-static inline void ahc_set_residual(struct scb *, u_long);
-static inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
-static inline u_long ahc_get_residual(struct scb *);
-static inline u_long ahc_get_sense_residual(struct scb *);
-static inline int ahc_perform_autosense(struct scb *);
-static inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
- struct scb *);
-static inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
- struct ahc_devinfo *);
-static inline void ahc_platform_scb_free(struct ahc_softc *ahc,
- struct scb *scb);
-static inline void ahc_freeze_scb(struct scb *scb);
static inline
void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
@@ -671,9 +646,9 @@ static inline void
ahc_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
- scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
- scb->platform_data->dev->qfrozen++;
- }
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
}
void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
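
The comment fix above replaces a stale reference to pci_alloc_consistent() with dma_alloc_coherent(), the generic DMA API that superseded it. A minimal sketch of that API, assuming my_dev is a device already bound to a DMA-capable bus:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_shared_ring(struct device *my_dev, size_t size,
			       dma_addr_t *bus_addr)
{
	/* Coherent memory is visible to CPU and device without explicit sync. */
	return dma_alloc_coherent(my_dev, size, bus_addr, GFP_KERNEL);
}

static void free_shared_ring(struct device *my_dev, size_t size,
			     void *cpu_addr, dma_addr_t bus_addr)
{
	dma_free_coherent(my_dev, size, cpu_addr, bus_addr);
}
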
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 9b293b1f0b71..a07e94fac673 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -121,47 +121,23 @@ static const struct pci_device_id ahc_linux_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
-#ifdef CONFIG_PM
-static int
-ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int __maybe_unused
+ahc_linux_pci_dev_suspend(struct device *dev)
{
- struct ahc_softc *ahc = pci_get_drvdata(pdev);
- int rc;
-
- if ((rc = ahc_suspend(ahc)))
- return rc;
+ struct ahc_softc *ahc = dev_get_drvdata(dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- if (mesg.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
-
- return rc;
+ return ahc_suspend(ahc);
}
-static int
-ahc_linux_pci_dev_resume(struct pci_dev *pdev)
+static int __maybe_unused
+ahc_linux_pci_dev_resume(struct device *dev)
{
- struct ahc_softc *ahc = pci_get_drvdata(pdev);
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if ((rc = pci_enable_device(pdev))) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to enable device after resume (%d)\n", rc);
- return rc;
- }
-
- pci_set_master(pdev);
+ struct ahc_softc *ahc = dev_get_drvdata(dev);
ahc_pci_resume(ahc);
return (ahc_resume(ahc));
}
-#endif
static void
ahc_linux_pci_dev_remove(struct pci_dev *pdev)
@@ -319,14 +295,14 @@ ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
}
}
+static SIMPLE_DEV_PM_OPS(ahc_linux_pci_dev_pm_ops,
+ ahc_linux_pci_dev_suspend,
+ ahc_linux_pci_dev_resume);
static struct pci_driver aic7xxx_pci_driver = {
.name = "aic7xxx",
.probe = ahc_linux_pci_dev_probe,
-#ifdef CONFIG_PM
- .suspend = ahc_linux_pci_dev_suspend,
- .resume = ahc_linux_pci_dev_resume,
-#endif
+ .driver.pm = &ahc_linux_pci_dev_pm_ops,
.remove = ahc_linux_pci_dev_remove,
.id_table = ahc_linux_pci_id_table
};
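
The hunk above converts the driver from the legacy PCI .suspend/.resume hooks to generic device power-management ops. A sketch of the pattern with hypothetical callbacks; __maybe_unused avoids unused-function warnings when CONFIG_PM is off, and SIMPLE_DEV_PM_OPS binds the pair to the system-sleep entries of dev_pm_ops:

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused my_suspend(struct device *dev)
{
	/* Quiesce the controller; the PCI core saves state and powers down. */
	return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
	/* The PCI core has restored config space; re-init the controller. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_pci_driver = {
	.name		= "my_driver",
	.driver.pm	= &my_pm_ops,
};

Note how the pci_save_state()/pci_set_power_state() boilerplate in the deleted lines is exactly what the PCI core now handles on the driver's behalf.
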
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 656f680c7802..2d4c85426dc3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
@@ -2008,8 +2008,7 @@ ahc_pci_chip_init(struct ahc_softc *ahc)
return (ahc_chip_init(ahc));
}
-#ifdef CONFIG_PM
-void
+void __maybe_unused
ahc_pci_resume(struct ahc_softc *ahc)
{
/*
@@ -2040,7 +2039,6 @@ ahc_pci_resume(struct ahc_softc *ahc)
ahc_release_seeprom(&sd);
}
}
-#endif
static int
ahc_aic785X_setup(struct ahc_softc *ahc)
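
The subvendor/subdevice hunk above drops the driver-private PCIR_SUBVEND_0/PCIR_SUBDEV_0 offsets (removed from aic7xxx_osm.h earlier in this diff) in favor of the standard names from <uapi/linux/pci_regs.h>, which encode the same 0x2c/0x2e config offsets. A sketch of the equivalent reads through the regular config accessors, with a hypothetical pdev:

#include <linux/pci.h>

static void read_subsystem_ids(struct pci_dev *pdev)
{
	u16 subvendor, subdevice;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subdevice);
	dev_info(&pdev->dev, "subsystem %04x:%04x\n", subvendor, subdevice);
}
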
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 18459605d991..4bc9e2dfccf6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -97,17 +97,17 @@ ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo)
u_int freq;
u_int mb;
- speed = 3300;
- freq = 0;
+ speed = 3300;
+ freq = 0;
if (tinfo->offset != 0) {
freq = ahc_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);
- mb = speed / 1000;
- if (mb > 0)
+ mb = speed / 1000;
+ if (mb > 0)
seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
- else
+ else
seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
@@ -234,7 +234,7 @@ ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
if ((ahc->chip & AHC_VL) != 0) {
sd.sd_control_offset = SEECTL_2840;
sd.sd_status_offset = STATUS_2840;
- sd.sd_dataout_offset = STATUS_2840;
+ sd.sd_dataout_offset = STATUS_2840;
sd.sd_chip = C46;
sd.sd_MS = 0;
sd.sd_RDY = EEPROM_TF;
@@ -255,7 +255,8 @@ ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
u_int start_addr;
if (ahc->seep_config == NULL) {
- ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
+ ahc->seep_config = kmalloc(sizeof(*ahc->seep_config),
+ GFP_ATOMIC);
if (ahc->seep_config == NULL) {
printk("aic7xxx: Unable to allocate serial "
"eeprom buffer. Write failing\n");
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 5f474e490f3e..cd692a4c5f85 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -283,7 +283,7 @@ main(int argc, char *argv[])
/*
* Decend the tree of scopes and insert/emit
* patches as appropriate. We perform a depth first
- * tranversal, recursively handling each scope.
+ * traversal, recursively handling each scope.
*/
/* start at the root scope */
dump_scope(SLIST_FIRST(&scope_stack));
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 924d55a8acbf..65182ad9cdf8 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -58,7 +58,6 @@
#include "aicasm_symbol.h"
#include "aicasm_insformat.h"
-int yylineno;
char *yyfilename;
char stock_prefix[] = "aic_";
char *prefix = stock_prefix;
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 7bf7fd5953ac..ed3bdd43c297 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -108,7 +108,7 @@ struct macro_arg {
regex_t arg_regex;
char *replacement_text;
};
-STAILQ_HEAD(macro_arg_list, macro_arg) args;
+STAILQ_HEAD(macro_arg_list, macro_arg);
struct macro_info {
struct macro_arg_list args;
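
The one-liner above fixes a subtle misuse of the BSD queue macros: STAILQ_HEAD(name, type) expands to a full "struct name { ... }" definition, so leaving an identifier after it also defined a file-scope variable (args) in every file including this header. The fixed form declares the type only. A small userspace sketch of the intended idiom (aicasm is a host tool), with illustrative types:

#include <sys/queue.h>

struct item {
	int value;
	STAILQ_ENTRY(item) entries;	/* linkage within the list */
};

STAILQ_HEAD(item_list, item);		/* declares struct item_list only */

/* A variable of the list type is then defined where it is needed. */
struct item_list pending = STAILQ_HEAD_INITIALIZER(pending);
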
diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h
index f8fd198aafbc..ba08eb3c4e3b 100644
--- a/drivers/scsi/aic7xxx/aiclib.h
+++ b/drivers/scsi/aic7xxx/aiclib.h
@@ -117,21 +117,6 @@ struct scsi_sense_data
#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
};
-/*
- * Status Byte
- */
-#define SCSI_STATUS_OK 0x00
-#define SCSI_STATUS_CHECK_COND 0x02
-#define SCSI_STATUS_COND_MET 0x04
-#define SCSI_STATUS_BUSY 0x08
-#define SCSI_STATUS_INTERMED 0x10
-#define SCSI_STATUS_INTERMED_COND_MET 0x14
-#define SCSI_STATUS_RESERV_CONFLICT 0x18
-#define SCSI_STATUS_CMD_TERMINATED 0x22 /* Obsolete in SAM-2 */
-#define SCSI_STATUS_QUEUE_FULL 0x28
-#define SCSI_STATUS_ACA_ACTIVE 0x30
-#define SCSI_STATUS_TASK_ABORTED 0x40
-
/************************* Large Disk Handling ********************************/
static inline int
aic_sector_div(sector_t capacity, int heads, int sectors)
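
The deleted SCSI_STATUS_* block duplicated status-byte values the kernel already provides as enum sam_status, and the aic7xxx_osm.c hunks earlier in this diff switch the call sites accordingly (SCSI_STATUS_OK -> SAM_STAT_GOOD, SCSI_STATUS_CHECK_COND -> SAM_STAT_CHECK_CONDITION, SCSI_STATUS_QUEUE_FULL -> SAM_STAT_TASK_SET_FULL, and so on; the numeric values are unchanged). A small sketch using the shared names:

#include <linux/types.h>
#include <scsi/scsi_proto.h>

static const char *sam_stat_name(u8 status)
{
	switch (status) {
	case SAM_STAT_GOOD:		return "GOOD";		  /* 0x00 */
	case SAM_STAT_CHECK_CONDITION:	return "CHECK CONDITION"; /* 0x02 */
	case SAM_STAT_BUSY:		return "BUSY";		  /* 0x08 */
	case SAM_STAT_TASK_SET_FULL:	return "TASK SET FULL";	  /* 0x28 */
	default:			return "other";
	}
}
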
diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
index 75811e245ec7..53343a6d8ae1 100644
--- a/drivers/scsi/aic7xxx/scsi_message.h
+++ b/drivers/scsi/aic7xxx/scsi_message.h
@@ -4,42 +4,15 @@
*/
/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
-#define MSG_CMDCOMPLETE 0x00 /* M/M */
-#define MSG_TASK_COMPLETE 0x00 /* M/M */ /* SPI3 Terminology */
-#define MSG_EXTENDED 0x01 /* O/O */
#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
#define MSG_RESTOREPOINTERS 0x03 /* O/O */
#define MSG_DISCONNECT 0x04 /* O/O */
-#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
-#define MSG_ABORT 0x06 /* O/M */
-#define MSG_ABORT_TASK_SET 0x06 /* O/M */ /* SPI3 Terminology */
#define MSG_MESSAGE_REJECT 0x07 /* M/M */
#define MSG_NOOP 0x08 /* M/M */
-#define MSG_PARITY_ERROR 0x09 /* M/M */
-#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
-#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
-#define MSG_BUS_DEV_RESET 0x0c /* O/M */
-#define MSG_TARGET_RESET 0x0c /* O/M */ /* SPI3 Terminology */
-#define MSG_ABORT_TAG 0x0d /* O/O */
-#define MSG_ABORT_TASK 0x0d /* O/O */ /* SPI3 Terminology */
-#define MSG_CLEAR_QUEUE 0x0e /* O/O */
-#define MSG_CLEAR_TASK_SET 0x0e /* O/O */ /* SPI3 Terminology */
-#define MSG_INIT_RECOVERY 0x0f /* O/O */ /* Deprecated in SPI3 */
-#define MSG_REL_RECOVERY 0x10 /* O/O */ /* Deprecated in SPI3 */
-#define MSG_TERM_IO_PROC 0x11 /* O/O */ /* Deprecated in SPI3 */
-#define MSG_CLEAR_ACA 0x16 /* O/O */ /* SPI3 */
-#define MSG_LOGICAL_UNIT_RESET 0x17 /* O/O */ /* SPI3 */
-#define MSG_QAS_REQUEST 0x55 /* O/O */ /* SPI3 */
/* Messages (2 byte) */
#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
-#define MSG_SIMPLE_TASK 0x20 /* O/O */ /* SPI3 Terminology */
-#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
-#define MSG_HEAD_OF_QUEUE_TASK 0x21 /* O/O */ /* SPI3 Terminology */
-#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
-#define MSG_ORDERED_TASK 0x22 /* O/O */ /* SPI3 Terminology */
#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
-#define MSG_ACA_TASK 0x24 /* 0/0 */ /* SPI3 */
/* Identify message */ /* M/M */
#define MSG_IDENTIFYFLAG 0x80
@@ -49,16 +22,13 @@
#define MSG_IDENTIFY_LUNMASK 0x3F
/* Extended messages (opcode and length) */
-#define MSG_EXT_SDTR 0x01
#define MSG_EXT_SDTR_LEN 0x03
-#define MSG_EXT_WDTR 0x03
#define MSG_EXT_WDTR_LEN 0x02
#define MSG_EXT_WDTR_BUS_8_BIT 0x00
#define MSG_EXT_WDTR_BUS_16_BIT 0x01
#define MSG_EXT_WDTR_BUS_32_BIT 0x02 /* Deprecated in SPI3 */
-#define MSG_EXT_PPR 0x04 /* SPI3 */
#define MSG_EXT_PPR_LEN 0x06
#define MSG_EXT_PPR_PCOMP_EN 0x80
#define MSG_EXT_PPR_RTI 0x40
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index c23bbb609126..f595bc2ee45e 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -33,7 +33,7 @@
#ifdef ASD_DEBUG
#define ASD_DPRINTK asd_printk
#else
-#define ASD_DPRINTK(fmt, ...)
+#define ASD_DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
/* 2*ITNL timeout + 1 second */
@@ -42,14 +42,6 @@
extern struct kmem_cache *asd_dma_token_cache;
extern struct kmem_cache *asd_ascb_cache;
-static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
- snprintf(p, 3, "%02X", sas_addr[i]);
- *p = '\0';
-}
-
struct asd_ha_struct;
struct asd_ascb;
@@ -68,7 +60,6 @@ void asd_set_dmamode(struct domain_device *dev);
/* ---------- TMFs ---------- */
int asd_abort_task(struct sas_task *);
int asd_abort_task_set(struct domain_device *, u8 *lun);
-int asd_clear_aca(struct domain_device *, u8 *lun);
int asd_clear_task_set(struct domain_device *, u8 *lun);
int asd_lu_reset(struct domain_device *, u8 *lun);
int asd_I_T_nexus_reset(struct domain_device *dev);
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 604a5331f639..91d196f26b76 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -159,7 +159,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SAS_SATA_DEV) ||
(dev->tproto & SAS_PROTOCOL_STP)) {
- struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ struct smp_rps_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
rps_resp->function == SMP_REPORT_PHY_SATA &&
rps_resp->result == SMP_RESP_FUNC_ACC) {
@@ -236,7 +236,7 @@ static int asd_init_sata_pm_table_ddb(struct domain_device *dev)
/**
* asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port
- * dev: pointer to domain device
+ * @dev: pointer to domain device
*
* For SATA Port Multiplier Ports we need to allocate one SATA Port
* Multiplier Port DDB and depending on whether the target on it
@@ -281,7 +281,7 @@ static int asd_init_initiator_ddb(struct domain_device *dev)
/**
* asd_init_sata_pm_ddb -- SATA Port Multiplier
- * dev: pointer to domain device
+ * @dev: pointer to domain device
*
* For STP and direct-attached SATA Port Multipliers we need
* one target port DDB entry and one SATA PM table DDB entry.
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index 7c4c53a54b78..552f1913e95e 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -720,154 +720,8 @@ static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
}
-#if 0
-
/**
- * asd_dump_ddb_site -- dump a CSEQ DDB site
- * @asd_ha: pointer to host adapter structure
- * @site_no: site number of interest
- */
-void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
-{
- if (site_no >= asd_ha->hw_prof.max_ddbs)
- return;
-
-#define DDB_FIELDB(__name) \
- asd_ddbsite_read_byte(asd_ha, site_no, \
- offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-#define DDB2_FIELDB(__name) \
- asd_ddbsite_read_byte(asd_ha, site_no, \
- offsetof(struct asd_ddb_stp_sata_target_port, __name))
-#define DDB_FIELDW(__name) \
- asd_ddbsite_read_word(asd_ha, site_no, \
- offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-
-#define DDB_FIELDD(__name) \
- asd_ddbsite_read_dword(asd_ha, site_no, \
- offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-
- asd_printk("DDB: 0x%02x\n", site_no);
- asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
- asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
- asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
- asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
- asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
- asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
- asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
- asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
- asd_printk("Pathway Blocked Count: 0x%02x\n",
- DDB_FIELDB(pathway_blocked_count));
- asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
- asd_printk("more_compat_features: 0x%08x\n",
- DDB_FIELDD(more_compat_features));
- asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
- asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
- asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
- asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
- asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
- asd_printk("Active Task Count: 0x%04x\n",
- DDB_FIELDW(active_task_count));
- asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
- asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
- asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
-}
-
-void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
-{
-#define DDB0_FIELDB(__name) \
- asd_ddbsite_read_byte(asd_ha, 0, \
- offsetof(struct asd_ddb_seq_shared, __name))
-#define DDB0_FIELDW(__name) \
- asd_ddbsite_read_word(asd_ha, 0, \
- offsetof(struct asd_ddb_seq_shared, __name))
-
-#define DDB0_FIELDD(__name) \
- asd_ddbsite_read_dword(asd_ha,0 , \
- offsetof(struct asd_ddb_seq_shared, __name))
-
-#define DDB0_FIELDA(__name, _o) \
- asd_ddbsite_read_byte(asd_ha, 0, \
- offsetof(struct asd_ddb_seq_shared, __name)+_o)
-
-
- asd_printk("DDB: 0\n");
- asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
- asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
- asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
- asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
- asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
- asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
- asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
- asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
- asd_printk("est_nexus_buf_thresh:%04x\n",
- DDB0_FIELDW(est_nexus_buf_thresh));
- asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
- asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
- asd_printk("port_map_by_links:%02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- DDB0_FIELDA(port_map_by_links, 0),
- DDB0_FIELDA(port_map_by_links, 1),
- DDB0_FIELDA(port_map_by_links, 2),
- DDB0_FIELDA(port_map_by_links, 3),
- DDB0_FIELDA(port_map_by_links, 4),
- DDB0_FIELDA(port_map_by_links, 5),
- DDB0_FIELDA(port_map_by_links, 6),
- DDB0_FIELDA(port_map_by_links, 7));
-}
-
-static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
-{
-
-#define SCB_FIELDB(__name) \
- asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
- + offsetof(struct initiate_ssp_task, __name))
-#define SCB_FIELDW(__name) \
- asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
- + offsetof(struct initiate_ssp_task, __name))
-#define SCB_FIELDD(__name) \
- asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
- + offsetof(struct initiate_ssp_task, __name))
-
- asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
- asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
- asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
- asd_printk("Target Port Xfer Tag: 0x%04x.\n",
- SCB_FIELDW(ssp_frame.tptt));
- asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
- asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
-}
-
-/**
- * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
- * @asd_ha: pointer to host adapter struct
- */
-void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
-{
- u16 site_no;
-
- for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
- u8 opcode;
-
- if (!SCB_SITE_VALID(site_no))
- continue;
-
- /* We are only interested in SCB sites currently used.
- */
- opcode = asd_scbsite_read_byte(asd_ha, site_no,
- offsetof(struct scb_header,
- opcode));
- if (opcode == 0xFF)
- continue;
-
- asd_printk("\nSCB: 0x%x\n", site_no);
- asd_dump_scb_site(asd_ha, site_no);
- }
-}
-
-#endif /* 0 */
-
-/**
- * ads_dump_seq_state -- dump CSEQ and LSEQ states
+ * asd_dump_seq_state -- dump CSEQ and LSEQ states
* @asd_ha: pointer to host adapter structure
* @lseq_mask: mask of LSEQs of interest
*/
@@ -908,42 +762,4 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
}
-#if 0
-
-static void asd_dump_scb(struct asd_ascb *ascb, int ind)
-{
- asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
- "index:%d, opcode:0x%02x\n",
- ind, ascb->dma_scb.vaddr,
- (unsigned long long)ascb->dma_scb.dma_handle,
- (unsigned long long)
- le64_to_cpu(ascb->scb->header.next_scb),
- le16_to_cpu(ascb->scb->header.index),
- ascb->scb->header.opcode);
-}
-
-void asd_dump_scb_list(struct asd_ascb *ascb, int num)
-{
- int i = 0;
-
- asd_printk("dumping %d scbs:\n", num);
-
- asd_dump_scb(ascb, i++);
- --num;
-
- if (num > 0 && !list_empty(&ascb->list)) {
- struct list_head *el;
-
- list_for_each(el, &ascb->list) {
- struct asd_ascb *s = list_entry(el, struct asd_ascb,
- list);
- asd_dump_scb(s, i++);
- if (--num <= 0)
- break;
- }
- }
-}
-
-#endif /* 0 */
-
#endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index c5a46c59d4f8..3dd110143471 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -575,7 +575,7 @@ static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
/**
* asd_init_ctxmem -- initialize context memory
- * asd_ha: pointer to host adapter structure
+ * @asd_ha: pointer to host adapter structure
*
* This function sets the maximum number of SCBs and
* DDBs which can be used by the sequencer. This is normally
@@ -903,7 +903,7 @@ static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
}
/**
- * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR)
+ * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
* @asd_ha: pointer to host adapter structure
*/
static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
@@ -1144,9 +1144,8 @@ static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
}
/**
- * asd_start_timers -- (add and) start timers of SCBs
+ * asd_start_scb_timers -- (add and) start timers of SCBs
* @list: pointer to struct list_head of the scbs
- * @to: timeout in jiffies
*
* If an SCB in the @list has no timer function, assign the default
* one, then start the timer of the SCB. This function is
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index d022407e5645..954d0c5ae2e2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -40,6 +40,7 @@ static struct scsi_host_template aic94xx_sht = {
/* .name is initialized */
.name = "aic94xx",
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = asd_scan_finished,
@@ -52,6 +53,7 @@ static struct scsi_host_template aic94xx_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
@@ -529,7 +531,7 @@ static int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
return 0;
}
-/**
+/*
* asd_free_edbs -- free empty data buffers
* asd_ha: pointer to host adapter structure
*/
@@ -958,7 +960,6 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_abort_task = asd_abort_task,
.lldd_abort_task_set = asd_abort_task_set,
- .lldd_clear_aca = asd_clear_aca,
.lldd_clear_task_set = asd_clear_task_set,
.lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
.lldd_lu_reset = asd_lu_reset,
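
The template hunks above wire two libsas/libata helpers into the host template: sas_slave_alloc for device setup and ata_scsi_dma_need_drain so ATA devices reached through SAS get request draining when libata requires it. A hedged sketch of the relevant template slice; my_sas_sht is hypothetical and only the plumbing shown is implied by the diff:

#include <linux/libata.h>
#include <linux/module.h>
#include <scsi/libsas.h>

static struct scsi_host_template my_sas_sht = {
	.module			= THIS_MODULE,
	.queuecommand		= sas_queuecommand,
	/* ATA-over-SAS commands may need draining; libata decides. */
	.dma_need_drain		= ata_scsi_dma_need_drain,
	.slave_alloc		= sas_slave_alloc,
	.slave_configure	= sas_slave_configure,
	.target_alloc		= sas_target_alloc,
	.target_destroy		= sas_target_destroy,
};
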
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 4a80ec08f0c9..68214a58b160 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -68,7 +68,6 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct asd_ha_struct *asd_ha = ascb->ha;
- struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
int phy_id = dl->status_block[0] & DL_PHY_MASK;
struct asd_phy *phy = &asd_ha->phys[phy_id];
@@ -81,7 +80,8 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
asd_turn_led(asd_ha, phy_id, 0);
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
+ GFP_ATOMIC);
break;
case CURRENT_OOB_DONE:
/* hot plugged device */
@@ -89,12 +89,13 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
get_lrate_mode(phy, oob_mode);
ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
break;
case CURRENT_SPINUP_HOLD:
/* hot plug SATA, no COMWAKE sent */
asd_turn_led(asd_ha, phy_id, 1);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
+ GFP_ATOMIC);
break;
case CURRENT_GTO_TIMEOUT:
case CURRENT_OOB_ERROR:
@@ -102,7 +103,7 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
dl->status_block[1]);
asd_turn_led(asd_ha, phy_id, 0);
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
break;
}
}
@@ -123,8 +124,8 @@ static unsigned ord_phy(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
/**
* asd_get_attached_sas_addr -- extract/generate attached SAS address
- * phy: pointer to asd_phy
- * sas_addr: pointer to buffer where the SAS address is to be written
+ * @phy: pointer to asd_phy
+ * @sas_addr: pointer to buffer where the SAS address is to be written
*
* This function extracts the SAS address from an IDENTIFY frame
* received. If OOB is SATA, then a SAS address is generated from the
@@ -222,7 +223,6 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
int edb_el = edb_id + ascb->edb_index;
struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
struct asd_phy *phy = &ascb->ha->phys[phy_id];
- struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
size = min(size, (u16) sizeof(phy->frame_rcvd));
@@ -234,7 +234,7 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
asd_dump_frame_rcvd(phy, dl);
asd_form_port(ascb->ha, phy);
- sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
+ sas_notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}
static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
@@ -270,7 +270,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
asd_turn_led(asd_ha, phy_id, 0);
sas_phy_disconnected(sas_phy);
asd_deform_port(asd_ha, phy);
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, GFP_ATOMIC);
if (retries_left == 0) {
int num = 1;
@@ -315,7 +315,8 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = ffs(cont);
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case LmUNKNOWNP:
@@ -336,7 +337,8 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
/* The sequencer disables all phys on that port.
* We have to re-enable the phys ourselves. */
asd_deform_port(asd_ha, phy);
- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ sas_notify_port_event(sas_phy, PORTE_HARD_RESET,
+ GFP_ATOMIC);
break;
default:
@@ -567,7 +569,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
/* the device is gone */
sas_phy_disconnected(sas_phy);
asd_deform_port(asd_ha, phy);
- sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
+ sas_notify_port_event(sas_phy, PORTE_TIMER_EVENT, GFP_ATOMIC);
break;
default:
ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
@@ -706,11 +708,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS;
- /* fall through*/
+ fallthrough;
default:
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS;
}
@@ -718,9 +720,10 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
/* nothing to do */
@@ -730,7 +733,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS;
@@ -739,6 +742,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SATA_SPEED_15_DIS;
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
/* nothing to do */
@@ -789,7 +793,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
- /* fall through */
+ fallthrough;
case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */
@@ -847,7 +851,7 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
/**
* asd_ascb_timedout -- called when a pending SCB's timer has expired
- * @data: unsigned long, a pointer to the ascb in question
+ * @t: Timer context used to fetch the SCB
*
* This is the default timeout function which does the most necessary.
* Upper layers can implement their own timeout function, say to free
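
Two themes run through the aic94xx_scb.c hunks above: the /* fall through */ comments become the fallthrough pseudo-keyword (see the sketch after the aic94xx_tmf.c diff below), and the indirect sas_ha->notify_*_event() calls become the exported libsas helpers, which now take an explicit gfp_t because these tasklet-context callers cannot sleep. A minimal sketch of the helper usage, with a hypothetical my_phy_isr():

#include <scsi/libsas.h>

static void my_phy_isr(struct asd_sas_phy *sas_phy, bool link_up)
{
	/* Atomic context, so allocation hints must be GFP_ATOMIC. */
	if (link_up)
		sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
	else
		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
}
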
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 3ddc8852bc32..5def83c88f13 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -406,7 +406,7 @@ struct asd_manuf_sec {
u8 sas_addr[SAS_ADDR_SIZE];
u8 pcba_sn[ASD_PCBA_SN_SIZE];
/* Here start the other segments */
- u8 linked_list[0];
+ u8 linked_list[];
} __attribute__ ((packed));
struct asd_manuf_phy_desc {
@@ -449,7 +449,7 @@ struct asd_ms_sb_desc {
u8 type;
u8 node_desc_index;
u8 conn_desc_index;
- u8 _recvd[0];
+ u8 _recvd[];
} __attribute__ ((packed));
#if 0
@@ -478,12 +478,12 @@ struct asd_ms_conn_desc {
u8 size_sideband_desc;
u32 _resvd;
u8 name[16];
- struct asd_ms_sb_desc sb_desc[0];
+ struct asd_ms_sb_desc sb_desc[];
} __attribute__ ((packed));
struct asd_nd_phy_desc {
u8 vp_attch_type;
- u8 attch_specific[0];
+ u8 attch_specific[];
} __attribute__ ((packed));
#if 0
@@ -503,7 +503,7 @@ struct asd_ms_node_desc {
u8 size_phy_desc;
u8 _resvd;
u8 name[16];
- struct asd_nd_phy_desc phy_desc[0];
+ struct asd_nd_phy_desc phy_desc[];
} __attribute__ ((packed));
struct asd_ms_conn_map {
@@ -517,8 +517,10 @@ struct asd_ms_conn_map {
u8 num_nodes;
u8 usage_model_id;
u32 _resvd;
- struct asd_ms_conn_desc conn_desc[0];
- struct asd_ms_node_desc node_desc[0];
+ union {
+ DECLARE_FLEX_ARRAY(struct asd_ms_conn_desc, conn_desc);
+ DECLARE_FLEX_ARRAY(struct asd_ms_node_desc, node_desc);
+ };
} __attribute__ ((packed));
struct asd_ctrla_phy_entry {
@@ -542,7 +544,7 @@ struct asd_ll_el {
u8 id0;
u8 id1;
__le16 next;
- u8 something_here[0];
+ u8 something_here[];
} __attribute__ ((packed));
static int asd_poll_flash(struct asd_ha_struct *asd_ha)
@@ -718,10 +720,12 @@ static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
do {
switch (id1) {
default:
- if (el->id1 == id1)
+ if (el->id1 == id1) {
+ fallthrough;
case 0xFF:
if (el->id0 == id0)
return el;
+ }
}
el = start + le16_to_cpu(el->next);
} while (el != start);
@@ -1244,7 +1248,7 @@ int asd_chk_write_status(struct asd_ha_struct *asd_ha,
}
/**
- * asd_hwi_erase_nv_sector - Erase the flash memory sectors.
+ * asd_erase_nv_sector - Erase the flash memory sectors.
* @asd_ha: pointer to the host adapter structure
* @flash_addr: pointer to offset from flash memory
* @size: total bytes to erase.
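
The aic94xx_sds.c hunks above convert GNU zero-length arrays (member[0]) to C99 flexible array members (member[]), which lets compiler bounds checking flag misuse. The asd_ms_conn_map case is the interesting one: two trailing arrays overlapped at the same offset, which a bare flexible member cannot express, so they are wrapped in a union via DECLARE_FLEX_ARRAY(). A sketch of that pattern with illustrative types:

#include <linux/stddef.h>
#include <linux/types.h>

struct record_a { u8 kind; u8 payload[7]; } __attribute__ ((packed));
struct record_b { __le16 tag; u8 data[6]; } __attribute__ ((packed));

struct frame {
	u8 count;
	/* Two views of the same trailing bytes, selected at runtime. */
	union {
		DECLARE_FLEX_ARRAY(struct record_a, recs_a);
		DECLARE_FLEX_ARRAY(struct record_b, recs_b);
	};
} __attribute__ ((packed));
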
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 11853ec29d87..c0f685c86851 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -582,6 +582,7 @@ static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
/**
* asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
* @asd_ha: pointer to host adapter structure
+ * @lseq: link sequencer
*/
static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
{
@@ -669,6 +670,7 @@ static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
/**
* asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
* @asd_ha: pointer to host adapter structure
+ * @lseq: link sequencer
*/
static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
{
@@ -953,6 +955,7 @@ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
/**
* asd_init_lseq_cio -- initialize LmSEQ CIO registers
* @asd_ha: pointer to host adapter structure
+ * @lseq: link sequencer
*/
static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
{
@@ -1345,7 +1348,8 @@ int asd_start_seqs(struct asd_ha_struct *asd_ha)
/**
* asd_update_port_links -- update port_map_by_links and phy_is_up
- * @sas_phy: pointer to the phy which has been added to a port
+ * @asd_ha: pointer to host adapter structure
+ * @phy: pointer to the phy which has been added to a port
*
* 1) When a link reset has completed and we got BYTES DMAED with a
* valid frame we call this function for that phy, to indicate that
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index f923ed019d4a..ed119a3f6f2e 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -205,7 +205,7 @@ Again:
switch (opcode) {
case TC_NO_ERROR:
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
break;
case TC_UNDERRUN:
ts->resp = SAS_TASK_COMPLETE;
@@ -269,7 +269,6 @@ Again:
case TA_I_T_NEXUS_LOSS:
opcode = dl->status_block[0];
goto Again;
- break;
case TF_INV_CONN_HANDLE:
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEVICE_UNKNOWN;
@@ -316,13 +315,13 @@ Again:
break;
case SAS_PROTOCOL_SSP:
asd_unbuild_ssp_ascb(ascb);
+ break;
default:
break;
}
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
struct completion *completion = ascb->completion;
@@ -532,7 +531,6 @@ int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
struct sas_task *t = task;
struct asd_ascb *ascb = NULL, *a;
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
- unsigned long flags;
res = asd_can_queue(asd_ha, 1);
if (res)
@@ -575,10 +573,6 @@ int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
}
if (res)
goto out_err_unmap;
-
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&t->task_state_lock, flags);
}
list_del_init(&alist);
@@ -597,9 +591,6 @@ out_err_unmap:
if (a == b)
break;
t = a->uldd_task;
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&t->task_state_lock, flags);
switch (t->task_proto) {
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
@@ -610,6 +601,7 @@ out_err_unmap:
break;
case SAS_PROTOCOL_SSP:
asd_unbuild_ssp_ascb(a);
+ break;
default:
break;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index f814026f26fa..27d32b8c2987 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -287,7 +287,7 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
fh = edb->vaddr + 16;
ru = edb->vaddr + 16 + sizeof(*fh);
res = ru->status;
- if (ru->datapres == 1) /* Response data present */
+ if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA)
res = ru->resp_data[3];
#if 0
ascb->tag = fh->tag;
@@ -490,7 +490,7 @@ int asd_abort_task(struct sas_task *task)
switch (tcs.dl_opcode) {
default:
res = asd_clear_nexus(task);
- /* fallthrough */
+ fallthrough;
case TC_NO_ERROR:
break;
/* The task hasn't been sent to the device xor
@@ -644,15 +644,6 @@ int asd_abort_task_set(struct domain_device *dev, u8 *lun)
return res;
}
-int asd_clear_aca(struct domain_device *dev, u8 *lun)
-{
- int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
-
- if (res == TMF_RESP_FUNC_COMPLETE)
- asd_clear_nexus_I_T_L(dev, lun);
- return res;
-}
-
int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
@@ -673,7 +664,7 @@ int asd_lu_reset(struct domain_device *dev, u8 *lun)
/**
* asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
- * task: pointer to sas_task struct of interest
+ * @task: pointer to sas_task struct of interest
*
* Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
* or TMF_RESP_FUNC_SUCC if the task is in the task set.
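
Like the aic94xx_scb.c changes earlier, the hunk above replaces a /* fallthrough */ comment with the fallthrough pseudo-keyword from <linux/compiler_attributes.h> (pulled in via <linux/compiler.h>), which maps to __attribute__((fallthrough)) where available so -Wimplicit-fallthrough can verify that every unterminated case is deliberate. A small illustrative sketch:

#include <linux/compiler.h>

static int rate_mask(int max_rate)
{
	int mask = 0;

	switch (max_rate) {
	case 3:
		mask |= 4;
		fallthrough;	/* a max rate of 3 also enables 2 and 1 */
	case 2:
		mask |= 2;
		fallthrough;
	case 1:
		mask |= 1;
		break;
	default:
		break;
	}
	return mask;
}
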
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 9220bcf8388f..07df255c4b1b 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.10-20190116"
+#define ARCMSR_DRIVER_VERSION "v1.50.00.05-20210429"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -80,8 +80,10 @@ struct device_attribute;
#ifndef PCI_DEVICE_ID_ARECA_1884
#define PCI_DEVICE_ID_ARECA_1884 0x1884
#endif
+#define PCI_DEVICE_ID_ARECA_1886 0x188A
#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
#define ARCMSR_MINUTES (1000 * 60 * 60)
+#define ARCMSR_DEFAULT_TIMEOUT 90
/*
**********************************************************************************
**
@@ -436,6 +438,21 @@ struct FIRMWARE_INFO
#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+
+/*
+*******************************************************************************
+** SPEC. for Areca Type F adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1886 0x188617D3
+// Doorbell and interrupt definition are same as Type E adapter
+/* ARC-1886 doorbell sync */
+#define ARCMSR_HBFMU_DOORBELL_SYNC 0x100
+// Set host RW buffer physical address at inbound message 0, 1 (low, high)
+#define ARCMSR_HBFMU_DOORBELL_SYNC1 0x300
+#define ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK 0x80000000
+#define ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE 0x20000000
+
/*
*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
@@ -720,6 +737,80 @@ struct MessageUnit_E{
uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};
+/*
+*********************************************************************
+** Messaging Unit (MU) of Type F processor(LSI)
+*********************************************************************
+*/
+struct MessageUnit_F {
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[46]; /*0100 01B7*/
+ uint32_t reply_post_producer_index1; /*01B8 01BB*/
+ uint32_t reply_post_consumer_index1; /*01BC 01BF*/
+};
+
+#define MESG_RW_BUFFER_SIZE (256 * 3)
+
typedef struct deliver_completeQ {
uint16_t cmdFlag;
uint16_t cmdSMID;
@@ -739,6 +830,7 @@ struct AdapterControlBlock
#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+#define ACB_ADAPTER_TYPE_F 0x00000005 /* hba L IOP */
u32 ioqueue_size;
struct pci_dev * pdev;
struct Scsi_Host * host;
@@ -760,10 +852,16 @@ struct AdapterControlBlock
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
struct MessageUnit_E __iomem *pmuE;
+ struct MessageUnit_F __iomem *pmuF;
};
/* message unit ATU inbound base address0 */
void __iomem *mem_base0;
void __iomem *mem_base1;
+ //0x000 - COMPORT_IN (Host sent to ROC)
+ uint32_t *message_wbuffer;
+ //0x100 - COMPORT_OUT (ROC sent to Host)
+ uint32_t *message_rbuffer;
+ uint32_t *msgcode_rwbuffer; //0x200 - BIOS_AREA
uint32_t acb_flags;
u16 dev_id;
uint8_t adapter_index;
@@ -836,8 +934,6 @@ struct AdapterControlBlock
#define FW_NORMAL 0x0000
#define FW_BOG 0x0001
#define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
uint32_t maxOutstanding;
int vector_count;
uint32_t maxFreeCCB;
@@ -848,6 +944,7 @@ struct AdapterControlBlock
uint32_t out_doorbell;
uint32_t completionQ_entry;
pCompletion_Q pCompletionQ;
+ uint32_t completeQ_size;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -944,6 +1041,6 @@ extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
struct QBUFFER __iomem *);
extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
-extern struct device_attribute *arcmsr_host_attrs[];
+extern const struct attribute_group *arcmsr_host_groups[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 259d9c20bf25..baeb5e795690 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -41,7 +41,7 @@
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
@@ -58,8 +58,6 @@
#include <scsi/scsi_transport.h>
#include "arcmsr.h"
-struct device_attribute *arcmsr_host_attrs[];
-
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin,
@@ -389,16 +387,25 @@ static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_q
static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
-struct device_attribute *arcmsr_host_attrs[] = {
- &dev_attr_host_driver_version,
- &dev_attr_host_driver_posted_cmd,
- &dev_attr_host_driver_reset,
- &dev_attr_host_driver_abort,
- &dev_attr_host_fw_model,
- &dev_attr_host_fw_version,
- &dev_attr_host_fw_request_len,
- &dev_attr_host_fw_numbers_queue,
- &dev_attr_host_fw_sdram_size,
- &dev_attr_host_fw_hd_channels,
+static struct attribute *arcmsr_host_attrs[] = {
+ &dev_attr_host_driver_version.attr,
+ &dev_attr_host_driver_posted_cmd.attr,
+ &dev_attr_host_driver_reset.attr,
+ &dev_attr_host_driver_abort.attr,
+ &dev_attr_host_fw_model.attr,
+ &dev_attr_host_fw_version.attr,
+ &dev_attr_host_fw_request_len.attr,
+ &dev_attr_host_fw_numbers_queue.attr,
+ &dev_attr_host_fw_sdram_size.attr,
+ &dev_attr_host_fw_hd_channels.attr,
NULL,
};
+
+static const struct attribute_group arcmsr_host_attr_group = {
+ .attrs = arcmsr_host_attrs,
+};
+
+const struct attribute_group *arcmsr_host_groups[] = {
+ &arcmsr_host_attr_group,
+ NULL
+};
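
This converts the sysfs hookup from an array of bare device_attribute
pointers to a NULL-terminated array of attribute groups, the form consumed
by the shost_groups template member switched over in the arcmsr_hba.c hunk
below. For reference, the ATTRIBUTE_GROUPS() helper from <linux/sysfs.h>
generates the same group-plus-array boilerplate; a minimal sketch, with
"foo" as a hypothetical driver prefix that is not part of this patch:

        static struct attribute *foo_host_attrs[] = {
                &dev_attr_foo_stat.attr,        /* hypothetical attribute */
                NULL
        };
        ATTRIBUTE_GROUPS(foo_host);     /* emits foo_host_group and foo_host_groups[] */
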
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 40dc8eac0e3a..d3fb8a9c1c39 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -41,7 +41,7 @@
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
@@ -99,6 +99,10 @@ static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
+static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
+module_param(cmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");
+
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -113,8 +117,8 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
-static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
-static int arcmsr_resume(struct pci_dev *pdev);
+static int __maybe_unused arcmsr_suspend(struct device *dev);
+static int __maybe_unused arcmsr_resume(struct device *dev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
@@ -133,12 +137,14 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
+static int arcmsr_slave_config(struct scsi_device *sdev);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -154,13 +160,14 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
+ .slave_configure = arcmsr_slave_config,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
.can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
.this_id = ARCMSR_SCSI_INITIATOR_ID,
.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
.cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
- .shost_attrs = arcmsr_host_attrs,
+ .shost_groups = arcmsr_host_groups,
.no_write_same = 1,
};
@@ -209,17 +216,20 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_C},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
.driver_data = ACB_ADAPTER_TYPE_E},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
+ .driver_data = ACB_ADAPTER_TYPE_F},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
+static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);
+
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
.id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
- .suspend = arcmsr_suspend,
- .resume = arcmsr_resume,
+ .driver.pm = &arcmsr_pm_ops,
.shutdown = arcmsr_shutdown,
};
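
With dev_pm_ops, the PCI core takes over config-space save/restore and the
D-state transitions around the callbacks, which is why the explicit
pci_save_state()/pci_set_power_state()/pci_enable_device() calls drop out of
arcmsr_suspend()/arcmsr_resume() in the hunks below. Roughly, the macro
expands to:

        /* roughly what SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, ...) produces */
        static const struct dev_pm_ops arcmsr_pm_ops = {
                .suspend  = arcmsr_suspend,  .resume  = arcmsr_resume,
                .freeze   = arcmsr_suspend,  .thaw    = arcmsr_resume,
                .poweroff = arcmsr_suspend,  .restore = arcmsr_resume,
        };
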
/*
@@ -232,12 +242,12 @@ static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
case ACB_ADAPTER_TYPE_D:
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
}
- }
}
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
@@ -283,11 +293,10 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
}
case ACB_ADAPTER_TYPE_D: {
void __iomem *mem_base0;
- unsigned long addr, range, flags;
+ unsigned long addr, range;
addr = (unsigned long)pci_resource_start(pdev, 0);
range = pci_resource_len(pdev, 0);
- flags = pci_resource_flags(pdev, 0);
mem_base0 = ioremap(addr, range);
if (!mem_base0) {
pr_notice("arcmsr%d: memory mapping region fail\n",
@@ -311,6 +320,19 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->out_doorbell = 0;
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!acb->pmuF) {
+ pr_notice("arcmsr%d: memory mapping region fail\n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -318,26 +340,25 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A:{
+ case ACB_ADAPTER_TYPE_A:
iounmap(acb->pmuA);
- }
- break;
- case ACB_ADAPTER_TYPE_B:{
+ break;
+ case ACB_ADAPTER_TYPE_B:
iounmap(acb->mem_base0);
iounmap(acb->mem_base1);
- }
-
- break;
- case ACB_ADAPTER_TYPE_C:{
+ break;
+ case ACB_ADAPTER_TYPE_C:
iounmap(acb->pmuC);
- }
- break;
+ break;
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
case ACB_ADAPTER_TYPE_E:
iounmap(acb->pmuE);
break;
+ case ACB_ADAPTER_TYPE_F:
+ iounmap(acb->pmuF);
+ break;
}
}
@@ -353,16 +374,11 @@ static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
static int arcmsr_bios_param(struct scsi_device *sdev,
struct block_device *bdev, sector_t capacity, int *geom)
{
- int ret, heads, sectors, cylinders, total_capacity;
- unsigned char *buffer;/* return copy of block device's partition table */
+ int heads, sectors, cylinders, total_capacity;
+
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
- buffer = scsi_bios_ptable(bdev);
- if (buffer) {
- ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
- kfree(buffer);
- if (ret != -1)
- return ret;
- }
total_capacity = capacity;
heads = 64;
sectors = 32;
@@ -558,23 +574,20 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
arcmsr_hbaA_flush_cache(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
arcmsr_hbaB_flush_cache(acb);
- }
break;
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
arcmsr_hbaC_flush_cache(acb);
- }
break;
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
arcmsr_hbaE_flush_cache(acb);
break;
}
@@ -632,6 +645,27 @@ static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
+static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
+{
+ dma_addr_t host_buffer_dma;
+ struct MessageUnit_F __iomem *pmuF;
+
+ memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
+ acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
+ acb->completeQ_size, 4);
+ acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
+ acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
+ memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
+ host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
+ pmuF = acb->pmuF;
+ /* host buffer low address, bit0:1 all buffer active */
+ writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
+ /* host buffer high address */
+ writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
+ /* set host buffer physical address */
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
+}
+
static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
{
bool rtn = true;
@@ -685,6 +719,28 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
acb->doneq_index = 0;
}
break;
+ case ACB_ADAPTER_TYPE_F: {
+ uint32_t QueueDepth;
+ uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
+
+ arcmsr_wait_firmware_ready(acb);
+ QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
+ acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
+ acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent) {
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ arcmsr_hbaF_assign_regAddr(acb);
+ }
+ break;
default:
break;
}
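
The type-F firmware advertises its completion-queue depth as a 3-bit code
read back from outbound_msgaddr1; codes 0..5 select {256, 512, 1024, 128,
64, 32} entries. A worked example, assuming the firmware reports code 2:

        code  = readl(&acb->pmuF->outbound_msgaddr1) & 7;  /* -> 2            */
        depth = depthTbl[code];                            /* -> 1024 entries */
        /* completeQ_size = 1024 * sizeof(struct deliver_completeQ) + 128 */
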
@@ -719,7 +775,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
- acb->uncache_size += acb->ioqueue_size;
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
+ acb->uncache_size += acb->ioqueue_size;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -742,6 +799,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
@@ -760,8 +818,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
dma_coherent_handle = next_ccb_phy;
}
- acb->dma_coherent_handle2 = dma_coherent_handle;
- acb->dma_coherent2 = ccb_tmp;
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = ccb_tmp;
+ }
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
@@ -791,7 +851,6 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
- acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -828,8 +887,12 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
+ break;
+ }
}
- atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
return;
for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
@@ -860,6 +923,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap++;
acb_dev_map++;
}
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}
static int
@@ -912,8 +976,6 @@ out_free_irq:
static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
{
INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&pacb->rq_map_token, 16);
- atomic_set(&pacb->ante_token_value, 16);
pacb->fw_flag = FW_NORMAL;
timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
@@ -1015,7 +1077,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(!error){
goto free_hbb_mu;
}
- arcmsr_free_io_queue(acb);
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
error = arcmsr_alloc_ccb_pool(acb);
if(error){
goto unmap_pci_region;
@@ -1070,14 +1133,14 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
pci_free_irq_vectors(pdev);
}
-static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused arcmsr_suspend(struct device *dev)
{
- uint32_t intmask_org;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
- intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
del_timer_sync(&acb->eternal_timer);
if (set_date_time)
@@ -1085,29 +1148,18 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
- pci_set_drvdata(pdev, host);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int arcmsr_resume(struct pci_dev *pdev)
+static int __maybe_unused arcmsr_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- pr_warn("%s: pci_enable_device error\n", __func__);
- return -ENODEV;
- }
if (arcmsr_set_dma_mask(acb))
goto controller_unregister;
- pci_set_master(pdev);
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
switch (acb->adapter_type) {
@@ -1129,6 +1181,14 @@ static int arcmsr_resume(struct pci_dev *pdev)
acb->out_doorbell = 0;
acb->doneq_index = 0;
break;
+ case ACB_ADAPTER_TYPE_F:
+ writel(0, &acb->pmuF->host_int_status);
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ acb->doneq_index = 0;
+ arcmsr_hbaF_assign_regAddr(acb);
+ break;
}
arcmsr_iop_init(acb);
arcmsr_init_get_devmap_timer(acb);
@@ -1141,10 +1201,10 @@ controller_stop:
controller_unregister:
scsi_remove_host(host);
arcmsr_free_ccb_pool(acb);
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
- pci_release_regions(pdev);
scsi_host_put(host);
- pci_disable_device(pdev);
return -ENODEV;
}
@@ -1220,25 +1280,20 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
rtnval = arcmsr_hbaA_abort_allcmd(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
rtnval = arcmsr_hbaB_abort_allcmd(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
rtnval = arcmsr_hbaC_abort_allcmd(acb);
- }
break;
-
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
rtnval = arcmsr_hbaE_abort_allcmd(acb);
break;
}
@@ -1263,24 +1318,26 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
spin_lock_irqsave(&acb->ccblist_lock, flags);
list_add_tail(&ccb->list, &acb->ccb_free_list);
spin_unlock_irqrestore(&acb->ccblist_lock, flags);
- pcmd->scsi_done(pcmd);
+ scsi_done(pcmd);
}
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
-
struct scsi_cmnd *pcmd = ccb->pcmd;
- struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (sensebuffer) {
- int sense_data_length =
- sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
- ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
- memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
- memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
+
+ pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ if (pcmd->sense_buffer) {
+ struct SENSE_DATA *sensebuffer;
+
+ memcpy_and_pad(pcmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ ccb->arcmsr_cdb.SenseData,
+ sizeof(ccb->arcmsr_cdb.SenseData),
+ 0);
+
+ sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
- pcmd->result |= (DRIVER_SENSE << 24);
}
}
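
memcpy_and_pad() from <linux/string.h> collapses the old memset()+memcpy()
pair: it copies min(src_len, dst_len) bytes and fills any remaining
destination bytes with the pad value, so the sense buffer is always fully
initialized in one call. A toy example:

        char dst[8];
        memcpy_and_pad(dst, sizeof(dst), "abc", 3, 0);  /* dst = "abc\0\0\0\0\0" */
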
@@ -1314,7 +1371,8 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
orig_mask = readl(&reg->host_int_mask);
writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
@@ -1412,7 +1470,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
struct ARCMSR_CDB *pARCMSR_CDB;
bool error;
struct CommandControlBlock *pCCB;
- unsigned long ccb_cdb_phy, cdb_phy_hipart;
+ unsigned long ccb_cdb_phy;
switch (acb->adapter_type) {
@@ -1494,8 +1552,6 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
((toggle ^ 0x4000) + 1);
doneq_index = pmu->doneq_index;
spin_unlock_irqrestore(&acb->doneq_lock, flags);
- cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
- 0xFFF].addressHigh;
addressLow = pmu->done_qbuffer[doneq_index &
0xFFF].addressLow;
ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
@@ -1523,6 +1579,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_E:
arcmsr_hbaE_postqueue_isr(acb);
break;
+ case ACB_ADAPTER_TYPE_F:
+ arcmsr_hbaF_postqueue_isr(acb);
+ break;
}
}
@@ -1539,7 +1598,7 @@ static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_pci_unmap_dma(ccb);
- ccb->pcmd->scsi_done(ccb->pcmd);
+ scsi_done(ccb->pcmd);
}
}
for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
@@ -1577,6 +1636,8 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
pdev = acb->pdev;
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1634,6 +1695,8 @@ static void arcmsr_remove(struct pci_dev *pdev)
}
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1711,7 +1774,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
@@ -1855,6 +1919,23 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
writel(ccb_post_stamp, &pmu->inbound_queueport_low);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *pmu = acb->pmuF;
+ u32 ccb_post_stamp, arc_cdb_size;
+
+ if (ccb->arc_cdb_size <= 0x300)
+ arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
+ else {
+ arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
+ if (arc_cdb_size > 0xF)
+ arc_cdb_size = 0xF;
+ arc_cdb_size = (arc_cdb_size << 1) | 1;
+ }
+ ccb_post_stamp = (ccb->smid | arc_cdb_size);
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
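
The low bits of the word written to inbound_queueport_low carry an encoded
CDB size alongside the SMID, so the firmware knows how many bytes to fetch.
A standalone sketch of the same encoding (arc_cdb_size_field is a name
invented here for illustration):

        static u32 arc_cdb_size_field(u32 size)
        {
                u32 f;

                if (size <= 0x300)                      /* 64-byte granules */
                        return ((size - 1) >> 6) | 1;   /* e.g. 0x100 -> 0x3 */
                f = ((size + 0xff) >> 8) + 2;           /* 256-byte granules */
                if (f > 0xF)
                        f = 0xF;                        /* clamp to 4 bits */
                return (f << 1) | 1;                    /* e.g. 0x400 -> 0xD */
        }
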
@@ -1925,23 +2006,20 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
arcmsr_hbaA_stop_bgrb(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
arcmsr_hbaB_stop_bgrb(acb);
- }
break;
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
arcmsr_hbaC_stop_bgrb(acb);
- }
break;
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
arcmsr_hbaE_stop_bgrb(acb);
break;
}
@@ -1960,7 +2038,6 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
}
break;
-
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
@@ -1978,7 +2055,8 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
writel(acb->out_doorbell, &reg->iobound_doorbell);
@@ -2024,7 +2102,8 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
writel(acb->out_doorbell, &reg->iobound_doorbell);
@@ -2043,7 +2122,6 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
}
break;
-
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
@@ -2064,6 +2142,10 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_F: {
+ qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -2078,7 +2160,6 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
}
break;
-
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
@@ -2099,6 +2180,9 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_F:
+ pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
+ break;
}
return pqbuffer;
}
@@ -2337,10 +2421,17 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
- uint32_t outbound_doorbell, in_doorbell, tmp;
+ uint32_t outbound_doorbell, in_doorbell, tmp, i;
struct MessageUnit_E __iomem *reg = pACB->pmuE;
- in_doorbell = readl(&reg->iobound_doorbell);
+ if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
+ for (i = 0; i < 5; i++) {
+ in_doorbell = readl(&reg->iobound_doorbell);
+ if (in_doorbell != 0)
+ break;
+ }
+ } else
+ in_doorbell = readl(&reg->iobound_doorbell);
outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
do {
writel(0, &reg->host_int_status); /* clear interrupt */
@@ -2450,7 +2541,7 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
struct MessageUnit_D *pmu;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *ccb;
- unsigned long flags, ccb_cdb_phy, cdb_phy_hipart;
+ unsigned long flags, ccb_cdb_phy;
spin_lock_irqsave(&acb->doneq_lock, flags);
pmu = acb->pmuD;
@@ -2464,8 +2555,6 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
((toggle ^ 0x4000) + 1);
doneq_index = pmu->doneq_index;
- cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
- 0xFFF].addressHigh;
addressLow = pmu->done_qbuffer[doneq_index &
0xFFF].addressLow;
ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
@@ -2515,6 +2604,36 @@ static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
+static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_F __iomem *phbcmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ phbcmu = acb->pmuF;
+ while (1) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ if (cmdSMID == 0xffff)
+ break;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &phbcmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
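
The ring above needs no produced-count register: arcmsr_hbaF_assign_regAddr()
pre-poisons the queue to 0xff, the firmware overwrites cmdSMID when it
produces an entry, and the driver re-poisons each slot as it retires it, so
cmdSMID == 0xffff doubles as the empty marker. A distilled view of the
consumer side (complete_ccb stands in for arcmsr_drain_donequeue()):

        while (ring[i].cmdSMID != 0xffff) {             /* slot produced?      */
                complete_ccb(acb->pccb_pool[ring[i].cmdSMID]);
                ring[i].cmdSMID = 0xffff;               /* re-poison = consume */
                if (++i >= n)                           /* n = completionQ_entry */
                        i = 0;
        }
        writel(i, &pmu->reply_post_consumer_index);     /* publish progress    */
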
/*
**********************************************************************************
** Handle a message interrupt
@@ -2705,21 +2824,46 @@ static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
+static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;
+
+ host_interrupt_status = readl(&phbcmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
+ arcmsr_hbaF_postqueue_isr(pACB);
+
+ /* MU ioctl transfer doorbell interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
+ arcmsr_hbaE_doorbell_isr(pACB);
+
+ host_interrupt_status = readl(&phbcmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
return arcmsr_hbaA_handle_isr(acb);
- break;
case ACB_ADAPTER_TYPE_B:
return arcmsr_hbaB_handle_isr(acb);
- break;
case ACB_ADAPTER_TYPE_C:
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
case ACB_ADAPTER_TYPE_E:
return arcmsr_hbaE_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_F:
+ return arcmsr_hbaF_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -3020,10 +3164,12 @@ message_out:
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
- struct list_head *head = &acb->ccb_free_list;
+ struct list_head *head;
struct CommandControlBlock *ccb = NULL;
unsigned long flags;
+
spin_lock_irqsave(&acb->ccblist_lock, flags);
+ head = &acb->ccb_free_list;
if (!list_empty(head)) {
ccb = list_entry(head->next, struct CommandControlBlock, list);
list_del_init(&ccb->list);
@@ -3046,7 +3192,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
if (cmd->device->lun) {
cmd->result = (DID_TIME_OUT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return;
}
inqdata[0] = TYPE_PROCESSOR;
@@ -3057,11 +3203,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
/* ISO, ECMA, & ANSI versions */
inqdata[4] = 31;
/* length of additional data */
- strncpy(&inqdata[8], "Areca ", 8);
+ memcpy(&inqdata[8], "Areca ", 8);
/* Vendor Identification */
- strncpy(&inqdata[16], "RAID controller ", 16);
+ memcpy(&inqdata[16], "RAID controller ", 16);
/* Product Identification */
- strncpy(&inqdata[32], "R001", 4); /* Product Revision */
+ memcpy(&inqdata[32], "R001", 4); /* Product Revision */
sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg)) + sg->offset;
@@ -3070,23 +3216,22 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
break;
case WRITE_BUFFER:
case READ_BUFFER: {
if (arcmsr_iop_message_xfer(acb, cmd))
cmd->result = (DID_ERROR << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
break;
default:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
-static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
- void (* done)(struct scsi_cmnd *))
+static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
@@ -3095,10 +3240,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
cmd->result = (DID_NO_CONNECT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
if (target == 16) {
@@ -3110,8 +3254,8 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
- cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
- cmd->scsi_done(cmd);
+ cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
+ scsi_done(cmd);
return 0;
}
arcmsr_post_ccb(acb, ccb);
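
The queuecommand path loses its done-callback parameter throughout this
patch: the midlayer now records the completion hook itself and drivers call
scsi_done(cmd) directly, which is why the cmd->scsi_done = done assignment
disappears above. Under the new signature, DEF_SCSI_QCMD() roughly expands
to the locked wrapper:

        /* roughly what DEF_SCSI_QCMD(arcmsr_queue_command) generates */
        static int arcmsr_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
        {
                unsigned long irq_flags;
                int rc;

                spin_lock_irqsave(shost->host_lock, irq_flags);
                rc = arcmsr_queue_command_lck(cmd);
                spin_unlock_irqrestore(shost->host_lock, irq_flags);
                return rc;
        }
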
@@ -3120,6 +3264,16 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
+static int arcmsr_slave_config(struct scsi_device *sdev)
+{
+ unsigned int dev_timeout;
+
+ dev_timeout = sdev->request_queue->rq_timeout;
+ if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
+ blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
+ return 0;
+}
+
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
int count;
@@ -3268,6 +3422,31 @@ static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
return true;
}
+static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_F __iomem *reg = pACB->pmuF;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
+ pACB->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
+ return true;
+}
+
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
bool rtn = false;
@@ -3288,6 +3467,9 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_E:
rtn = arcmsr_hbaE_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_F:
+ rtn = arcmsr_hbaF_get_config(acb);
+ break;
default:
break;
}
@@ -3500,7 +3682,7 @@ static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
bool error;
uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
- unsigned long flags, ccb_cdb_phy, cdb_phy_hipart;
+ unsigned long flags, ccb_cdb_phy;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *pCCB;
struct MessageUnit_D *pmu = acb->pmuD;
@@ -3532,8 +3714,6 @@ polling_hbaD_ccb_retry:
((toggle ^ 0x4000) + 1);
doneq_index = pmu->doneq_index;
spin_unlock_irqrestore(&acb->doneq_lock, flags);
- cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
- 0xFFF].addressHigh;
flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
if (acb->cdb_phyadd_hipart)
@@ -3647,23 +3827,20 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
int rtn = 0;
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
- }
break;
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
- }
break;
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
break;
}
@@ -3744,6 +3921,16 @@ static void arcmsr_set_iop_datetime(struct timer_list *t)
writel(pacb->out_doorbell, &reg->iobound_doorbell);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *reg = pacb->pmuF;
+
+ pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
+ pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
}
if (sys_tz.tz_minuteswest)
next_time = ARCMSR_HOURS;
@@ -3769,6 +3956,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
dma_coherent_handle = acb->dma_coherent_handle2;
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
dma_coherent_handle = acb->dma_coherent_handle +
offsetof(struct CommandControlBlock, arcmsr_cdb);
break;
@@ -3886,11 +4074,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
- dma_coherent_handle = acb->dma_coherent_handle2;
- cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
- cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
- writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
- writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
+ writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
@@ -3902,6 +4087,27 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *reg = acb->pmuF;
+
+ acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
+ acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
+ acb->msgcode_rwbuffer[2] = cdb_phyaddr;
+ acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
+ acb->msgcode_rwbuffer[4] = acb->ccbsize;
+ acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
+ acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
+ acb->msgcode_rwbuffer[7] = acb->completeQ_size;
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
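
Note the contrast with the type-E case just above: there msgcode_rwbuffer is
a BAR-mapped register window and must go through writel(), while the type-F
window lives in coherent host DRAM, so plain stores suffice and only the
doorbell itself needs MMIO. Side by side:

        /* type E: message window behind the BAR -> MMIO accessor */
        writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
        /* type F: window in coherent host memory -> plain store */
        acb->msgcode_rwbuffer[2] = cdb_phyaddr;
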
@@ -3950,7 +4156,8 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
do {
if (!(acb->acb_flags & ACB_F_IOP_INITED))
@@ -3965,24 +4172,10 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
static void arcmsr_request_device_map(struct timer_list *t)
{
struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- (acb->acb_flags & ACB_F_BUS_RESET) ||
- (acb->acb_flags & ACB_F_ABORT)) {
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
+ if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
} else {
acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) ==
- atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value,
- atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- return;
- }
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -4012,10 +4205,23 @@ static void arcmsr_request_device_map(struct timer_list *t)
writel(acb->out_doorbell, &reg->iobound_doorbell);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *reg = acb->pmuF;
+ uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);
+
+ if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
+ (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
+ goto nxt6s;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
default:
return;
}
acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+nxt6s:
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
@@ -4097,6 +4303,7 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
arcmsr_hbaD_start_bgrb(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
arcmsr_hbaE_start_bgrb(acb);
break;
}
@@ -4176,7 +4383,8 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
uint32_t i, tmp;
@@ -4303,7 +4511,8 @@ static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
true : false;
}
break;
- case ACB_ADAPTER_TYPE_E:{
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:{
struct MessageUnit_E __iomem *reg = acb->pmuE;
rtn = (readl(&reg->host_diagnostic_3xxx) &
ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
@@ -4402,8 +4611,6 @@ wait_reset_done:
goto wait_reset_done;
}
arcmsr_iop_init(acb);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies +
msecs_to_jiffies(6 * HZ));
@@ -4412,8 +4619,6 @@ wait_reset_done:
pr_notice("arcmsr: scsi bus reset eh returns with success\n");
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies +
msecs_to_jiffies(6 * HZ));
@@ -4483,7 +4688,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1202:
case PCI_DEVICE_ID_ARECA_1210:
raid6 = 0;
- /*FALLTHRU*/
+ fallthrough;
case PCI_DEVICE_ID_ARECA_1120:
case PCI_DEVICE_ID_ARECA_1130:
case PCI_DEVICE_ID_ARECA_1160:
@@ -4506,6 +4711,9 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
+ case PCI_DEVICE_ID_ARECA_1886:
+ type = "NVMe/SAS/SATA";
+ break;
default:
type = "unknown";
raid6 = 0;
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index f34badc75196..9f64133f976a 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -10,17 +10,6 @@ config SCSI_ACORNSCSI_3
This enables support for the Acorn SCSI card (aka30). If you have an
Acorn system with one of these, say Y. If unsure, say N.
-config SCSI_ACORNSCSI_TAGGED_QUEUE
- bool "Support SCSI 2 Tagged queueing"
- depends on SCSI_ACORNSCSI_3
- help
- Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
- This is a feature of SCSI-2 which improves performance: the host
- adapter can send several SCSI commands to a device's queue even if
- previous commands haven't finished yet. Some SCSI devices don't
- implement this properly, so the safe answer is N.
-
config SCSI_ACORNSCSI_SYNC
bool "Support SCSI 2 Synchronous Transfers"
depends on SCSI_ACORNSCSI_3
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index ddb52e7ba622..7602639da9b3 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -52,12 +52,8 @@
 * You can tell if you have a device that supports tagged queueing by
 * cat-ing (e.g.) /proc/scsi/acornscsi/0 and seeing if the SCSI revision
 * is reported as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
*/
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
/*
* SCSI-II Synchronous transfer support.
*
@@ -130,13 +126,17 @@
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_spi.h>
#include "acornscsi.h"
#include "msgqueue.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
@@ -144,12 +144,6 @@
#define VER_MINOR 0
#define VER_PATCH 6
-#ifndef ABORT_TAG
-#define ABORT_TAG 0xd
-#else
-#error "Yippee! ABORT TAG is now defined! Remove this error!"
-#endif
-
#ifdef USE_DMAC
/*
* DMAC setup parameters
@@ -177,7 +171,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
unsigned int result);
static int acornscsi_reconnect_finish(AS_Host *host);
static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
/* ====================================================================================
* Miscellaneous
@@ -735,7 +729,7 @@ intr_ret_t acornscsi_kick(AS_Host *host)
*/
host->scsi.phase = PHASE_CONNECTING;
host->SCpnt = SCpnt;
- host->scsi.SCp = SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(SCpnt);
host->dma.xfer_setup = 0;
host->dma.xfer_required = 0;
host->dma.xfer_done = 0;
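
arm_scsi_pointer() comes from the renamed arm_scsi.h header: the
per-command scsi_pointer no longer lives in struct scsi_cmnd itself but in
driver-private command data reached via scsi_cmd_priv(). A sketch of the
helper, assuming the private layout the arm drivers share:

        struct arm_cmd_priv {
                struct scsi_pointer scsi_pointer;
        };

        static inline struct scsi_pointer *arm_scsi_pointer(struct scsi_cmnd *cmd)
        {
                struct arm_cmd_priv *acmd = scsi_cmd_priv(cmd);  /* <scsi/scsi_cmnd.h> */

                return &acmd->scsi_pointer;
        }
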
@@ -747,17 +741,6 @@ intr_ret_t acornscsi_kick(AS_Host *host)
#endif
if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- /*
- * tagged queueing - allocate a new tag to this command
- */
- if (SCpnt->device->simple_tags) {
- SCpnt->device->current_tag += 1;
- if (SCpnt->device->current_tag == 0)
- SCpnt->device->current_tag = 1;
- SCpnt->tag = SCpnt->device->current_tag;
- } else
-#endif
set_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x07), host->busyluns);
@@ -800,7 +783,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
acornscsi_dma_cleanup(host);
- SCpnt->result = result << 16 | host->scsi.SCp.Message << 8 | host->scsi.SCp.Status;
+ set_host_byte(SCpnt, result);
+ if (result == DID_OK)
+ scsi_msg_to_host_byte(SCpnt, host->scsi.SCp.Message);
+ set_status_byte(SCpnt, host->scsi.SCp.Status);
/*
* In theory, this should not happen. In practice, it seems to.
@@ -839,12 +825,12 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
xfer_warn = 0;
if (xfer_warn) {
- switch (status_byte(SCpnt->result)) {
- case CHECK_CONDITION:
- case COMMAND_TERMINATED:
- case BUSY:
- case QUEUE_FULL:
- case RESERVATION_CONFLICT:
+ switch (get_status_byte(SCpnt)) {
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_RESERVATION_CONFLICT:
break;
default:
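
This switch is part of the tree-wide move from the legacy shifted status
codes to raw SAM values: the old defines were the SAM_STAT_* values shifted
right by one, and get_status_byte() returns the unshifted SAM byte, so the
cases compare against SAM_STAT_* directly. For instance:

        /* legacy (shifted) define        SAM value used now            */
        /* CHECK_CONDITION      0x01  ->  SAM_STAT_CHECK_CONDITION 0x02 */
        /* QUEUE_FULL           0x14  ->  SAM_STAT_TASK_SET_FULL   0x28 */
        BUILD_BUG_ON(SAM_STAT_CHECK_CONDITION != (0x01 << 1)); /* holds in any function */
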
@@ -859,13 +845,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
}
}
- if (!SCpnt->scsi_done)
- panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no);
-
clear_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), host->busyluns);
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
} else
printk("scsi%d: null command in acornscsi_done", host->host->host_no);
@@ -1195,7 +1178,7 @@ void acornscsi_dma_intr(AS_Host *host)
* the device recognises the attention.
*/
if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
dmac_write(host, DMAC_TXCNTLO, 0);
dmac_write(host, DMAC_TXCNTHI, 0);
@@ -1441,6 +1424,7 @@ unsigned char acornscsi_readmessagebyte(AS_Host *host)
static
void acornscsi_message(AS_Host *host)
{
+ struct scsi_pointer *scsi_pointer;
unsigned char message[16];
unsigned int msgidx = 0, msglen = 1;
@@ -1490,8 +1474,8 @@ void acornscsi_message(AS_Host *host)
}
switch (message[0]) {
- case ABORT:
- case ABORT_TAG:
+ case ABORT_TASK_SET:
+ case ABORT_TASK:
case COMMAND_COMPLETE:
if (host->scsi.phase != PHASE_STATUSIN) {
printk(KERN_ERR "scsi%d.%c: command complete following non-status in phase?\n",
@@ -1510,8 +1494,9 @@ void acornscsi_message(AS_Host *host)
* the saved data pointer for the current I/O process.
*/
acornscsi_dma_cleanup(host);
- host->SCpnt->SCp = host->scsi.SCp;
- host->SCpnt->SCp.sent_command = 0;
+ scsi_pointer = arm_scsi_pointer(host->SCpnt);
+ *scsi_pointer = host->scsi.SCp;
+ scsi_pointer->sent_command = 0;
host->scsi.phase = PHASE_MSGIN;
break;
@@ -1526,7 +1511,7 @@ void acornscsi_message(AS_Host *host)
* the present command and status areas.'
*/
acornscsi_dma_cleanup(host);
- host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
host->scsi.phase = PHASE_MSGIN;
break;
@@ -1563,23 +1548,6 @@ void acornscsi_message(AS_Host *host)
acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- case HEAD_OF_QUEUE_TAG:
- case ORDERED_QUEUE_TAG:
- case SIMPLE_QUEUE_TAG:
- /*
- * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
- * If a target does not implement tagged queuing and a queue tag
- * message is received, it shall respond with a MESSAGE REJECT
- * message and accept the I/O process as if it were untagged.
- */
- printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
- host->host->host_no, acornscsi_target(host));
- host->SCpnt->device->simple_tags = 0;
- set_bit(host->SCpnt->device->id * 8 +
- (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
- break;
-#endif
case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
/*
* Target can't handle synchronous transfers
@@ -1596,10 +1564,6 @@ void acornscsi_message(AS_Host *host)
}
break;
- case QUEUE_FULL:
- /* TODO: target queue is full */
- break;
-
case SIMPLE_QUEUE_TAG:
/* tag queue reconnect... message[1] = queue tag. Print something to indicate something happened! */
printk("scsi%d.%c: reconnect queue tag %02X\n",
@@ -1694,24 +1658,11 @@ void acornscsi_buildmessages(AS_Host *host)
#if 0
/* does the device need the current command aborted */
if (cmd_aborted) {
- acornscsi_abortcmd(host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return;
}
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- if (host->SCpnt->tag) {
- unsigned int tag_type;
-
- if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
- host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
- host->SCpnt->cmnd[0] == INQUIRY)
- tag_type = HEAD_OF_QUEUE_TAG;
- else
- tag_type = SIMPLE_QUEUE_TAG;
- msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
- }
-#endif
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
@@ -1805,7 +1756,7 @@ int acornscsi_reconnect(AS_Host *host)
"to reconnect with\n",
host->host->host_no, '0' + target);
acornscsi_dumplog(host, target);
- acornscsi_abortcmd(host, 0);
+ acornscsi_abortcmd(host);
if (host->SCpnt) {
queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
host->SCpnt = NULL;
@@ -1828,7 +1779,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
host->scsi.disconnectable = 0;
if (host->SCpnt->device->id == host->scsi.reconnected.target &&
host->SCpnt->device->lun == host->scsi.reconnected.lun &&
- host->SCpnt->tag == host->scsi.reconnected.tag) {
+ scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
host->host->host_no, acornscsi_target(host)));
@@ -1855,12 +1806,12 @@ int acornscsi_reconnect_finish(AS_Host *host)
}
if (!host->SCpnt)
- acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+ acornscsi_abortcmd(host);
else {
/*
* Restore data pointer from SAVED pointers.
*/
- host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
printk(", data pointers: [%p, %X]",
host->scsi.SCp.ptr, host->scsi.SCp.this_residual);
@@ -1896,21 +1847,15 @@ void acornscsi_disconnect_unexpected(AS_Host *host)
 * Function: void acornscsi_abortcmd(AS_Host *host)
* Purpose : abort a currently executing command
* Params : host - host with connected command to abort
- * tag - tag to abort
*/
static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
{
host->scsi.phase = PHASE_ABORTED;
sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- if (tag)
- msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
- else
-#endif
- msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+ msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
}
/* ==========================================================================================
@@ -2000,7 +1945,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
}
return INTR_PROCESSING;
@@ -2036,7 +1981,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
}
return INTR_PROCESSING;
@@ -2082,20 +2027,20 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x18: /* -> PHASE_DATAOUT */
/* COMMAND -> DATA OUT */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE;
case 0x19: /* -> PHASE_DATAIN */
/* COMMAND -> DATA IN */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE;
@@ -2163,7 +2108,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA OUT */
acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE;
@@ -2172,7 +2117,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA IN */
acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE;
@@ -2213,7 +2158,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) {
case 0x19: /* -> PHASE_DATAIN */
case 0x89: /* -> PHASE_DATAIN */
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */
@@ -2262,7 +2207,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) {
case 0x18: /* -> PHASE_DATAOUT */
case 0x88: /* -> PHASE_DATAOUT */
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */
@@ -2458,41 +2403,32 @@ acornscsi_intr(int irq, void *dev_id)
*/
/*
- * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd)
* Purpose : queues a SCSI command
* Params : cmd - SCSI command
- * done - function called on completion, with pointer to command descriptor
* Returns : 0, or < 0 on error.
*/
-static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+ void (*done)(struct scsi_cmnd *) = scsi_done;
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
- if (!done) {
- /* there should be some way of rejecting errors like this without panicing... */
- panic("scsi%d: queuecommand called with NULL done function [cmd=%p]",
- host->host->host_no, SCpnt);
- return -EINVAL;
- }
-
#if (DEBUG & DEBUG_NO_WRITE)
if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
host->host->host_no, '0' + SCpnt->device->id);
- SCpnt->result = DID_NO_CONNECT << 16;
+ set_host_byte(SCpnt, DID_NO_CONNECT);
done(SCpnt);
return 0;
}
#endif
- SCpnt->scsi_done = done;
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
- SCpnt->tag = 0;
- SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
- SCpnt->SCp.sent_command = 0;
- SCpnt->SCp.scsi_xferred = 0;
+ scsi_pointer->phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->scsi_xferred = 0;
init_SCp(SCpnt);
@@ -2502,7 +2438,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
unsigned long flags;
if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) {
- SCpnt->result = DID_ERROR << 16;
+ set_host_byte(SCpnt, DID_ERROR);
done(SCpnt);
return 0;
}
@@ -2516,31 +2452,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
DEF_SCSI_QCMD(acornscsi_queuecmd)
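This is the tree-wide queuecommand signature change: the done callback argument is gone and completion goes through the midlayer's scsi_done(). A minimal sketch of the resulting pattern — the mydrv_* names are hypothetical:

        static int mydrv_queuecommand_lck(struct scsi_cmnd *cmd)
        {
                if (!mydrv_target_present(cmd)) {       /* hypothetical check */
                        set_host_byte(cmd, DID_NO_CONNECT);
                        scsi_done(cmd);                 /* complete immediately */
                        return 0;
                }
                mydrv_submit(cmd);                      /* hypothetical submit */
                return 0;
        }
        /* expands to mydrv_queuecommand(), which wraps _lck() in host_lock */
        DEF_SCSI_QCMD(mydrv_queuecommand)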
-/*
- * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
- * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
- * Params : SCpntp1 - pointer to command to return
- * SCpntp2 - pointer to command to check
- * result - result to pass back to mid-level done function
- * Returns : *SCpntp2 = NULL if *SCpntp1 is the same command structure as *SCpntp2.
- */
-static inline void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1,
- struct scsi_cmnd **SCpntp2,
- int result)
-{
- struct scsi_cmnd *SCpnt = *SCpntp1;
-
- if (SCpnt) {
- *SCpntp1 = NULL;
-
- SCpnt->result = result;
- SCpnt->scsi_done(SCpnt);
- }
-
- if (SCpnt == *SCpntp2)
- *SCpntp2 = NULL;
-}
-
enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
/*
@@ -2613,7 +2524,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
break;
default:
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
res = res_snooze;
}
local_irq_restore(flags);
@@ -2674,6 +2585,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
//#endif
clear_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), host->busyluns);
+ fallthrough;
/*
* We found the command, and cleared it out. Either
@@ -2778,9 +2690,6 @@ char *acornscsi_info(struct Scsi_Host *host)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- " TAG"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2801,9 +2710,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- " TAG"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2858,9 +2764,8 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "Device/Lun TaggedQ Sync\n");
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
- seq_printf(m, "%3sabled(%3d) ",
- scd->simple_tags ? "en" : "dis",
- scd->current_tag);
+ seq_printf(m, "%3sabled ",
+ scd->simple_tags ? "en" : "dis");
else
seq_printf(m, "unsupported ");
@@ -2889,6 +2794,7 @@ static struct scsi_host_template acornscsi_template = {
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "acornscsi",
+ .cmd_size = sizeof(struct arm_cmd_priv),
};
static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
@@ -2911,8 +2817,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (!ashost->base || !ashost->fast)
+ if (!ashost->base || !ashost->fast) {
+ ret = -ENOMEM;
goto out_put;
+ }
host->irq = ec->irq;
ashost->host = host;
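One idiom worth decoding once, since it recurs throughout the series: fallthrough; is the kernel's pseudo-keyword (expanding to __attribute__((fallthrough)) on compilers that support it) and replaces the bare /* fall through */ comments that -Wimplicit-fallthrough cannot verify. Roughly, with hypothetical helpers:

        switch (phase) {
        case PHASE_DATAOUT:
                start_dma();            /* hypothetical */
                fallthrough;            /* deliberate: no break */
        case PHASE_STATUSIN:
                finish_cmd();           /* hypothetical */
                break;
        }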
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/arm_scsi.h
index 4d5ff7b4e864..ea9fcd92c6de 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/arm_scsi.h
@@ -1,16 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/drivers/acorn/scsi/scsi.h
- *
* Copyright (C) 2002 Russell King
*
- * Commonly used scsi driver functions.
+ * Commonly used functions by the ARM SCSI-II drivers.
*/
#include <linux/scatterlist.h>
#define BELT_AND_BRACES
+struct arm_cmd_priv {
+ struct scsi_pointer scsi_pointer;
+};
+
+static inline struct scsi_pointer *arm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ struct arm_cmd_priv *acmd = scsi_cmd_priv(cmd);
+
+ return &acmd->scsi_pointer;
+}
+
/*
* The scatter-gather list handling. This contains all
* the yucky stuff that needs to be fixed properly.
@@ -78,16 +87,18 @@ static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
static inline void init_SCp(struct scsi_cmnd *SCpnt)
{
- memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
+ memset(scsi_pointer, 0, sizeof(struct scsi_pointer));
if (scsi_bufflen(SCpnt)) {
unsigned long len = 0;
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
- SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.phase = scsi_bufflen(SCpnt);
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->phase = scsi_bufflen(SCpnt);
#ifdef BELT_AND_BRACES
{ /*
@@ -111,15 +122,15 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
 * FIXME: Totally naive fixup. We should abort
* with error
*/
- SCpnt->SCp.phase =
+ scsi_pointer->phase =
min_t(unsigned long, len,
scsi_bufflen(SCpnt));
}
}
#endif
} else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- SCpnt->SCp.phase = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->phase = 0;
}
}
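A note on the mechanism the new header relies on (a sketch, not part of the patch): when a host template sets .cmd_size, the SCSI midlayer allocates that many extra bytes behind each struct scsi_cmnd, and scsi_cmd_priv() hands back a pointer to them. That is all arm_scsi_pointer() does — recover the relocated scsi_pointer. The example_* names below are illustrative only:

        static struct scsi_host_template example_template = {
                /* ... */
                .cmd_size = sizeof(struct arm_cmd_priv), /* per-command private area */
        };

        static void example_reset_pointer(struct scsi_cmnd *cmd)
        {
                /* valid only because .cmd_size was declared above */
                struct scsi_pointer *scp = arm_scsi_pointer(cmd);

                memset(scp, 0, sizeof(*scp));
        }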
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 591414120754..2527b542bcdd 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -35,8 +35,12 @@
#include <asm/io.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
struct arxescsi_info {
@@ -243,6 +247,7 @@ static struct scsi_host_template arxescsi_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 0,
.this_id = 7,
.sg_tablesize = SG_ALL,
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 3fd944374631..5d4f67ba74c0 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -223,7 +223,7 @@ static struct scsi_host_template cumanascsi_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.proc_name = "CumanaSCSI-1",
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
.dma_boundary = PAGE_SIZE - 1,
};
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index a1f3e9ee4e63..d15053f02472 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -23,16 +23,20 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
@@ -166,14 +170,15 @@ cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
- dma_dir = DMA_MODE_WRITE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
+ dma_dir = DMA_MODE_WRITE;
alatch_dir = ALATCH_DMA_OUT;
- else
- map_dir = DMA_FROM_DEVICE,
- dma_dir = DMA_MODE_READ,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
+ dma_dir = DMA_MODE_READ;
alatch_dir = ALATCH_DMA_IN;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
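The branch rewrite above is more than reindentation. The old code chained the assignments with the C comma operator, which does keep all of them under the conditional (they form one expression statement), but a single stray semicolon in a later edit silently moves the tail assignments outside the if/else. In miniature:

        /* fragile: legal C, but a ','->';' typo changes the guarded scope */
        if (out)
                a = 1,
                b = 2,
                c = 3;

        /* explicit and robust */
        if (out) {
                a = 1;
                b = 2;
                c = 3;
        }

The same fix is repeated in eesox.c and powertec.c below.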
@@ -326,10 +331,12 @@ cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
cumanascsi_2_terminator_ctl(host, 0);
else
ret = -EINVAL;
- } else
+ } else {
ret = -EINVAL;
- } else
+ }
+ } else {
ret = -EINVAL;
+ }
return ret;
}
@@ -360,6 +367,7 @@ static struct scsi_host_template cumanascsi2_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
@@ -450,7 +458,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_release:
fas216_release(host);
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 134f040d58e2..6f374af9f45f 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -29,16 +29,20 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/ecard.h>
-#include <asm/pgtable.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
@@ -165,12 +169,13 @@ eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
dma_dir = DMA_MODE_WRITE;
- else
- map_dir = DMA_FROM_DEVICE,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
dma_dir = DMA_MODE_READ;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
@@ -479,6 +484,7 @@ static struct scsi_host_template eesox_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
@@ -571,7 +577,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_remove:
fas216_remove(host);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 6c68c2303638..4ce0b2d73614 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -47,11 +47,15 @@
#include <asm/irq.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
/* NOTE: SCSI2 Synchronous transfers *require* DMA according to
* the data sheet. This restriction is crazy, especially when
@@ -77,7 +81,6 @@
* I was thinking that this was a good chip until I found this restriction ;(
*/
#define SCSI2_SYNC
-#undef SCSI2_TAG
#undef DEBUG_CONNECT
#undef DEBUG_MESSAGES
@@ -603,7 +606,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg)
msgqueue_flush(&info->scsi.msgs);
msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
info->scsi.phase = PHASE_MSGOUT_EXPECT;
- /* fall through */
+ fallthrough;
case async:
dev->period = info->ifcfg.asyncperiod / 4;
@@ -758,7 +761,7 @@ static void fas216_transfer(FAS216_Info *info)
fas216_log(info, LOG_ERROR, "null buffer passed to "
"fas216_starttransfer");
print_SCp(&info->scsi.SCp, "SCp: ", "\n");
- print_SCp(&info->SCpnt->SCp, "Cmnd SCp: ", "\n");
+ print_SCp(arm_scsi_pointer(info->SCpnt), "Cmnd SCp: ", "\n");
return;
}
@@ -916,7 +919,7 @@ static void fas216_disconnect_intr(FAS216_Info *info)
fas216_done(info, DID_ABORT);
break;
}
- /* else, fall through */
+ fallthrough;
default: /* huh? */
printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
@@ -990,7 +993,7 @@ fas216_reselected_intr(FAS216_Info *info)
info->scsi.disconnectable = 0;
if (info->SCpnt->device->id == target &&
info->SCpnt->device->lun == lun &&
- info->SCpnt->tag == tag) {
+ scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
} else {
queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
@@ -1008,7 +1011,7 @@ fas216_reselected_intr(FAS216_Info *info)
/*
* Restore data pointer from SAVED data pointer
*/
- info->scsi.SCp = info->SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1051,6 +1054,7 @@ fas216_reselected_intr(FAS216_Info *info)
static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen)
{
+ struct scsi_pointer *scsi_pointer;
int i;
switch (message[0]) {
@@ -1075,8 +1079,9 @@ static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int
* as required by the SCSI II standard. These always
* point to the start of their respective areas.
*/
- info->SCpnt->SCp = info->scsi.SCp;
- info->SCpnt->SCp.sent_command = 0;
+ scsi_pointer = arm_scsi_pointer(info->SCpnt);
+ *scsi_pointer = info->scsi.SCp;
+ scsi_pointer->sent_command = 0;
fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
"save data pointers: [%p, %X]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1089,7 +1094,7 @@ static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int
/*
* Restore current data pointer from SAVED data pointer
*/
- info->scsi.SCp = info->SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
"restore data pointers: [%p, 0x%x]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1375,6 +1380,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case IS_COMPLETE:
break;
}
+ break;
default:
break;
@@ -1413,7 +1419,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */
case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */
fas216_stoptransfer(info);
- /* fall through */
+ fallthrough;
case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */
case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */
@@ -1426,7 +1432,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */
case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */
fas216_stoptransfer(info);
- /* fall through */
+ fallthrough;
case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */
case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */
@@ -1479,7 +1485,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
if (msgqueue_msglength(&info->scsi.msgs) > 1)
fas216_cmd(info, CMD_SETATN);
- /*FALLTHROUGH*/
+ fallthrough;
/*
* Any -> Message Out
@@ -1581,7 +1587,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned
fas216_message(info);
break;
}
- /* else, fall through */
+ fallthrough;
default:
fas216_log(info, 0, "internal phase %s for function done?"
@@ -1766,7 +1772,7 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
* claim host busy
*/
info->scsi.phase = PHASE_SELECTION;
- info->scsi.SCp = SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(SCpnt);
info->SCpnt = SCpnt;
info->dma.transfer_type = fasdma_none;
@@ -1790,8 +1796,9 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
/*
* add tag message if required
*/
- if (SCpnt->tag)
- msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+ if (SCpnt->device->simple_tags)
+ msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+ scsi_cmd_to_rq(SCpnt)->tag);
do {
#ifdef SCSI2_SYNC
@@ -1814,20 +1821,8 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
{
-#ifdef SCSI2_TAG
- /*
- * tagged queuing - allocate a new tag to this command
- */
- if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
- SCpnt->cmnd[0] != INQUIRY) {
- SCpnt->device->current_tag += 1;
- if (SCpnt->device->current_tag == 0)
- SCpnt->device->current_tag = 1;
- SCpnt->tag = SCpnt->device->current_tag;
- } else
-#endif
- set_bit(SCpnt->device->id * 8 +
- (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
info->stats.removes += 1;
switch (SCpnt->cmnd[0]) {
@@ -1856,7 +1851,7 @@ static void fas216_do_bus_device_reset(FAS216_Info *info,
* claim host busy
*/
info->scsi.phase = PHASE_SELECTION;
- info->scsi.SCp = SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(SCpnt);
info->SCpnt = SCpnt;
info->dma.transfer_type = fasdma_none;
@@ -1964,7 +1959,7 @@ static void fas216_kick(FAS216_Info *info)
switch (where_from) {
case TYPE_QUEUE:
fas216_allocate_tag(info, SCpnt);
- /* fall through */
+ fallthrough;
case TYPE_OTHER:
fas216_start_command(info, SCpnt);
break;
@@ -2006,11 +2001,13 @@ static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
unsigned int result)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"request sense complete, result=0x%04x%02x%02x",
- result, SCpnt->SCp.Message, SCpnt->SCp.Status);
+ result, scsi_pointer->Message, scsi_pointer->Status);
- if (result != DID_OK || SCpnt->SCp.Status != GOOD)
+ if (result != DID_OK || scsi_pointer->Status != SAM_STAT_GOOD)
/*
* Something went wrong. Make sure that we don't
* have valid data in the sense buffer that could
@@ -2026,7 +2023,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* correctly by fas216_std_done.
*/
scsi_eh_restore_cmnd(SCpnt, &info->ses);
- SCpnt->scsi_done(SCpnt);
+ fas216_cmd_priv(SCpnt)->scsi_done(SCpnt);
}
/**
@@ -2040,10 +2037,14 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
static void
fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
info->stats.fins += 1;
- SCpnt->result = result << 16 | info->scsi.SCp.Message << 8 |
- info->scsi.SCp.Status;
+ set_host_byte(SCpnt, result);
+ if (result == DID_OK)
+ scsi_msg_to_host_byte(SCpnt, info->scsi.SCp.Message);
+ set_status_byte(SCpnt, info->scsi.SCp.Status);
fas216_log_command(info, LOG_CONNECT, SCpnt,
"command complete, result=0x%08x", SCpnt->result);
@@ -2051,23 +2052,22 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
/*
* If the driver detected an error, we're all done.
*/
- if (host_byte(SCpnt->result) != DID_OK ||
- msg_byte(SCpnt->result) != COMMAND_COMPLETE)
+ if (get_host_byte(SCpnt) != DID_OK)
goto done;
/*
* If the command returned CHECK_CONDITION or COMMAND_TERMINATED
* status, request the sense information.
*/
- if (status_byte(SCpnt->result) == CHECK_CONDITION ||
- status_byte(SCpnt->result) == COMMAND_TERMINATED)
+ if (get_status_byte(SCpnt) == SAM_STAT_CHECK_CONDITION ||
+ get_status_byte(SCpnt) == SAM_STAT_COMMAND_TERMINATED)
goto request_sense;
/*
* If the command did not complete with GOOD status,
* we are all done here.
*/
- if (status_byte(SCpnt->result) != GOOD)
+ if (get_status_byte(SCpnt) != SAM_STAT_GOOD)
goto done;
/*
@@ -2096,8 +2096,8 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
}
done:
- if (SCpnt->scsi_done) {
- SCpnt->scsi_done(SCpnt);
+ if (fas216_cmd_priv(SCpnt)->scsi_done) {
+ fas216_cmd_priv(SCpnt)->scsi_done(SCpnt);
return;
}
@@ -2113,9 +2113,8 @@ request_sense:
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"requesting sense");
init_SCp(SCpnt);
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.Status = 0;
- SCpnt->tag = 0;
+ scsi_pointer->Message = 0;
+ scsi_pointer->Status = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done;
/*
@@ -2195,7 +2194,7 @@ no_command:
}
/**
- * fas216_queue_command - queue a command for adapter to process.
+ * fas216_queue_command_internal - queue a command for the adapter to process
* @SCpnt: Command to queue
* @done: done function to call once command is complete
*
@@ -2203,8 +2202,8 @@ no_command:
* Returns: 0 on success, else error.
* Notes: io_request_lock is held, interrupts are disabled.
*/
-static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
int result;
@@ -2214,14 +2213,13 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
fas216_log_command(info, LOG_CONNECT, SCpnt,
"received command (%p)", SCpnt);
- SCpnt->scsi_done = done;
+ fas216_cmd_priv(SCpnt)->scsi_done = done;
SCpnt->host_scribble = (void *)fas216_std_done;
SCpnt->result = 0;
init_SCp(SCpnt);
info->stats.queues += 1;
- SCpnt->tag = 0;
spin_lock(&info->host_lock);
@@ -2245,6 +2243,11 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
return result;
}
+static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt)
+{
+ return fas216_queue_command_internal(SCpnt, scsi_done);
+}
+
DEF_SCSI_QCMD(fas216_queue_command)
/**
@@ -2270,8 +2273,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt)
* Returns: scsi result code.
* Notes: io_request_lock is held, interrupts are disabled.
*/
-static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt)
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
@@ -2284,7 +2286,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
BUG_ON(info->scsi.irq);
info->internal_done = 0;
- fas216_queue_command_lck(SCpnt, fas216_internal_done);
+ fas216_queue_command_internal(SCpnt, fas216_internal_done);
/*
* This wastes time, since we can't return until the command is
@@ -2312,7 +2314,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
spin_lock_irq(info->host->host_lock);
- done(SCpnt);
+ scsi_done(SCpnt);
return 0;
}
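The split into fas216_queue_command_internal() exists because the driver has two submission paths: the normal queued path completes through scsi_done(), while the polled (no-IRQ) path substitutes a private completion and busy-waits on a flag. A sketch of the pairing — the flag-raising body is an assumption from the surrounding context:

        /* queued path */
        fas216_queue_command_internal(SCpnt, scsi_done);

        /* polled path: done callback just raises a flag the caller spins on */
        static void fas216_internal_done(struct scsi_cmnd *SCpnt)
        {
                FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;

                info->internal_done = 1;
        }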
@@ -3001,9 +3003,8 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
dev = &info->device[scd->id];
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
- seq_printf(m, "%3sabled(%3d) ",
- scd->simple_tags ? "en" : "dis",
- scd->current_tag);
+ seq_printf(m, "%3sabled ",
+ scd->simple_tags ? "en" : "dis");
else
seq_puts(m, "unsupported ");
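The fas216_std_done() hunk above also swaps the open-coded result packing (host byte << 16 | message byte << 8 | status) for the accessors in <scsi/scsi_cmnd.h>. Roughly, for readers mapping old to new (handle_sense() is hypothetical):

        set_host_byte(cmd, DID_ERROR);          /* rewrites bits 16..23 of cmd->result */
        set_status_byte(cmd, SAM_STAT_GOOD);    /* rewrites bits 0..7 */
        if (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)
                handle_sense(cmd);              /* hypothetical */

scsi_msg_to_host_byte() folds a SCSI message byte into the host byte instead of storing it in the now-removed message-byte field, which is why it is only applied when the result is otherwise DID_OK.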
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 847413ce14cf..08113277a2a9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -310,6 +310,20 @@ typedef struct {
unsigned long magic_end;
} FAS216_Info;
+/* driver-private data per SCSI command. */
+struct fas216_cmd_priv {
+ /*
+ * @scsi_pointer must be the first member. See also arm_scsi_pointer().
+ */
+ struct scsi_pointer scsi_pointer;
+ void (*scsi_done)(struct scsi_cmnd *cmd);
+};
+
+static inline struct fas216_cmd_priv *fas216_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/* Function: int fas216_init (struct Scsi_Host *instance)
* Purpose : initialise FAS/NCR/AMD SCSI structures.
* Params : instance - a driver-specific filled-out structure
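Why the private struct carries a function pointer at all: fas216 completes commands from interrupt context long after queuecommand has returned, and the scsi_cmnd field it used to stash the callback in no longer exists. The save/complete pair therefore becomes (a sketch of the pattern used in fas216.c above):

        /* at queue time */
        fas216_cmd_priv(SCpnt)->scsi_done = scsi_done;

        /* at completion time, e.g. from the ISR */
        void (*done)(struct scsi_cmnd *) = fas216_cmd_priv(SCpnt)->scsi_done;
        done(SCpnt);

The comment requiring scsi_pointer to stay the first member keeps arm_scsi_pointer(), which casts the generic private area, valid for fas216 commands as well.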
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 7c9d361e91a9..f18a0620c808 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -113,14 +113,14 @@ static struct scsi_host_template oakscsi_template = {
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "oakscsi",
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
- int ret = -ENOMEM;
+ int ret;
ret = ecard_request_resources(ec);
if (ret)
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index c795537a671c..7586d2a03812 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -14,16 +14,20 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
@@ -138,12 +142,13 @@ powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
dma_dir = DMA_MODE_WRITE;
- else
- map_dir = DMA_FROM_DEVICE,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
dma_dir = DMA_MODE_READ;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
@@ -285,7 +290,7 @@ static struct scsi_host_template powertecscsi_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
-
+ .cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 8,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
@@ -378,7 +383,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_release:
fas216_release(host);
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index e5559f27669d..978df23ce188 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -20,7 +20,11 @@
#include <linux/list.h>
#include <linux/init.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#define DEBUG
@@ -214,7 +218,7 @@ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
list_for_each(l, &queue->head) {
QE_t *q = list_entry(l, QE_t, list);
if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
- q->SCpnt->tag == tag) {
+ scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
SCpnt = __queue_remove(queue, l);
break;
}
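queue.c shows the other half of the tag conversion: the tag sent in the SIMPLE_QUEUE_TAG message is now the block layer's own, so reselection has to map each command back to its request to compare. A sketch of the match predicate, with illustrative naming:

        static bool example_tag_match(struct scsi_cmnd *cmd, int target,
                                      int lun, int tag)
        {
                return cmd->device->id == target &&
                       cmd->device->lun == lun &&
                       scsi_cmd_to_rq(cmd)->tag == tag;
        }

Because blk-mq tags are allocated per host queue, the driver no longer maintains a current_tag counter per device — hence the proc output changes earlier in this patch.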
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a82b63a66635..d401cf27113a 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -376,15 +376,11 @@ static int falcon_get_lock(struct Scsi_Host *instance)
if (IS_A_TT())
return 1;
- if (stdma_is_locked_by(scsi_falcon_intr) &&
- instance->hostt->can_queue > 1)
+ if (stdma_is_locked_by(scsi_falcon_intr))
return 1;
- if (in_interrupt())
- return stdma_try_lock(scsi_falcon_intr, instance);
-
- stdma_lock(scsi_falcon_intr, instance);
- return 1;
+ /* stdma_lock() may sleep which means it can't be used here */
+ return stdma_try_lock(scsi_falcon_intr, instance);
}
#ifndef MODULE
@@ -542,7 +538,7 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int wanted_len = cmd->SCp.this_residual;
+ int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
@@ -614,7 +610,7 @@ static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
}
/* Last step: apply the hard limit on DMA transfers */
- limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
+ limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(NCR5380_to_ncmd(cmd)->ptr))) ?
STRAM_BUFFER_SIZE : 255*512;
if (possible_len > limit)
possible_len = limit;
@@ -715,7 +711,7 @@ static struct scsi_host_template atari_scsi_template = {
.this_id = 7,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int __init atari_scsi_probe(struct platform_device *pdev)
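Two small things in the atari_scsi hunks deserve a gloss. NCR5380_CMD_SIZE was simply an alias for sizeof(struct NCR5380_cmd), so spelling the sizeof out at each template is a behavioral no-op that makes the .cmd_size convention visible. NCR5380_to_ncmd() is the NCR5380 core's accessor for that per-command area, as used above for this_residual and ptr:

        /* per NCR5380.h: recover the core's per-command state */
        struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
        int wanted_len = ncmd->this_residual;

The falcon_get_lock() change is a correctness fix of its own: as the added comment notes, this path can run in atomic context, so the sleeping stdma_lock() is replaced by an unconditional stdma_try_lock().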
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index c6a752309dda..7143418d690f 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -42,7 +42,8 @@
static struct scsi_host_template atp870u_template;
static void send_s870(struct atp_unit *dev,unsigned char c);
-static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode);
+static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip,
+ unsigned char lvdmode);
static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val)
{
@@ -137,16 +138,17 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
}
if ((j & 0x80) == 0)
return IRQ_NONE;
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("atp870u_intr_handle enter\n");
-#endif
+#endif
dev->in_int[c] = 1;
cmdp = atp_readb_io(dev, c, 0x10);
if (dev->working[c] != 0) {
if (is885(dev)) {
if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0)
- atp_writeb_io(dev, c, 0x16, (atp_readb_io(dev, c, 0x16) | 0x80));
- }
+ atp_writeb_io(dev, c, 0x16,
+ (atp_readb_io(dev, c, 0x16) | 0x80));
+ }
if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0)
{
for (k=0; k < 1000; k++) {
@@ -157,9 +159,9 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
}
}
atp_writeb_pci(dev, c, 0, 0x00);
-
+
i = atp_readb_io(dev, c, 0x17);
-
+
if (is885(dev))
atp_writeb_pci(dev, c, 2, 0x06);
@@ -185,44 +187,51 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->r1f[c][target_id] |= j;
#ifdef ED_DBGP
printk("atp870u_intr_handle status = %x\n",i);
-#endif
+#endif
if (i == 0x85) {
if ((dev->last_cmd[c] & 0xf0) != 0x40) {
dev->last_cmd[c] = 0xff;
}
if (is885(dev)) {
adrcnt = 0;
- ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
- ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
- ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
+ ((unsigned char *) &adrcnt)[2] =
+ atp_readb_io(dev, c, 0x12);
+ ((unsigned char *) &adrcnt)[1] =
+ atp_readb_io(dev, c, 0x13);
+ ((unsigned char *) &adrcnt)[0] =
+ atp_readb_io(dev, c, 0x14);
if (dev->id[c][target_id].last_len != adrcnt) {
k = dev->id[c][target_id].last_len;
- k -= adrcnt;
- dev->id[c][target_id].tran_len = k;
+ k -= adrcnt;
+ dev->id[c][target_id].tran_len = k;
dev->id[c][target_id].last_len = adrcnt;
}
#ifdef ED_DBGP
- printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len);
-#endif
+ printk("dev->id[c][target_id].last_len = %d "
+ "dev->id[c][target_id].tran_len = %d\n",
+ dev->id[c][target_id].last_len,
+ dev->id[c][target_id].tran_len);
+#endif
}
/*
* Flip wide
- */
+ */
if (dev->wide_id[c] != 0) {
atp_writeb_io(dev, c, 0x1b, 0x01);
while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01)
atp_writeb_io(dev, c, 0x1b, 0x01);
- }
+ }
/*
* Issue more commands
*/
- spin_lock_irqsave(dev->host->host_lock, flags);
- if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) &&
+ spin_lock_irqsave(dev->host->host_lock, flags);
+ if (((dev->quhd[c] != dev->quend[c]) ||
+ (dev->last_cmd[c] != 0xff)) &&
(dev->in_snd[c] == 0)) {
#ifdef ED_DBGP
printk("Call sent_s870\n");
-#endif
+#endif
send_s870(dev,c);
}
spin_unlock_irqrestore(dev->host->host_lock, flags);
@@ -232,7 +241,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("Status 0x85 return\n");
-#endif
+#endif
return IRQ_HANDLED;
}
@@ -247,9 +256,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->last_cmd[c] = 0xff;
}
adrcnt = 0;
- ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
- ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
- ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
+ ((unsigned char *) &adrcnt)[2] =
+ atp_readb_io(dev, c, 0x12);
+ ((unsigned char *) &adrcnt)[1] =
+ atp_readb_io(dev, c, 0x13);
+ ((unsigned char *) &adrcnt)[0] =
+ atp_readb_io(dev, c, 0x14);
k = dev->id[c][target_id].last_len;
k -= adrcnt;
dev->id[c][target_id].tran_len = k;
@@ -262,17 +274,16 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (is885(dev)) {
if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) {
- if ((i == 0x4c) || (i == 0x8c))
- i=0x48;
- else
- i=0x49;
- }
-
+ if ((i == 0x4c) || (i == 0x8c))
+ i=0x48;
+ else
+ i=0x49;
+ }
}
if ((i == 0x80) || (i == 0x8f)) {
#ifdef ED_DBGP
printk(KERN_DEBUG "Device reselect\n");
-#endif
+#endif
lun = 0;
if (cmdp == 0x44 || i == 0x80)
lun = atp_readb_io(dev, c, 0x1d) & 0x07;
@@ -283,11 +294,14 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (cmdp == 0x41) {
#ifdef ED_DBGP
printk("cmdp = 0x41\n");
-#endif
+#endif
adrcnt = 0;
- ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
- ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
- ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
+ ((unsigned char *) &adrcnt)[2] =
+ atp_readb_io(dev, c, 0x12);
+ ((unsigned char *) &adrcnt)[1] =
+ atp_readb_io(dev, c, 0x13);
+ ((unsigned char *) &adrcnt)[0] =
+ atp_readb_io(dev, c, 0x14);
k = dev->id[c][target_id].last_len;
k -= adrcnt;
dev->id[c][target_id].tran_len = k;
@@ -298,7 +312,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
} else {
#ifdef ED_DBGP
printk("cmdp != 0x41\n");
-#endif
+#endif
atp_writeb_io(dev, c, 0x10, 0x46);
dev->id[c][target_id].dirct = 0x00;
atp_writeb_io(dev, c, 0x12, 0x00);
@@ -330,13 +344,13 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (is885(dev))
atp_writeb_io(dev, c, 0x10, 0x45);
workreq = dev->id[c][target_id].curr_req;
-#ifdef ED_DBGP
+#ifdef ED_DBGP
scmd_printk(KERN_DEBUG, workreq, "CDB");
for (l = 0; l < workreq->cmd_len; l++)
printk(KERN_DEBUG " %x",workreq->cmnd[l]);
printk("\n");
-#endif
-
+#endif
+
atp_writeb_io(dev, c, 0x0f, lun);
atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp);
adrcnt = dev->id[c][target_id].tran_len;
@@ -345,9 +359,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]);
atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]);
atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]);
-#ifdef ED_DBGP
- printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, atp_readb_io(dev, c, 0x14), atp_readb_io(dev, c, 0x13), atp_readb_io(dev, c, 0x12));
-#endif
+#ifdef ED_DBGP
+ printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k,
+ atp_readb_io(dev, c, 0x14),
+ atp_readb_io(dev, c, 0x13),
+ atp_readb_io(dev, c, 0x12));
+#endif
/* Remap wide */
j = target_id;
if (target_id > 7) {
@@ -357,26 +374,39 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
j |= dev->id[c][target_id].dirct;
atp_writeb_io(dev, c, 0x15, j);
atp_writeb_io(dev, c, 0x16, 0x80);
-
- /* enable 32 bit fifo transfer */
+
+ /* enable 32 bit fifo transfer */
if (is885(dev)) {
i = atp_readb_pci(dev, c, 1) & 0xf3;
- //j=workreq->cmnd[0];
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ //j=workreq->cmnd[0];
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10)) {
i |= 0x0c;
}
atp_writeb_pci(dev, c, 1, i);
} else if (is880(dev)) {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3b,
+ (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
else
- atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f);
- } else {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
+ atp_writeb_base(dev, 0x3b,
+ atp_readb_base(dev, 0x3b) & 0x3f);
+ } else {
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3a,
+ (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
else
- atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3);
- }
+ atp_writeb_base(dev, 0x3a,
+ atp_readb_base(dev, 0x3a) & 0xf3);
+ }
j = 0;
id = 1;
id = id << target_id;
@@ -394,12 +424,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("dev->id[c][target_id].last_len = 0\n");
-#endif
+#endif
return IRQ_HANDLED;
}
#ifdef ED_DBGP
printk("target_id = %d adrcnt = %d\n",target_id,adrcnt);
-#endif
+#endif
prd = dev->id[c][target_id].prd_pos;
while (adrcnt != 0) {
id = ((unsigned short int *)prd)[2];
@@ -409,8 +439,8 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
k = id;
}
if (k > adrcnt) {
- ((unsigned short int *)prd)[2] = (unsigned short int)
- (k - adrcnt);
+ ((unsigned short int *)prd)[2] =
+ (unsigned short int)(k - adrcnt);
((unsigned long *)prd)[0] += adrcnt;
adrcnt = 0;
dev->id[c][target_id].prd_pos = prd;
@@ -421,11 +451,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (adrcnt == 0) {
dev->id[c][target_id].prd_pos = prd;
}
- }
+ }
}
atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr);
#ifdef ED_DBGP
- printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr);
+ printk("dev->id[%d][%d].prdaddr 0x%8x\n",
+ c, target_id, dev->id[c][target_id].prdaddr);
#endif
if (!is885(dev)) {
atp_writeb_pci(dev, c, 2, 0x06);
@@ -440,7 +471,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("status 0x80 return dirct != 0\n");
-#endif
+#endif
return IRQ_HANDLED;
}
atp_writeb_io(dev, c, 0x18, 0x08);
@@ -448,7 +479,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("status 0x80 return dirct = 0\n");
-#endif
+#endif
return IRQ_HANDLED;
}
@@ -466,10 +497,10 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
workreq->result = atp_readb_io(dev, c, 0x0f);
if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) {
printk(KERN_WARNING "AEC67162 CRC ERROR !\n");
- workreq->result = 0x02;
+ workreq->result = SAM_STAT_CHECK_CONDITION;
}
} else
- workreq->result = 0x02;
+ workreq->result = SAM_STAT_CHECK_CONDITION;
if (is885(dev)) {
j = atp_readb_base(dev, 0x29) | 0x01;
@@ -481,10 +512,10 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
scsi_dma_unmap(workreq);
spin_lock_irqsave(dev->host->host_lock, flags);
- (*workreq->scsi_done) (workreq);
+ scsi_done(workreq);
#ifdef ED_DBGP
printk("workreq->scsi_done\n");
-#endif
+#endif
/*
* Clear it off the queue
*/
@@ -498,16 +529,17 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x1b, 0x01);
while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01)
atp_writeb_io(dev, c, 0x1b, 0x01);
- }
+ }
/*
* If there is stuff to send and nothing going then send it
*/
spin_lock_irqsave(dev->host->host_lock, flags);
- if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) &&
+ if (((dev->last_cmd[c] != 0xff) ||
+ (dev->quhd[c] != dev->quend[c])) &&
(dev->in_snd[c] == 0)) {
#ifdef ED_DBGP
printk("Call sent_s870(scsi_done)\n");
-#endif
+#endif
send_s870(dev,c);
}
spin_unlock_irqrestore(dev->host->host_lock, flags);
@@ -528,9 +560,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x10, 0x41);
if (is885(dev)) {
k = dev->id[c][target_id].last_len;
- atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]);
- atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]);
- atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]);
+ atp_writeb_io(dev, c, 0x12,
+ ((unsigned char *) (&k))[2]);
+ atp_writeb_io(dev, c, 0x13,
+ ((unsigned char *) (&k))[1]);
+ atp_writeb_io(dev, c, 0x14,
+ ((unsigned char *) (&k))[0]);
dev->id[c][target_id].dirct = 0x00;
} else {
dev->id[c][target_id].dirct = 0x00;
@@ -547,11 +582,15 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x10, 0x41);
if (is885(dev)) {
k = dev->id[c][target_id].last_len;
- atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]);
- atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]);
- atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]);
+ atp_writeb_io(dev, c, 0x12,
+ ((unsigned char *) (&k))[2]);
+ atp_writeb_io(dev, c, 0x13,
+ ((unsigned char *) (&k))[1]);
+ atp_writeb_io(dev, c, 0x14,
+ ((unsigned char *) (&k))[0]);
}
- atp_writeb_io(dev, c, 0x15, atp_readb_io(dev, c, 0x15) | 0x20);
+ atp_writeb_io(dev, c, 0x15,
+ atp_readb_io(dev, c, 0x15) | 0x20);
dev->id[c][target_id].dirct = 0x20;
atp_writeb_io(dev, c, 0x18, 0x08);
atp_writeb_pci(dev, c, 0, 0x01);
@@ -573,15 +612,14 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
return IRQ_HANDLED;
}
/**
- * atp870u_queuecommand - Queue SCSI command
+ * atp870u_queuecommand_lck - Queue SCSI command
* @req_p: request block
- * @done: completion function
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
-static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
- void (*done) (struct scsi_cmnd *))
+static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
unsigned char c;
unsigned int m;
struct atp_unit *dev;
@@ -591,19 +629,17 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
req_p->sense_buffer[0]=0;
scsi_set_resid(req_p, 0);
if (scmd_channel(req_p) > 1) {
- req_p->result = 0x00040000;
+ req_p->result = DID_BAD_TARGET << 16;
done(req_p);
-#ifdef ED_DBGP
- printk("atp870u_queuecommand : req_p->device->channel > 1\n");
-#endif
+#ifdef ED_DBGP
+ printk("atp870u_queuecommand : req_p->device->channel > 1\n");
+#endif
return 0;
}
host = req_p->device->host;
dev = (struct atp_unit *)&host->hostdata;
-
-
m = 1;
m = m << scmd_id(req_p);
@@ -612,22 +648,11 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
*/
if ((m & dev->active_id[c]) == 0) {
- req_p->result = 0x00040000;
+ req_p->result = DID_BAD_TARGET << 16;
done(req_p);
return 0;
}
- if (done) {
- req_p->scsi_done = done;
- } else {
-#ifdef ED_DBGP
- printk( "atp870u_queuecommand: done can't be NULL\n");
-#endif
- req_p->result = 0;
- done(req_p);
- return 0;
- }
-
/*
* Count new command
*/
@@ -635,7 +660,7 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
if (dev->quend[c] >= qcnt) {
dev->quend[c] = 0;
}
-
+
/*
* Check queue state
*/
@@ -643,42 +668,46 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
if (dev->quend[c] == 0) {
dev->quend[c] = qcnt;
}
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n");
-#endif
+#endif
dev->quend[c]--;
- req_p->result = 0x00020000;
- done(req_p);
+ req_p->result = DID_BUS_BUSY << 16;
+ done(req_p);
return 0;
}
dev->quereq[c][dev->quend[c]] = req_p;
-#ifdef ED_DBGP
- printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],atp_readb_io(dev, c, 0x1c),c,dev->in_int[c],c,dev->in_snd[c]);
+#ifdef ED_DBGP
+ printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x "
+ "dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",
+ dev->ioport[c], atp_readb_io(dev, c, 0x1c), c,
+ dev->in_int[c],c,dev->in_snd[c]);
#endif
- if ((atp_readb_io(dev, c, 0x1c) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) {
+ if ((atp_readb_io(dev, c, 0x1c) == 0) &&
+ (dev->in_int[c] == 0) &&
+ (dev->in_snd[c] == 0)) {
#ifdef ED_DBGP
printk("Call sent_s870(atp870u_queuecommand)\n");
-#endif
+#endif
send_s870(dev,c);
}
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("atp870u_queuecommand : exit\n");
-#endif
+#endif
return 0;
}
static DEF_SCSI_QCMD(atp870u_queuecommand)
-/**
+/*
* send_s870 - send a command to the controller
- * @host: host
*
* On entry there is work queued to be done. We move some of that work to the
- * controller itself.
+ * controller itself.
*
* Caller holds the host lock.
*/
-static void send_s870(struct atp_unit *dev,unsigned char c)
+static void send_s870(struct atp_unit *dev, unsigned char c)
{
struct scsi_cmnd *workreq = NULL;
unsigned int i;//,k;
@@ -689,7 +718,7 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
unsigned long sg_count;
if (dev->in_snd[c] != 0) {
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("cmnd in_snd\n");
#endif
return;
@@ -729,7 +758,8 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
dev->id[c][scmd_id(workreq)].curr_req = workreq;
dev->last_cmd[c] = scmd_id(workreq);
}
- if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || atp_readb_io(dev, c, 0x1c) != 0) {
+ if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 ||
+ atp_readb_io(dev, c, 0x1c) != 0) {
#ifdef ED_DBGP
printk("Abort to Send\n");
#endif
@@ -744,7 +774,7 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
printk(" %x",workreq->cmnd[i]);
}
printk("\n");
-#endif
+#endif
l = scsi_bufflen(workreq);
if (is885(dev)) {
@@ -752,12 +782,12 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
atp_writeb_base(dev, 0x29, j);
dev->r1f[c][scmd_id(workreq)] = 0;
}
-
+
if (workreq->cmnd[0] == READ_CAPACITY) {
if (l > 8)
l = 8;
}
- if (workreq->cmnd[0] == 0x00) {
+ if (workreq->cmnd[0] == TEST_UNIT_READY) {
l = 0;
}
@@ -796,8 +826,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
* Write the target
*/
atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp);
-#ifdef ED_DBGP
- printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
+#ifdef ED_DBGP
+ printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,
+ dev->id[c][target_id].devsp);
#endif
sg_count = scsi_dma_map(workreq);
@@ -807,12 +838,12 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]);
atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]);
atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]);
- j = target_id;
+ j = target_id;
dev->id[c][j].last_len = l;
dev->id[c][j].tran_len = 0;
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len);
-#endif
+#endif
/*
* Flip the wide bits
*/
@@ -832,8 +863,8 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
if (l == 0) {
if (atp_readb_io(dev, c, 0x1c) == 0) {
#ifdef ED_DBGP
- printk("change SCSI_CMD_REG 0x08\n");
-#endif
+ printk("change SCSI_CMD_REG 0x08\n");
+#endif
atp_writeb_io(dev, c, 0x18, 0x08);
} else
dev->last_cmd[c] |= 0x40;
@@ -854,9 +885,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
scsi_for_each_sg(workreq, sgpnt, sg_count, j) {
bttl = sg_dma_address(sgpnt);
l=sg_dma_len(sgpnt);
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("1. bttl %x, l %x\n",bttl, l);
-#endif
+#endif
while (l > 0x10000) {
(((u16 *) (prd))[i + 3]) = 0x0000;
(((u16 *) (prd))[i + 2]) = 0x0000;
@@ -868,48 +899,65 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
(((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
(((u16 *) (prd))[i + 2]) = cpu_to_le16(l);
(((u16 *) (prd))[i + 3]) = 0;
- i += 0x04;
+ i += 0x04;
}
- (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000);
-#ifdef ED_DBGP
- printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
+ (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000);
+#ifdef ED_DBGP
+ printk("prd %4x %4x %4x %4x\n",
+ (((unsigned short int *)prd)[0]),
+ (((unsigned short int *)prd)[1]),
+ (((unsigned short int *)prd)[2]),
+ (((unsigned short int *)prd)[3]));
printk("2. bttl %x, l %x\n",bttl, l);
-#endif
+#endif
}
-#ifdef ED_DBGP
- printk("send_s870: prdaddr_2 0x%8x target_id %d\n", dev->id[c][target_id].prdaddr,target_id);
-#endif
+#ifdef ED_DBGP
+ printk("send_s870: prdaddr_2 0x%8x target_id %d\n",
+ dev->id[c][target_id].prdaddr,target_id);
+#endif
dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr);
atp_writeb_pci(dev, c, 2, 0x06);
atp_writeb_pci(dev, c, 2, 0x00);
if (is885(dev)) {
j = atp_readb_pci(dev, c, 1) & 0xf3;
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) ||
- (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
- j |= 0x0c;
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10)) {
+ j |= 0x0c;
}
atp_writeb_pci(dev, c, 1, j);
} else if (is880(dev)) {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3b,
+ (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
else
- atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f);
- } else {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
+ atp_writeb_base(dev, 0x3b,
+ atp_readb_base(dev, 0x3b) & 0x3f);
+ } else {
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3a,
+ (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
else
- atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3);
- }
+ atp_writeb_base(dev, 0x3a,
+ atp_readb_base(dev, 0x3a) & 0xf3);
+ }
if(workreq->sc_data_direction == DMA_TO_DEVICE) {
dev->id[c][target_id].dirct = 0x20;
if (atp_readb_io(dev, c, 0x1c) == 0) {
atp_writeb_io(dev, c, 0x18, 0x08);
atp_writeb_pci(dev, c, 0, 0x01);
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk( "start DMA(to target)\n");
-#endif
+#endif
} else {
dev->last_cmd[c] |= 0x40;
}
@@ -919,9 +967,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
if (atp_readb_io(dev, c, 0x1c) == 0) {
atp_writeb_io(dev, c, 0x18, 0x08);
atp_writeb_pci(dev, c, 0, 0x09);
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk( "start DMA(to host)\n");
-#endif
+#endif
} else {
dev->last_cmd[c] |= 0x40;
}
@@ -1193,7 +1241,9 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+ dma_free_coherent(&atp_dev->pdev->dev, 1024,
+ atp_dev->id[j][k].prd_table,
+ atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -1204,35 +1254,38 @@ static int atp870u_init_tables(struct Scsi_Host *host)
struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata;
int c,k;
for(c=0;c < 2;c++) {
- for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
- if (!atp_dev->id[c][k].prd_table) {
- printk("atp870u_init_tables fail\n");
+ for(k=0;k<16;k++) {
+ atp_dev->id[c][k].prd_table =
+ dma_alloc_coherent(&atp_dev->pdev->dev, 1024,
+ &(atp_dev->id[c][k].prd_bus),
+ GFP_KERNEL);
+ if (!atp_dev->id[c][k].prd_table) {
+ printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
return -ENOMEM;
}
atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
atp_dev->id[c][k].devsp=0x20;
atp_dev->id[c][k].devtype = 0x7f;
- atp_dev->id[c][k].curr_req = NULL;
- }
-
- atp_dev->active_id[c] = 0;
- atp_dev->wide_id[c] = 0;
- atp_dev->host_id[c] = 0x07;
- atp_dev->quhd[c] = 0;
- atp_dev->quend[c] = 0;
- atp_dev->last_cmd[c] = 0xff;
- atp_dev->in_snd[c] = 0;
- atp_dev->in_int[c] = 0;
-
- for (k = 0; k < qcnt; k++) {
- atp_dev->quereq[c][k] = NULL;
- }
- for (k = 0; k < 16; k++) {
+ atp_dev->id[c][k].curr_req = NULL;
+ }
+
+ atp_dev->active_id[c] = 0;
+ atp_dev->wide_id[c] = 0;
+ atp_dev->host_id[c] = 0x07;
+ atp_dev->quhd[c] = 0;
+ atp_dev->quend[c] = 0;
+ atp_dev->last_cmd[c] = 0xff;
+ atp_dev->in_snd[c] = 0;
+ atp_dev->in_int[c] = 0;
+
+ for (k = 0; k < qcnt; k++) {
+ atp_dev->quereq[c][k] = NULL;
+ }
+ for (k = 0; k < 16; k++) {
atp_dev->id[c][k].curr_req = NULL;
atp_dev->sp[c][k] = 0x04;
- }
+ }
}
return 0;
}
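For readers unfamiliar with the coherent DMA API being reindented here: dma_alloc_coherent() returns a CPU virtual address and fills in a bus address for the device's use, and exactly that tuple (device, size, CPU address, bus address) must be handed back to dma_free_coherent(). A minimal sketch:

        dma_addr_t bus;
        void *prd = dma_alloc_coherent(&pdev->dev, 1024, &bus, GFP_KERNEL);

        if (!prd)
                return -ENOMEM;
        /* device fetches PRD entries via 'bus'; the CPU fills them via 'prd' */
        dma_free_coherent(&pdev->dev, 1024, prd, bus);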
@@ -1263,7 +1316,8 @@ static void atp870_init(struct Scsi_Host *shpnt)
pci_read_config_byte(pdev, 0x49, &host_id);
- dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: IO:%lx, IRQ:%d.\n",
+ dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 "
+ "Host Adapter: IO:%lx, IRQ:%d.\n",
shpnt->io_port, shpnt->irq);
atpdev->ioport[0] = shpnt->io_port;
@@ -1314,7 +1368,8 @@ static void atp880_init(struct Scsi_Host *shpnt)
host_id = atp_readb_base(atpdev, 0x39) >> 4;
- dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n",
+ dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD "
+ "Host Adapter: IO:%lx, IRQ:%d.\n",
shpnt->io_port, shpnt->irq);
atpdev->host_id[0] = host_id;
@@ -1393,7 +1448,8 @@ static void atp885_init(struct Scsi_Host *shpnt)
unsigned int n;
unsigned char setupdata[2][16];
- dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n",
+ dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD "
+ "Host Adapter: IO:%lx, IRQ:%d.\n",
shpnt->io_port, shpnt->irq);
atpdev->ioport[0] = shpnt->io_port + 0x80;
@@ -1413,11 +1469,13 @@ static void atp885_init(struct Scsi_Host *shpnt)
atpdev->global_map[m] = 0;
for (k = 0; k < 4; k++) {
atp_writew_base(atpdev, 0x3c, n++);
- ((u32 *)&setupdata[m][0])[k] = atp_readl_base(atpdev, 0x38);
+ ((u32 *)&setupdata[m][0])[k] =
+ atp_readl_base(atpdev, 0x38);
}
for (k = 0; k < 4; k++) {
atp_writew_base(atpdev, 0x3c, n++);
- ((u32 *)&atpdev->sp[m][0])[k] = atp_readl_base(atpdev, 0x38);
+ ((u32 *)&atpdev->sp[m][0])[k] =
+ atp_readl_base(atpdev, 0x38);
}
n += 8;
}
@@ -1510,17 +1568,17 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto fail;
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
- err = -EIO;
- goto disable_device;
- }
+ printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
+ err = -EIO;
+ goto disable_device;
+ }
err = pci_request_regions(pdev, "atp870u");
if (err)
goto disable_device;
pci_set_master(pdev);
- err = -ENOMEM;
+ err = -ENOMEM;
shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
if (!shpnt)
goto release_region;
@@ -1586,7 +1644,7 @@ static int atp870u_abort(struct scsi_cmnd * SCpnt)
{
unsigned char j, k, c;
struct scsi_cmnd *workrequ;
- struct atp_unit *dev;
+ struct atp_unit *dev;
struct Scsi_Host *host;
host = SCpnt->device->host;
@@ -1655,11 +1713,10 @@ static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev,
}
static void atp870u_remove (struct pci_dev *pdev)
-{
+{
struct atp_unit *devext = pci_get_drvdata(pdev);
struct Scsi_Host *pshost = devext->host;
-
-
+
scsi_remove_host(pshost);
free_irq(pshost->irq, pshost);
pci_release_regions(pdev);
@@ -1671,23 +1728,23 @@ MODULE_LICENSE("GPL");
static struct scsi_host_template atp870u_template = {
.module = THIS_MODULE,
- .name = "atp870u" /* name */,
+ .name = "atp870u" /* name */,
.proc_name = "atp870u",
.show_info = atp870u_show_info,
- .info = atp870u_info /* info */,
- .queuecommand = atp870u_queuecommand /* queuecommand */,
- .eh_abort_handler = atp870u_abort /* abort */,
- .bios_param = atp870u_biosparam /* biosparm */,
- .can_queue = qcnt /* can_queue */,
- .this_id = 7 /* SCSI ID */,
- .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
+ .info = atp870u_info /* info */,
+ .queuecommand = atp870u_queuecommand /* queuecommand */,
+ .eh_abort_handler = atp870u_abort /* abort */,
+ .bios_param = atp870u_biosparam /* biosparm */,
+ .can_queue = qcnt /* can_queue */,
+ .this_id = 7 /* SCSI ID */,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
.max_sectors = ATP870U_MAX_SECTORS,
};
static struct pci_device_id atp870u_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) },
@@ -1709,7 +1766,8 @@ static struct pci_driver atp870u_driver = {
module_pci_driver(atp870u_driver);
-static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode)
+static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip,
+ unsigned char lvdmode)
{
unsigned char i, j, k, rmb, n;
unsigned short int m;
@@ -1982,8 +2040,9 @@ u3p_cmd:
m = m << i;
dev->wide_id[c] |= m;
dev->id[c][i].devsp = 0xce;
-#ifdef ED_DBGP
- printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
+#ifdef ED_DBGP
+ printk("dev->id[%2d][%2d].devsp = %2x\n",
+ c, i, dev->id[c][i].devsp);
#endif
continue;
}
@@ -2005,7 +2064,8 @@ chg_wide:
while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
cpu_relax();
- if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
+ if (atp_readb_io(dev, c, 0x17) != 0x11 &&
+ atp_readb_io(dev, c, 0x17) != 0x8e)
continue;
while (atp_readb_io(dev, c, 0x17) != 0x8e)
@@ -2109,7 +2169,9 @@ widep_cmd:
m = m << i;
dev->wide_id[c] |= m;
not_wide:
- if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) {
+ if ((dev->id[c][i].devtype == 0x00) ||
+ (dev->id[c][i].devtype == 0x07) ||
+ ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) {
m = 1;
m = m << i;
if ((dev->async[c] & m) != 0) {
@@ -2148,7 +2210,8 @@ set_sync:
while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
cpu_relax();
- if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
+ if (atp_readb_io(dev, c, 0x17) != 0x11 &&
+ atp_readb_io(dev, c, 0x17) != 0x8e)
continue;
while (atp_readb_io(dev, c, 0x17) != 0x8e)
@@ -2310,7 +2373,8 @@ tar_dcons:
set_syn_ok:
dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j;
#ifdef ED_DBGP
- printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
+ printk("dev->id[%2d][%2d].devsp = %2x\n",
+ c, i, dev->id[c][i].devsp);
#endif
}
}
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 75c44399fc88..31f6ab24b5cb 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -7,10 +7,10 @@
/* I/O Port */
-#define MAX_CDB 12
-#define MAX_SENSE 14
-#define qcnt 32
-#define ATP870U_SCATTER 128
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define qcnt 32
+#define ATP870U_SCATTER 128
#define MAX_ADAPTER 8
#define MAX_SCSI_ID 16
@@ -40,7 +40,7 @@ struct atp_unit
unsigned short ultra_map[2];
unsigned short async[2];
unsigned char sp[2][16];
- unsigned char r1f[2][16];
+ unsigned char r1f[2][16];
struct scsi_cmnd *quereq[2][qcnt];
struct atp_id
{
@@ -55,8 +55,8 @@ struct atp_unit
dma_addr_t prdaddr; /* Dynamically updated in driver */
struct scsi_cmnd *curr_req;
} id[2][16];
- struct Scsi_Host *host;
- struct pci_dev *pdev;
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
unsigned int unit;
};
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 2058d50d62e1..8aeaddc93b16 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -27,6 +27,7 @@ extern struct iscsi_transport beiscsi_iscsi_transport;
/**
* beiscsi_session_create - creates a new iscsi session
+ * @ep: pointer to iscsi ep
* @cmds_max: max commands supported
* @qdepth: max queue depth supported
* @initial_cmdsn: initial iscsi CMDSN
@@ -164,6 +165,7 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
* @cls_session: pointer to iscsi cls session
* @cls_conn: pointer to iscsi cls conn
* @transport_fd: EP handle(64 bit)
+ * @is_leading: indicates whether this is the session's leading connection (MCS)
*
* This function binds the TCP Conn with iSCSI Connection and Session.
*/
@@ -180,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
uint16_t cri_index;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -187,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep = ep->dd_data;
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
if (beiscsi_ep->phba != phba) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
-
- return -EEXIST;
+ rc = -EEXIST;
+ goto put_ep;
}
cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
if (phba->conn_table[cri_index]) {
@@ -207,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep->ep_cid,
beiscsi_conn,
phba->conn_table[cri_index]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto put_ep;
}
}
@@ -224,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
"BS_%d : cid %d phba->conn_table[%u]=%p\n",
beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
phba->conn_table[cri_index] = beiscsi_conn;
- return 0;
+
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
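The rewritten bind path above funnels every exit through the put_ep label so the reference taken by iscsi_lookup_endpoint() is dropped on success and failure alike. A minimal sketch of the acquire/goto-unwind pattern, with hypothetical resource_lookup()/resource_put() helpers standing in for the endpoint get/put pair:

	#include <linux/errno.h>
	#include <linux/types.h>

	struct resource_obj;
	struct resource_obj *resource_lookup(u64 handle);	/* takes a reference */
	void resource_put(struct resource_obj *res);		/* drops it */
	int resource_validate(struct resource_obj *res);

	static int bind_with_cleanup(u64 handle)
	{
		struct resource_obj *res;
		int rc = 0;

		res = resource_lookup(handle);
		if (!res)
			return -EINVAL;		/* nothing acquired yet: plain return */

		if (resource_validate(res)) {
			rc = -EEXIST;		/* acquired: unwind through the label */
			goto put_res;
		}

		/* ... bind work; any failure also does "goto put_res" ... */

	put_res:
		resource_put(res);		/* runs on every exit path */
		return rc;
	}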
@@ -293,7 +302,7 @@ void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
}
/**
- * beiscsi_set_vlan_tag()- Set the VLAN TAG
+ * beiscsi_iface_config_vlan()- Set the VLAN TAG
* @shost: Scsi Host for the driver instance
* @iface_param: Interface parameters
*
@@ -675,7 +684,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
if (conn->max_xmit_dlength > 65536)
conn->max_xmit_dlength = 65536;
- /* fall through */
+ fallthrough;
default:
return 0;
}
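The `/* fall through */` to `fallthrough;` conversions throughout this series swap a comment only humans can read for the pseudo-keyword from <linux/compiler_attributes.h>, which -Wimplicit-fallthrough can verify. The shape, mirroring the clamp above:

	#include <linux/compiler_attributes.h>	/* fallthrough */

	static int set_param_clamped(int param, int *val)
	{
		switch (param) {
		case 1:
			if (*val > 65536)
				*val = 65536;
			fallthrough;	/* deliberate: clamped case takes the default path too */
		default:
			return 0;
		}
	}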
@@ -992,7 +1001,7 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
/**
* beiscsi_free_ep - free endpoint
- * @ep: pointer to iscsi endpoint structure
+ * @beiscsi_ep: pointer to device endpoint struct
*/
static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
{
@@ -1027,9 +1036,10 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
/**
* beiscsi_open_conn - Ask FW to open a TCP connection
- * @ep: endpoint to be used
+ * @ep: pointer to device endpoint struct
* @src_addr: The source IP address
* @dst_addr: The Destination IP address
+ * @non_blocking: blocking or non-blocking call
*
* Asks the FW to open a TCP connection
*/
@@ -1123,7 +1133,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
/**
* beiscsi_ep_connect - Ask chip to create TCP Conn
- * @scsi_host: Pointer to scsi_host structure
+ * @shost: Pointer to scsi_host structure
* @dst_addr: The IP address of Target
* @non_blocking: blocking or non-blocking call
*
@@ -1228,7 +1238,7 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)
/**
* beiscsi_conn_close - Invalidate and upload connection
- * @ep: The iscsi endpoint
+ * @beiscsi_ep: pointer to device endpoint struct
*
* Returns 0 on success, -1 on failure.
*/
@@ -1290,7 +1300,6 @@ static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep)
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct beiscsi_endpoint *beiscsi_ep;
- struct beiscsi_conn *beiscsi_conn;
struct beiscsi_hba *phba;
uint16_t cri_index;
@@ -1309,11 +1318,6 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
return;
}
- if (beiscsi_ep->conn) {
- beiscsi_conn = beiscsi_ep->conn;
- iscsi_suspend_queue(beiscsi_conn->conn);
- }
-
if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 9b81cfbbc5c5..50a577ac3bb4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -143,8 +143,7 @@ DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
beiscsi_##_name##_disp, beiscsi_##_name##_store)
/*
- * When new log level added update the
- * the MAX allowed value for log_enable
+ * When a new log level is added, update the MAX allowed value for log_enable
*/
BEISCSI_RW_ATTR(log_enable, 0x00,
0xFF, 0x00, "Enable logging Bit Mask\n"
@@ -164,17 +163,20 @@ DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
beiscsi_free_session_disp, NULL);
-struct device_attribute *beiscsi_attrs[] = {
- &dev_attr_beiscsi_log_enable,
- &dev_attr_beiscsi_drvr_ver,
- &dev_attr_beiscsi_adapter_family,
- &dev_attr_beiscsi_fw_ver,
- &dev_attr_beiscsi_active_session_count,
- &dev_attr_beiscsi_free_session_count,
- &dev_attr_beiscsi_phys_port,
+
+static struct attribute *beiscsi_attrs[] = {
+ &dev_attr_beiscsi_log_enable.attr,
+ &dev_attr_beiscsi_drvr_ver.attr,
+ &dev_attr_beiscsi_adapter_family.attr,
+ &dev_attr_beiscsi_fw_ver.attr,
+ &dev_attr_beiscsi_active_session_count.attr,
+ &dev_attr_beiscsi_free_session_count.attr,
+ &dev_attr_beiscsi_phys_port.attr,
NULL,
};
+ATTRIBUTE_GROUPS(beiscsi);
+
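ATTRIBUTE_GROUPS(beiscsi) is the sysfs helper behind the shost_attrs to shost_groups conversion; written out by hand it is roughly:

	static const struct attribute_group beiscsi_group = {
		.attrs = beiscsi_attrs,
	};

	static const struct attribute_group *beiscsi_groups[] = {
		&beiscsi_group,
		NULL,
	};

The host template further down then publishes the array through .shost_groups instead of the removed .shost_attrs.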
static char const *cqe_desc[] = {
"RESERVED_DESC",
"SOL_CMD_COMPLETE",
@@ -216,7 +218,7 @@ static char const *cqe_desc[] = {
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
- struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
+ struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
struct iscsi_cls_session *cls_session;
struct beiscsi_io_task *abrt_io_task;
struct beiscsi_conn *beiscsi_conn;
@@ -229,6 +231,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+completion_check:
/* check if we raced, task just got cleaned up under us */
spin_lock_bh(&session->back_lock);
if (!abrt_task || !abrt_task->sc) {
@@ -236,7 +239,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(abrt_task);
+ if (!iscsi_get_task(abrt_task)) {
+ spin_unlock_bh(&session->back_lock);
+ /* The final put is freeing the task concurrently; wait for it. */
+ udelay(5);
+ goto completion_check;
+ }
+
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
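iscsi_get_task() can now fail once the task's refcount has dropped to zero, so the abort handler retries the lookup instead of taking a blind reference. A sketch of the tryget-and-retry loop, using refcount_inc_not_zero() as the primitive and hypothetical task_lookup()/task_tryget() names:

	#include <linux/delay.h>
	#include <linux/refcount.h>
	#include <linux/spinlock.h>

	struct task_obj {
		refcount_t refcount;
	};

	struct task_obj *task_lookup(void);	/* returns NULL once freed */

	static bool task_tryget(struct task_obj *t)
	{
		/* Fails if the final reference is already gone. */
		return refcount_inc_not_zero(&t->refcount);
	}

	static int abort_one(spinlock_t *lock)
	{
		struct task_obj *t;

	retry:
		spin_lock_bh(lock);
		t = task_lookup();
		if (!t) {
			spin_unlock_bh(lock);
			return 0;		/* raced: already completed */
		}
		if (!task_tryget(t)) {
			spin_unlock_bh(lock);
			udelay(5);		/* free in progress; let it finish */
			goto retry;
		}
		/* ... issue the abort while holding the reference ... */
		spin_unlock_bh(lock);
		return 0;
	}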
@@ -321,7 +330,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * The task has completed in the driver and is
+ * completing in libiscsi. Just ignore it here. When we
+ * call iscsi_eh_device_reset, it will wait for us.
+ */
+ continue;
+ }
+
io_task = task->dd_data;
/* mark WRB invalid which have been not processed by FW yet */
if (is_chip_be2_be3r(phba)) {
@@ -392,7 +409,7 @@ static struct scsi_host_template beiscsi_sht = {
.eh_abort_handler = beiscsi_eh_abort,
.eh_device_reset_handler = beiscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_session_reset,
- .shost_attrs = beiscsi_attrs,
+ .shost_groups = beiscsi_groups,
.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
.can_queue = BE2_IO_DEPTH,
.this_id = -1,
@@ -401,6 +418,7 @@ static struct scsi_host_template beiscsi_sht = {
.cmd_per_lun = BEISCSI_CMD_PER_LUN,
.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct scsi_transport_template *beiscsi_scsi_transport;
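The `.cmd_size = sizeof(struct iscsi_cmd)` line is what lets the driver drop sc->SCp.ptr: the midlayer reserves that many bytes behind every scsi_cmnd, and scsi_cmd_priv() (which libiscsi's iscsi_cmd() wraps) returns that region. A sketch with a hypothetical private struct:

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	struct my_cmd_priv {
		void *task;	/* what SCp.ptr used to carry */
	};

	static inline struct my_cmd_priv *my_cmd(struct scsi_cmnd *sc)
	{
		return scsi_cmd_priv(sc);	/* memory right after the scsi_cmnd */
	}

	static struct scsi_host_template my_sht = {
		/* ... usual fields ... */
		.cmd_size = sizeof(struct my_cmd_priv),
	};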
@@ -416,7 +434,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
return NULL;
}
- shost->max_id = BE2_MAX_SESSIONS;
+ shost->max_id = BE2_MAX_SESSIONS - 1;
shost->max_channel = 0;
shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
shost->max_lun = BEISCSI_NUM_MAX_LUN;
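The `- 1` on max_id (here and in beiscsi_enable_port further down) reflects that max_id is treated as the highest usable target ID, inclusive, not a session count; a hypothetical allocation-side check makes the off-by-one visible:

	#include <linux/errno.h>

	/* Hypothetical sketch: a target id is usable only while
	 * id <= shost->max_id, so N sessions need max_id = N - 1. */
	static int target_id_check(unsigned int id, unsigned int max_id)
	{
		return id > max_id ? -EINVAL : 0;
	}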
@@ -825,9 +843,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
&phwi_context->be_eq[i]);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_init_irqs-Failed to"
- "register msix for i = %d\n",
- i);
+ "BM_%d : %s-Failed to register msix for i = %d\n",
+ __func__, i);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -841,9 +858,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
- "BM_%d : beiscsi_init_irqs-"
- "Failed to register beiscsi_msix_mcc\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
+ __func__);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -853,8 +870,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
"beiscsi", phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_init_irqs-"
- "Failed to register irq\\n");
+ "BM_%d : %s-Failed to register irq\n",
+ __func__);
return ret;
}
}
@@ -977,7 +994,7 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
* alloc_wrb_handle - To allocate a wrb handle
* @phba: The hba pointer
* @cid: The cid to use for allocation
- * @pwrb_context: ptr to ptr to wrb context
+ * @pcontext: ptr to ptr to wrb context
*
* This happens under session_lock until submission to chip
*/
@@ -1030,7 +1047,7 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
phba->params.wrbs_per_cxn);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
+ "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
"wrb_handles_available=%d\n",
pwrb_handle, pwrb_context->free_index,
pwrb_context->wrb_handles_available);
@@ -1374,7 +1391,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
- " hwi_complete_cmd- Solicited path\n");
+ " %s- Solicited path\n", __func__);
break;
case HWH_TYPE_NOP:
@@ -1384,8 +1401,8 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
default:
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : In hwi_complete_cmd, unknown type = %d"
- "wrb_index 0x%x CID 0x%x\n", type,
+ "BM_%d : In %s, unknown type = %d "
+ "wrb_index 0x%x CID 0x%x\n", __func__, type,
csol_cqe.wrb_index,
csol_cqe.cid);
break;
@@ -1394,7 +1411,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
spin_unlock_bh(&session->back_lock);
}
-/**
+/*
* ASYNC PDUs include
* a. Unsolicited NOP-In (target initiated NOP-In)
* b. ASYNC Messages
@@ -1532,7 +1549,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
error = 1;
- /* fall through */
+ fallthrough;
case UNSOL_DATA_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].data;
break;
@@ -1883,9 +1900,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
cid = AMAP_GET_BITS(
struct amap_i_t_dpdu_cqe_v2,
cid, sol);
- else
- cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
- cid, sol);
+ else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, sol);
}
cri_index = BE_GET_CRI_FROM_CID(cid);
@@ -2010,8 +2027,7 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Invalid CQE Event Received Code : %d"
- "CID 0x%x...\n",
+ "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
code, cid);
break;
}
@@ -3001,7 +3017,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
void *eq_vaddress;
dma_addr_t paddr;
- num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
+ num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
sizeof(struct be_eq_entry));
if (phba->pcidev->msix_enabled)
@@ -3034,8 +3050,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
BEISCSI_EQ_DELAY_DEF);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_cmd_eq_create"
- "Failed for EQ\n");
+ "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
goto create_eq_error;
}
@@ -3068,7 +3083,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
int ret = -ENOMEM;
dma_addr_t paddr;
- num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
+ num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
sizeof(struct sol_cqe));
for (i = 0; i < phba->num_cpus; i++) {
@@ -3090,8 +3105,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_fill_queue Failed "
- "for ISCSI CQ\n");
+ "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
goto create_cq_error;
}
@@ -3100,8 +3114,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
false, 0);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_cmd_eq_create"
- "Failed for ISCSI CQ\n");
+ "BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n");
goto create_cq_error;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
@@ -3226,8 +3239,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
phwi_context->be_def_dataq[ulp_num].id);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : DEFAULT PDU DATA RING CREATED"
- "on ULP : %d\n", ulp_num);
+ "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
+ ulp_num);
return 0;
}
@@ -3253,13 +3266,13 @@ beiscsi_post_template_hdr(struct beiscsi_hba *phba)
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Post Template HDR Failed for"
+ "BM_%d : Post Template HDR Failed for "
"ULP_%d\n", ulp_num);
return status;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : Template HDR Pages Posted for"
+ "BM_%d : Template HDR Pages Posted for "
"ULP_%d\n", ulp_num);
}
}
@@ -3374,18 +3387,17 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
} else {
idx++;
wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
- pa_addr_lo = mem_descr->mem_array[idx].\
+ pa_addr_lo = mem_descr->mem_array[idx].
bus_address.u.a64.address;
num_wrb_rings = mem_descr->mem_array[idx].size /
(phba->params.wrbs_per_cxn *
sizeof(struct iscsi_wrb));
pwrb_arr[num].virtual_address = wrb_vaddr;
- pwrb_arr[num].bus_address.u.a64.address\
- = pa_addr_lo;
+ pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
pwrb_arr[num].size = phba->params.wrbs_per_cxn *
sizeof(struct iscsi_wrb);
wrb_vaddr += pwrb_arr[num].size;
- pa_addr_lo += pwrb_arr[num].size;
+ pa_addr_lo += pwrb_arr[num].size;
num_wrb_rings--;
}
}
@@ -3858,8 +3870,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
int i, j;
mem_descr = phba->init_mem;
- i = 0;
- j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
dma_free_coherent(&phba->pcidev->dev,
@@ -3939,7 +3949,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
idx++;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : phba->io_sgl_hndl_avbl=%d"
+ "BM_%d : phba->io_sgl_hndl_avbl=%d "
"phba->eh_sgl_hndl_avbl=%d\n",
phba->io_sgl_hndl_avbl,
phba->eh_sgl_hndl_avbl);
@@ -3997,13 +4007,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
GFP_KERNEL);
if (!ptr_cid_info) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory"
- "for ULP_CID_INFO for ULP : %d\n",
- ulp_num);
ret = -ENOMEM;
goto free_memory;
-
}
/* Allocate memory for CID array */
@@ -4012,10 +4017,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(*ptr_cid_info->cid_array),
GFP_KERNEL);
if (!ptr_cid_info->cid_array) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory"
- "for CID_ARRAY for ULP : %d\n",
- ulp_num);
kfree(ptr_cid_info);
ptr_cid_info = NULL;
ret = -ENOMEM;
@@ -4033,9 +4034,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(struct iscsi_endpoint *),
GFP_KERNEL);
if (!phba->ep_array) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory in "
- "hba_setup_cid_tbls\n");
ret = -ENOMEM;
goto free_memory;
@@ -4045,10 +4043,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(struct beiscsi_conn *),
GFP_KERNEL);
if (!phba->conn_table) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory in"
- "hba_setup_cid_tbls\n");
-
kfree(phba->ep_array);
phba->ep_array = NULL;
ret = -ENOMEM;
@@ -4401,7 +4395,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of IO_SGL_ICD Failed"
+ "BM_%d : Alloc of IO_SGL_ICD Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
@@ -4412,7 +4406,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of WRB_HANDLE Failed"
+ "BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_io_hndls;
@@ -4428,10 +4422,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed "
"for the CID : %d\n",
- beiscsi_conn->
- beiscsi_conn_cid);
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
}
@@ -4446,10 +4439,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of WRB_HANDLE Failed"
+ "BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
- beiscsi_conn->
- beiscsi_conn_cid);
+ beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
}
beiscsi_conn->plogin_wrb_handle =
@@ -4467,10 +4459,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed "
"for the CID : %d\n",
- beiscsi_conn->
- beiscsi_conn_cid);
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
}
io_task->pwrb_handle =
@@ -4480,7 +4471,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of WRB_HANDLE Failed"
+ "BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
@@ -4926,13 +4917,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
schedule_work(&phba->boot_work);
}
-/**
+#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
+/*
+ * beiscsi_show_boot_tgt_info()
* Boot flag info for iscsi-utilities
* Bit 0 Block valid flag
* Bit 1 Firmware booting selected
*/
-#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
-
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
@@ -5318,7 +5309,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);
- phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
@@ -5745,6 +5736,7 @@ free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_disable_pcie_error_reporting(pcidev);
pci_set_drvdata(pcidev, NULL);
disable_pci:
pci_release_regions(pcidev);
@@ -5768,7 +5760,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
cancel_work_sync(&phba->sess_work);
beiscsi_iface_destroy_default(phba);
- iscsi_host_remove(phba->shost);
+ iscsi_host_remove(phba->shost, false);
beiscsi_disable_port(phba, 1);
/* after cancelling boot_work */
@@ -5809,6 +5801,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = beiscsi_attr_is_visible,
.set_iface_param = beiscsi_iface_set_param,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index d4febaadfaa3..4e899ec1477d 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -97,6 +97,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
/**
* mgmt_open_connection()- Establish a TCP CXN
+ * @phba: driver priv structure
* @dst_addr: Destination Address
* @beiscsi_ep: ptr to device endpoint struct
* @nonemb_cmd: ptr to memory allocated for command
@@ -209,7 +210,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
return tag;
}
-/*
+/**
* beiscsi_exec_nemb_cmd()- execute non-embedded MBX cmd
* @phba: driver priv structure
* @nonemb_cmd: DMA address of the MBX command to be issued
@@ -234,8 +235,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) {
mutex_unlock(&ctrl->mbox_lock);
- rc = -ENOMEM;
- goto free_cmd;
+ return -ENOMEM;
}
sge = nonembedded_sgl(wrb);
@@ -268,24 +268,6 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
/* copy the response, if any */
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
- /**
- * This is special case of NTWK_GET_IF_INFO where the size of
- * response is not known. beiscsi_if_get_info checks the return
- * value to free DMA buffer.
- */
- if (rc == -EAGAIN)
- return rc;
-
- /**
- * If FW is busy that is driver timed out, DMA buffer is saved with
- * the tag, only when the cmd completes this buffer is freed.
- */
- if (rc == -EBUSY)
- return rc;
-
-free_cmd:
- dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
- nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
@@ -308,6 +290,19 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
return 0;
}
+static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *cmd, int rc)
+{
+ /*
+ * If FW is busy the DMA buffer is saved with the tag. When the cmd
+ * completes this buffer is freed.
+ */
+ if (rc == -EBUSY)
+ return;
+
+ dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma);
+}
+
static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
{
struct be_dma_mem *tag_mem;
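beiscsi_free_nemb_cmd() above encodes an ownership handoff: on -EBUSY the DMA buffer has been saved with the MCC tag and the completion handler frees it, so the caller must not. The pattern in isolation, with hypothetical names:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	struct dma_cmd_buf {
		void *va;
		dma_addr_t dma;
		size_t size;
	};

	/*
	 * Release the command buffer unless ownership moved to the
	 * async completion path (-EBUSY == "freed on completion").
	 */
	static void cmd_buf_release(struct device *dev,
				    struct dma_cmd_buf *cmd, int rc)
	{
		if (rc == -EBUSY)
			return;
		dma_free_coherent(dev, cmd->size, cmd->va, cmd->dma);
	}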
@@ -343,8 +338,16 @@ int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
- __beiscsi_eq_delay_compl, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl,
+ NULL, 0);
+ if (rc) {
+ /*
+ * Only free on failure. On -EBUSY the buffer was saved with
+ * the tag and the completion path frees it for us.
+ */
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ }
+ return rc;
}
/**
@@ -371,6 +374,7 @@ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
req->hdr.version = 1;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
&resp, sizeof(resp));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -448,7 +452,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
req->ip_addr.ip_type = ip_type;
memcpy(req->ip_addr.addr, gw,
(ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val);
+ return rt_val;
}
int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -498,8 +504,10 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
req = nonemb_cmd.va;
req->ip_type = ip_type;
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
- resp, sizeof(*resp));
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp,
+ sizeof(*resp));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ return rc;
}
static int
@@ -536,6 +544,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
"BG_%d : failed to clear IP: rc %d status %d\n",
rc, req->ip_params.ip_record.status);
}
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
return rc;
}
@@ -580,6 +589,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
if (req->ip_params.ip_record.status)
rc = -EINVAL;
}
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
return rc;
}
@@ -607,6 +617,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
reldhcp->interface_hndl = phba->interface_handle;
reldhcp->ip_type = ip_type;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
if (rc < 0) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : failed to release existing DHCP: %d\n",
@@ -688,7 +699,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
dhcpreq->interface_hndl = phba->interface_handle;
dhcpreq->ip_type = ip_type;
rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
-
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
exit:
kfree(if_info);
return rc;
@@ -761,11 +772,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : Memory Allocation Failure\n");
- /* Free the DMA memory for the IOCTL issuing */
- dma_free_coherent(&phba->ctrl.pdev->dev,
- nonemb_cmd.size,
- nonemb_cmd.va,
- nonemb_cmd.dma);
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd,
+ -ENOMEM);
return -ENOMEM;
}
@@ -780,15 +788,13 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
nonemb_cmd.va)->actual_resp_len;
ioctl_size += sizeof(struct be_cmd_req_hdr);
- /* Free the previous allocated DMA memory */
- dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
- nonemb_cmd.va,
- nonemb_cmd.dma);
-
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
/* Free the virtual memory */
kfree(*if_info);
- } else
+ } else {
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
break;
+ }
} while (true);
return rc;
}
@@ -805,8 +811,9 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
if (rc)
return rc;
- return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
- nic, sizeof(*nic));
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic));
+ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+ return rc;
}
static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
@@ -1178,12 +1185,12 @@ beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num,
- (total_cids - avlbl_cids));
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ (total_cids - avlbl_cids));
} else
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num, 0);
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
}
return len;
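The snprintf() to scnprintf() switch in these show routines is a correctness fix for the `len +=` accumulation: snprintf() returns the length that would have been written, so after truncation len can exceed PAGE_SIZE and the next `PAGE_SIZE - len` underflows; scnprintf() returns only the bytes actually stored. A minimal sketch:

	#include <linux/kernel.h>	/* scnprintf() */

	static ssize_t fill_report(char *buf, size_t bufsz,
				   int nvals, const int *vals)
	{
		ssize_t len = 0;
		int i;

		for (i = 0; i < nvals; i++)
			/* scnprintf() never claims more than it wrote,
			 * so bufsz - len stays a valid remaining count. */
			len += scnprintf(buf + len, bufsz - len,
					 "ULP%d : %d\n", i, vals[i]);
		return len;
	}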
@@ -1208,12 +1215,12 @@ beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num,
- BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
else
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num, 0);
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
}
return len;
@@ -1243,23 +1250,19 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
case OC_DEVICE_ID2:
return snprintf(buf, PAGE_SIZE,
"Obsolete/Unsupported BE2 Adapter Family\n");
- break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
- break;
case OC_SKH_ID1:
return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
- break;
default:
return snprintf(buf, PAGE_SIZE,
"Unknown Adapter Family: 0x%x\n", dev_id);
- break;
}
}
/**
- * beiscsi_phys_port()- Display Physical Port Identifier
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text port identifier
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0f554ebb8f2c..6846ca8f7313 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -708,7 +708,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
@@ -1237,7 +1237,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
complete(&bfad->disable_comp);
}
-/**
+/*
* configure queue registers from firmware response
*/
static void
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 8439951d95ac..f2c49f0e5c8b 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -871,7 +871,7 @@ enum bfa_port_linkstate_rsn {
/*
* Initially flash content may be fff. On making LUN mask enable and disable
- * state chnage. when report lun command is being processed it goes from
+ * state change. When a report lun command is being processed it goes from
* BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
* BFA_LUN_MASK_ACTIVE.
*/
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index b00fb2409c50..0314e4b9e1fb 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -33,21 +33,6 @@ struct scsi_cdb_s {
u8 scsi_cdb[SCSI_MAX_CDBLEN];
};
-/* ------------------------------------------------------------
- * SCSI status byte values
- * ------------------------------------------------------------
- */
-#define SCSI_STATUS_GOOD 0x00
-#define SCSI_STATUS_CHECK_CONDITION 0x02
-#define SCSI_STATUS_CONDITION_MET 0x04
-#define SCSI_STATUS_BUSY 0x08
-#define SCSI_STATUS_INTERMEDIATE 0x10
-#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
-#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
-#define SCSI_STATUS_COMMAND_TERMINATED 0x22
-#define SCSI_STATUS_QUEUE_FULL 0x28
-#define SCSI_STATUS_ACA_ACTIVE 0x30
-
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length */
/*
@@ -1208,7 +1193,7 @@ enum {
};
/*
- * defintions for CT reason code
+ * definitions for CT reason code
*/
enum {
CT_RSN_INV_CMD = 0x01,
@@ -1255,7 +1240,7 @@ enum {
};
/*
- * defintions for the explanation code for all servers
+ * definitions for the explanation code for all servers
*/
enum {
CT_EXP_AUTH_EXCEPTION = 0xF1,
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 284baa3b0c8e..7ad22288071b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -436,7 +436,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
return BFA_STATUS_OK;
}
-void
+static void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
struct bfa_itnim_latency_s *io_lat =
@@ -453,7 +453,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
io_lat->avg[idx] += val;
}
-void
+static void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
ioim->start_time = jiffies;
@@ -2146,7 +2146,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
/*
* setup sense information, if present
*/
- if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
+ if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) &&
m->sns_len) {
sns_len = m->sns_len;
snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
@@ -2335,9 +2335,7 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
wwn_t rpwwn, struct scsi_lun lun)
{
struct bfa_lun_mask_s *lunm_list;
- struct bfa_rport_s *rp = NULL;
struct bfa_fcs_lport_s *port = NULL;
- struct bfa_fcs_rport_s *rp_fcs;
int i;
/* in min cfg lunm_list could be NULL but no commands should run. */
@@ -2353,12 +2351,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
port = bfa_fcs_lookup_port(
&((struct bfad_s *)bfa->bfad)->bfa_fcs,
vf_id, *pwwn);
- if (port) {
+ if (port)
*pwwn = port->port_cfg.pwwn;
- rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- if (rp_fcs)
- rp = rp_fcs->bfa_rport;
- }
}
lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2578,7 +2572,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
case FCP_IODIR_RW:
bfa_stats(itnim, input_reqs);
bfa_stats(itnim, output_reqs);
- /* fall through */
+ fallthrough;
default:
bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
}
@@ -2813,7 +2807,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
case BFI_IOIM_STS_TIMEDOUT:
bfa_stats(ioim->itnim, iocomp_timedout);
- /* fall through */
+ fallthrough;
case BFI_IOIM_STS_ABORTED:
rsp->io_status = BFI_IOIM_STS_ABORTED;
bfa_stats(ioim->itnim, iocomp_aborted);
@@ -3209,7 +3203,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
- /* fall through */
+ fallthrough;
case BFA_TSKIM_SM_QRESUME:
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
bfa_tskim_send_abort(tskim);
@@ -3818,7 +3812,7 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
}
-/**
+/*
* To send config req, first try to use throttle value from flash
* If 0, then use driver parameter
* We need to use min(flash_val, drv_val) because
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 3e117fed95c9..c1baf5cd0d3e 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -217,9 +217,6 @@ struct bfa_vf_event_s {
u32 undefined;
};
-struct bfa_fcs_s;
-struct bfa_fcs_fabric_s;
-
/*
* @todo : need to move to a global config file.
* Maximum Rports supported per port (physical/logical).
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7c3eadc58b98..b12afcc4b189 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1283,7 +1283,7 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
n2n_port->reply_oxid = 0;
}
-void
+static void
bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
{
int i = 0, j = 0, bit = 0, alpa_bit = 0;
@@ -1408,7 +1408,7 @@ static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_fdmi_timeout(void *arg);
-static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
@@ -1887,6 +1887,8 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
(u8 *) ((struct ct_hdr_s *) pyld
+ 1));
+ if (attr_len < 0)
+ return;
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, (len + attr_len), &fchs,
@@ -1896,17 +1898,20 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
}
-static u16
+static int
bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
- struct bfa_fcs_fdmi_hba_attr_s hba_attr;
- struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+ struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr;
struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
struct fdmi_attr_s *attr;
+ int len;
u8 *curr_ptr;
- u16 len, count;
- u16 templen;
+ u16 templen, count;
+
+ fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL);
+ if (!fcs_hba_attr)
+ return -ENOMEM;
/*
* get hba attributes
@@ -2148,6 +2153,9 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
+
+ kfree(fcs_hba_attr);
+
return len;
}
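Turning the on-stack bfa_fcs_fdmi_hba_attr_s into a kzalloc() is the usual cure for -Wframe-larger-than warnings, and widening the return type to int is what lets the caller above bail out on attr_len < 0. In outline, with a made-up scratch struct:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct big_scratch {
		char model[256];
		char fw_version[256];
		/* ... several hundred more bytes ... */
	};

	static int build_payload(u8 *pyld)
	{
		struct big_scratch *scratch;
		int len = 0;

		scratch = kzalloc(sizeof(*scratch), GFP_KERNEL);
		if (!scratch)
			return -ENOMEM;	/* callers treat < 0 as failure */

		/* ... fill pyld from *scratch, accumulating len ... */

		kfree(scratch);
		return len;
	}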
@@ -4358,7 +4366,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
- };
+ }
break;
default:
@@ -5671,7 +5679,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
bfa_fcs_lport_ms_fabric_rscn(port);
break;
}
- /* !!!!!!!!! Fall Through !!!!!!!!!!!!! */
+ fallthrough;
case FC_RSCN_FORMAT_AREA:
case FC_RSCN_FORMAT_DOMAIN:
@@ -6422,7 +6430,7 @@ bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /* fall through */
+ fallthrough;
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
@@ -6448,7 +6456,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /* fall through */
+ fallthrough;
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 82801b366500..c21aa37b8adb 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -419,13 +419,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_fcxp_discard(rport->fcxp);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_FAILED:
if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
rport->plogi_retries++;
@@ -856,7 +856,7 @@ bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
* At least go offline when a PLOGI is received.
*/
bfa_fcxp_discard(rport->fcxp);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
@@ -1042,7 +1042,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1131,7 +1131,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
}
- /* fall through */
+ fallthrough;
case RPSM_EVENT_ADDRESS_CHANGE:
if (!bfa_fcs_lport_is_online(rport->port)) {
@@ -1288,7 +1288,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1332,7 +1332,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1575,7 +1575,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
- };
+ }
break;
case RPSM_EVENT_DELETE:
@@ -2240,15 +2240,12 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct bfa_fcxp_s *fcxp;
struct fchs_s fchs;
struct bfa_fcs_lport_s *port = rport->port;
- struct fc_adisc_s *adisc;
bfa_trc(port->fcs, rx_fchs->s_id);
bfa_trc(port->fcs, rx_fchs->d_id);
rport->stats.adisc_rcvd++;
- adisc = (struct fc_adisc_s *) (rx_fchs + 1);
-
/*
* Accept if the itnim for this rport is online.
* Else reject the ADISC.
@@ -2449,7 +2446,7 @@ bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
bfa_fcs_itnim_brp_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
- };
+ }
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 93471d7c61d0..5740302d83ac 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -387,7 +387,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
- /* !!! fall through !!! */
+ fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
@@ -437,7 +437,7 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_hb_timer_stop(ioc);
- /* !!! fall through !!! */
+ fallthrough;
case IOC_E_HBFAIL:
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
@@ -701,7 +701,7 @@ static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
struct bfi_ioc_image_hdr_s fwhdr;
- u32 r32, fwstate, pgnum, pgoff, loff = 0;
+ u32 r32, fwstate, pgnum, loff = 0;
int i;
/*
@@ -731,7 +731,6 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
* Clear fwver hdr
*/
pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
@@ -970,7 +969,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
@@ -1046,7 +1045,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_FAIL:
bfa_iocpf_timer_stop(ioc);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
@@ -1440,13 +1439,12 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
int i;
u32 *fwsig = (u32 *) fwhdr;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
@@ -1662,7 +1660,7 @@ bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
enum bfi_ioc_state ioc_fwstate;
@@ -1671,7 +1669,6 @@ bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
return BFA_STATUS_ADAPTER_ENABLED;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
@@ -1863,7 +1860,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 boot_env)
{
u32 *fwimg;
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
u32 chunkno = 0;
u32 i;
@@ -1892,8 +1889,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
-
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < fwimg_size; i++) {
@@ -3304,6 +3299,7 @@ bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
case BFI_ABLK_I2H_PORT_CONFIG:
/* update config port mode */
ablk->ioc->port_mode_cfg = rsp->port_mode;
+ break;
case BFI_ABLK_I2H_PF_DELETE:
case BFI_ABLK_I2H_PF_UPDATE:
@@ -4763,11 +4759,9 @@ bfa_diag_memtest_done(void *cbarg)
struct bfa_ioc_s *ioc = diag->ioc;
struct bfa_diag_memtest_result *res = diag->result;
u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
- u32 pgnum, pgoff, i;
+ u32 pgnum, i;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
-
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
@@ -5026,7 +5020,7 @@ diag_portbeacon_comp(struct bfa_diag_s *diag)
/*
* Diag hmbox handler
*/
-void
+static void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
struct bfa_diag_s *diag = diagarg;
@@ -5878,6 +5872,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
break;
case BFA_DCONF_SM_EXIT:
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_WR:
case BFA_DCONF_SM_FLASH_COMP:
@@ -5995,7 +5990,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
- /* fall through */
+ fallthrough;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
@@ -6649,8 +6644,8 @@ enum bfa_flash_cmd {
BFA_FLASH_READ_STATUS = 0x05, /* read status */
};
-/**
- * @brief hardware error definition
+/*
+ * Hardware error definition
*/
enum bfa_flash_err {
BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
@@ -6664,8 +6659,8 @@ enum bfa_flash_err {
BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
};
-/**
- * @brief flash command register data structure
+/*
+ * Flash command register data structure
*/
union bfa_flash_cmd_reg_u {
struct {
@@ -6688,8 +6683,8 @@ union bfa_flash_cmd_reg_u {
u32 i;
};
-/**
- * @brief flash device status register data structure
+/*
+ * Flash device status register data structure
*/
union bfa_flash_dev_status_reg_u {
struct {
@@ -6714,8 +6709,8 @@ union bfa_flash_dev_status_reg_u {
u32 i;
};
-/**
- * @brief flash address register data structure
+/*
+ * Flash address register data structure
*/
union bfa_flash_addr_reg_u {
struct {
@@ -6730,7 +6725,7 @@ union bfa_flash_addr_reg_u {
u32 i;
};
-/**
+/*
* dg flash_raw_private Flash raw private functions
*/
static void
@@ -6771,7 +6766,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
return 0;
}
-/**
+/*
* @brief
* Flush FLI data fifo.
*
@@ -6784,7 +6779,6 @@ static u32
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
u32 i;
- u32 t;
union bfa_flash_dev_status_reg_u dev_status;
dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
@@ -6794,7 +6788,7 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
/* fifo counter in terms of words */
for (i = 0; i < dev_status.r.fifo_cnt; i++)
- t = readl(pci_bar + FLI_RDDATA_REG);
+ readl(pci_bar + FLI_RDDATA_REG);
/*
* Check the device status. It may take some time.
@@ -6811,7 +6805,7 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
return 0;
}
-/**
+/*
* @brief
* Read flash status.
*
@@ -6856,7 +6850,7 @@ bfa_flash_status_read(void __iomem *pci_bar)
return ret_status;
}
-/**
+/*
* @brief
* Start flash read operation.
*
@@ -6902,7 +6896,7 @@ bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
return 0;
}
-/**
+/*
* @brief
* Check flash read operation.
*
@@ -6918,7 +6912,8 @@ bfa_flash_read_check(void __iomem *pci_bar)
return 0;
}
-/**
+
+/*
* @brief
* End flash read operation.
*
@@ -6944,7 +6939,7 @@ bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
bfa_flash_fifo_flush(pci_bar);
}
-/**
+/*
* @brief
* Perform flash raw read.
*
@@ -6970,7 +6965,7 @@ bfa_raw_sem_get(void __iomem *bar)
}
-bfa_status_t
+static bfa_status_t
bfa_flash_sem_get(void __iomem *bar)
{
u32 n = FLASH_BLOCKING_OP_MAX;
@@ -6983,7 +6978,7 @@ bfa_flash_sem_get(void __iomem *bar)
return BFA_STATUS_OK;
}
-void
+static void
bfa_flash_sem_put(void __iomem *bar)
{
writel(0, (bar + FLASH_SEM_LOCK_REG));
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 18b58b2f304f..fb748291676b 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -364,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
writel(r32, rb + FNC_PERS_REG);
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
u32 r32;
@@ -496,7 +496,7 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
return BFA_FALSE;
}
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
static void
@@ -517,7 +517,7 @@ bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
}
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
@@ -532,7 +532,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
ioc->ioc_hwif = &hwif_ct;
}
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
@@ -744,7 +744,7 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
writel(0, (rb + CT2_MBIST_CTL_REG));
}
-void
+static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
/* put port0, port1 MAC & AHB in reset */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 4511ec865f06..cfe2c9c336bf 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -756,7 +756,7 @@ bfa_cee_reset_stats(struct bfa_cee_s *cee,
* @return void
*/
-void
+static void
bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
union bfi_cee_i2h_msg_u *msg;
@@ -792,7 +792,7 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
* @return void
*/
-void
+static void
bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
{
struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 6d2131441f0a..4e3cef02f10f 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -369,13 +369,10 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event,
u16 misc, struct fchs_s *fchdr)
{
- struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
-
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];
@@ -389,13 +386,10 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
u32 pld_w0)
{
- struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
-
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];
@@ -2718,7 +2712,7 @@ bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
case BFA_FCPORT_SM_DPORTDISABLE:
case BFA_FCPORT_SM_ENABLE:
case BFA_FCPORT_SM_START:
- /**
+ /*
* Ignore event for a port that is ddport
*/
break;
@@ -3173,7 +3167,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
m->port_cfg = fcport->cfg;
m->msgtag = fcport->msgtag;
m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
- m->use_flash_cfg = fcport->use_flash_cfg;
+ m->use_flash_cfg = fcport->use_flash_cfg;
bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
@@ -3839,7 +3833,7 @@ bfa_fcport_get_topology(struct bfa_s *bfa)
return fcport->topology;
}
-/**
+/*
* Get config topology.
*/
enum bfa_port_topology
@@ -4284,7 +4278,7 @@ bfa_fcport_dportdisable(struct bfa_s *bfa)
bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
-void
+static void
bfa_fcport_ddportenable(struct bfa_s *bfa)
{
/*
@@ -4293,7 +4287,7 @@ bfa_fcport_ddportenable(struct bfa_s *bfa)
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
}
-void
+static void
bfa_fcport_ddportdisable(struct bfa_s *bfa)
{
/*
@@ -5517,7 +5511,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
struct bfa_uf_buf_s *uf_buf;
uint8_t *buf;
- struct fchs_s *fchs;
uf_buf = (struct bfa_uf_buf_s *)
bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
@@ -5526,8 +5519,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
m->frm_len = be16_to_cpu(m->frm_len);
m->xfr_len = be16_to_cpu(m->xfr_len);
- fchs = (struct fchs_s *)uf_buf;
-
list_del(&uf->qe); /* dequeue from posted queue */
uf->data_ptr = buf;
@@ -6400,7 +6391,7 @@ bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
dport->test_state = BFA_DPORT_ST_INP;
bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
}
- /* fall thru */
+ fallthrough;
case BFA_DPORT_SM_REQFAIL:
bfa_sm_set_state(dport, bfa_dport_sm_enabled);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index eb0c76338295..e5aa982ffedc 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -50,7 +50,7 @@ int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
int max_xfer_size = BFAD_MAX_SECTORS >> 1;
-int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+static int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
/* Firmware related */
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
if (rc) {
rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -749,6 +746,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
if (bfad->pci_bar0_kva == NULL) {
printk(KERN_ERR "Fail to map bar0\n");
+ rc = -ENODEV;
goto out_release_region;
}
@@ -1559,9 +1557,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
- DMA_BIT_MASK(32));
- if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
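Both bfad.c hunks drop the 32-bit retry after a failed 64-bit dma_set_mask_and_coherent(). The DMA API rejects a 64-bit mask only when the device cannot address DMA memory at all, and in that case a 32-bit mask fails as well, so the fallback was dead code. The probe path also gains the previously missing error code for the bar0 ioremap failure. The resulting shape is roughly (a sketch; the error label follows the slot-reset path above):

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		/* no usable DMA addressing; a 32-bit retry cannot succeed */
		rc = -ENODEV;
		goto out_disable_device;
	}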
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index fbfce02e5b93..5a85401e9e2d 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -437,7 +437,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
return status;
}
-int
+static int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
@@ -562,7 +562,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-void
+static void
bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
@@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
- return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+ return sysfs_emit(buf, "%s\n", serial_num);
}
static ssize_t
@@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
- return snprintf(buf, PAGE_SIZE, "%s\n", model);
+ return sysfs_emit(buf, "%s\n", model);
}
static ssize_t
@@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
- return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+ return sysfs_emit(buf, "%s\n", model_descr);
}
static ssize_t
@@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
u64 nwwn;
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+ return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn));
}
static ssize_t
@@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
- return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+ return sysfs_emit(buf, "%s\n", symname);
}
static ssize_t
@@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
char hw_ver[BFA_VERSION_LEN];
bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+ return sysfs_emit(buf, "%s\n", hw_ver);
}
static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION);
}
static ssize_t
@@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev,
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+ return sysfs_emit(buf, "%s\n", optrom_ver);
}
static ssize_t
@@ -885,7 +885,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
char fw_ver[BFA_VERSION_LEN];
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+ return sysfs_emit(buf, "%s\n", fw_ver);
}
static ssize_t
@@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
bfa_get_nports(&bfad->bfa));
}
@@ -905,7 +905,7 @@ static ssize_t
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME);
}
static ssize_t
@@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
GFP_ATOMIC);
if (rports == NULL)
- return snprintf(buf, PAGE_SIZE, "Failed\n");
+ return sysfs_emit(buf, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
- return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+ return sysfs_emit(buf, "%d\n", nrports);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
@@ -956,36 +956,52 @@ static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
bfad_im_num_of_discovered_ports_show, NULL);
-struct device_attribute *bfad_im_host_attrs[] = {
- &dev_attr_serial_number,
- &dev_attr_model,
- &dev_attr_model_description,
- &dev_attr_node_name,
- &dev_attr_symbolic_name,
- &dev_attr_hardware_version,
- &dev_attr_driver_version,
- &dev_attr_option_rom_version,
- &dev_attr_firmware_version,
- &dev_attr_number_of_ports,
- &dev_attr_driver_name,
- &dev_attr_number_of_discovered_ports,
+static struct attribute *bfad_im_host_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_model.attr,
+ &dev_attr_model_description.attr,
+ &dev_attr_node_name.attr,
+ &dev_attr_symbolic_name.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_driver_version.attr,
+ &dev_attr_option_rom_version.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_number_of_ports.attr,
+ &dev_attr_driver_name.attr,
+ &dev_attr_number_of_discovered_ports.attr,
NULL,
};
-struct device_attribute *bfad_im_vport_attrs[] = {
- &dev_attr_serial_number,
- &dev_attr_model,
- &dev_attr_model_description,
- &dev_attr_node_name,
- &dev_attr_symbolic_name,
- &dev_attr_hardware_version,
- &dev_attr_driver_version,
- &dev_attr_option_rom_version,
- &dev_attr_firmware_version,
- &dev_attr_number_of_ports,
- &dev_attr_driver_name,
- &dev_attr_number_of_discovered_ports,
+static const struct attribute_group bfad_im_host_attr_group = {
+ .attrs = bfad_im_host_attrs
+};
+
+const struct attribute_group *bfad_im_host_groups[] = {
+ &bfad_im_host_attr_group,
+ NULL
+};
+
+static struct attribute *bfad_im_vport_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_model.attr,
+ &dev_attr_model_description.attr,
+ &dev_attr_node_name.attr,
+ &dev_attr_symbolic_name.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_driver_version.attr,
+ &dev_attr_option_rom_version.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_number_of_ports.attr,
+ &dev_attr_driver_name.attr,
+ &dev_attr_number_of_discovered_ports.attr,
NULL,
};
+static const struct attribute_group bfad_im_vport_attr_group = {
+ .attrs = bfad_im_vport_attrs
+};
+const struct attribute_group *bfad_im_vport_groups[] = {
+ &bfad_im_vport_attr_group,
+ NULL
+};
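Two midlayer conversions meet in bfad_attr.c. Every sysfs show() callback moves from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which enforces the one-page sysfs contract centrally instead of trusting each caller to pass PAGE_SIZE. And because the SCSI host template now takes attribute groups rather than a NULL-terminated array of struct device_attribute pointers, the attribute tables are rebuilt as struct attribute lists wrapped in a group. A condensed sketch with one hypothetical attribute:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%s\n", "value");
	}
	static DEVICE_ATTR_RO(example);

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

	const struct attribute_group *example_groups[] = {
		&example_attr_group,
		NULL
	};

The groups are spelled out by hand here rather than via the ATTRIBUTE_GROUPS() macro because that macro makes the outer array static, while bfad_im_host_groups and bfad_im_vport_groups must stay visible to the host templates in bfad_im.c.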
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a76c968dbac5..be8dfbe13e90 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -15,7 +15,7 @@
BFA_TRC_FILE(LDRV, BSG);
-int
+static int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -38,7 +38,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -136,7 +136,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
@@ -146,7 +146,7 @@ bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -176,7 +176,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -194,7 +194,7 @@ bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
@@ -208,7 +208,7 @@ bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
@@ -219,7 +219,7 @@ bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -231,7 +231,7 @@ bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
@@ -244,7 +244,7 @@ bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -265,7 +265,7 @@ bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -315,7 +315,7 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -349,7 +349,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -370,7 +370,7 @@ bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
@@ -390,7 +390,7 @@ bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
@@ -404,7 +404,7 @@ bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
struct bfa_bsg_bbcr_enable_s *iocmd =
@@ -427,7 +427,7 @@ bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
@@ -465,7 +465,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_lport_s *fcs_port;
@@ -489,7 +489,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_lport_s *fcs_port;
@@ -523,7 +523,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_lport_s *fcs_port;
@@ -548,7 +548,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -590,7 +590,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
@@ -676,7 +676,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_rport_stats_s *iocmd =
@@ -717,7 +717,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_rport_reset_stats_s *iocmd =
@@ -753,7 +753,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_rport_set_speed_s *iocmd =
@@ -789,7 +789,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_vport_s *fcs_vport;
@@ -812,7 +812,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_vport_s *fcs_vport;
@@ -840,7 +840,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_vport_s *fcs_vport;
@@ -907,7 +907,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
@@ -920,7 +920,7 @@ bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -949,7 +949,7 @@ bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
@@ -978,7 +978,7 @@ bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
@@ -991,7 +991,7 @@ bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_modstats_s *iocmd =
@@ -1013,7 +1013,7 @@ bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
@@ -1035,7 +1035,7 @@ bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
@@ -1160,7 +1160,7 @@ bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -1173,7 +1173,7 @@ bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -1186,7 +1186,7 @@ bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
@@ -1208,7 +1208,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
@@ -1231,7 +1231,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
@@ -1253,7 +1253,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
@@ -1277,7 +1277,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_adapter_cfg_mode_s *iocmd =
@@ -1300,7 +1300,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_port_cfg_mode_s *iocmd =
@@ -1324,7 +1324,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -1350,7 +1350,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
@@ -1373,7 +1373,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
struct bfa_bsg_cee_attr_s *iocmd =
@@ -1409,7 +1409,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -1446,7 +1446,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -1460,7 +1460,7 @@ bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
@@ -1482,7 +1482,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
@@ -1503,7 +1503,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_flash_attr_s *iocmd =
@@ -1524,7 +1524,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
@@ -1544,7 +1544,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -1576,7 +1576,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -1608,7 +1608,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_get_temp_s *iocmd =
@@ -1630,7 +1630,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_memtest_s *iocmd =
@@ -1653,7 +1653,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_loopback_s *iocmd =
@@ -1676,7 +1676,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_fwping_s *iocmd =
@@ -1700,7 +1700,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
@@ -1721,7 +1721,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_sfp_show_s *iocmd =
@@ -1744,7 +1744,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
@@ -1757,7 +1757,7 @@ bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_beacon_s *iocmd =
@@ -1772,7 +1772,7 @@ bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_lb_stat_s *iocmd =
@@ -1787,7 +1787,7 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_dport_enable_s *iocmd =
@@ -1809,7 +1809,7 @@ bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -1829,7 +1829,7 @@ bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_dport_enable_s *iocmd =
@@ -1854,7 +1854,7 @@ bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
return 0;
}
-int
+static int
bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_diag_dport_show_s *iocmd =
@@ -1869,7 +1869,7 @@ bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
}
-int
+static int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_phy_attr_s *iocmd =
@@ -1890,7 +1890,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_phy_stats_s *iocmd =
@@ -1911,7 +1911,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
@@ -1943,7 +1943,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_vhba_attr_s *iocmd =
@@ -1962,7 +1962,7 @@ bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
@@ -1992,7 +1992,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
@@ -2012,7 +2012,7 @@ out:
}
#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
-int
+static int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
@@ -2046,7 +2046,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -2067,7 +2067,7 @@ bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
@@ -2081,7 +2081,7 @@ bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
@@ -2125,7 +2125,7 @@ bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcport_stats_s *iocmd =
@@ -2150,7 +2150,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -2174,7 +2174,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
@@ -2196,7 +2196,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
@@ -2218,7 +2218,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
@@ -2237,7 +2237,7 @@ bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
@@ -2260,7 +2260,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
@@ -2283,7 +2283,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -2323,7 +2323,7 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
@@ -2346,7 +2346,7 @@ bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -2374,7 +2374,7 @@ bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
@@ -2400,7 +2400,7 @@ bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_qos_vc_attr_s *iocmd =
@@ -2432,7 +2432,7 @@ bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcport_stats_s *iocmd =
@@ -2464,7 +2464,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
@@ -2495,7 +2495,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_vf_stats_s *iocmd =
@@ -2518,7 +2518,7 @@ out:
return 0;
}
-int
+static int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_vf_reset_stats_s *iocmd =
@@ -2555,7 +2555,7 @@ bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}
-int
+static int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -2578,7 +2578,7 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
@@ -2592,7 +2592,7 @@ bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_lunmask_s *iocmd =
@@ -2611,7 +2611,7 @@ bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_throttle_s *iocmd =
@@ -2626,7 +2626,7 @@ bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_throttle_s *iocmd =
@@ -2641,7 +2641,7 @@ bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_tfru_s *iocmd =
@@ -2663,7 +2663,7 @@ bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_tfru_s *iocmd =
@@ -2685,7 +2685,7 @@ bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fruvpd_s *iocmd =
@@ -2707,7 +2707,7 @@ bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fruvpd_s *iocmd =
@@ -2729,7 +2729,7 @@ bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fruvpd_max_size_s *iocmd =
@@ -3177,7 +3177,7 @@ out:
}
/* FC passthru call backs */
-u64
+static u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
struct bfad_fcxp *drv_fcxp = bfad_fcxp;
@@ -3189,7 +3189,7 @@ bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
return addr;
}
-u32
+static u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
struct bfad_fcxp *drv_fcxp = bfad_fcxp;
@@ -3199,7 +3199,7 @@ bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
return sge->sg_len;
}
-u64
+static u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
struct bfad_fcxp *drv_fcxp = bfad_fcxp;
@@ -3211,7 +3211,7 @@ bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
return addr;
}
-u32
+static u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
struct bfad_fcxp *drv_fcxp = bfad_fcxp;
@@ -3221,7 +3221,7 @@ bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
return sge->sg_len;
}
-void
+static void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
@@ -3236,7 +3236,7 @@ bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
complete(&drv_fcxp->comp);
}
-struct bfad_buf_info *
+static struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
uint32_t payload_len, uint32_t *num_sgles)
{
@@ -3280,7 +3280,7 @@ out_free_mem:
return NULL;
}
-void
+static void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
uint32_t num_sgles)
{
@@ -3298,7 +3298,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
}
}
-int
+static int
bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
bfa_bsg_fcpt_t *bsg_fcpt)
{
@@ -3338,7 +3338,7 @@ bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
return BFA_STATUS_OK;
}
-int
+static int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
@@ -3409,7 +3409,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
drv_fcxp->port = fcs_port->bfad_port;
- if (drv_fcxp->port->bfad == 0)
+ if (!drv_fcxp->port->bfad)
drv_fcxp->port->bfad = bfad;
/* Fetch the bfa_rport - if nexus needed */
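One pattern covers nearly every hunk in bfad_bsg.c: handlers reached only through the bsg dispatch switch later in the same file become static, so the compiler and tools like sparse can flag unused or undeclared handlers, with no behavioral change. The one remaining hunk is a style fix, since kernel style tests pointers as booleans rather than comparing against 0:

	if (!drv_fcxp->port->bfad)
		drv_fcxp->port->bfad = bfad;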
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index fd1b378a263a..52db147d9979 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -371,8 +371,7 @@ bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
if (!fw_debug)
return 0;
- if (fw_debug->debug_buffer)
- vfree(fw_debug->debug_buffer);
+ vfree(fw_debug->debug_buffer);
file->private_data = NULL;
kfree(fw_debug);
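The dropped guard is redundant: like kfree(), vfree() is specified to be a no-op when given NULL, so the bare

	vfree(fw_debug->debug_buffer);

is safe even when the trace buffer was never allocated.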
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 22f06be2606f..c335f7a188d2 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -96,7 +96,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
}
}
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
void
@@ -106,7 +106,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
struct bfad_itnim_data_s *itnim_data;
struct bfad_itnim_s *itnim;
- cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
+ cmnd->result = DID_OK << 16 | SAM_STAT_GOOD;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -124,7 +124,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
}
}
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
void
@@ -150,10 +150,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
wait_queue_head_t *wq;
- cmnd->SCp.Status |= tsk_status << 1;
- set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
- wq = (wait_queue_head_t *) cmnd->SCp.ptr;
- cmnd->SCp.ptr = NULL;
+ bfad_priv(cmnd)->status |= tsk_status << 1;
+ set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status);
+ wq = bfad_priv(cmnd)->wq;
+ bfad_priv(cmnd)->wq = NULL;
if (wq)
wake_up(wq);
@@ -226,7 +226,7 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
timeout *= 2;
}
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
bfa_trc(bfad, hal_io->iotag);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"scsi%d: complete abort 0x%p iotag 0x%x\n",
@@ -259,7 +259,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
* happens.
*/
cmnd->host_scribble = NULL;
- cmnd->SCp.Status = 0;
+ bfad_priv(cmnd)->status = 0;
bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
/*
* bfa_itnim can be NULL if the port gets disconnected and the bfa
@@ -326,8 +326,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
* if happens.
*/
cmnd->host_scribble = NULL;
- cmnd->SCp.ptr = (char *)&wq;
- cmnd->SCp.Status = 0;
+ bfad_priv(cmnd)->wq = &wq;
+ bfad_priv(cmnd)->status = 0;
bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
/*
* bfa_itnim can be NULL if the port gets disconnected and the bfa
@@ -347,10 +347,9 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- wait_event(wq, test_bit(IO_DONE_BIT,
- (unsigned long *)&cmnd->SCp.Status));
+ wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status));
- task_status = cmnd->SCp.Status >> 1;
+ task_status = bfad_priv(cmnd)->status >> 1;
if (task_status != BFI_TSKIM_STS_OK) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"LUN reset failure, status: %d\n", task_status);
@@ -381,16 +380,16 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&bfad->bfad_lock, flags);
itnim = bfad_get_itnim(im_port, starget->id);
if (itnim) {
- cmnd->SCp.ptr = (char *)&wq;
+ bfad_priv(cmnd)->wq = &wq;
rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
if (rc == BFA_STATUS_OK) {
/* wait target reset to complete */
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
wait_event(wq, test_bit(IO_DONE_BIT,
- (unsigned long *)&cmnd->SCp.Status));
+ &bfad_priv(cmnd)->status));
spin_lock_irqsave(&bfad->bfad_lock, flags);
- task_status = cmnd->SCp.Status >> 1;
+ task_status = bfad_priv(cmnd)->status >> 1;
if (task_status != BFI_TSKIM_STS_OK)
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"target reset failure,"
@@ -756,7 +755,6 @@ void
bfad_destroy_workq(struct bfad_im_s *im)
{
if (im && im->drv_workq) {
- flush_workqueue(im->drv_workq);
destroy_workqueue(im->drv_workq);
im->drv_workq = NULL;
}
@@ -797,6 +795,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .cmd_size = sizeof(struct bfad_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
@@ -809,7 +808,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
- .shost_attrs = bfad_im_host_attrs,
+ .shost_groups = bfad_im_host_groups,
.max_sectors = BFAD_MAX_SECTORS,
.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
};
@@ -819,6 +818,7 @@ struct scsi_host_template bfad_im_vport_template = {
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .cmd_size = sizeof(struct bfad_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
@@ -831,7 +831,7 @@ struct scsi_host_template bfad_im_vport_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
- .shost_attrs = bfad_im_vport_attrs,
+ .shost_groups = bfad_im_vport_groups,
.max_sectors = BFAD_MAX_SECTORS,
};
@@ -1199,9 +1199,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
/*
* Scsi_Host template entry, queue a SCSI command to the BFAD.
*/
-static int
-bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
@@ -1233,8 +1233,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
if (sg_cnt < 0)
return SCSI_MLQUEUE_HOST_BUSY;
- cmnd->scsi_done = done;
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
printk(KERN_WARNING
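Two scsi_cmnd changes drive the bfad_im.c hunks. The per-command completion callback moved out of struct scsi_cmnd, so every cmnd->scsi_done(cmnd) becomes a call to the midlayer helper scsi_done(cmnd), and ->queuecommand_lck() loses its done argument (the local done pointer is kept only to minimize churn in the function body). Good completions also report the SAM status constant SAM_STAT_GOOD. A sketch of the resulting shape, assuming the usual DEF_SCSI_QCMD() wrapper:

	static int example_queuecommand_lck(struct scsi_cmnd *cmnd)
	{
		void (*done)(struct scsi_cmnd *) = scsi_done;

		/* build and post the I/O request; on early completion: */
		done(cmnd);
		return 0;
	}

	static DEF_SCSI_QCMD(example_queuecommand)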
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index f16d4b219e44..c03b225ea1ba 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -43,6 +43,22 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
*/
#define IO_DONE_BIT 0
+/**
+ * struct bfad_cmd_priv - private data per SCSI command.
+ * @status: Lowest bit represents IO_DONE. The next seven bits hold a value of
+ * type enum bfi_tskim_status.
+ * @wq: Wait queue used to wait for completion of an operation.
+ */
+struct bfad_cmd_priv {
+ unsigned long status;
+ wait_queue_head_t *wq;
+};
+
+static inline struct bfad_cmd_priv *bfad_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
struct bfad_itnim_data_s {
struct bfad_itnim_s *itnim;
};
@@ -174,8 +190,8 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
-extern struct device_attribute *bfad_im_host_attrs[];
-extern struct device_attribute *bfad_im_vport_attrs[];
+extern const struct attribute_group *bfad_im_host_groups[];
+extern const struct attribute_group *bfad_im_vport_groups[];
irqreturn_t bfad_intx(int irq, void *dev_id);
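The SCp scratchpad used for task-management status (cmnd->SCp.Status, cmnd->SCp.ptr) is gone from struct scsi_cmnd, so the driver now declares its own per-command state: setting .cmd_size in the host template makes the midlayer allocate that many extra bytes behind each command, and scsi_cmd_priv() (wrapped here as bfad_priv()) returns them. The reset handlers then read as in this sketch, with names taken from the patch:

	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct bfad_cmd_priv *priv = bfad_priv(cmnd);

	priv->wq = &wq;		/* was: cmnd->SCp.ptr = (char *)&wq */
	priv->status = 0;	/* was: cmnd->SCp.Status = 0 */
	/* kick off the task management request, then: */
	wait_event(wq, test_bit(IO_DONE_BIT, &priv->status));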
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index e0ccb48ec961..ecdc0f0f4f4e 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -5,9 +5,10 @@ config SCSI_BNX2X_FCOE
depends on (IPV6 || IPV6=n)
depends on LIBFC
depends on LIBFCOE
+ depends on MMU
select NETDEVICES
select ETHERNET
select NET_VENDOR_BROADCOM
select CNIC
- ---help---
+ help
This driver supports FCoE offload for the QLogic devices.
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3b84db8d13a9..046247420cfa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -51,7 +51,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
-#include <scsi/fc_encode.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fip.h>
@@ -66,7 +65,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.12.10"
+#define BNX2FC_VERSION "2.12.13"
#define PFX "bnx2fc: "
@@ -138,8 +137,6 @@
#define BNX2FC_FW_TIMEOUT (3 * HZ)
#define PORT_MAX 2
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
-
/* FC FCP Status */
#define FC_GOOD 0
@@ -482,7 +479,10 @@ struct io_bdt {
struct bnx2fc_work {
struct list_head list;
struct bnx2fc_rport *tgt;
+ struct fcoe_task_ctx_entry *task;
+ unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
u16 wqe;
+ u8 num_rq;
};
struct bnx2fc_unsol_els {
struct fc_lport *lport;
@@ -491,7 +491,14 @@ struct bnx2fc_unsol_els {
struct work_struct unsol_els_work;
};
+struct bnx2fc_priv {
+ struct bnx2fc_cmd *io_req;
+};
+static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
@@ -550,7 +557,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
enum fc_rport_event event);
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@@ -559,7 +566,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
u8 num_rq);
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@@ -577,7 +584,9 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout);
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task);
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
u32 port_id);
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
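struct bnx2fc_work grows because completion handling is deferred to per-CPU threads: the task-context pointer, the RQ-entry count, and a copy of the first RQ buffer (which can carry FCP sense data) are captured while the receive queue is still consumable under tgt_lock, and the thread later works from that snapshot instead of the live RQ. The capture, as it appears in the bnx2fc_hwi.c diff below:

	work->tgt = tgt;
	work->wqe = wqe;
	work->num_rq = num_rq;
	work->task = task;
	if (rq_data)
		memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);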
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index b4bfab5edf8f..05ddbb9bb7d8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -16,6 +16,8 @@
#include "bnx2fc.h"
+#include <linux/ethtool.h>
+
static struct list_head adapter_list;
static struct list_head if_list;
static u32 adapter_count;
@@ -50,7 +52,7 @@ struct workqueue_struct *bnx2fc_wq;
* Here the io threads are per cpu but the l2 thread is just one
*/
struct fcoe_percpu_s bnx2fc_global;
-DEFINE_SPINLOCK(bnx2fc_global_lock);
+static DEFINE_SPINLOCK(bnx2fc_global_lock);
static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
@@ -80,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -108,22 +110,22 @@ MODULE_PARM_DESC(debug_logging,
"\t\t0x10 - fcoe L2 fame related logs.\n"
"\t\t0xff - LOG all messages.");
-uint bnx2fc_devloss_tmo;
+static uint bnx2fc_devloss_tmo;
module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
"attached via bnx2fc.");
-uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
+static uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
"0xffff.");
-uint bnx2fc_queue_depth;
+static uint bnx2fc_queue_depth;
module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
"attached via bnx2fc.");
-uint bnx2fc_log_fka;
+static uint bnx2fc_log_fka;
module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
"initiating a FIP keep alive when debug logging is enabled.");
@@ -271,7 +273,6 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct fcoe_port *port;
struct fcoe_hdr *hp;
struct bnx2fc_rport *tgt;
- struct fc_stats *stats;
u8 sof, eof;
u32 crc;
unsigned int hlen, tlen, elen;
@@ -397,10 +398,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
/*update tx stats */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* send down to lld */
fr_dev(fp) = lport;
@@ -506,10 +505,10 @@ static int bnx2fc_l2_rcv_thread(void *arg)
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
- u32 fr_len;
+ u64 crc_err;
+ u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
- struct fc_stats *stats;
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
@@ -540,6 +539,9 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+ this_cpu_inc(lport->stats->RxFrames);
+ this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
+
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
@@ -622,16 +624,13 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
- stats = per_cpu_ptr(lport->stats, smp_processor_id());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ fr_crc = le32_to_cpu(fr_crc(fp));
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
+ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+ crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount);
+ if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
- stats->InvalidCRCCount++;
kfree_skb(skb);
return;
}
@@ -660,7 +659,10 @@ static int bnx2fc_percpu_io_thread(void *arg)
list_for_each_entry_safe(work, tmp, &work_list, list) {
list_del_init(&work->list);
- bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe,
+ work->rq_data,
+ work->num_rq,
+ work->task);
kfree(work);
}
@@ -902,9 +904,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
-
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
return;
default:
@@ -942,7 +941,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
*/
if (interface->enabled)
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -957,12 +956,10 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) =
FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->stats,
- get_cpu())->LinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->LinkFailureCount);
fcoe_clean_pending_queue(lport);
wait_for_upload = 1;
- };
+ }
}
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1068,9 +1065,8 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
/**
* bnx2fc_update_src_mac - Update Ethernet MAC filters.
*
- * @fip: FCoE controller.
- * @old: Unicast MAC address to delete if the MAC is non-zero.
- * @new: Unicast MAC address to add.
+ * @lport: The local port
+ * @addr: Location of data to copy
*
* Remove any previously-set unicast MAC filter.
* Add secondary FCoE MAC address filter for our OUI.
@@ -1211,8 +1207,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
bnx2fc_free_vport(interface->hba, port->lport);
bnx2fc_port_shutdown(port->lport);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1521,7 +1517,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port->lport = lport;
port->priv = interface;
port->get_netdev = bnx2fc_netdev;
- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
rc = bnx2fc_lport_config(lport);
@@ -1649,15 +1644,14 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
bnx2fc_interface_cleanup(interface);
bnx2fc_stop(interface);
list_del(&interface->list);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
}
/**
* bnx2fc_destroy - Destroy a bnx2fc FCoE interface
*
- * @buffer: The name of the Ethernet interface to be destroyed
- * @kp: The associated kernel parameter
+ * @netdev: The net device that the FCoE interface is on
*
* Called from sysfs.
*
@@ -1691,15 +1685,12 @@ netdev_err:
return rc;
}
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
{
- struct fcoe_port *port;
struct fc_lport *lport;
- port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
-
- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
bnx2fc_if_destroy(lport);
}
@@ -1793,7 +1784,7 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
/**
* bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
*
- * @handle: transport handle pointing to adapter struture
+ * @handle: transport handle pointing to adapter structure
*/
static int bnx2fc_ulp_get_stats(void *handle)
{
@@ -2085,7 +2076,7 @@ static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
{
struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
- if (interface->enabled == true) {
+ if (interface->enabled) {
if (!ctlr->lp) {
pr_err(PFX "__bnx2fc_disable: lport not found\n");
return -ENODEV;
@@ -2098,7 +2089,7 @@ static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
return 0;
}
-/**
+/*
* Deprecated: Use bnx2fc_enabled()
*/
static int bnx2fc_disable(struct net_device *netdev)
@@ -2183,7 +2174,7 @@ static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
struct cnic_fc_npiv_tbl *npiv_tbl;
struct fc_lport *lport;
- if (interface->enabled == false) {
+ if (!interface->enabled) {
if (!ctlr->lp) {
pr_err(PFX "__bnx2fc_enable: lport not found\n");
return -ENODEV;
@@ -2226,7 +2217,7 @@ done:
return 0;
}
-/**
+/*
* Deprecated: Use bnx2fc_enabled()
*/
static int bnx2fc_enable(struct net_device *netdev)
@@ -2274,7 +2265,7 @@ static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
case FCOE_CTLR_UNUSED:
default:
return -ENOTSUPP;
- };
+ }
}
enum bnx2fc_create_link_state {
@@ -2520,7 +2511,7 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
/**
* bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
*
- * @dev cnic device handle
+ * @dev: cnic device handle
*/
static void bnx2fc_ulp_exit(struct cnic_dev *dev)
{
@@ -2553,9 +2544,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
-
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
@@ -2655,7 +2643,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
/* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
- bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data,
+ work->num_rq, work->task);
kfree(work);
}
@@ -2719,14 +2708,13 @@ static int __init bnx2fc_mod_init(void)
bg = &bnx2fc_global;
skb_queue_head_init(&bg->fcoe_rx_list);
- l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
- (void *)bg,
- "bnx2fc_l2_thread");
+ l2_thread = kthread_run(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
if (IS_ERR(l2_thread)) {
rc = PTR_ERR(l2_thread);
goto free_wq;
}
- wake_up_process(l2_thread);
spin_lock_bh(&bg->fcoe_rx_list.lock);
bg->kthread = l2_thread;
spin_unlock_bh(&bg->fcoe_rx_list.lock);
@@ -2947,12 +2935,14 @@ bnx2fc_tm_timeout_store(struct device *dev,
static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show,
bnx2fc_tm_timeout_store);
-static struct device_attribute *bnx2fc_host_attrs[] = {
- &dev_attr_tm_timeout,
+static struct attribute *bnx2fc_host_attrs[] = {
+ &dev_attr_tm_timeout.attr,
NULL,
};
-/**
+ATTRIBUTE_GROUPS(bnx2fc_host);
+
+/*
* scsi_host_template structure used while registering with SCSI-ml
*/
static struct scsi_host_template bnx2fc_shost_template = {
@@ -2973,7 +2963,8 @@ static struct scsi_host_template bnx2fc_shost_template = {
.max_sectors = 0x3fbf,
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
- .shost_attrs = bnx2fc_host_attrs,
+ .shost_groups = bnx2fc_host_groups,
+ .cmd_size = sizeof(struct bnx2fc_priv),
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
@@ -2985,7 +2976,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
.rport_event_callback = bnx2fc_rport_event_handler,
};
-/**
+/*
* bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
* structure carrying callback function pointers
*/
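The statistics hunks in this file trade the open-coded per-CPU sequence for the this_cpu_*() accessors, which fold address computation and preemption safety into a single operation; on the CRC path, this_cpu_inc_return() also hands back the new count, so the rate-limited warning needs no separate read:

	/* before: pin the CPU, dereference, bump, unpin */
	stats = per_cpu_ptr(lport->stats, get_cpu());
	stats->TxFrames++;
	stats->TxWords += wlen;
	put_cpu();

	/* after: one preempt-safe update per counter */
	this_cpu_inc(lport->stats->TxFrames);
	this_cpu_add(lport->stats->TxWords, wlen);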
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 6f8335ddb1f2..776544385598 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -485,7 +485,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
/**
* bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
*
- * @port: port structure pointer
+ * @hba: adapter structure pointer
* @tgt: bnx2fc_rport structure pointer
*/
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
@@ -635,7 +635,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
struct bnx2fc_cmd *io_req = NULL;
struct bnx2fc_interface *interface = tgt->port->priv;
struct bnx2fc_hba *hba = interface->hba;
- int task_idx, index;
int rc = 0;
u64 err_warn_bit_map;
u8 err_warn = 0xff;
@@ -701,15 +700,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
-
if (xid > hba->max_xid) {
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
xid);
goto ret_err_rqe;
}
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (!io_req)
@@ -774,7 +770,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
} else
printk(KERN_ERR PFX "SRR in progress\n");
goto ret_err_rqe;
- break;
default:
break;
}
@@ -833,8 +828,6 @@ ret_err_rqe:
}
BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (!io_req)
goto ret_warn_rqe;
@@ -863,36 +856,22 @@ ret_warn_rqe:
}
}
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
- int task_idx, index;
+
u16 xid;
u8 cmd_type;
u8 rx_state = 0;
- u8 num_rq;
spin_lock_bh(&tgt->tgt_lock);
- xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
- if (xid >= hba->max_tasks) {
- printk(KERN_ERR PFX "ERROR:xid out of range\n");
- spin_unlock_bh(&tgt->tgt_lock);
- return;
- }
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
- task = &(task_page[index]);
-
- num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (io_req == NULL) {
@@ -912,7 +891,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
- bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
+ rq_data);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@@ -929,7 +909,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
case BNX2FC_TASK_MGMT_CMD:
BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
- bnx2fc_process_tm_compl(io_req, task, num_rq);
+ bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
break;
case BNX2FC_ABTS:
@@ -987,7 +967,9 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
}
-static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
struct bnx2fc_work *work;
work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
@@ -997,29 +979,86 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
INIT_LIST_HEAD(&work->list);
work->tgt = tgt;
work->wqe = wqe;
+ work->num_rq = num_rq;
+ work->task = task;
+ if (rq_data)
+ memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
+
return work;
}
/* Pending work request completion */
-static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
unsigned int cpu = wqe % num_possible_cpus();
struct bnx2fc_percpu_s *fps;
struct bnx2fc_work *work;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ unsigned char *rq_data = NULL;
+ unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
+ int task_idx, index;
+ u16 xid;
+ u8 num_rq;
+ int i;
+
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= hba->max_tasks) {
+ pr_err(PFX "ERROR:xid out of range\n");
+ return false;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &task_page[index];
+
+ num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+ memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
+
+ if (!num_rq)
+ goto num_rq_zero;
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ if (rq_data)
+ memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+
+num_rq_zero:
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (fps->iothread) {
- work = bnx2fc_alloc_work(tgt, wqe);
+ work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
+ num_rq, task);
if (work) {
list_add_tail(&work->list, &fps->work_list);
wake_up_process(fps->iothread);
spin_unlock_bh(&fps->fp_work_lock);
- return;
+ return true;
}
}
spin_unlock_bh(&fps->fp_work_lock);
- bnx2fc_process_cq_compl(tgt, wqe);
+ bnx2fc_process_cq_compl(tgt, wqe,
+ rq_data_buff, num_rq, task);
+
+ return true;
}
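
Note on the reworked queuing path: bnx2fc_pending_work() now validates the xid, resolves the task context, and snapshots the RQ buffer before the WQE is handed to the per-CPU I/O thread, so the thread never touches the hardware RQ itself. A minimal sketch of the bnx2fc_work layout this implies — field names are taken from the usage above; the authoritative definition lives in bnx2fc.h:

	struct bnx2fc_work {
		struct list_head list;                   /* linkage on fps->work_list */
		struct bnx2fc_rport *tgt;                /* owning remote port */
		struct fcoe_task_ctx_entry *task;        /* task context resolved at queue time */
		unsigned char rq_data[BNX2FC_RQ_BUF_SZ]; /* snapshot of the first RQ entry */
		u16 wqe;
		u8 num_rq;
	};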
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
@@ -1056,8 +1095,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
- bnx2fc_pending_work(tgt, wqe);
- num_free_sqes++;
+ if (bnx2fc_pending_work(tgt, wqe))
+ num_free_sqes++;
}
cqe++;
tgt->cq_cons_idx++;
@@ -1130,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
ofld_kcqe->fcoe_conn_context_id);
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
goto ofld_cmpl_err;
}
/*
@@ -1187,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ERR PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mismatch\n");
return;
}
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
goto enbl_cmpl_err;
}
if (!ofld_kcqe->completion_status)
@@ -1292,10 +1331,10 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
}
/**
- * bnx2fc_indicae_kcqe - process KCQE
+ * bnx2fc_indicate_kcqe() - process KCQE
*
- * @hba: adapter structure pointer
- * @kcqe: kcqe pointer
+ * @context: adapter structure pointer
+ * @kcq: kcqe pointer
* @num_cqe: Number of completion queue elements
*
* Generic KCQ event handler
@@ -1364,7 +1403,6 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
break;
case FCOE_KCQE_OPCODE_FCOE_ERROR:
- /* fall thru */
default:
printk(KERN_ERR PFX "unknown opcode 0x%x\n",
kcqe->op_code);
@@ -1463,7 +1501,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
u32 orig_offset = offset;
int bd_count;
- int orig_task_idx, index;
int i;
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
@@ -1513,8 +1550,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
offset; /* adjusted offset */
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
} else {
- orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
- index = orig_xid % BNX2FC_TASKS_PER_PAGE;
/* Multiple SGEs were used for this IO */
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
@@ -2042,11 +2077,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
pbl = hba->hash_tbl_pbl;
i = 0;
while (*pbl && *(pbl + 1)) {
- u32 lo;
- u32 hi;
- lo = *pbl;
++pbl;
- hi = *pbl;
++pbl;
++i;
}
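
With the dead lo/hi reads gone, the PBL walk above only counts populated entries; a sketch of what the loop reduces to (each PBL entry is a lo/hi pair of 32-bit words):

	/* count populated page-buffer-list entries */
	while (*pbl && *(pbl + 1)) {
		pbl += 2;
		++i;
	}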
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c8122a82322..b42a9accb832 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -24,7 +24,7 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
unsigned int timer_msec)
@@ -204,8 +204,8 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
sc_cmd->allowed);
scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
+ scsi_done(sc_cmd);
}
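
bnx2fc_priv() here replaces the legacy scsi_cmnd SCp.ptr scratch field with midlayer-allocated per-command data. A sketch of the accessor this conversion presumes — the real definition belongs in bnx2fc.h, and the host template must set .cmd_size so the midlayer allocates the space:

	struct bnx2fc_priv {
		struct bnx2fc_cmd *io_req;	/* driver command backing this scsi_cmnd */
	};

	static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd)
	{
		return scsi_cmd_priv(cmd);	/* points into the per-command cmd_size area */
	}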
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
@@ -472,7 +472,7 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
u32 free_sqes;
u32 max_sqes;
u16 xid;
- int index = get_cpu();
+ int index = raw_smp_processor_id();
max_sqes = BNX2FC_SCSI_MAX_SQES;
/*
@@ -485,7 +485,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
(tgt->num_active_ios.counter >= max_sqes) ||
(free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- put_cpu();
return NULL;
}
@@ -498,7 +497,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
atomic_inc(&tgt->num_active_ios);
atomic_dec(&tgt->free_sqes);
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- put_cpu();
INIT_LIST_HEAD(&io_req->link);
@@ -765,7 +763,7 @@ retry_tmf:
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
- sc_cmd->SCp.ptr = (char *)io_req;
+ bnx2fc_priv(sc_cmd)->io_req = io_req;
/* Obtain free SQ entry */
spin_lock_bh(&tgt->tgt_lock);
@@ -864,7 +862,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
if (!abts_io_req) {
- printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
rc = FAILED;
goto abts_err;
}
@@ -957,7 +955,7 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
if (!seq_clnp_req) {
- printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
rc = -ENOMEM;
kfree(cb_arg);
goto cleanup_err;
@@ -1015,7 +1013,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
if (!cleanup_io_req) {
- printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
rc = -1;
goto cleanup_err;
}
@@ -1081,6 +1079,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
}
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+ __must_hold(&tgt->tgt_lock)
{
struct bnx2fc_rport *tgt = io_req->tgt;
unsigned int time_left;
@@ -1146,7 +1145,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
spin_lock_bh(&tgt->tgt_lock);
- io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ io_req = bnx2fc_priv(sc_cmd)->io_req;
if (!io_req) {
/* Command might have just completed */
printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
@@ -1212,13 +1211,14 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
* cleanup the command and return that I/O was successfully
* aborted.
*/
- rc = bnx2fc_abts_cleanup(io_req);
+ bnx2fc_abts_cleanup(io_req);
/* This only occurs when a task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
RRQ process in the case when the fw generated SCSI_CMD cmpl
was a result of the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = FAILED;
goto done;
}
@@ -1518,7 +1518,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task, u8 num_rq)
+ struct fcoe_task_ctx_entry *task, u8 num_rq,
+ unsigned char *rq_data)
{
struct bnx2fc_mp_req *tm_req;
struct fc_frame_header *fc_hdr;
@@ -1557,7 +1558,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
bnx2fc_parse_fcp_rsp(io_req,
(struct fcoe_fcp_rsp_payload *)
- rsp_buf, num_rq);
+ rsp_buf, num_rq, rq_data);
if (io_req->fcp_rsp_code == 0) {
/* TM successful */
if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
@@ -1569,8 +1570,8 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
fc_hdr->fh_r_ctl);
}
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
return;
}
switch (io_req->fcp_status) {
@@ -1606,8 +1607,8 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
return;
}
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (io_req->wait_for_abts_comp) {
@@ -1755,15 +1756,11 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq)
+ u8 num_rq, unsigned char *rq_data)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- struct bnx2fc_rport *tgt = io_req->tgt;
u8 rsp_flags = fcp_rsp->fcp_flags.flags;
u32 rq_buff_len = 0;
- int i;
- unsigned char *rq_data;
- unsigned char *dummy;
int fcp_sns_len = 0;
int fcp_rsp_len = 0;
@@ -1774,8 +1771,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
+ io_req->cdb_status = fcp_rsp->scsi_status_code;
/* Fetch fcp_rsp_info and fcp_sns_info if available */
if (num_rq) {
@@ -1809,14 +1805,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
}
- rq_data = bnx2fc_get_next_rqe(tgt, 1);
-
- if (num_rq > 1) {
- /* We do not need extra sense data */
- for (i = 1; i < num_rq; i++)
- dummy = bnx2fc_get_next_rqe(tgt, 1);
- }
-
/* fetch fcp_rsp_code */
if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
/* Only for task management function */
@@ -1837,9 +1825,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
- /* return RQ entries */
- for (i = 0; i < num_rq; i++)
- bnx2fc_return_rqe(tgt, 1);
}
}
@@ -1865,7 +1850,7 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -1918,7 +1903,7 @@ exit_qcmd:
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq)
+ u8 num_rq, unsigned char *rq_data)
{
struct fcoe_fcp_rsp_payload *fcp_rsp;
struct bnx2fc_rport *tgt = io_req->tgt;
@@ -1931,6 +1916,12 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* we will not receive ABTS response for this IO */
BNX2FC_IO_DBG(io_req, "Timer context finished processing "
"this scsi cmd\n");
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req,
+ "Actual completion after cleanup request cleaning up\n");
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ }
return;
}
@@ -1950,10 +1941,10 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
&(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
/* parse fcp_rsp and obtain sense data from RQ if available */
- bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "io_req is NULL\n");
return;
}
@@ -2024,8 +2015,8 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
io_req->fcp_status);
break;
}
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
@@ -2039,7 +2030,6 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
- struct fc_stats *stats;
int task_idx, index;
u16 xid;
@@ -2050,22 +2040,20 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
io_req->port = port;
io_req->tgt = tgt;
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
+ bnx2fc_priv(sc_cmd)->io_req = io_req;
- stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
io_req->io_req_flags = BNX2FC_READ;
- stats->InputRequests++;
- stats->InputBytes += io_req->data_xfer_len;
+ this_cpu_inc(lport->stats->InputRequests);
+ this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
io_req->io_req_flags = BNX2FC_WRITE;
- stats->OutputRequests++;
- stats->OutputBytes += io_req->data_xfer_len;
+ this_cpu_inc(lport->stats->OutputRequests);
+ this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len);
} else {
io_req->io_req_flags = 0;
- stats->ControlRequests++;
+ this_cpu_inc(lport->stats->ControlRequests);
}
- put_cpu();
xid = io_req->xid;
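
The stats conversion above drops the get_cpu()/put_cpu() pinning in favour of this_cpu_*() accessors, which are preemption-safe on their own; two adjacent updates may then land on different CPUs, which is harmless for counters that are only ever summed. Side-by-side sketch (len stands in for io_req->data_xfer_len):

	/* before: pin the CPU across the whole update window */
	stats = per_cpu_ptr(lport->stats, get_cpu());
	stats->InputRequests++;
	stats->InputBytes += len;
	put_cpu();

	/* after: each accessor disables preemption only for its own update */
	this_cpu_inc(lport->stats->InputRequests);
	this_cpu_add(lport->stats->InputBytes, len);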
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 50384b4a817c..2c246e80c1c4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -431,7 +431,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
return 0;
}
-/**
+/*
* This event_callback is called after successful completion of libfc
* initiated target login. bnx2fc can proceed with initiating the session
* establishment.
@@ -482,7 +482,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
}
/*
- * Offlaod process is protected with hba mutex.
+ * Offload process is protected with hba mutex.
* Use the same mutex_lock for upload process too
*/
mutex_lock(&hba->hba_mutex);
@@ -656,9 +656,8 @@ static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
spin_unlock_bh(&hba->hba_lock);
}
-/**
- *bnx2fc_alloc_session_resc - Allocate qp resources for the session
- *
+/*
+ * bnx2fc_alloc_session_resc - Allocate qp resources for the session
*/
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt)
@@ -820,7 +819,7 @@ mem_alloc_failure:
}
/**
- * bnx2i_free_session_resc - free qp resources for the session
+ * bnx2fc_free_session_resc - free qp resources for the session
*
* @hba: adapter structure pointer
* @tgt: bnx2fc_rport structure pointer
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 702dc82c9501..0cc06c2ce0b8 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -4,11 +4,12 @@ config SCSI_BNX2_ISCSI
depends on NET
depends on PCI
depends on (IPV6 || IPV6=n)
+ depends on MMU
select SCSI_ISCSI_ATTRS
select NETDEVICES
select ETHERNET
select NET_VENDOR_BROADCOM
select CNIC
- ---help---
+ help
This driver supports iSCSI offload for the QLogic NetXtreme II
devices.
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 663a63d4dae4..df7d04afce05 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -795,7 +795,7 @@ extern struct cnic_ulp_ops bnx2i_cnic_cb;
extern unsigned int sq_size;
extern unsigned int rq_size;
-extern struct device_attribute *bnx2i_dev_attributes[];
+extern const struct attribute_group *bnx2i_dev_groups[];
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e53ebc5eff85..6c864b093ac9 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -181,7 +181,7 @@ int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
/**
* bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
- * @conn: iscsi connection on which RQ event occurred
+ * @bnx2i_conn: iscsi connection on which RQ event occurred
* @ptr: driver buffer to which RQ buffer contents are to
* be copied
* @len: length of valid data inside RQ buf
@@ -223,7 +223,7 @@ static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
/**
* bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
- * @conn: iscsi connection on which event to post
+ * @bnx2i_conn: iscsi connection on which event to post
* @count: number of RQ buffer being posted to chip
*
* No need to ring hardware doorbell for 57710 family of devices
@@ -258,7 +258,7 @@ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
/**
* bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
- * @conn: iscsi connection to which new SQ entries belong
+ * @bnx2i_conn: iscsi connection to which new SQ entries belong
* @count: number of SQ WQEs to post
*
* SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
@@ -283,7 +283,7 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
/**
* bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
- * @conn: iscsi connection to which new SQ entries belong
+ * @bnx2i_conn: iscsi connection to which new SQ entries belong
* @count: number of SQ WQEs to post
*
* this routine will update SQ driver parameters and ring the doorbell
@@ -320,9 +320,9 @@ static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
/**
* bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
- * @conn: iscsi connection
- * @cmd: driver command structure which is requesting
- * a WQE to sent to chip for further processing
+ * @bnx2i_conn: iscsi connection
+ * @task: transport layer's command structure pointer which is requesting
+ * a WQE to be sent to the chip for further processing
*
* prepare and post an iSCSI Login request WQE to CNIC firmware
*/
@@ -373,7 +373,7 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
/**
* bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
- * @conn: iscsi connection
+ * @bnx2i_conn: iscsi connection
* @mtask: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
@@ -447,7 +447,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
/**
* bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
- * @conn: iscsi connection
+ * @bnx2i_conn: iscsi connection
* @mtask: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
@@ -495,7 +495,7 @@ int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
/**
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
- * @conn: iscsi connection
+ * @bnx2i_conn: iscsi connection
* @cmd: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
@@ -517,9 +517,9 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
/**
* bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
- * @conn: iscsi connection
- * @cmd: driver command structure which is requesting
- * a WQE to sent to chip for further processing
+ * @bnx2i_conn: iscsi connection
+ * @task: transport layer's command structure pointer which is
+ * requesting a WQE to be sent to the chip for further processing
* @datap: payload buffer pointer
* @data_len: payload data length
* @unsol: indicates whether nopout pdu is unsolicited pdu or
@@ -579,9 +579,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
/**
* bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
- * @conn: iscsi connection
- * @cmd: driver command structure which is requesting
- * a WQE to sent to chip for further processing
+ * @bnx2i_conn: iscsi connection
+ * @task: transport layer's command structure pointer which is
+ * requesting a WQE to be sent to the chip for further processing
*
* prepare and post logout request WQE to CNIC firmware
*/
@@ -678,7 +678,8 @@ void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
/**
* bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware
- * @data: endpoint (transport handle) structure pointer
+ * @t: timer context used to fetch the endpoint (transport
+ * handle) structure pointer
*
* routine to handle connection offload/destroy request timeout
*/
@@ -1662,7 +1663,7 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
/**
* bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
- * @conn: iscsi connection
+ * @bnx2i_conn: iscsi connection
*
* Firmware advances RQ producer index for every unsolicited PDU even if
* payload data length is '0'. This function makes corresponding
@@ -1885,7 +1886,9 @@ int bnx2i_percpu_io_thread(void *arg)
/**
* bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread
+ * @session: iscsi session
* @bnx2i_conn: bnx2i connection
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
*
* this function is called by generic KCQ handler to queue all pending cmd
* completion CQEs
@@ -1915,7 +1918,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_unlock(&session->back_lock);
- p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
+ p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc)));
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
rc = -EINVAL;
@@ -1974,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2203,10 +2206,8 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
{
struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
- char warn_notice[] = "iscsi_warning";
- char error_notice[] = "iscsi_error";
- char additional_notice[64];
- char *message;
+ const char *additional_notice = "";
+ const char *message;
int need_recovery;
u64 err_mask64;
@@ -2221,133 +2222,132 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
if (err_mask64 & iscsi_error_mask) {
need_recovery = 0;
- message = warn_notice;
+ message = "iscsi_warning";
} else {
need_recovery = 1;
- message = error_notice;
+ message = "iscsi_error";
}
switch (iscsi_err->completion_status) {
case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
- strcpy(additional_notice, "hdr digest err");
+ additional_notice = "hdr digest err";
break;
case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
- strcpy(additional_notice, "data digest err");
+ additional_notice = "data digest err";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
- strcpy(additional_notice, "wrong opcode rcvd");
+ additional_notice = "wrong opcode rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
- strcpy(additional_notice, "AHS len > 0 rcvd");
+ additional_notice = "AHS len > 0 rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
- strcpy(additional_notice, "invalid ITT rcvd");
+ additional_notice = "invalid ITT rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
- strcpy(additional_notice, "wrong StatSN rcvd");
+ additional_notice = "wrong StatSN rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
- strcpy(additional_notice, "wrong DataSN rcvd");
+ additional_notice = "wrong DataSN rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
- strcpy(additional_notice, "pend R2T violation");
+ additional_notice = "pend R2T violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
- strcpy(additional_notice, "ERL0, UO");
+ additional_notice = "ERL0, UO";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
- strcpy(additional_notice, "ERL0, U1");
+ additional_notice = "ERL0, U1";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
- strcpy(additional_notice, "ERL0, U2");
+ additional_notice = "ERL0, U2";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
- strcpy(additional_notice, "ERL0, U3");
+ additional_notice = "ERL0, U3";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
- strcpy(additional_notice, "ERL0, U4");
+ additional_notice = "ERL0, U4";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
- strcpy(additional_notice, "ERL0, U5");
+ additional_notice = "ERL0, U5";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
- strcpy(additional_notice, "ERL0, U6");
+ additional_notice = "ERL0, U6";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
- strcpy(additional_notice, "invalid resi len");
+ additional_notice = "invalid resi len";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
- strcpy(additional_notice, "MRDSL violation");
+ additional_notice = "MRDSL violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
- strcpy(additional_notice, "F-bit not set");
+ additional_notice = "F-bit not set";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
- strcpy(additional_notice, "invalid TTT");
+ additional_notice = "invalid TTT";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
- strcpy(additional_notice, "invalid DataSN");
+ additional_notice = "invalid DataSN";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
- strcpy(additional_notice, "burst len violation");
+ additional_notice = "burst len violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
- strcpy(additional_notice, "buf offset violation");
+ additional_notice = "buf offset violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
- strcpy(additional_notice, "invalid LUN field");
+ additional_notice = "invalid LUN field";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
- strcpy(additional_notice, "invalid R2TSN field");
+ additional_notice = "invalid R2TSN field";
break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
- strcpy(additional_notice, "invalid cmd len1");
+ additional_notice = "invalid cmd len1";
break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
- strcpy(additional_notice, "invalid cmd len2");
+ additional_notice = "invalid cmd len2";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
- strcpy(additional_notice,
- "pend r2t exceeds MaxOutstandingR2T value");
+ additional_notice = "pend r2t exceeds MaxOutstandingR2T value";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
- strcpy(additional_notice, "TTT is rsvd");
+ additional_notice = "TTT is rsvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
- strcpy(additional_notice, "MBL violation");
+ additional_notice = "MBL violation";
break;
#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
- strcpy(additional_notice, "data seg len != 0");
+ additional_notice = "data seg len != 0";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
- strcpy(additional_notice, "reject pdu len error");
+ additional_notice = "reject pdu len error";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
- strcpy(additional_notice, "async pdu len error");
+ additional_notice = "async pdu len error";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
- strcpy(additional_notice, "nopin pdu len error");
+ additional_notice = "nopin pdu len error";
break;
#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
case BNX2_ERR_PEND_R2T_IN_CLEANUP:
- strcpy(additional_notice, "pend r2t in cleanup");
+ additional_notice = "pend r2t in cleanup";
break;
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
- strcpy(additional_notice, "IP fragments rcvd");
+ additional_notice = "IP fragments rcvd";
break;
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
- strcpy(additional_notice, "IP options error");
+ additional_notice = "IP options error";
break;
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
- strcpy(additional_notice, "urgent flag error");
+ additional_notice = "urgent flag error";
break;
default:
printk(KERN_ALERT "iscsi_err - unknown err %x\n",
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ printk(KERN_ALERT "conn destroy- error hba mismatch\n");
return;
}
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
return;
}
@@ -2466,8 +2466,9 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
/**
* bnx2i_indicate_kcqe - process iscsi conn update completion KCQE
- * @hba: adapter structure pointer
- * @update_kcqe: kcqe pointer
+ * @context: adapter structure pointer
+ * @kcqe: kcqe pointer
+ * @num_cqe: number of kcqes to process
*
* Generic KCQ event handler/dispatcher
*/
@@ -2614,8 +2615,7 @@ static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
/**
* bnx2i_cm_remote_close - process received TCP FIN
- * @hba: adapter structure pointer
- * @update_kcqe: kcqe pointer
+ * @cm_sk: cnic sock structure pointer
*
* function callback exported via bnx2i - cnic driver interface to indicate
* async TCP events such as FIN
@@ -2631,8 +2631,7 @@ static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
/**
* bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
- * @hba: adapter structure pointer
- * @update_kcqe: kcqe pointer
+ * @cm_sk: cnic sock structure pointer
*
* function callback exported via bnx2i - cnic driver interface to
* indicate async TCP events (RST) sent by the peer.
@@ -2669,10 +2668,9 @@ static int bnx2i_send_nl_mesg(void *context, u32 msg_type,
}
-/**
+/*
* bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
* carrying callback function pointers
- *
*/
struct cnic_ulp_ops bnx2i_cnic_cb = {
.cnic_init = bnx2i_ulp_init,
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 4ebcda8d9500..2b3f0c10478e 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -73,7 +73,7 @@ DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
- * @cnic: Corresponding cnic device
+ * @dev: Corresponding cnic device
*
* This function identifies the NX2 device type and sets appropriate
* queue mailbox register access method, 5709 requires driver to
@@ -474,8 +474,6 @@ static int __init bnx2i_mod_init(void)
if (sq_size && !is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
- mutex_init(&bnx2i_dev_lock);
-
bnx2i_scsi_xport_template =
iscsi_register_transport(&bnx2i_iscsi_transport);
if (!bnx2i_scsi_xport_template) {
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 0b28d44d3573..a3c800e04a2e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -228,7 +228,7 @@ static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
/**
* bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
* @hba: pointer to adapter instance
- * @conn: pointer to iscsi connection
+ * @bnx2i_conn: pointer to iscsi connection
* @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
*
* update iscsi cid table entry with connection pointer. This enables
@@ -463,7 +463,6 @@ static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
* bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
* @hba: adapter instance pointer
* @session: iscsi session pointer
- * @cmd: iscsi command structure
*/
static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
struct iscsi_session *session)
@@ -582,8 +581,7 @@ static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
/**
* bnx2i_drop_session - notifies iscsid of connection error.
- * @hba: adapter instance pointer
- * @session: iscsi session pointer
+ * @cls_session: iscsi cls session pointer
*
* This notifies iscsid that there is an error, so it can initiate
* recovery.
@@ -793,7 +791,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
return NULL;
shost->dma_boundary = cnic->pcidev->dma_mask;
shost->transportt = bnx2i_scsi_xport_template;
- shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+ shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = 512;
shost->max_cmd_len = 16;
@@ -911,7 +909,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
{
struct Scsi_Host *shost = hba->shost;
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
@@ -1173,10 +1171,8 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
spin_unlock_bh(&conn->session->back_lock);
- spin_unlock_bh(&conn->session->frwd_lock);
wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
- spin_lock_bh(&conn->session->frwd_lock);
spin_lock_bh(&conn->session->back_lock);
}
bnx2i_iscsi_unmap_sg_list(task->dd_data);
@@ -1277,7 +1273,8 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
/**
* bnx2i_session_create - create a new iscsi session
- * @cmds_max: max commands supported
+ * @ep: pointer to iscsi endpoint
+ * @cmds_max: user specified maximum commands
* @qdepth: scsi queue depth to support
* @initial_cmdsn: initial iscsi CMDSN to be used for this session
*
@@ -1423,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
* Forcefully terminate all in progress connection recovery at the
* earliest, either in bind(), send_pdu(LOGIN), or conn_start()
*/
- if (bnx2i_adapter_ready(hba))
- return -EIO;
+ if (bnx2i_adapter_ready(hba)) {
+ ret_code = -EIO;
+ goto put_ep;
+ }
bnx2i_ep = ep->dd_data;
if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
/* Peer disconnect via' FIN or RST */
- return -EINVAL;
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
if (bnx2i_ep->hba != hba) {
/* Error - TCP connection does not belong to this device
@@ -1444,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
"belong to hba (%s)\n",
hba->netdev->name);
- return -EEXIST;
+ ret_code = -EEXIST;
+ goto put_ep;
}
bnx2i_ep->conn = bnx2i_conn;
bnx2i_conn->ep = bnx2i_ep;
@@ -1461,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
bnx2i_put_rq_buf(bnx2i_conn, 0);
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+put_ep:
+ iscsi_put_endpoint(ep);
return ret_code;
}
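
Every exit from bnx2i_conn_bind() must now drop the endpoint reference, hence the funnelling of all failure paths through the single put_ep label. The shape, as a hedged sketch with hypothetical step helpers:

	int ret = 0;

	ret = check_adapter(ep);	/* hypothetical helper */
	if (ret)
		goto put_ep;

	ret = do_bind(ep);		/* hypothetical helper */
	if (ret)
		goto put_ep;

put_ep:
	iscsi_put_endpoint(ep);		/* dropped on success and failure alike */
	return ret;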
@@ -1715,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
* on what transpired in the TCP layer; different targets behave
@@ -1971,7 +1977,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
/**
* bnx2i_ep_tcp_conn_active - check EP state transition
- * @ep: endpoint pointer
+ * @bnx2i_ep: endpoint pointer
*
* check if underlying TCP connection is active
*/
@@ -2014,9 +2020,9 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
}
-/*
+/**
* bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
- * @ep: TCP connection (bnx2i endpoint) handle
+ * @bnx2i_ep: TCP connection (bnx2i endpoint) handle
*
* executes TCP connection teardown process
*/
@@ -2116,7 +2122,6 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
struct bnx2i_endpoint *bnx2i_ep;
struct bnx2i_conn *bnx2i_conn = NULL;
- struct iscsi_conn *conn = NULL;
struct bnx2i_hba *hba;
bnx2i_ep = ep->dd_data;
@@ -2129,11 +2134,8 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
msleep(250);
- if (bnx2i_ep->conn) {
+ if (bnx2i_ep->conn)
bnx2i_conn = bnx2i_ep->conn;
- conn = bnx2i_conn->cls_conn->dd_data;
- iscsi_suspend_queue(conn);
- }
hba = bnx2i_ep->hba;
mutex_lock(&hba->net_dev_lock);
@@ -2171,8 +2173,8 @@ out:
/**
* bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
- * @buf: pointer to buffer containing iscsi path message
- *
+ * @shost: scsi host pointer
+ * @params: pointer to buffer containing iscsi path message
*/
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
@@ -2264,8 +2266,9 @@ static struct scsi_host_template bnx2i_host_template = {
.cmd_per_lun = 128,
.this_id = -1,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
- .shost_attrs = bnx2i_dev_attributes,
+ .shost_groups = bnx2i_dev_groups,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
struct iscsi_transport bnx2i_iscsi_transport = {
@@ -2279,6 +2282,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = bnx2i_conn_destroy,
.attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 6d56fd60cb2b..d6b0bbb5176b 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -30,6 +30,7 @@ static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
/**
* bnx2i_show_sq_info - returns currently configured send queue (SQ) size
* @dev: device pointer
+ * @attr: device attribute (unused)
* @buf: buffer to return current SQ size parameter
*
* Returns current SQ size parameter; this parameter determines the number
@@ -47,6 +48,7 @@ static ssize_t bnx2i_show_sq_info(struct device *dev,
/**
* bnx2i_set_sq_info - update send queue (SQ) size parameter
* @dev: device pointer
+ * @attr: device attribute (unused)
* @buf: buffer to return current SQ size parameter
* @count: parameter buffer size
*
@@ -87,6 +89,7 @@ skip_config:
/**
* bnx2i_show_ccell_info - returns command cell (HQ) size
* @dev: device pointer
+ * @attr: device attribute (unused)
* @buf: buffer to return current SQ size parameter
*
* returns per-connection TCP history queue size parameter
@@ -101,8 +104,9 @@ static ssize_t bnx2i_show_ccell_info(struct device *dev,
/**
- * bnx2i_get_link_state - set command cell (HQ) size
+ * bnx2i_set_ccell_info - set command cell (HQ) size
* @dev: device pointer
+ * @attr: device attribute (unused)
* @buf: buffer to return current SQ size parameter
* @count: parameter buffer size
*
@@ -138,8 +142,17 @@ static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
bnx2i_show_ccell_info, bnx2i_set_ccell_info);
-struct device_attribute *bnx2i_dev_attributes[] = {
- &dev_attr_sq_size,
- &dev_attr_num_ccell,
+static struct attribute *bnx2i_dev_attributes[] = {
+ &dev_attr_sq_size.attr,
+ &dev_attr_num_ccell.attr,
+ NULL
+};
+
+static const struct attribute_group bnx2i_dev_attr_group = {
+ .attrs = bnx2i_dev_attributes
+};
+
+const struct attribute_group *bnx2i_dev_groups[] = {
+ &bnx2i_dev_attr_group,
NULL
};
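
The open-coded group/array pair above is the long form of a standard sysfs idiom; ATTRIBUTE_GROUPS() from <linux/sysfs.h> would generate both structures from the _attrs array:

	static struct attribute *bnx2i_dev_attrs[] = {
		&dev_attr_sq_size.attr,
		&dev_attr_num_ccell.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(bnx2i_dev);	/* emits bnx2i_dev_group and bnx2i_dev_groups */

The macro is not used here because it defines the groups array static, while bnx2i_dev_groups is declared extern in bnx2i.h and referenced from the host template in bnx2i_iscsi.c.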
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index ed5f4a6ae270..7ab29eaec6f3 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -44,7 +44,6 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
-static DEFINE_MUTEX(ch_mutex);
static int init = 1;
module_param(init, int, 0444);
MODULE_PARM_DESC(init, \
@@ -64,7 +63,7 @@ static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
-static int debug = 0;
+static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
@@ -199,8 +198,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
-
- if (driver_byte(result) == DRIVER_SENSE) {
+ if (result < 0)
+ return result;
+ if (scsi_sense_valid(&sshdr)) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
@@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
u_char *buffer;
int result;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if(!buffer)
return -ENOMEM;
@@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch)
int result,id,lun,i;
u_int elem;
- buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -569,6 +569,7 @@ static void ch_destroy(struct kref *ref)
{
scsi_changer *ch = container_of(ref, scsi_changer, ref);
+ ch->device = NULL;
kfree(ch->dt);
kfree(ch);
}
@@ -590,20 +591,22 @@ ch_open(struct inode *inode, struct file *file)
scsi_changer *ch;
int minor = iminor(inode);
- mutex_lock(&ch_mutex);
spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
- if (NULL == ch || scsi_device_get(ch->device)) {
+ if (ch == NULL || !kref_get_unless_zero(&ch->ref)) {
spin_unlock(&ch_index_lock);
- mutex_unlock(&ch_mutex);
return -ENXIO;
}
- kref_get(&ch->ref);
spin_unlock(&ch_index_lock);
-
+ if (scsi_device_get(ch->device)) {
+ kref_put(&ch->ref, ch_destroy);
+ return -ENXIO;
+ }
+ /* Synchronize with ch_probe() */
+ mutex_lock(&ch->lock);
file->private_data = ch;
- mutex_unlock(&ch_mutex);
+ mutex_unlock(&ch->lock);
return 0;
}
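
ch_open() now follows the standard lookup-then-reference pattern: the object reference is taken under the index lock with kref_get_unless_zero(), so a racing final kref_put() can never hand out a dying object. The generic shape, as a sketch:

	spin_lock(&index_lock);
	obj = idr_find(&index_idr, minor);
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL;		/* lost the race with the last put */
	spin_unlock(&index_lock);
	if (!obj)
		return -ENXIO;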
@@ -615,6 +618,12 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
return 0;
}
+struct changer_element_status32 {
+ int ces_type;
+ compat_uptr_t ces_data;
+};
+#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32)
+
static long ch_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -745,7 +754,20 @@ static long ch_ioctl(struct file *file,
return ch_gstatus(ch, ces.ces_type, ces.ces_data);
}
+#ifdef CONFIG_COMPAT
+ case CHIOGSTATUS32:
+ {
+ struct changer_element_status32 ces32;
+
+ if (copy_from_user(&ces32, argp, sizeof(ces32)))
+ return -EFAULT;
+ if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
+ return -EINVAL;
+ return ch_gstatus(ch, ces32.ces_type,
+ compat_ptr(ces32.ces_data));
+ }
+#endif
case CHIOGELEM:
{
struct changer_get_element cge;
@@ -761,7 +783,7 @@ static long ch_ioctl(struct file *file,
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);
@@ -855,59 +877,11 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, cmd, argp);
+ return scsi_ioctl(ch->device, file->f_mode, cmd, argp);
}
}
-#ifdef CONFIG_COMPAT
-
-struct changer_element_status32 {
- int ces_type;
- compat_uptr_t ces_data;
-};
-#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32)
-
-static long ch_ioctl_compat(struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- scsi_changer *ch = file->private_data;
- int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
- file->f_flags & O_NDELAY);
- if (retval)
- return retval;
-
- switch (cmd) {
- case CHIOGPARAMS:
- case CHIOGVPARAMS:
- case CHIOPOSITION:
- case CHIOMOVE:
- case CHIOEXCHANGE:
- case CHIOGELEM:
- case CHIOINITELEM:
- case CHIOSVOLTAG:
- /* compatible */
- return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
- case CHIOGSTATUS32:
- {
- struct changer_element_status32 ces32;
- unsigned char __user *data;
-
- if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
- return -EFAULT;
- if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
- return -EINVAL;
-
- data = compat_ptr(ces32.ces_data);
- return ch_gstatus(ch, ces32.ces_type, data);
- }
- default:
- return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
-
- }
-}
-#endif
-
/* ------------------------------------------------------------------------ */
static int ch_probe(struct device *dev)
@@ -938,7 +912,16 @@ static int ch_probe(struct device *dev)
ch->minor = ret;
sprintf(ch->name,"ch%d",ch->minor);
+ ret = scsi_device_get(sd);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sd, "ch%d: failed to get device\n",
+ ch->minor);
+ goto remove_idr;
+ }
+ mutex_init(&ch->lock);
+ kref_init(&ch->ref);
+ ch->device = sd;
class_dev = device_create(ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
@@ -946,24 +929,27 @@ static int ch_probe(struct device *dev)
sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
ch->minor);
ret = PTR_ERR(class_dev);
- goto remove_idr;
+ goto put_device;
}
- mutex_init(&ch->lock);
- kref_init(&ch->ref);
- ch->device = sd;
+ mutex_lock(&ch->lock);
ret = ch_readconfig(ch);
- if (ret)
+ if (ret) {
+ mutex_unlock(&ch->lock);
goto destroy_dev;
+ }
if (init)
ch_init_elem(ch);
+ mutex_unlock(&ch->lock);
dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
destroy_dev:
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+put_device:
+ scsi_device_put(sd);
remove_idr:
idr_remove(&ch_index_idr, ch->minor);
free_ch:
@@ -977,9 +963,11 @@ static int ch_remove(struct device *dev)
spin_lock(&ch_index_lock);
idr_remove(&ch_index_idr, ch->minor);
+ dev_set_drvdata(dev, NULL);
spin_unlock(&ch_index_lock);
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ scsi_device_put(ch->device);
kref_put(&ch->ref, ch_destroy);
return 0;
}
@@ -998,9 +986,7 @@ static const struct file_operations changer_fops = {
.open = ch_open,
.release = ch_release,
.unlocked_ioctl = ch_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ch_ioctl_compat,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
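
compat_ptr_ioctl() is the generic .compat_ioctl helper for drivers whose ioctl arguments are all pointers: it converts the 32-bit user pointer and forwards to ->unlocked_ioctl, which is why the CHIOGSTATUS32 case had to move into ch_ioctl() above. Roughly, from fs/ioctl.c:

	long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		if (!file->f_op->unlocked_ioctl)
			return -ENOIOCTLCMD;

		return file->f_op->unlocked_ioctl(file, cmd,
					(unsigned long)compat_ptr(arg));
	}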
@@ -1042,9 +1028,3 @@ static void __exit exit_ch_module(void)
module_init(init_ch_module);
module_exit(exit_ch_module);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d4c2a2e4c5d4..340785536998 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -404,16 +404,12 @@ static const char * const hostbyte_table[]={
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
-"DID_NEXUS_FAILURE" };
-
-static const char * const driverbyte_table[]={
-"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
-"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
+"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };
const char *scsi_hostbyte_string(int result)
{
+ enum scsi_host_status hb = host_byte(result);
const char *hb_string = NULL;
- int hb = host_byte(result);
if (hb < ARRAY_SIZE(hostbyte_table))
hb_string = hostbyte_table[hb];
@@ -421,17 +417,6 @@ const char *scsi_hostbyte_string(int result)
}
EXPORT_SYMBOL(scsi_hostbyte_string);
-const char *scsi_driverbyte_string(int result)
-{
- const char *db_string = NULL;
- int db = driver_byte(result);
-
- if (db < ARRAY_SIZE(driverbyte_table))
- db_string = driverbyte_table[db];
- return db_string;
-}
-EXPORT_SYMBOL(scsi_driverbyte_string);
-
#define scsi_mlreturn_name(result) { result, #result }
static const struct value_name_pair scsi_mlreturn_arr[] = {
scsi_mlreturn_name(NEEDS_RETRY),
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 950f9cdf0577..e43c5413ce29 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -306,7 +306,7 @@ csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
uint8_t *vpd, csum;
const struct t4_vpd_hdr *v;
/* To get around compilation warning from strstrip */
- char *s;
+ char __always_unused *s;
if (csio_is_valid_vpd(hw))
return 0;
@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
@@ -2939,7 +2939,7 @@ csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
case CSIO_HWE_FW_DLOAD:
csio_set_state(&hw->sm, csio_hws_resetting);
/* Download firmware */
- /* Fall through */
+ fallthrough;
case CSIO_HWE_HBA_RESET:
csio_set_state(&hw->sm, csio_hws_resetting);
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index f24def6c6fd1..86fded97d799 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -148,12 +148,11 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
{
int i;
uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
- uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
+ uint32_t mc_bist_data_pattern_reg;
mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
- mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
@@ -196,7 +195,7 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
{
int i;
uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
- uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
+ uint32_t edc_bist_cmd_data_pattern;
/*
* These macros are missing in the t4_regs.h file.
@@ -208,7 +207,6 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
- edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
@@ -246,7 +244,7 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
* firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries. The memory is transferred as a raw byte sequence
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
* from/to the firmware's memory. If this memory contains data
* structures which contain multi-byte integers, it's the caller's
* responsibility to perform appropriate byte order conversions.
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 8dea7d53788a..ccbded3353bd 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -582,7 +582,7 @@ csio_hw_free(struct csio_hw *hw)
* @hw: The HW module.
* @dev: The device associated with this invocation.
* @probe: Called from probe context or not?
- * @os_pln: Parent lnode if any.
+ * @pln: Parent lnode if any.
*
* Allocates lnode structure via scsi_host_alloc, initializes
* shost, initializes lnode module and registers with SCSI ML
@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);
+MODULE_SOFTDEP("pre: cxgb4");
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 74ff8adc41f7..d5ac93897023 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -619,7 +619,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
struct fc_els_csp *csp;
struct fc_els_cssp *clsp;
enum fw_retval retval;
- __be32 nport_id;
+ __be32 nport_id = 0;
retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
if (retval != FW_SUCCESS) {
@@ -1187,7 +1187,6 @@ csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
break;
case CSIO_LNE_LINK_DOWN:
- /* Fall through */
case CSIO_LNE_DOWN_LINK:
csio_set_state(&ln->sm, csio_lns_uninit);
if (csio_is_phys_ln(ln)) {
@@ -2068,10 +2067,9 @@ csio_ln_exit(struct csio_lnode *ln)
ln->fcfinfo = NULL;
}
-/**
+/*
* csio_lnode_init - Initialize the members of an lnode.
* @ln: lnode
- *
*/
int
csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
index e9c3b045f587..713e13adf4dc 100644
--- a/drivers/scsi/csiostor/csio_rnode.c
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -862,7 +862,7 @@ csio_rnode_devloss_handler(struct csio_rnode *rn)
/**
* csio_rnode_fwevt_handler - Event handler for firmware rnode events.
* @rn: rnode
- *
+ * @fwevt: firmware event to handle
*/
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 00cf33573136..05e1a63e00c3 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -147,9 +147,9 @@ csio_scsi_itnexus_loss_error(uint16_t error)
case FW_ERR_RDEV_LOST:
case FW_ERR_RDEV_LOGO:
case FW_ERR_RDEV_IMPL_LOGO:
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -166,7 +166,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
/* Check for Task Management */
- if (likely(scmnd->SCp.Message == 0)) {
+ if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_tm_flags = 0;
fcp_cmnd->fc_cmdref = 0;
@@ -185,7 +185,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
} else {
memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
- fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
}
}
@@ -933,14 +933,14 @@ csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
* abort for that I/O by the FW crossed each other.
* The FW returned FW_EINVAL. The original I/O would have
* returned with FW_SUCCESS or any other SCSI error.
- * 3. The FW couldnt sent the abort out on the wire, as there
+ * 3. The FW couldn't send the abort out on the wire, as there
* was an I-T nexus loss (link down, remote device logged
* out etc). FW sent back an appropriate IT nexus loss status
* for the abort.
* 4. FW sent an abort, but abort timed out (remote device
* didn't respond). FW replied back with
* FW_SCSI_ABORT_TIMEDOUT.
- * 5. FW couldnt genuinely abort the request for some reason,
+ * 5. FW couldn't genuinely abort the request for some reason,
* and sent us an error.
*
* The first 3 scenarios are treated as successful abort
@@ -1366,9 +1366,9 @@ csio_show_hw_state(struct device *dev,
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (csio_is_hw_ready(hw))
- return snprintf(buf, PAGE_SIZE, "ready\n");
- else
- return snprintf(buf, PAGE_SIZE, "not ready\n");
+ return sysfs_emit(buf, "ready\n");
+
+ return sysfs_emit(buf, "not ready\n");
}
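
sysfs_emit() is the preferred replacement for snprintf(buf, PAGE_SIZE, ...) in show() callbacks: it encodes the assumption that buf is a full, page-aligned sysfs page and WARNs when that does not hold. A hypothetical attribute, for shape:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);	/* bounded by PAGE_SIZE internally */
	}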
/* Device reset */
@@ -1430,7 +1430,7 @@ csio_show_dbg_level(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+ return sysfs_emit(buf, "%x\n", ln->params.log_level);
}
/* Store debug level */
@@ -1460,31 +1460,35 @@ static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
csio_store_dbg_level);
-static struct device_attribute *csio_fcoe_lport_attrs[] = {
- &dev_attr_hw_state,
- &dev_attr_device_reset,
- &dev_attr_disable_port,
- &dev_attr_dbg_level,
+static struct attribute *csio_fcoe_lport_attrs[] = {
+ &dev_attr_hw_state.attr,
+ &dev_attr_device_reset.attr,
+ &dev_attr_disable_port.attr,
+ &dev_attr_dbg_level.attr,
NULL,
};
+ATTRIBUTE_GROUPS(csio_fcoe_lport);
+
static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+ return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes);
}
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
-static struct device_attribute *csio_fcoe_vport_attrs[] = {
- &dev_attr_num_reg_rnodes,
- &dev_attr_dbg_level,
+static struct attribute *csio_fcoe_vport_attrs[] = {
+ &dev_attr_num_reg_rnodes.attr,
+ &dev_attr_dbg_level.attr,
NULL,
};
+ATTRIBUTE_GROUPS(csio_fcoe_vport);
+
static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
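ATTRIBUTE_GROUPS() generates the attribute_group wrappers that the shost_groups template field (converted at the end of this file) expects in place of the old bare device_attribute arrays. An approximate sketch of what ATTRIBUTE_GROUPS(csio_fcoe_lport) expands to:

static const struct attribute_group csio_fcoe_lport_group = {
	.attrs = csio_fcoe_lport_attrs,
};

static const struct attribute_group *csio_fcoe_lport_groups[] = {
	&csio_fcoe_lport_group,
	NULL,
};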
@@ -1720,7 +1724,7 @@ out:
}
cmnd->result = (((host_status) << 16) | scsi_status);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
/* Wake up waiting threads */
csio_scsi_cmnd(req) = NULL;
@@ -1748,7 +1752,7 @@ csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
}
cmnd->result = (((host_status) << 16) | scsi_status);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
csio_scsi_cmnd(req) = NULL;
CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
} else {
@@ -1786,7 +1790,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
struct csio_scsi_qset *sqset;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
- sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
+ sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
nr = fc_remote_port_chkready(rport);
if (nr) {
@@ -1851,7 +1855,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
/* Needed during abort */
cmnd->host_scribble = (unsigned char *)ioreq;
- cmnd->SCp.Message = 0;
+ csio_priv(cmnd)->fc_tm_flags = 0;
/* Kick off SCSI IO SM on the ioreq */
spin_lock_irqsave(&hw->lock, flags);
@@ -1859,7 +1863,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
- csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
ioreq, retval);
CSIO_INC_STATS(scsim, n_busy_error);
goto err_put_req;
@@ -1876,7 +1880,7 @@ err:
return rv;
err_done:
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
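scsi_done() replaces the old per-command callback pointer: the midlayer now owns completion routing, so LLDs call the helper directly instead of storing and invoking cmnd->scsi_done. A sketch of a completion path after the conversion (the helper name is illustrative):

static void example_complete(struct scsi_cmnd *cmnd, u8 host_status,
			     u8 scsi_status)
{
	cmnd->result = (host_status << 16) | scsi_status;
	scsi_done(cmnd);	/* routes back through blk-mq completion */
}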
@@ -1979,7 +1983,7 @@ inval_scmnd:
spin_unlock_irq(&hw->lock);
cmnd->result = (DID_ERROR << 16);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return FAILED;
}
@@ -1989,13 +1993,13 @@ inval_scmnd:
csio_info(hw,
"Aborted SCSI command to (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->request->tag);
+ scsi_cmd_to_rq(cmnd)->tag);
return SUCCESS;
} else {
csio_info(hw,
"Failed to abort SCSI command, (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->request->tag);
+ scsi_cmd_to_rq(cmnd)->tag);
return FAILED;
}
}
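Likewise, scsi_cmd_to_rq() is the accessor for the request backing a scsi_cmnd, replacing direct reads of the removed cmnd->request pointer. A small sketch in the style of the messages above (the function name is illustrative):

static void example_log_tag(struct csio_hw *hw, struct scsi_cmnd *cmnd)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);

	csio_info(hw, "cmd (%d:%llu) tag %u, submitted on cpu %d\n",
		  cmnd->device->id, cmnd->device->lun,
		  rq->tag, blk_mq_rq_cpu(rq));
}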
@@ -2022,7 +2026,7 @@ csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
req, req->wr_status);
/* Cache FW return status */
- cmnd->SCp.Status = req->wr_status;
+ csio_priv(cmnd)->wr_status = req->wr_status;
/* Special handling based on FCP response */
@@ -2045,7 +2049,7 @@ csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
/* Modify return status if flags indicate success */
if (flags & FCP_RSP_LEN_VAL)
if (rsp_info->rsp_code == FCP_TMF_CMPL)
- cmnd->SCp.Status = FW_SUCCESS;
+ csio_priv(cmnd)->wr_status = FW_SUCCESS;
csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
}
@@ -2121,9 +2125,9 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
csio_scsi_cmnd(ioreq) = cmnd;
cmnd->host_scribble = (unsigned char *)ioreq;
- cmnd->SCp.Status = 0;
+ csio_priv(cmnd)->wr_status = 0;
- cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
/*
@@ -2174,9 +2178,10 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
}
/* LUN reset returned, check cached status */
- if (cmnd->SCp.Status != FW_SUCCESS) {
+ if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
- cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ cmnd->device->id, cmnd->device->lun,
+ csio_priv(cmnd)->wr_status);
goto fail;
}
@@ -2267,6 +2272,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
+ .cmd_size = sizeof(struct csio_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
@@ -2277,7 +2283,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .shost_attrs = csio_fcoe_lport_attrs,
+ .shost_groups = csio_fcoe_lport_groups,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
@@ -2296,7 +2302,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .shost_attrs = csio_fcoe_vport_attrs,
+ .shost_groups = csio_fcoe_vport_groups,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h
index 2257c3dcf724..39dda3c88f0d 100644
--- a/drivers/scsi/csiostor/csio_scsi.h
+++ b/drivers/scsi/csiostor/csio_scsi.h
@@ -188,6 +188,16 @@ struct csio_scsi_level_data {
uint64_t oslun;
};
+struct csio_cmd_priv {
+ uint8_t fc_tm_flags; /* task management flags */
+ uint16_t wr_status;
+};
+
+static inline struct csio_cmd_priv *csio_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
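csio_priv() works because .cmd_size = sizeof(struct csio_cmd_priv) in the host templates makes the midlayer allocate that many extra bytes behind every scsi_cmnd; scsi_cmd_priv() returns the address just past the command. A sketch of the idiom that replaces the old scmnd->SCp scratch fields:

static void example_init_priv(struct scsi_cmnd *cmd)
{
	struct csio_cmd_priv *priv = csio_priv(cmd);

	priv->fc_tm_flags = 0;	/* regular I/O, no task management */
	priv->wr_status = 0;	/* cleared before the WR is issued */
}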
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 0ca695110f54..fe0355c964bc 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -808,7 +808,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
csio_q_eqid(hw, i) = CSIO_MAX_QID;
}
- /* fall through */
+ fallthrough;
case CSIO_INGRESS:
if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
csio_wr_cleanup_iq_ftr(hw, i);
@@ -830,6 +830,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
if (flq_idx != -1)
csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
}
+ break;
default:
break;
}
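fallthrough is the pseudo-keyword (an attribute on supporting compilers) that replaces the old /* fall through */ comments, letting -Wimplicit-fallthrough verify intent; the added break makes the last case's behavior explicit rather than implied. The shape of the pattern, in a sketch reusing the queue accessors above:

static void example_teardown(struct csio_hw *hw, int type, int i)
{
	switch (type) {
	case CSIO_EGRESS:
		csio_q_eqid(hw, i) = CSIO_MAX_QID;
		fallthrough;	/* egress teardown also covers ingress */
	case CSIO_INGRESS:
		csio_q_iqid(hw, i) = CSIO_MAX_QID;
		break;		/* explicit: do not fall into default */
	default:
		break;
	}
}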
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index 3e4b644249cb..e20e6f3bfe64 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -8,5 +8,5 @@ config SCSI_CXGB3_ISCSI
select CHELSIO_T3
select CHELSIO_LIB
select SCSI_ISCSI_ATTRS
- ---help---
+ help
This driver supports iSCSI offload for the Chelsio T3 devices.
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 524cdbcd29aa..ff9d4287937a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -98,6 +98,7 @@ static struct scsi_host_template cxgb3i_host_template = {
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport cxgb3i_iscsi_transport = {
@@ -117,6 +118,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
@@ -361,7 +363,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
/* len includes the length of any HW ULP additions */
req->len = htonl(len);
/* V_TX_ULP_SUBMODE sets both the mode and submode */
- req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
+ req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) |
V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
req->sndseq = htonl(csk->snd_nxt);
req->param = htonl(V_TX_PORT(l2t->smt_idx));
@@ -375,10 +377,8 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
}
}
-/**
+/*
* push_tx_frames -- start transmit
- * @c3cn: the offloaded connection
- * @req_completion: request wr_ack or not
*
* Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
* connection's send queue and sends them on to T3. Must be called with the
@@ -442,7 +442,7 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
req_completion = 1;
csk->wr_una_cred = 0;
}
- len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
+ len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
make_tx_data_wr(csk, skb, len, req_completion);
csk->snd_nxt += len;
cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
@@ -645,7 +645,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
int *need_rst)
{
switch (abort_reason) {
- case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
case CPL_ERR_XMIT_TIMEDOUT:
@@ -886,11 +886,6 @@ free_cpl_skbs:
return -ENOMEM;
}
-/**
- * release_offload_resources - release offload resource
- * @c3cn: the offloaded iscsi tcp connection.
- * Release resources held by an offload connection (TID, L2T entry, etc.)
- */
static void l2t_put(struct cxgbi_sock *csk)
{
struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
@@ -902,6 +897,10 @@ static void l2t_put(struct cxgbi_sock *csk)
}
}
+/*
+ * release_offload_resources - release offload resources
+ * Release resources held by an offload connection (TID, L2T entry, etc.)
+ */
static void release_offload_resources(struct cxgbi_sock *csk)
{
struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
@@ -959,6 +958,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
struct sk_buff *skb = NULL;
+ int ret;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
@@ -979,16 +979,16 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_sock;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
if (!skb) {
- cxgb3_free_atid(t3dev, csk->atid);
- cxgbi_sock_put(csk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_atid;
}
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
@@ -1010,6 +1010,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
+
+free_atid:
+ cxgb3_free_atid(t3dev, csk->atid);
+put_sock:
+ cxgbi_sock_put(csk);
+ l2t_release(t3dev, csk->l2t);
+ csk->l2t = NULL;
+
+ return ret;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
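The init_act_open() rework fixes resource leaks on its error paths by converting early returns into the usual goto-unwind ladder: releases run in reverse order of acquisition, and each failure jumps to the label that undoes everything acquired so far (including the L2T entry, which the early returns previously leaked). In outline, with placeholder helper names:

static int example_open(struct cxgbi_sock *csk)
{
	int ret;

	csk->atid = acquire_atid(csk);		/* first resource */
	if (csk->atid < 0) {
		ret = -EINVAL;
		goto put_sock;			/* only the ref to drop */
	}

	if (!acquire_request(csk)) {		/* second resource */
		ret = -ENOMEM;
		goto free_atid;			/* undo atid, then ref */
	}
	return 0;

free_atid:
	release_atid(csk);
put_sock:
	cxgbi_sock_put(csk);
	return ret;
}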
@@ -1170,7 +1179,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
}
/**
- * cxgb3i_setup_conn_digest - setup conn. digest setting
+ * ddp_setup_conn_digest - setup conn. digest setting
* @csk: cxgb tcp socket
* @tid: connection id
* @hcrc: header digest enabled
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index d1f1baba3285..63c8a0f3cd0c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -2,11 +2,13 @@
config SCSI_CXGB4_ISCSI
tristate "Chelsio T4 iSCSI support"
depends on PCI && INET && (IPV6 || IPV6=n)
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on THERMAL || !THERMAL
depends on ETHERNET
+ depends on TLS || TLS=n
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
select SCSI_ISCSI_ATTRS
- ---help---
+ help
This driver supports iSCSI offload for the Chelsio T4 devices.
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index bc1086ae6835..c07d2e3b4bcf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -116,6 +116,7 @@ static struct scsi_host_template cxgb4i_host_template = {
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
@@ -134,6 +135,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
@@ -197,7 +199,10 @@ static inline bool is_ofld_imm(const struct sk_buff *skb)
if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
len += sizeof(struct fw_ofld_tx_data_wr);
- return len <= MAX_IMM_TX_PKT_LEN;
+ if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO)))
+ len += sizeof(struct cpl_tx_data_iso);
+
+ return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN);
}
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -249,7 +254,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -277,7 +282,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else {
struct cpl_t6_act_open_req *req =
(struct cpl_t6_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -641,7 +646,10 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[8].mnemonic = 0;
flowc->mnemval[8].val = 0;
flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
- flowc->mnemval[8].val = 16384;
+ if (csk->cdev->skb_iso_txhdr)
+ flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB);
+ else
+ flowc->mnemval[8].val = cpu_to_be32(16128);
#ifdef CONFIG_CHELSIO_T4_DCB
flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
if (vlan == CPL_L2T_VLAN_NONE) {
@@ -667,38 +675,86 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
return flowclen16;
}
-static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
- int dlen, int len, u32 credits, int compl)
+static void
+cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl)
+{
+ struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head;
+ u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE);
+ u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE);
+ u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE);
+ u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1;
+ u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
+
+ cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
+ CPL_TX_DATA_ISO_FIRST_V(fslice) |
+ CPL_TX_DATA_ISO_LAST_V(lslice) |
+ CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
+ CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
+ CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
+ CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) |
+ CPL_TX_DATA_ISO_SCSI_V(pdu_type));
+
+ cpl->ahs_len = info->ahs;
+ cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4));
+ cpl->burst_size = cpu_to_be32(info->burst_size);
+ cpl->len = cpu_to_be32(info->len);
+ cpl->reserved2_seglen_offset =
+ cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset));
+ cpl->datasn_offset = cpu_to_be32(info->datasn_offset);
+ cpl->buffer_offset = cpu_to_be32(info->buffer_offset);
+ cpl->reserved3 = cpu_to_be32(0);
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, "
+ "burst_size %u, iso_len %u\n",
+ info->flags, info->op, info->ahs, info->num_pdu,
+ info->mpdu, info->burst_size << 2, info->len);
+}
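The submode consumed here carries the same two digest bits that tx_skb_setmode() packs into the skb control block later in this patch: bit 0 is the header digest, bit 1 the data digest, which is why HDRCRC_V takes (submode & 1) and PLDCRC_V takes ((submode >> 1) & 1). A sketch of that encoding, with the bit names assumed:

#define EXAMPLE_SUBMODE_HCRC	0x1	/* iSCSI header digest enabled */
#define EXAMPLE_SUBMODE_DCRC	0x2	/* iSCSI data digest enabled */

static u32 example_submode(bool hcrc, bool dcrc)
{
	u32 submode = 0;

	if (hcrc)
		submode |= EXAMPLE_SUBMODE_HCRC;
	if (dcrc)
		submode |= EXAMPLE_SUBMODE_DCRC;
	return submode;	/* kept in cxgbi_skcb_tx_ulp_mode(skb) & 0x3 */
}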
+
+static void
+cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
+ int len, u32 credits, int compl)
{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct fw_ofld_tx_data_wr *req;
- unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
- unsigned int wr_ulp_mode = 0, val;
- bool imm = is_ofld_imm(skb);
-
- req = __skb_push(skb, sizeof(*req));
-
- if (imm) {
- req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL_F |
- FW_WR_IMMDLEN_V(dlen));
- req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
- FW_WR_LEN16_V(credits));
- } else {
- req->op_to_immdlen =
- cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL_F |
- FW_WR_IMMDLEN_V(0));
- req->flowid_len16 =
- cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
- FW_WR_LEN16_V(credits));
+ struct cpl_tx_data_iso *cpl;
+ u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
+ u32 wr_ulp_mode = 0;
+ u32 hdr_size = sizeof(*req);
+ u32 opcode = FW_OFLD_TX_DATA_WR;
+ u32 immlen = 0;
+ u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
+ T6_TX_FORCE_F;
+
+ if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
+ hdr_size += sizeof(struct cpl_tx_data_iso);
+ opcode = FW_ISCSI_TX_DATA_WR;
+ immlen += sizeof(struct cpl_tx_data_iso);
+ submode |= 8;
}
+
+ if (is_ofld_imm(skb))
+ immlen += dlen;
+
+ req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size);
+ req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
+ FW_WR_COMPL_V(compl) |
+ FW_WR_IMMDLEN_V(immlen));
+ req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+ req->plen = cpu_to_be32(len);
+ cpl = (struct cpl_tx_data_iso *)(req + 1);
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
+ cxgb4i_make_tx_iso_cpl(skb, cpl);
+
if (submode)
wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
- FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
- val = skb_peek(&csk->write_queue) ? 0 : 1;
- req->tunnel_to_proxy = htonl(wr_ulp_mode |
- FW_OFLD_TX_DATA_WR_SHOVE_V(val));
- req->plen = htonl(len);
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+
+ req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force |
+ FW_OFLD_TX_DATA_WR_SHOVE_V(1U));
+
if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
@@ -716,30 +772,34 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
if (unlikely(csk->state < CTP_ESTABLISHED ||
csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
- 1 << CXGBI_DBG_PDU_TX,
- "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
- csk, csk->state, csk->flags, csk->tid);
+ 1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
+ csk, csk->state, csk->flags, csk->tid);
return 0;
}
- while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
- int dlen = skb->len;
- int len = skb->len;
- unsigned int credits_needed;
- int flowclen16 = 0;
+ while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
+ struct cxgbi_iso_info *iso_cpl;
+ u32 dlen = skb->len;
+ u32 len = skb->len;
+ u32 iso_cpl_len = 0;
+ u32 flowclen16 = 0;
+ u32 credits_needed;
+ u32 num_pdu = 1, hdr_len;
+
+ if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
+ iso_cpl_len = sizeof(struct cpl_tx_data_iso);
- skb_reset_transport_header(skb);
if (is_ofld_imm(skb))
- credits_needed = DIV_ROUND_UP(dlen, 16);
+ credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
else
- credits_needed = DIV_ROUND_UP(
- 8 * calc_tx_flits_ofld(skb),
- 16);
+ credits_needed =
+ DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) +
+ iso_cpl_len, 16);
if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
- credits_needed += DIV_ROUND_UP(
- sizeof(struct fw_ofld_tx_data_wr),
- 16);
+ credits_needed +=
+ DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16);
/*
* Assumes the initial credits is large enough to support
@@ -754,14 +814,19 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
if (csk->wr_cred < credits_needed) {
log_debug(1 << CXGBI_DBG_PDU_TX,
- "csk 0x%p, skb %u/%u, wr %d < %u.\n",
- csk, skb->len, skb->data_len,
- credits_needed, csk->wr_cred);
+ "csk 0x%p, skb %u/%u, wr %d < %u.\n",
+ csk, skb->len, skb->data_len,
+ credits_needed, csk->wr_cred);
+
+ csk->no_tx_credits++;
break;
}
+
+ csk->no_tx_credits = 0;
+
__skb_unlink(skb, &csk->write_queue);
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
- skb->csum = credits_needed + flowclen16;
+ skb->csum = (__force __wsum)(credits_needed + flowclen16);
csk->wr_cred -= credits_needed;
csk->wr_una_cred += credits_needed;
cxgbi_sock_enqueue_wr(csk, skb);
@@ -771,25 +836,42 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
csk, skb->len, skb->data_len, credits_needed,
csk->wr_cred, csk->wr_una_cred);
+ if (!req_completion &&
+ ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
+ after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
+ req_completion = 1;
+
if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
- len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
- make_tx_data_wr(csk, skb, dlen, len, credits_needed,
- req_completion);
+ u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb);
+
+ if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
+ iso_cpl = (struct cxgbi_iso_info *)skb->head;
+ num_pdu = iso_cpl->num_pdu;
+ hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
+ len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) +
+ (hdr_len * (num_pdu - 1));
+ } else {
+ len += cxgbi_ulp_extra_len(ulp_mode);
+ }
+
+ cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
+ credits_needed, req_completion);
csk->snd_nxt += len;
cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
(csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
struct cpl_close_con_req *req =
(struct cpl_close_con_req *)skb->data;
- req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+
+ req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F);
}
+
total_size += skb->truesize;
t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
- "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
- csk, csk->state, csk->flags, csk->tid, skb, len);
-
+ "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
+ csk, csk->state, csk->flags, csk->tid, skb, len);
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
return total_size;
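Work-request credits are accounted in 16-byte units, so the reworked loop simply folds the optional cpl_tx_data_iso header into the same DIV_ROUND_UP before checking csk->wr_cred. The calculation in isolation (a sketch, not the driver's exact helper):

static u32 example_credits(u32 dlen, bool iso, bool need_hdr)
{
	u32 iso_cpl_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
	/* payload (or flit count * 8) and ISO CPL share one rounding */
	u32 credits = DIV_ROUND_UP(dlen + iso_cpl_len, 16);

	/* the WR header itself is charged separately when present */
	if (need_hdr)
		credits += DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16);
	return credits;
}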
@@ -1053,7 +1135,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
int *need_rst)
{
switch (abort_reason) {
- case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return csk->state > CTP_ESTABLISHED ?
-EPIPE : -ECONNRESET;
@@ -1127,10 +1209,9 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
if (!csk)
goto rel_skb;
- if (csk)
- pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
- (&csk->saddr), (&csk->daddr), csk,
- csk->state, csk->flags, csk->tid, rpl->status);
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+ (&csk->saddr), (&csk->daddr), csk,
+ csk->state, csk->flags, csk->tid, rpl->status);
if (rpl->status == CPL_ERR_ABORT_FAILED)
goto rel_skb;
@@ -2112,10 +2193,30 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
return 0;
}
+static bool is_memfree(struct adapter *adap)
+{
+ u32 io;
+
+ io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (is_t5(adap->params.chip)) {
+ if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F))
+ return false;
+ } else if (io & EXT_MEM_ENABLE_F) {
+ return false;
+ }
+
+ return true;
+}
+
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
struct cxgbi_device *cdev;
struct port_info *pi;
+ struct net_device *ndev;
+ struct adapter *adap;
+ struct tid_info *t;
+ u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH;
+ u32 max_conn = CXGBI_MAX_CONN;
int i, rc;
cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
@@ -2155,14 +2256,40 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
goto err_out;
}
+
+ ndev = cdev->ports[0];
+ adap = netdev2adap(ndev);
+ if (adap) {
+ t = &adap->tids;
+ if (t->ntids <= CXGBI_MAX_CONN)
+ max_conn = t->ntids;
+
+ if (is_memfree(adap)) {
+ cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
+ max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2;
+
+ pr_info("%s: 0x%p, tid %u, SO adapter.\n",
+ ndev->name, cdev, t->ntids);
+ }
+ } else {
+ pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
+ }
+
+ /* ISO is enabled in T5/T6 firmware version >= 1.13.43.0 */
+ if (!is_t4(lldi->adapter_type) &&
+ (lldi->fw_vers >= 0x10d2b00) &&
+ !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
+ cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);
+
rc = cxgb4i_ofld_init(cdev);
if (rc) {
pr_info("t4 0x%p ofld init failed.\n", cdev);
goto err_out;
}
- rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
- &cxgb4i_host_template, cxgb4i_stt);
+ cxgb4i_host_template.can_queue = max_cmds;
+ rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
+ &cxgb4i_host_template, cxgb4i_stt);
if (rc)
goto err_out;
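The 0x10d2b00 threshold is the packed firmware version 1.13.43.0, one byte per component with the major version in the top byte. A sketch of the decode under that assumed layout:

static void example_decode_fw_vers(u32 fw_vers)
{
	/* 0x010d2b00 -> 1.13.43.0: 0x01 = 1, 0x0d = 13, 0x2b = 43, 0x00 = 0 */
	pr_info("fw %u.%u.%u.%u\n",
		(fw_vers >> 24) & 0xff, (fw_vers >> 16) & 0xff,
		(fw_vers >> 8) & 0xff, fw_vers & 0xff);
}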
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 4bc794d2f51c..af281e271f88 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -77,9 +77,9 @@ int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
{
struct cxgbi_ports_map *pmap = &cdev->pmap;
- pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
- sizeof(struct cxgbi_sock *),
- GFP_KERNEL);
+ pmap->port_csk = kvzalloc(array_size(max_conn,
+ sizeof(struct cxgbi_sock *)),
+ GFP_KERNEL | __GFP_NOWARN);
if (!pmap->port_csk) {
pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
return -ENOMEM;
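kvzalloc() subsumes the driver's old cxgbi_alloc_big_mem() helper (deleted from libcxgbi.h below): it tries kmalloc first and falls back to vmalloc when physically contiguous memory is scarce, while array_size() saturates on multiplication overflow so the allocation fails cleanly. The pattern in isolation:

static struct cxgbi_sock **example_alloc_portmap(unsigned int max_conn)
{
	/* __GFP_NOWARN: the caller already logs its own OOM message */
	return kvzalloc(array_size(max_conn, sizeof(struct cxgbi_sock *)),
			GFP_KERNEL | __GFP_NOWARN);
}
/* The result is released with kvfree(), which handles either backend. */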
@@ -124,7 +124,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
if (cdev->cdev2ppm)
cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
- cxgbi_free_big_mem(cdev->pmap.port_csk);
+ kvfree(cdev->pmap.port_csk);
kfree(cdev);
}
@@ -328,7 +328,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
chba = cdev->hbas[i];
if (chba) {
cdev->hbas[i] = NULL;
- iscsi_host_remove(chba->shost);
+ iscsi_host_remove(chba->shost, false);
pci_dev_put(cdev->pdev);
iscsi_host_free(chba->shost);
}
@@ -337,7 +337,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
- unsigned int max_id, struct scsi_host_template *sht,
+ unsigned int max_conns, struct scsi_host_template *sht,
struct scsi_transport_template *stt)
{
struct cxgbi_hba *chba;
@@ -357,15 +357,17 @@ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
shost->transportt = stt;
shost->max_lun = max_lun;
- shost->max_id = max_id;
+ shost->max_id = max_conns - 1;
shost->max_channel = 0;
- shost->max_cmd_len = 16;
+ shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
chba = iscsi_host_priv(shost);
chba->cdev = cdev;
chba->ndev = cdev->ports[i];
chba->shost = shost;
+ shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX;
+
log_debug(1 << CXGBI_DBG_DEV,
"cdev 0x%p, p#%d %s: chba 0x%p.\n",
cdev, i, cdev->ports[i]->name, chba);
@@ -1136,82 +1138,6 @@ void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
-static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
-{
- struct cxgbi_device *cdev = csk->cdev;
- struct sk_buff *next;
- int err, copied = 0;
-
- spin_lock_bh(&csk->lock);
-
- if (csk->state != CTP_ESTABLISHED) {
- log_debug(1 << CXGBI_DBG_PDU_TX,
- "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
- csk, csk->state, csk->flags, csk->tid);
- err = -EAGAIN;
- goto out_err;
- }
-
- if (csk->err) {
- log_debug(1 << CXGBI_DBG_PDU_TX,
- "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
- csk, csk->state, csk->flags, csk->tid, csk->err);
- err = -EPIPE;
- goto out_err;
- }
-
- if (csk->write_seq - csk->snd_una >= csk->snd_win) {
- log_debug(1 << CXGBI_DBG_PDU_TX,
- "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
- csk, csk->state, csk->flags, csk->tid, csk->write_seq,
- csk->snd_una, csk->snd_win);
- err = -ENOBUFS;
- goto out_err;
- }
-
- while (skb) {
- int frags = skb_shinfo(skb)->nr_frags +
- (skb->len != skb->data_len);
-
- if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
- pr_err("csk 0x%p, skb head %u < %u.\n",
- csk, skb_headroom(skb), cdev->skb_tx_rsvd);
- err = -EINVAL;
- goto out_err;
- }
-
- if (frags >= SKB_WR_LIST_SIZE) {
- pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
- csk, skb_shinfo(skb)->nr_frags, skb->len,
- skb->data_len, (uint)(SKB_WR_LIST_SIZE));
- err = -EINVAL;
- goto out_err;
- }
-
- next = skb->next;
- skb->next = NULL;
- cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
- cxgbi_sock_skb_entail(csk, skb);
- copied += skb->len;
- csk->write_seq += skb->len +
- cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
- skb = next;
- }
-
- if (likely(skb_queue_len(&csk->write_queue)))
- cdev->csk_push_tx_frames(csk, 1);
-done:
- spin_unlock_bh(&csk->lock);
- return copied;
-
-out_err:
- if (copied == 0 && err == -EPIPE)
- copied = csk->err ? csk->err : -EPIPE;
- else
- copied = err;
- goto done;
-}
-
static inline void
scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
unsigned int *sgcnt, unsigned int *dlen,
@@ -1284,8 +1210,6 @@ EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
* APIs interacting with open-iscsi libraries
*/
-static unsigned char padding[4];
-
int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
struct cxgbi_tag_format *tformat,
unsigned int iscsi_size, unsigned int llimit,
@@ -1531,7 +1455,7 @@ void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
if (conn) {
log_debug(1 << CXGBI_DBG_SOCK,
"csk 0x%p, cid %d.\n", csk, conn->id);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
@@ -1710,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
- if (unlikely(!conn || conn->suspend_rx)) {
+ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
- conn ? conn->suspend_rx : 0xFF);
+ conn ? conn->flags : 0xFF);
return;
}
@@ -1833,9 +1757,10 @@ static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
return -EFAULT;
}
-static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
- unsigned int dlen, struct page_frag *frags,
- int frag_max)
+static int
+sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+ unsigned int dlen, struct page_frag *frags,
+ int frag_max, u32 *dlimit)
{
unsigned int datalen = dlen;
unsigned int sglen = sg->length - sgoffset;
@@ -1867,6 +1792,7 @@ static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
if (i >= frag_max) {
pr_warn("too many pages %u, dlen %u.\n",
frag_max, dlen);
+ *dlimit = dlen - datalen;
return -EINVAL;
}
@@ -1883,38 +1809,220 @@ static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
return i;
}
-int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
+static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
{
- struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct scatterlist *sg, *sgl = NULL;
+ u32 sgcnt = 0;
+ int i;
+
+ tdata->flags = CXGBI_TASK_SGL_CHECKED;
+ if (!sc)
+ return;
+
+ scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0);
+ if (!sgl || !sgcnt) {
+ tdata->flags |= CXGBI_TASK_SGL_COPY;
+ return;
+ }
+
+ for_each_sg(sgl, sg, sgcnt, i) {
+ if (page_count(sg_page(sg)) < 1) {
+ tdata->flags |= CXGBI_TASK_SGL_COPY;
+ return;
+ }
+ }
+}
+
+static int
+cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
+ u32 *dlimit)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct scatterlist *sgl = NULL;
+ struct scatterlist *sg;
+ u32 dlen = 0;
+ u32 sgcnt;
+ int err;
+
+ if (!sc)
+ return 0;
+
+ scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0);
+ if (!sgl || !sgcnt)
+ return 0;
+
+ err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg);
+ if (err < 0) {
+ pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n",
+ sgcnt, offset, tdata->dlen);
+ return err;
+ }
+ err = sgl_read_to_frags(sg, tdata->sgoffset, count,
+ tdata->frags, MAX_SKB_FRAGS, dlimit);
+ if (err < 0) {
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n",
+ sgcnt, offset, count, tdata->dlen, *dlimit);
+ return err;
+ }
+ tdata->offset = offset;
+ tdata->count = count;
+ tdata->nr_frags = err;
+ tdata->total_count = count;
+ tdata->total_offset = offset;
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "%s: offset %u, count %u,\n"
+ "err %u, total_count %u, total_offset %u\n",
+ __func__, offset, count, err, tdata->total_count, tdata->total_offset);
+
+ return 0;
+}
+
+int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = task->conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
struct cxgbi_device *cdev = cconn->chba->cdev;
- struct iscsi_conn *conn = task->conn;
+ struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL;
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct scsi_cmnd *sc = task->sc;
- struct cxgbi_sock *csk = cconn->cep->csk;
- struct net_device *ndev = cdev->ports[csk->port_id];
- int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
+ u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
+ u32 max_txdata_len = conn->max_xmit_dlength;
+ u32 iso_tx_rsvd = 0, local_iso_info = 0;
+ u32 last_tdata_offset, last_tdata_count;
+ int err = 0;
+
+ if (!tcp_task) {
+ pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n",
+ task, tcp_task, tdata);
+ return -ENOMEM;
+ }
+ if (!csk) {
+ pr_err("task 0x%p, csk gone.\n", task);
+ return -EPIPE;
+ }
+
+ op &= ISCSI_OPCODE_MASK;
tcp_task->dd_data = tdata;
task->hdr = NULL;
- if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
- (opcode == ISCSI_OP_SCSI_DATA_OUT ||
- (opcode == ISCSI_OP_SCSI_CMD &&
- sc->sc_data_direction == DMA_TO_DEVICE)))
- /* data could goes into skb head */
- headroom += min_t(unsigned int,
- SKB_MAX_HEAD(cdev->skb_tx_rsvd),
- conn->max_xmit_dlength);
+ last_tdata_count = tdata->count;
+ last_tdata_offset = tdata->offset;
+
+ if ((op == ISCSI_OP_SCSI_DATA_OUT) ||
+ ((op == ISCSI_OP_SCSI_CMD) &&
+ (sc->sc_data_direction == DMA_TO_DEVICE))) {
+ u32 remaining_data_tosend, dlimit = 0;
+ u32 max_pdu_size, max_num_pdu, num_pdu;
+ u32 count;
+
+ /* Preserve conn->max_xmit_dlength because it can get updated to
+ * ISO data size.
+ */
+ if (task->state == ISCSI_TASK_PENDING)
+ tdata->max_xmit_dlength = conn->max_xmit_dlength;
- tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
+ if (!tdata->offset)
+ cxgbi_task_data_sgl_check(task);
+
+ remaining_data_tosend =
+ tdata->dlen - tdata->offset - tdata->count;
+
+recalculate_sgl:
+ max_txdata_len = tdata->max_xmit_dlength;
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "tdata->dlen %u, remaining to send %u "
+ "conn->max_xmit_dlength %u, "
+ "tdata->max_xmit_dlength %u\n",
+ tdata->dlen, remaining_data_tosend,
+ conn->max_xmit_dlength, tdata->max_xmit_dlength);
+
+ if (cdev->skb_iso_txhdr && !csk->disable_iso &&
+ (remaining_data_tosend > tdata->max_xmit_dlength) &&
+ !(remaining_data_tosend % 4)) {
+ u32 max_iso_data;
+
+ if ((op == ISCSI_OP_SCSI_CMD) &&
+ session->initial_r2t_en)
+ goto no_iso;
+
+ max_pdu_size = tdata->max_xmit_dlength +
+ ISCSI_PDU_NONPAYLOAD_LEN;
+ max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB,
+ csk->advmss);
+ max_num_pdu = max_iso_data / max_pdu_size;
+
+ num_pdu = (remaining_data_tosend +
+ tdata->max_xmit_dlength - 1) /
+ tdata->max_xmit_dlength;
+
+ if (num_pdu > max_num_pdu)
+ num_pdu = max_num_pdu;
+
+ conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu;
+ max_txdata_len = conn->max_xmit_dlength;
+ iso_tx_rsvd = cdev->skb_iso_txhdr;
+ local_iso_info = sizeof(struct cxgbi_iso_info);
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "max_pdu_size %u, max_num_pdu %u, "
+ "max_txdata %u, num_pdu %u\n",
+ max_pdu_size, max_num_pdu,
+ max_txdata_len, num_pdu);
+ }
+no_iso:
+ count = min_t(u32, max_txdata_len, remaining_data_tosend);
+ err = cxgbi_task_data_sgl_read(task,
+ tdata->offset + tdata->count,
+ count, &dlimit);
+ if (unlikely(err < 0)) {
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "task 0x%p, tcp_task 0x%p, tdata 0x%p, "
+ "sgl err %d, count %u, dlimit %u\n",
+ task, tcp_task, tdata, err, count, dlimit);
+ if (dlimit) {
+ remaining_data_tosend =
+ rounddown(dlimit,
+ tdata->max_xmit_dlength);
+ if (!remaining_data_tosend)
+ remaining_data_tosend = dlimit;
+
+ dlimit = 0;
+
+ conn->max_xmit_dlength = remaining_data_tosend;
+ goto recalculate_sgl;
+ }
+
+ pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, "
+ "sgl err %d\n",
+ task, tcp_task, tdata, err);
+ goto ret_err;
+ }
+
+ if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
+ (tdata->nr_frags > MAX_SKB_FRAGS))
+ headroom += conn->max_xmit_dlength;
+ }
+
+ tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd +
+ iso_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- ndev->stats.tx_dropped++;
- return -ENOMEM;
+ tdata->count = last_tdata_count;
+ tdata->offset = last_tdata_offset;
+ err = -ENOMEM;
+ goto ret_err;
}
- skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
+ skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd +
+ iso_tx_rsvd);
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
@@ -1923,25 +2031,100 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
- ndev->stats.tx_dropped++;
return -ENOMEM;
}
}
- task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
+
+ task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX;
+
+ if (iso_tx_rsvd)
+ cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO);
/* data_out uses scsi_cmd's itt */
- if (opcode != ISCSI_OP_SCSI_DATA_OUT)
+ if (op != ISCSI_OP_SCSI_DATA_OUT)
task_reserve_itt(task, &task->hdr->itt);
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
- task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
- conn->max_xmit_dlength, ntohl(task->hdr->itt));
+ "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
+ task, op, tdata->skb, cdev->skb_tx_rsvd, headroom,
+ conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt));
return 0;
+
+ret_err:
+ conn->max_xmit_dlength = tdata->max_xmit_dlength;
+ return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
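The ISO sizing above is plain arithmetic: one ISO burst may carry at most as many whole PDUs as fit in the skb-sized ISO window, rounded down to the TCP MSS. A worked sketch with illustrative numbers (8 KiB PDUs, 312 bytes of non-payload overhead, a 1460-byte MSS and the 64 KiB window from CXGBI_MAX_ISO_DATA_IN_SKB):

static u32 example_num_pdu(u32 remaining, u32 max_xmit_dlength,
			   u32 nonpayload, u32 advmss, u32 iso_max)
{
	u32 max_pdu_size = max_xmit_dlength + nonpayload;	/* 8504 */
	u32 max_iso_data = rounddown(iso_max, advmss);		/* 64240 */
	u32 max_num_pdu = max_iso_data / max_pdu_size;		/* 7 */
	u32 num_pdu = DIV_ROUND_UP(remaining, max_xmit_dlength);

	return min(num_pdu, max_num_pdu);	/* PDUs in this burst */
}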
+static int
+cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
+ u32 count)
+{
+ struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head;
+ struct iscsi_r2t_info *r2t;
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ u32 burst_size = 0, r2t_dlength = 0, dlength;
+ u32 max_pdu_len = tdata->max_xmit_dlength;
+ u32 segment_offset = 0;
+ u32 num_pdu;
+
+ if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
+ return 0;
+
+ memset(iso_info, 0, sizeof(struct cxgbi_iso_info));
+
+ if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
+ iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE;
+ burst_size = count;
+ }
+
+ dlength = ntoh24(task->hdr->dlength);
+ dlength = min(dlength, max_pdu_len);
+ hton24(task->hdr->dlength, dlength);
+
+ num_pdu = (count + max_pdu_len - 1) / max_pdu_len;
+
+ if (iscsi_task_has_unsol_data(task))
+ r2t = &task->unsol_r2t;
+ else
+ r2t = tcp_task->r2t;
+
+ if (r2t) {
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "count %u, tdata->count %u, num_pdu %u,"
+ "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n",
+ count, tdata->count, num_pdu, task->hdr_len,
+ r2t->data_length, r2t->sent);
+
+ r2t_dlength = r2t->data_length - r2t->sent;
+ segment_offset = r2t->sent;
+ r2t->datasn += num_pdu - 1;
+ }
+
+ if (!r2t || !r2t->sent)
+ iso_info->flags |= CXGBI_ISO_INFO_FSLICE;
+
+ if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ iso_info->flags |= CXGBI_ISO_INFO_LSLICE;
+
+ task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+
+ iso_info->op = task->hdr->opcode;
+ iso_info->ahs = task->hdr->hlength;
+ iso_info->num_pdu = num_pdu;
+ iso_info->mpdu = max_pdu_len;
+ iso_info->burst_size = (burst_size + r2t_dlength) >> 2;
+ iso_info->len = count + task->hdr_len;
+ iso_info->segment_offset = segment_offset;
+
+ cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
+ return 0;
+}
+
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
if (hcrc || dcrc) {
@@ -1951,133 +2134,260 @@ static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
submode |= 1;
if (dcrc)
submode |= 2;
- cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
} else
- cxgbi_skcb_ulp_mode(skb) = 0;
+ cxgbi_skcb_tx_ulp_mode(skb) = 0;
}
+static struct page *rsvd_page;
+
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
unsigned int count)
{
struct iscsi_conn *conn = task->conn;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
- struct sk_buff *skb = tdata->skb;
- unsigned int datalen = count;
- int i, padlen = iscsi_padding(count);
+ struct sk_buff *skb;
+ struct scsi_cmnd *sc = task->sc;
+ u32 expected_count, expected_offset;
+ u32 datalen = count, dlimit = 0;
+ u32 i, padlen = iscsi_padding(count);
struct page *pg;
+ int err;
+
+ if (!tcp_task || (tcp_task->dd_data != tdata)) {
+ pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
+ task, task->sc, tcp_task,
+ tcp_task ? tcp_task->dd_data : NULL, tdata);
+ return -EINVAL;
+ }
+ skb = tdata->skb;
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
- task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
- ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);
+ "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
+ task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
+ be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count);
skb_put(skb, task->hdr_len);
tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
- if (!count)
+ if (!count) {
+ tdata->count = count;
+ tdata->offset = offset;
+ tdata->nr_frags = 0;
+ tdata->total_offset = 0;
+ tdata->total_count = 0;
+ if (tdata->max_xmit_dlength)
+ conn->max_xmit_dlength = tdata->max_xmit_dlength;
+ cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
return 0;
+ }
- if (task->sc) {
- struct scsi_data_buffer *sdb = &task->sc->sdb;
- struct scatterlist *sg = NULL;
- int err;
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "data->total_count %u, tdata->total_offset %u\n",
+ tdata->total_count, tdata->total_offset);
- tdata->offset = offset;
- tdata->count = count;
- err = sgl_seek_offset(
- sdb->table.sgl, sdb->table.nents,
- tdata->offset, &tdata->sgoffset, &sg);
- if (err < 0) {
- pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
- sdb->table.nents, tdata->offset, sdb->length);
- return err;
- }
- err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
- tdata->frags, MAX_PDU_FRAGS);
+ expected_count = tdata->total_count;
+ expected_offset = tdata->total_offset;
+
+ if ((count != expected_count) ||
+ (offset != expected_offset)) {
+ err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit);
if (err < 0) {
- pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
- sdb->table.nents, tdata->offset, tdata->count);
+ pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p "
+ "dlimit %u, sgl err %d.\n", task, task->sc,
+ tcp_task, tcp_task ? tcp_task->dd_data : NULL,
+ tdata, dlimit, err);
return err;
}
- tdata->nr_frags = err;
+ }
+
+ /* Restore original value of conn->max_xmit_dlength because
+ * it can get updated to ISO data size.
+ */
+ conn->max_xmit_dlength = tdata->max_xmit_dlength;
- if (tdata->nr_frags > MAX_SKB_FRAGS ||
- (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+ if (sc) {
+ struct page_frag *frag = tdata->frags;
+
+ if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
+ (tdata->nr_frags > MAX_SKB_FRAGS) ||
+ (padlen && (tdata->nr_frags ==
+ MAX_SKB_FRAGS))) {
char *dst = skb->data + task->hdr_len;
- struct page_frag *frag = tdata->frags;
/* data fits in the skb's headroom */
for (i = 0; i < tdata->nr_frags; i++, frag++) {
char *src = kmap_atomic(frag->page);
- memcpy(dst, src+frag->offset, frag->size);
+ memcpy(dst, src + frag->offset, frag->size);
dst += frag->size;
kunmap_atomic(src);
}
+
if (padlen) {
memset(dst, 0, padlen);
padlen = 0;
}
skb_put(skb, count + padlen);
} else {
- /* data fit into frag_list */
- for (i = 0; i < tdata->nr_frags; i++) {
- __skb_fill_page_desc(skb, i,
- tdata->frags[i].page,
- tdata->frags[i].offset,
- tdata->frags[i].size);
- skb_frag_ref(skb, i);
+ for (i = 0; i < tdata->nr_frags; i++, frag++) {
+ get_page(frag->page);
+ skb_fill_page_desc(skb, i, frag->page,
+ frag->offset, frag->size);
}
- skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+
skb->len += count;
skb->data_len += count;
skb->truesize += count;
}
-
} else {
- pg = virt_to_page(task->data);
-
+ pg = virt_to_head_page(task->data);
get_page(pg);
- skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
- count);
+ skb_fill_page_desc(skb, 0, pg,
+ task->data - (char *)page_address(pg),
+ count);
skb->len += count;
skb->data_len += count;
skb->truesize += count;
}
if (padlen) {
- i = skb_shinfo(skb)->nr_frags;
+ get_page(rsvd_page);
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- virt_to_page(padding), offset_in_page(padding),
- padlen);
+ rsvd_page, 0, padlen);
skb->data_len += padlen;
skb->truesize += padlen;
skb->len += padlen;
}
+ if (likely(count > tdata->max_xmit_dlength))
+ cxgbi_prep_iso_info(task, skb, count);
+ else
+ cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
+
return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
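Pad bytes now come from rsvd_page, a single zeroed page allocated at module load (see libcxgbi_init_module() below), rather than the old static padding[4] buffer; a real allocated page can safely be refcounted and attached to skb frags, which virt_to_page() on kernel-image data cannot guarantee. The idiom, sketched:

static void example_pad(struct sk_buff *skb, u32 padlen)
{
	get_page(rsvd_page);	/* the frag holds its own page reference */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   rsvd_page, 0, padlen);
	skb->len += padlen;
	skb->data_len += padlen;
	skb->truesize += padlen;
}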
+static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgbi_iso_info *iso_cpl;
+ u32 frags = skb_shinfo(skb)->nr_frags;
+ u32 extra_len, num_pdu, hdr_len;
+ u32 iso_tx_rsvd = 0;
+
+ if (csk->state != CTP_ESTABLISHED) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ return -EPIPE;
+ }
+
+ if (csk->err) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->err);
+ return -EPIPE;
+ }
+
+ if ((cdev->flags & CXGBI_FLAG_DEV_T3) &&
+ before((csk->snd_win + csk->snd_una), csk->write_seq)) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->write_seq,
+ csk->snd_una, csk->snd_win);
+ return -ENOBUFS;
+ }
+
+ if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
+ iso_tx_rsvd = cdev->skb_iso_txhdr;
+
+ if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) {
+ pr_err("csk 0x%p, skb head %u < %u.\n",
+ csk, skb_headroom(skb), cdev->skb_tx_rsvd);
+ return -EINVAL;
+ }
+
+ if (skb->len != skb->data_len)
+ frags++;
+
+ if (frags >= SKB_WR_LIST_SIZE) {
+ pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
+ csk, skb_shinfo(skb)->nr_frags, skb->len,
+ skb->data_len, SKB_WR_LIST_SIZE);
+ return -EINVAL;
+ }
+
+ cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
+ skb_reset_transport_header(skb);
+ cxgbi_sock_skb_entail(csk, skb);
+
+ extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) {
+ iso_cpl = (struct cxgbi_iso_info *)skb->head;
+ num_pdu = iso_cpl->num_pdu;
+ hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
+ extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) *
+ num_pdu) + (hdr_len * (num_pdu - 1));
+ }
+
+ csk->write_seq += (skb->len + extra_len);
+
+ return 0;
+}
+
+static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ int len = skb->len;
+ int err;
+
+ spin_lock_bh(&csk->lock);
+ err = cxgbi_sock_tx_queue_up(csk, skb);
+ if (err < 0) {
+ spin_unlock_bh(&csk->lock);
+ return err;
+ }
+
+ if (likely(skb_queue_len(&csk->write_queue)))
+ cdev->csk_push_tx_frames(csk, 0);
+ spin_unlock_bh(&csk->lock);
+ return len;
+}
+
int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
- struct sk_buff *skb = tdata->skb;
+ struct sk_buff *skb;
struct cxgbi_sock *csk = NULL;
- unsigned int datalen;
+ u32 pdulen = 0;
+ u32 datalen;
int err;
+ if (!tcp_task || (tcp_task->dd_data != tdata)) {
+ pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
+ task, task->sc, tcp_task,
+ tcp_task ? tcp_task->dd_data : NULL, tdata);
+ return -EINVAL;
+ }
+
+ skb = tdata->skb;
if (!skb) {
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p\n", task);
+ "task 0x%p, skb NULL.\n", task);
return 0;
}
if (cconn && cconn->cep)
csk = cconn->cep->csk;
+
if (!csk) {
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
"task 0x%p, csk gone.\n", task);
@@ -2101,13 +2411,12 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
if (!task->sc)
memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
- err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
+ err = cxgbi_sock_send_skb(csk, skb);
if (err > 0) {
- int pdulen = err;
+ pdulen += err;
- log_debug(1 << CXGBI_DBG_PDU_TX,
- "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
- task, task->sc, skb, skb->len, skb->data_len, err);
+ log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n",
+ task, task->sc, err);
if (task->conn->hdrdgst_en)
pdulen += ISCSI_DIGEST_SIZE;
@@ -2116,24 +2425,42 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
pdulen += ISCSI_DIGEST_SIZE;
task->conn->txdata_octets += pdulen;
+
+ if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
+ if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
+ csk->disable_iso = false;
+ csk->prev_iso_ts = 0;
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "enable iso: csk 0x%p\n", csk);
+ }
+ }
+
return 0;
}
if (err == -EAGAIN || err == -ENOBUFS) {
log_debug(1 << CXGBI_DBG_PDU_TX,
- "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
- task, skb, skb->len, skb->data_len, err);
+ "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
+ task, skb, skb->len, skb->data_len, err);
/* reset skb to send when we are called again */
tdata->skb = skb;
+
+ if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
+ (csk->no_tx_credits++ >= 2)) {
+ csk->disable_iso = true;
+ csk->prev_iso_ts = jiffies;
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "disable iso:csk 0x%p, ts:%lu\n",
+ csk, csk->prev_iso_ts);
+ }
+
return err;
}
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
- task->itt, skb, skb->len, skb->data_len, err);
-
+ "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
+ task->itt, skb, skb->len, skb->data_len, err);
__kfree_skb(skb);
-
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
@@ -2145,7 +2472,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
- if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
+ if (!tcp_task || (tcp_task->dd_data != tdata)) {
pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
task, task->sc, tcp_task,
tcp_task ? tcp_task->dd_data : NULL, tdata);
@@ -2363,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
- return err;
+ goto put_ep;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
- if (err)
- return -EINVAL;
+ if (err) {
+ err = -EINVAL;
+ goto put_ep;
+ }
/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
@@ -2388,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
@@ -2641,7 +2972,6 @@ void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
ep, cep, cconn, csk, csk->state, csk->flags);
if (cconn && cconn->iconn) {
- iscsi_suspend_tx(cconn->iconn);
write_lock_bh(&csk->callback_lock);
cep->csk->user_data = NULL;
cconn->cep = NULL;
@@ -2749,12 +3079,17 @@ static int __init libcxgbi_init_module(void)
BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
sizeof(struct cxgbi_skb_cb));
+ rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!rsvd_page)
+ return -ENOMEM;
+
return 0;
}
static void __exit libcxgbi_exit_module(void)
{
cxgbi_device_unregister_all(0xFF);
+ put_page(rsvd_page);
return;
}
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 84b96af52655..3687b5c0cf90 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -76,6 +76,14 @@ do { \
#define ULP2_MAX_PDU_PAYLOAD \
(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
+#define CXGBI_ULP2_MAX_ISO_PAYLOAD 65535
+
+#define CXGBI_MAX_ISO_DATA_IN_SKB \
+ min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD)
+
+#define cxgbi_is_iso_config(csk) ((csk)->cdev->skb_iso_txhdr)
+#define cxgbi_is_iso_disabled(csk) ((csk)->disable_iso)
+
/*
* For iscsi connections HW may inserts digest bytes into the pdu. Those digest
* bytes are not sent by the host but are part of the TCP payload and therefore
@@ -162,6 +170,10 @@ struct cxgbi_sock {
u32 write_seq;
u32 snd_win;
u32 rcv_win;
+
+ bool disable_iso;
+ u32 no_tx_credits;
+ unsigned long prev_iso_ts;
};
/*
@@ -203,6 +215,8 @@ struct cxgbi_skb_tx_cb {
void *handle;
void *arp_err_handler;
struct sk_buff *wr_next;
+ u16 iscsi_hdr_len;
+ u8 ulp_mode;
};
enum cxgbi_skcb_flags {
@@ -218,6 +232,7 @@ enum cxgbi_skcb_flags {
SKCBF_RX_HCRC_ERR, /* header digest error */
SKCBF_RX_DCRC_ERR, /* data digest error */
SKCBF_RX_PAD_ERR, /* padding byte error */
+ SKCBF_TX_ISO, /* iso cpl in tx skb */
};
struct cxgbi_skb_cb {
@@ -225,18 +240,18 @@ struct cxgbi_skb_cb {
struct cxgbi_skb_rx_cb rx;
struct cxgbi_skb_tx_cb tx;
};
- unsigned char ulp_mode;
unsigned long flags;
unsigned int seq;
};
#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags)
-#define cxgbi_skcb_ulp_mode(skb) (CXGBI_SKB_CB(skb)->ulp_mode)
#define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next)
+#define cxgbi_skcb_tx_iscsi_hdrlen(skb) (CXGBI_SKB_CB(skb)->tx.iscsi_hdr_len)
+#define cxgbi_skcb_tx_ulp_mode(skb) (CXGBI_SKB_CB(skb)->tx.ulp_mode)
static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
enum cxgbi_skcb_flags flag)
@@ -458,6 +473,7 @@ struct cxgbi_ports_map {
#define CXGBI_FLAG_IPV4_SET 0x10
#define CXGBI_FLAG_USE_PPOD_OFLDQ 0x40
#define CXGBI_FLAG_DDP_OFF 0x100
+#define CXGBI_FLAG_DEV_ISO_OFF 0x400
struct cxgbi_device {
struct list_head list_head;
@@ -477,6 +493,7 @@ struct cxgbi_device {
unsigned int pfvf;
unsigned int rx_credit_thres;
unsigned int skb_tx_rsvd;
+ u32 skb_iso_txhdr;
unsigned int skb_rx_extra; /* for msg coalesced mode */
unsigned int tx_max_size;
unsigned int rx_max_size;
@@ -523,35 +540,40 @@ struct cxgbi_endpoint {
struct cxgbi_sock *csk;
};
-#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
struct cxgbi_task_data {
+#define CXGBI_TASK_SGL_CHECKED 0x1
+#define CXGBI_TASK_SGL_COPY 0x2
+ u8 flags;
unsigned short nr_frags;
- struct page_frag frags[MAX_PDU_FRAGS];
+ struct page_frag frags[MAX_SKB_FRAGS];
struct sk_buff *skb;
unsigned int dlen;
unsigned int offset;
unsigned int count;
unsigned int sgoffset;
+ u32 total_count;
+ u32 total_offset;
+ u32 max_xmit_dlength;
struct cxgbi_task_tag_info ttinfo;
};
#define iscsi_task_cxgbi_data(task) \
((task)->dd_data + sizeof(struct iscsi_tcp_task))
-static inline void *cxgbi_alloc_big_mem(unsigned int size,
- gfp_t gfp)
-{
- void *p = kzalloc(size, gfp | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
-
- return p;
-}
-
-static inline void cxgbi_free_big_mem(void *addr)
-{
- kvfree(addr);
-}
+struct cxgbi_iso_info {
+#define CXGBI_ISO_INFO_FSLICE 0x1
+#define CXGBI_ISO_INFO_LSLICE 0x2
+#define CXGBI_ISO_INFO_IMM_ENABLE 0x4
+ u8 flags;
+ u8 op;
+ u8 ahs;
+ u8 num_pdu;
+ u32 mpdu;
+ u32 burst_size;
+ u32 len;
+ u32 segment_offset;
+ u32 datasn_offset;
+ u32 buffer_offset;
+};
static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index fbd2ae40dab4..cd1324ec742d 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -47,9 +47,6 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
struct sisl_ioasa *ioasa;
u32 resid;
- if (unlikely(!cmd))
- return;
-
ioasa = &(cmd->sa);
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
@@ -135,7 +132,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
break;
case SISL_AFU_RC_OUT_OF_DATA_BUFS:
/* Retry */
- scp->result = (DID_ALLOC_FAILURE << 16);
+ scp->result = (DID_ERROR << 16);
break;
default:
scp->result = (DID_ERROR << 16);
@@ -174,7 +171,7 @@ static void cmd_complete(struct afu_cmd *cmd)
dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
__func__, scp, scp->result, cmd->sa.ioasc);
- scp->scsi_done(scp);
+ scsi_done(scp);
} else if (cmd->cmd_tmf) {
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
@@ -208,7 +205,7 @@ static void flush_pending_cmds(struct hwq *hwq)
if (cmd->scp) {
scp = cmd->scp;
scp->result = (DID_IMM_RETRY << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
} else {
cmd->cmd_aborted = true;
@@ -436,7 +433,7 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
hwq = afu->hwq_rr_count++ % afu->num_hwqs;
break;
case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scp->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
hwq = blk_mq_unique_tag_to_hwq(tag);
break;
case HWQ_MODE_CPU:
@@ -604,7 +601,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
case STATE_FAILTERM:
dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
scp->result = (DID_NO_CONNECT << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
rc = 0;
goto out;
default:
@@ -751,16 +748,16 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- /* fall through */
+ fallthrough;
case UNMAP_TWO:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- /* fall through */
+ fallthrough;
case UNMAP_ONE:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- /* fall through */
+ fallthrough;
case FREE_IRQ:
cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- /* fall through */
+ fallthrough;
case UNDO_NOOP:
/* No action required */
break;
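
The `fallthrough;` statement these hunks switch to is the pseudo-keyword from
<linux/compiler_attributes.h>; unlike the old comments, it is visible to the
compiler's -Wimplicit-fallthrough checking. Its definition, for reference:

	#if __has_attribute(__fallthrough__)
	# define fallthrough __attribute__((__fallthrough__))
	#else
	# define fallthrough do {} while (0)	/* fallthrough */
	#endif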
@@ -974,18 +971,18 @@ static void cxlflash_remove(struct pci_dev *pdev)
switch (cfg->init_state) {
case INIT_STATE_CDEV:
cxlflash_release_chrdev(cfg);
- /* fall through */
+ fallthrough;
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
- /* fall through */
+ fallthrough;
case INIT_STATE_AFU:
term_afu(cfg);
- /* fall through */
+ fallthrough;
case INIT_STATE_PCI:
cfg->ops->destroy_afu(cfg->afu_cookie);
pci_disable_device(pdev);
- /* fall through */
+ fallthrough;
case INIT_STATE_NONE:
free_mem(cfg);
scsi_host_put(cfg->host);
@@ -1360,7 +1357,7 @@ cxlflash_sync_err_irq_exit:
/**
* process_hrrq() - process the read-response queue
- * @afu: AFU associated with the host.
+ * @hwq: HWQ associated with the host.
* @doneq: Queue of commands harvested from the RRQ.
* @budget: Threshold of RRQ entries to process.
*
@@ -1632,8 +1629,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
struct device *dev = &cfg->dev->dev;
struct pci_dev *pdev = cfg->dev;
- int rc = 0;
- int ro_start, ro_size, i, j, k;
+ int i, k, rc = 0;
+ unsigned int kw_size;
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
@@ -1651,25 +1648,6 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
goto out;
}
- /* Get the read only section offset */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
- PCI_VPD_LRDT_RO_DATA);
- if (unlikely(ro_start < 0)) {
- dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
- rc = -ENODEV;
- goto out;
- }
-
- /* Get the read only section size, cap when extends beyond read VPD */
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (unlikely((i + j) > vpd_size)) {
- dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
- __func__, (i + j), vpd_size);
- ro_size = vpd_size - i;
- }
-
/*
* Find the offset of the WWPN tag within the read only
* VPD data and validate the found field (partials are
@@ -1685,11 +1663,9 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* ports programmed and operate in an undefined state.
*/
for (k = 0; k < cfg->num_fc_ports; k++) {
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
-
- i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
- if (i < 0) {
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ wwpn_vpd_tags[k], &kw_size);
+ if (i == -ENOENT) {
if (wwpn_vpd_required)
dev_err(dev, "%s: Port %d WWPN not found\n",
__func__, k);
@@ -1697,9 +1673,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
continue;
}
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
+ if (i < 0 || kw_size != WWPN_LEN) {
dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
__func__, k);
rc = -ENODEV;
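
pci_vpd_find_ro_info_keyword() folds the old find-tag/find-keyword/field-size
dance into one call: it returns the offset of the keyword's data within the
buffer (or a negative errno) and stores the data length in *size. A sketch of
the pattern, with a hypothetical keyword name:

	/* Hypothetical keyword "VX"; the loop above does this per FC port. */
	static int find_wwpn_kw(const char *vpd_data, unsigned int vpd_size,
				char *dst)
	{
		unsigned int kw_size;
		int off = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
						       "VX", &kw_size);

		if (off < 0 || kw_size != WWPN_LEN)
			return -ENODEV;
		memcpy(dst, &vpd_data[off], kw_size);	/* offset is to the data */
		return 0;
	}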
@@ -2000,7 +1974,7 @@ out:
/**
* init_mc() - create and register as the master context
* @cfg: Internal structure associated with the host.
- * index: HWQ Index of the master context.
+ * @index: HWQ Index of the master context.
*
* Return: 0 on success, -errno on failure
*/
@@ -2358,11 +2332,11 @@ retry:
cxlflash_schedule_async_reset(cfg);
break;
}
- /* fall through - to retry */
+ fallthrough; /* to retry */
case -EAGAIN:
if (++nretry < 2)
goto retry;
- /* fall through - to exit */
+ fallthrough; /* to exit */
default:
break;
}
@@ -2536,12 +2510,12 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
cfg->state = STATE_NORMAL;
wake_up_all(&cfg->reset_waitq);
ssleep(1);
- /* fall through */
+ fallthrough;
case STATE_RESET:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
break;
- /* fall through */
+ fallthrough;
default:
rc = FAILED;
break;
@@ -3022,7 +2996,7 @@ retry:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
goto retry;
- /* else, fall through */
+ fallthrough;
default:
/* Ideally should not happen */
dev_err(dev, "%s: Device is not ready, state=%d\n",
@@ -3129,33 +3103,37 @@ static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);
-static struct device_attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0,
- &dev_attr_port1,
- &dev_attr_port2,
- &dev_attr_port3,
- &dev_attr_lun_mode,
- &dev_attr_ioctl_version,
- &dev_attr_port0_lun_table,
- &dev_attr_port1_lun_table,
- &dev_attr_port2_lun_table,
- &dev_attr_port3_lun_table,
- &dev_attr_irqpoll_weight,
- &dev_attr_num_hwqs,
- &dev_attr_hwq_mode,
+static struct attribute *cxlflash_host_attrs[] = {
+ &dev_attr_port0.attr,
+ &dev_attr_port1.attr,
+ &dev_attr_port2.attr,
+ &dev_attr_port3.attr,
+ &dev_attr_lun_mode.attr,
+ &dev_attr_ioctl_version.attr,
+ &dev_attr_port0_lun_table.attr,
+ &dev_attr_port1_lun_table.attr,
+ &dev_attr_port2_lun_table.attr,
+ &dev_attr_port3_lun_table.attr,
+ &dev_attr_irqpoll_weight.attr,
+ &dev_attr_num_hwqs.attr,
+ &dev_attr_hwq_mode.attr,
NULL
};
+ATTRIBUTE_GROUPS(cxlflash_host);
+
/*
* Device attributes
*/
static DEVICE_ATTR_RO(mode);
-static struct device_attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode,
+static struct attribute *cxlflash_dev_attrs[] = {
+ &dev_attr_mode.attr,
NULL
};
+ATTRIBUTE_GROUPS(cxlflash_dev);
+
/*
* Host template
*/
@@ -3176,8 +3154,8 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
- .shost_attrs = cxlflash_host_attrs,
- .sdev_attrs = cxlflash_dev_attrs,
+ .shost_groups = cxlflash_host_groups,
+ .sdev_groups = cxlflash_dev_groups,
};
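
ATTRIBUTE_GROUPS(cxlflash_host) is what makes cxlflash_host_groups exist: the
<linux/sysfs.h> macro wraps the _attrs array in an attribute_group and a
NULL-terminated array of group pointers, roughly:

	static const struct attribute_group cxlflash_host_group = {
		.attrs = cxlflash_host_attrs,
	};
	static const struct attribute_group *cxlflash_host_groups[] = {
		&cxlflash_host_group,
		NULL,
	};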
/*
@@ -3297,7 +3275,7 @@ static char *decode_hioctl(unsigned int cmd)
/**
* cxlflash_lun_provision() - host LUN provisioning handler
* @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
+ * @lunprov: Kernel copy of userspace ioctl data structure.
*
* Return: 0 on success, -errno on failure
*/
@@ -3388,7 +3366,7 @@ out:
/**
* cxlflash_afu_debug() - host AFU debug handler
* @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
+ * @afu_dbg: Kernel copy of userspace ioctl data structure.
*
* For debug requests requiring a data buffer, always provide an aligned
* (cache line) buffer to the AFU to appease any alignment requirements.
@@ -3534,7 +3512,7 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
if (likely(do_ioctl))
break;
- /* fall through */
+ fallthrough;
default:
rc = -EINVAL;
goto out;
@@ -3744,6 +3722,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->afu_cookie = cfg->ops->create_afu(pdev);
if (unlikely(!cfg->afu_cookie)) {
dev_err(dev, "%s: create_afu failed\n", __func__);
+ rc = -ENOMEM;
goto out_remove;
}
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 7018cd802569..631eda2d467e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -15,7 +15,9 @@
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
-
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <asm/xive.h>
#include <misc/ocxl.h>
#include <uapi/misc/cxl.h>
@@ -180,7 +182,7 @@ static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct device *dev = afu->dev;
struct ocxlflash_irqs *irq;
- void __iomem *vtrig;
+ struct xive_irq_data *xd;
u32 virq;
int rc = 0;
@@ -204,15 +206,15 @@ static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
goto err1;
}
- vtrig = ioremap(irq->ptrig, PAGE_SIZE);
- if (unlikely(!vtrig)) {
- dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
- rc = -ENOMEM;
+ xd = irq_get_handler_data(virq);
+ if (unlikely(!xd)) {
+ dev_err(dev, "%s: Can't get interrupt data\n", __func__);
+ rc = -ENXIO;
goto err2;
}
irq->virq = virq;
- irq->vtrig = vtrig;
+ irq->vtrig = xd->trig_mmio;
out:
return rc;
err2:
@@ -259,8 +261,6 @@ static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
}
irq = &ctx->irqs[num];
- if (irq->vtrig)
- iounmap(irq->vtrig);
if (irq_find_mapping(NULL, irq->hwirq)) {
free_irq(irq->virq, cookie);
@@ -330,6 +330,7 @@ static int start_context(struct ocxlflash_context *ctx)
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct ocxl_afu_config *acfg = &afu->acfg;
void *link_token = afu->link_token;
+ struct pci_dev *pdev = afu->pdev;
struct device *dev = afu->dev;
bool master = ctx->master;
struct mm_struct *mm;
@@ -361,8 +362,9 @@ static int start_context(struct ocxlflash_context *ctx)
mm = current->mm;
}
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
- ocxlflash_xsl_fault, ctx);
+ rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
+ pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
+ ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
__func__, rc);
@@ -615,7 +617,6 @@ static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct device *dev = afu->dev;
struct ocxlflash_irqs *irqs;
- u64 addr;
int rc = 0;
int hwirq;
int i;
@@ -640,7 +641,7 @@ static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
}
for (i = 0; i < num; i++) {
- rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
+ rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
if (unlikely(rc)) {
dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
__func__, rc);
@@ -648,7 +649,6 @@ static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
}
irqs[i].hwirq = hwirq;
- irqs[i].ptrig = addr;
}
ctx->irqs = irqs;
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
index fc6ad4f985de..f2fe88816bea 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ b/drivers/scsi/cxlflash/ocxl_hw.h
@@ -13,7 +13,6 @@
struct ocxlflash_irqs {
int hwirq;
u32 virq;
- u64 ptrig;
void __iomem *vtrig;
};
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 593669ac3669..df0ebabbf387 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -30,7 +30,7 @@ struct cxlflash_global global;
/**
* marshal_rele_to_resize() - translate release to resize structure
- * @rele: Source structure from which to translate/copy.
+ * @release: Source structure from which to translate/copy.
* @resize: Destination structure for the translate/copy.
*/
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
@@ -44,7 +44,7 @@ static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
/**
* marshal_det_to_rele() - translate detach to release structure
* @detach: Destination structure for the translate/copy.
- * @rele: Source structure from which to translate/copy.
+ * @release: Source structure from which to translate/copy.
*/
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
struct dk_cxlflash_release *release)
@@ -369,20 +369,18 @@ retry:
goto out;
}
- if (driver_byte(result) == DRIVER_SENSE) {
- result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+ if (result > 0 && scsi_sense_valid(&sshdr)) {
if (result & SAM_STAT_CHECK_CONDITION) {
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
- /* fall through */
case NOT_READY:
result &= ~SAM_STAT_CHECK_CONDITION;
break;
case UNIT_ATTENTION:
switch (sshdr.asc) {
case 0x29: /* Power on Reset or Device Reset */
- /* fall through */
+ fallthrough;
case 0x2A: /* Device capacity changed */
case 0x3F: /* Report LUNs changed */
/* Retry the command once more */
@@ -518,7 +516,7 @@ void rhte_checkin(struct ctx_info *ctxi,
}
/**
- * rhte_format1() - populates a RHTE for format 1
+ * rht_format1() - populates a RHTE for format 1
* @rhte: RHTE to populate.
* @lun_id: LUN ID of LUN associated with RHTE.
* @perm: Desired permissions for RHTE.
@@ -1791,13 +1789,12 @@ static int process_sense(struct scsi_device *sdev,
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
- /* fall through */
case NOT_READY:
break;
case UNIT_ATTENTION:
switch (sshdr.asc) {
case 0x29: /* Power on Reset or Device Reset */
- /* fall through */
+ fallthrough;
case 0x2A: /* Device settings/capacity changed */
rc = read_cap16(sdev, lli);
if (rc) {
@@ -2157,7 +2154,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
if (unlikely(rc))
goto cxlflash_ioctl_exit;
- /* fall through */
+ fallthrough;
case DK_CXLFLASH_MANAGE_LUN:
known_ioctl = true;
@@ -2168,7 +2165,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
if (likely(do_ioctl))
break;
- /* fall through */
+ fallthrough;
default:
rc = -EINVAL;
goto cxlflash_ioctl_exit;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index f1406ac77b0d..5c74dc7c2288 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -41,7 +41,7 @@ static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
/**
* marshal_clone_to_rele() - translate clone to release structure
* @clone: Source structure from which to translate/copy.
- * @rele: Destination structure for the translate/copy.
+ * @release: Destination structure for the translate/copy.
*/
static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
struct dk_cxlflash_release *release)
@@ -229,7 +229,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
/**
* validate_alloc() - validates the specified block has been allocated
- * @ba_lun_info: LUN info owning the block allocator.
+ * @bali: LUN info owning the block allocator.
* @aun: Block to validate.
*
* Return: 0 on success, -1 on failure
@@ -300,7 +300,7 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
/**
* ba_clone() - Clone a chunk of the block allocation table
* @ba_lun: Block allocator from which to allocate a block.
- * @to_free: Block to free.
+ * @to_clone: Block to clone.
*
* Return: 0 on success, -1 on failure
*/
@@ -361,7 +361,7 @@ void cxlflash_ba_terminate(struct ba_lun *ba_lun)
/**
* init_vlun() - initializes a LUN for virtual use
- * @lun_info: LUN information structure that owns the block allocator.
+ * @lli: LUN information structure that owns the block allocator.
*
* Return: 0 on success, -errno on failure
*/
@@ -430,8 +430,8 @@ static int write_same16(struct scsi_device *sdev,
struct device *dev = &cfg->dev->dev;
const u32 s = ilog2(sdev->sector_size) - 9;
const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
- REQ_OP_WRITE_SAME) >> s;
+ const u32 ws_limit =
+ sdev->request_queue->limits.max_write_zeroes_sectors >> s;
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 13fbb2eab842..670a836a6ba1 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -61,10 +61,10 @@
#include <asm/io.h>
#include <scsi/scsi.h>
-#include <scsi/scsicam.h> /* needed for scsicam_bios_param */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
#include "dc395x.h"
@@ -160,22 +160,6 @@
#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
-/* cmd->result */
-#define RES_TARGET 0x000000FF /* Target State */
-#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
-#define RES_ENDMSG 0x0000FF00 /* End Message */
-#define RES_DID 0x00FF0000 /* DID_ codes */
-#define RES_DRV 0xFF000000 /* DRIVER_ codes */
-
-#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
-#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
-
-#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
-#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
-#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
-#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
-#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
-
#define TAG_NONE 255
/*
@@ -903,7 +887,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
- if (dir == PCI_DMA_NONE || !nseg) {
+ if (dir == DMA_NONE || !nseg) {
dprintkdbg(DBG_0,
"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
@@ -958,11 +942,10 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
/**
- * dc395x_queue_command - queue scsi command passed from the mid
+ * dc395x_queue_command_lck - queue scsi command passed from the mid
* layer, invoke 'done' on completion
*
* @cmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* Returns 1 if the adapter (host) is busy, else returns 0. One
* reason for an adapter to be busy is that the number
@@ -975,9 +958,10 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
*
- **/
-static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ */
+static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
struct AdapterCtlBlk *acb =
@@ -986,7 +970,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
/* Assume BAD_TARGET; will be cleared later */
- cmd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmd, DID_BAD_TARGET);
/* ignore invalid targets */
if (cmd->device->id >= acb->scsi_host->max_id ||
@@ -1011,9 +995,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
goto complete;
}
- /* set callback and clear result in the command */
- cmd->scsi_done = done;
- cmd->result = 0;
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
srb = list_first_entry_or_null(&acb->srb_free_list,
struct ScsiReqBlk, list);
@@ -1053,38 +1036,6 @@ complete:
static DEF_SCSI_QCMD(dc395x_queue_command)
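
DEF_SCSI_QCMD() still supplies the host-locked wrapper that the template's
.queuecommand points at; only the _lck prototype changed, since completion now
goes through the global scsi_done() rather than a per-command pointer. The
generated wrapper is roughly:

	static int dc395x_queue_command(struct Scsi_Host *shost,
					struct scsi_cmnd *cmd)
	{
		unsigned long irq_flags;
		int rc;

		spin_lock_irqsave(shost->host_lock, irq_flags);
		rc = dc395x_queue_command_lck(cmd);
		spin_unlock_irqrestore(shost->host_lock, irq_flags);
		return rc;
	}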
-/*
- * Return the disk geometry for the given SCSI device.
- */
-static int dc395x_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *info)
-{
-#ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
- int heads, sectors, cylinders;
- struct AdapterCtlBlk *acb;
- int size = capacity;
-
- dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
- acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
- heads = 64;
- sectors = 32;
- cylinders = size / (heads * sectors);
-
- if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
- heads = 255;
- sectors = 63;
- cylinders = size / (heads * sectors);
- }
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
- return 0;
-#else
- return scsicam_bios_param(bdev, capacity, info);
-#endif
-}
-
-
static void dump_register_info(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
@@ -1282,7 +1233,7 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
- cmd->result = DID_ABORT << 16;
+ set_host_byte(cmd, DID_ABORT);
return SUCCESS;
}
srb = find_cmd(cmd, &dcb->srb_going_list);
@@ -1314,12 +1265,8 @@ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} else if (dcb->sync_offset == 0)
dcb->sync_offset = SYNC_NEGO_OFFSET;
- *ptr++ = MSG_EXTENDED; /* (01h) */
- *ptr++ = 3; /* length */
- *ptr++ = EXTENDED_SDTR; /* (01h) */
- *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
- *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
- srb->msg_count += 5;
+ srb->msg_count += spi_populate_sync_msg(ptr, dcb->min_nego_period,
+ dcb->sync_offset);
srb->state |= SRB_DO_SYNC_NEGO;
}
@@ -1338,11 +1285,7 @@ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->msgout_buf[1]);
return;
}
- *ptr++ = MSG_EXTENDED; /* (01h) */
- *ptr++ = 2; /* length */
- *ptr++ = EXTENDED_WDTR; /* (03h) */
- *ptr++ = wide;
- srb->msg_count += 4;
+ srb->msg_count += spi_populate_width_msg(ptr, wide);
srb->state |= SRB_DO_WIDE_NEGO;
}
@@ -1389,7 +1332,7 @@ void selection_timeout_missed(unsigned long ptr)
static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
struct ScsiReqBlk* srb)
{
- u16 s_stat2, return_code;
+ u16 __maybe_unused s_stat2, return_code;
u8 s_stat, scsicommand, i, identify_message;
u8 *ptr;
dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
@@ -1509,7 +1452,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
return 1;
}
/* Send Tag id */
- DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SIMPLE_QUEUE_TAG);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
dcb->tag_mask |= tag_mask;
srb->tag_number = tag_number;
@@ -1765,8 +1708,9 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (!srb->msg_count) {
dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
srb->cmd);
- DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
- DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
+ /* it's important for atn stop */
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
return;
}
@@ -1774,7 +1718,7 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
for (i = 0; i < srb->msg_count; i++)
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
srb->msg_count = 0;
- if (srb->msgout_buf[0] == MSG_ABORT)
+ if (srb->msgout_buf[0] == ABORT_TASK_SET)
srb->state = SRB_ABORT_SENT;
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
@@ -2430,7 +2374,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
}
#endif /* DC395x_LASTPIO */
else { /* xfer pad */
- u8 data = 0, data2 = 0;
if (srb->sg_count) {
srb->adapter_status = H_OVER_UNDER_RUN;
srb->status |= OVER_RUN;
@@ -2445,8 +2388,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
CFG2_WIDEFIFO);
if (io_dir & DMACMD_DIR) {
- data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
- data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
} else {
/* Danger, Robinson: If you find KGs
* scattered over the wide disk, the driver
@@ -2460,7 +2403,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
/* Danger, Robinson: If you find a collection of Ks on your disk
* something broke :-( */
if (io_dir & DMACMD_DIR)
- data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
else
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
}
@@ -2572,7 +2515,7 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
srb = acb->tmp_srb;
srb->state = SRB_UNEXPECT_RESEL;
dcb->active_srb = srb;
- srb->msgout_buf[0] = MSG_ABORT_TAG;
+ srb->msgout_buf[0] = ABORT_TASK;
srb->msg_count = 1;
DC395x_ENABLE_MSGOUT;
dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
@@ -2814,7 +2757,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
msgin_reject(acb, srb);
break;
- case MSG_IGNOREWIDE:
+ case IGNORE_WIDE_RESIDUE:
/* Discard wide residual */
dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
break;
@@ -2958,7 +2901,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
} else {
if ((srb->state & (SRB_START_ + SRB_MSGOUT))
|| !(srb->
- state & (SRB_DISCONNECT + SRB_COMPLETED))) {
+ state & (SRB_DISCONNECT | SRB_COMPLETED))) {
/*
* Selection time out
* SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
@@ -3022,7 +2965,6 @@ static void reselect(struct AdapterCtlBlk *acb)
struct ScsiReqBlk *srb = NULL;
u16 rsel_tar_lun_id;
u8 id, lun;
- u8 arblostflag = 0;
dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
clear_fifo(acb, "reselect");
@@ -3044,7 +2986,6 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->cmd, dcb->target_id,
dcb->target_lun, rsel_tar_lun_id,
DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
- arblostflag = 1;
/*srb->state |= SRB_DISCONNECT; */
srb->state = SRB_READY;
@@ -3075,7 +3016,7 @@ static void reselect(struct AdapterCtlBlk *acb)
"disconnection? <%02i-%i>\n",
dcb->target_id, dcb->target_lun);
- if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
+ if (dcb->sync_mode & EN_TAG_QUEUEING) {
srb = acb->tmp_srb;
dcb->active_srb = srb;
} else {
@@ -3168,7 +3109,7 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
- if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
+ if (scsi_sg_count(cmd) && dir != DMA_NONE) {
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3220,6 +3161,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
scsi_sglist(cmd));
status = srb->target_status;
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
if (srb->flag & AUTO_REQSENSE) {
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
pci_unmap_srb_sense(acb, srb);
@@ -3228,7 +3171,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*/
srb->flag &= ~AUTO_REQSENSE;
srb->adapter_status = 0;
- srb->target_status = CHECK_CONDITION << 1;
+ srb->target_status = SAM_STAT_CHECK_CONDITION;
if (debug_enabled(DBG_1)) {
switch (cmd->sense_buffer[2] & 0x0f) {
case NOT_READY:
@@ -3275,22 +3218,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*((unsigned int *)(cmd->sense_buffer + 3)));
}
- if (status == (CHECK_CONDITION << 1)) {
- cmd->result = DID_BAD_TARGET << 16;
+ if (status == SAM_STAT_CHECK_CONDITION) {
+ set_host_byte(cmd, DID_BAD_TARGET);
goto ckc_e;
}
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
- if (srb->total_xfer_length
- && srb->total_xfer_length >= cmd->underflow)
- cmd->result =
- MK_RES_LNX(DRIVER_SENSE, DID_OK,
- srb->end_message, CHECK_CONDITION);
- /*SET_RES_DID(cmd->result,DID_OK) */
- else
- cmd->result =
- MK_RES_LNX(DRIVER_SENSE, DID_OK,
- srb->end_message, CHECK_CONDITION);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
goto ckc_e;
}
@@ -3300,10 +3234,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*
* target status..........................
*/
- if (status_byte(status) == CHECK_CONDITION) {
+ if (status == SAM_STAT_CHECK_CONDITION) {
request_sense(acb, dcb, srb);
return;
- } else if (status_byte(status) == QUEUE_FULL) {
+ } else if (status == SAM_STAT_TASK_SET_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
dcb->target_id, dcb->target_lun, tempcnt);
@@ -3319,13 +3253,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} else if (status == SCSI_STAT_SEL_TIMEOUT) {
srb->adapter_status = H_SEL_TIMEOUT;
srb->target_status = 0;
- cmd->result = DID_NO_CONNECT << 16;
+ set_host_byte(cmd, DID_NO_CONNECT);
} else {
srb->adapter_status = 0;
- SET_RES_DID(cmd->result, DID_ERROR);
- SET_RES_MSG(cmd->result, srb->end_message);
- SET_RES_TARGET(cmd->result, status);
-
+ set_host_byte(cmd, DID_ERROR);
+ set_status_byte(cmd, status);
}
} else {
/*
@@ -3334,16 +3266,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
status = srb->adapter_status;
if (status & H_OVER_UNDER_RUN) {
srb->target_status = 0;
- SET_RES_DID(cmd->result, DID_OK);
- SET_RES_MSG(cmd->result, srb->end_message);
+ scsi_msg_to_host_byte(cmd, srb->end_message);
} else if (srb->status & PARITY_ERROR) {
- SET_RES_DID(cmd->result, DID_PARITY);
- SET_RES_MSG(cmd->result, srb->end_message);
+ set_host_byte(cmd, DID_PARITY);
} else { /* No error */
srb->adapter_status = 0;
srb->target_status = 0;
- SET_RES_DID(cmd->result, DID_OK);
}
}
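
set_host_byte()/set_status_byte() replace the home-grown MK_RES()/SET_RES_*()
macros throughout this file; they are the <scsi/scsi_cmnd.h> accessors for the
two live fields of cmd->result, essentially:

	static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
	{
		cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
	}

	static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
	{
		cmd->result = (cmd->result & 0xffffff00) | status;
	}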
@@ -3364,15 +3293,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
ptr = (struct ScsiInqData *)(base + offset);
- if (!ckc_only && (cmd->result & RES_DID) == 0
+ if (!ckc_only && get_host_byte(cmd) == DID_OK
&& cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
- && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
+ && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
- if ((cmd->result == (DID_OK << 16) ||
- status_byte(cmd->result) == CHECK_CONDITION)) {
+ if ((get_host_byte(cmd) == DID_OK) ||
+ (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;
@@ -3385,9 +3314,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/* Here is the info for Doug Gilbert's sg3 ... */
scsi_set_resid(cmd, srb->total_xfer_length);
- /* This may be interpreted by sb. or not ... */
- cmd->SCp.this_residual = srb->total_xfer_length;
- cmd->SCp.buffers_residual = 0;
if (debug_enabled(DBG_KG)) {
if (srb->total_xfer_length)
dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
@@ -3399,13 +3325,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (srb != acb->tmp_srb) {
/* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
- cmd, cmd->result);
+ cmd, cmd->result);
list_move_tail(&srb->list, &acb->srb_free_list);
} else {
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
waiting_process_next(acb);
}
@@ -3423,24 +3349,20 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *p;
list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
- enum dma_data_direction dir;
- int result;
-
p = srb->cmd;
- dir = p->sc_data_direction;
- result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
list_del(&srb->list);
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
- p->result = result;
+ set_host_byte(p, did_flag);
+ set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
/* For new EH, we normally don't need to give commands back,
* as they all complete or all time out */
- p->scsi_done(p);
+ scsi_done(p);
}
}
if (!list_empty(&dcb->srb_going_list))
@@ -3455,20 +3377,19 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
/* Waiting queue */
list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
- int result;
p = srb->cmd;
- result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
list_move_tail(&srb->list, &acb->srb_free_list);
- p->result = result;
+ set_host_byte(p, did_flag);
+ set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
/* For new EH, we normally don't need to give commands back,
* as they all complete or all time out */
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
if (!list_empty(&dcb->srb_waiting_list))
@@ -3664,10 +3585,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
#endif
if (dcb->target_lun != 0) {
/* Copy settings */
- struct DeviceCtlBlk *p;
- list_for_each_entry(p, &acb->dcb_list, list)
- if (p->target_id == dcb->target_id)
+ struct DeviceCtlBlk *p = NULL, *iter;
+
+ list_for_each_entry(iter, &acb->dcb_list, list)
+ if (iter->target_id == dcb->target_id) {
+ p = iter;
break;
+ }
+
+ if (!p) {
+ kfree(dcb);
+ return NULL;
+ }
+
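
The list_for_each_entry() cursor is only a valid entry when the loop exits via
break; after normal termination it is a container_of() of the list head itself.
The general shape of the fix, with a hypothetical element type:

	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &head, node)
		if (iter->id == wanted_id) {
			found = iter;	/* remember the real match */
			break;
		}

	if (!found)
		return NULL;	/* 'iter' must not be dereferenced here */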
dprintkdbg(DBG_1,
"device_alloc: <%02i-%i> copy from <%02i-%i>\n",
dcb->target_id, dcb->target_lun,
@@ -4159,7 +4089,7 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
int srb_idx = 0;
unsigned i = 0;
- struct SGentry *uninitialized_var(ptr);
+ struct SGentry *ptr;
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
@@ -4292,7 +4222,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
/**
- * adapter_init_host - Initialize the scsi host instance based on
+ * adapter_init_scsi_host - Initialize the scsi host instance based on
* values that we have already stored in the adapter instance. There's
* some mention that a lot of these are deprecated, so we won't use
* them (we'll use the ones in the adapter instance) but we'll fill
@@ -4380,13 +4310,14 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
/**
- * init_adapter - Grab the resource for the card, setup the adapter
+ * adapter_init - Grab the resource for the card, setup the adapter
* information, set the card into a known state, create the various
* tables etc etc. This basically gets all adapter information all up
* to date, initialised and gets the chip in sync with it.
*
- * @host: This hosts adapter structure
+ * @acb: The adapter which we are to init.
* @io_port: The base I/O port
+ * @io_port_len: The I/O port size
* @irq: IRQ
*
* Returns 0 if the initialization succeeds, any other value on
@@ -4537,14 +4468,8 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
/*seq_printf(m, "\n"); */
seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
- seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
- acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
- acb->dcb_map[6], acb->dcb_map[7]);
- seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
- acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
- acb->dcb_map[14], acb->dcb_map[15]);
+ seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]);
+ seq_printf(m, " %8ph\n", &acb->dcb_map[8]);
seq_puts(m,
"Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
@@ -4622,7 +4547,6 @@ static struct scsi_host_template dc395x_driver_template = {
.show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
- .bios_param = dc395x_bios_param,
.slave_alloc = dc395x_slave_alloc,
.slave_destroy = dc395x_slave_destroy,
.can_queue = DC395x_MAX_CAN_QUEUE,
@@ -4698,6 +4622,7 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* initialise the adapter and everything we need */
if (adapter_init(acb, io_port_base, io_port_len, irq)) {
dprintkl(KERN_INFO, "adapter init failed\n");
+ acb = NULL;
goto fail;
}
@@ -4761,30 +4686,7 @@ static struct pci_driver dc395x_driver = {
.probe = dc395x_init_one,
.remove = dc395x_remove_one,
};
-
-
-/**
- * dc395x_module_init - Module initialization function
- *
- * Used by both module and built-in driver to initialise this driver.
- **/
-static int __init dc395x_module_init(void)
-{
- return pci_register_driver(&dc395x_driver);
-}
-
-
-/**
- * dc395x_module_exit - Module cleanup function.
- **/
-static void __exit dc395x_module_exit(void)
-{
- pci_unregister_driver(&dc395x_driver);
-}
-
-
-module_init(dc395x_module_init);
-module_exit(dc395x_module_exit);
+module_pci_driver(dc395x_driver);
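
module_pci_driver() generates exactly the init/exit pair deleted above; from
<linux/pci.h>:

	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)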
MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
diff --git a/drivers/scsi/dc395x.h b/drivers/scsi/dc395x.h
index 5379a936141a..24a36c046d07 100644
--- a/drivers/scsi/dc395x.h
+++ b/drivers/scsi/dc395x.h
@@ -156,15 +156,6 @@
#define H_ABORT 0x0FF
/* SCSI BUS Status byte codes */
-#define SCSI_STAT_GOOD 0x0 /* Good status */
-#define SCSI_STAT_CHECKCOND 0x02 /* SCSI Check Condition */
-#define SCSI_STAT_CONDMET 0x04 /* Condition Met */
-#define SCSI_STAT_BUSY 0x08 /* Target busy status */
-#define SCSI_STAT_INTER 0x10 /* Intermediate status */
-#define SCSI_STAT_INTERCONDMET 0x14 /* Intermediate condition met */
-#define SCSI_STAT_RESCONFLICT 0x18 /* Reservation conflict */
-#define SCSI_STAT_CMDTERM 0x22 /* Command Terminated */
-#define SCSI_STAT_QUEUEFULL 0x28 /* Queue Full */
#define SCSI_STAT_UNEXP_BUS_F 0xFD /* Unexpect Bus Free */
#define SCSI_STAT_BUS_RST_DETECT 0xFE /* Scsi Bus Reset detected */
#define SCSI_STAT_SEL_TIMEOUT 0xFF /* Selection Time out */
@@ -181,35 +172,6 @@
#define SYNC_NEGO_OFFSET 15
-/* SCSI MSG BYTE */
-#define MSG_COMPLETE 0x00
-#define MSG_EXTENDED 0x01
-#define MSG_SAVE_PTR 0x02
-#define MSG_RESTORE_PTR 0x03
-#define MSG_DISCONNECT 0x04
-#define MSG_INITIATOR_ERROR 0x05
-#define MSG_ABORT 0x06
-#define MSG_REJECT_ 0x07
-#define MSG_NOP 0x08
-#define MSG_PARITY_ERROR 0x09
-#define MSG_LINK_CMD_COMPL 0x0A
-#define MSG_LINK_CMD_COMPL_FLG 0x0B
-#define MSG_BUS_RESET 0x0C
-#define MSG_ABORT_TAG 0x0D
-#define MSG_SIMPLE_QTAG 0x20
-#define MSG_HEAD_QTAG 0x21
-#define MSG_ORDER_QTAG 0x22
-#define MSG_IGNOREWIDE 0x23
-#define MSG_IDENTIFY 0x80
-#define MSG_HOST_ID 0xC0
-
-/* SCSI STATUS BYTE */
-#define STATUS_GOOD 0x00
-#define CHECK_CONDITION_ 0x02
-#define STATUS_BUSY 0x08
-#define STATUS_INTERMEDIATE 0x10
-#define RESERVE_CONFLICT 0x18
-
/* cmd->result */
#define STATUS_MASK_ 0xFF
#define MSG_MASK 0xFF00
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f32da0ca529e..610a51538f03 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -88,6 +88,7 @@ struct alua_dh_data {
struct scsi_device *sdev;
int init_error;
struct mutex init_mutex;
+ bool disabled;
};
struct alua_queue_data {
@@ -126,7 +127,7 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
u8 cdb[MAX_COMMAND_SIZE];
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the command. */
@@ -156,7 +157,7 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the data buffer */
@@ -405,15 +406,23 @@ static char print_alua_state(unsigned char state)
}
}
-static int alua_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
{
+ struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group *pg;
+
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
/*
* LUN Not Accessible - ALUA state transition
*/
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (pg)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+ rcu_read_unlock();
alua_check(sdev, false);
return NEEDS_RETRY;
}
@@ -507,13 +516,20 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
int len, k, off, bufflen = ALUA_RTPG_SIZE;
+ int group_id_old, state_old, pref_old, valid_states_old;
unsigned char *desc, *buff;
- unsigned err, retval;
+ unsigned err;
+ int retval;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
bool transitioning_sense = false;
+ group_id_old = pg->group_id;
+ state_old = pg->state;
+ pref_old = pg->pref;
+ valid_states_old = pg->valid_states;
+
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -548,13 +564,15 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
kfree(buff);
return SCSI_DH_OK;
}
- if (!scsi_sense_valid(&sense_hdr)) {
+ if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
ALUA_DH_NAME, retval);
kfree(buff);
- if (driver_byte(retval) == DRIVER_ERROR)
+ if (retval < 0)
return SCSI_DH_DEV_TEMP_BUSY;
+ if (host_byte(retval) == DID_NO_CONNECT)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
return SCSI_DH_IO;
}
@@ -565,10 +583,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
* even though it shouldn't according to T10.
* The retry without rtpg_ext_hdr_req set
* handles this.
+ * Note: some arrays return a sense key of ILLEGAL_REQUEST
+ * with ASC 00h if they don't support the extended header.
*/
if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
- sense_hdr.sense_key == ILLEGAL_REQUEST &&
- sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+ sense_hdr.sense_key == ILLEGAL_REQUEST) {
pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
goto retry;
}
@@ -658,8 +677,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_lock();
list_for_each_entry_rcu(h,
&tmp_pg->dh_list, node) {
- /* h->sdev should always be valid */
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state = desc[0];
}
rcu_read_unlock();
@@ -678,17 +697,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
if (transitioning_sense)
pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
- sdev_printk(KERN_INFO, sdev,
- "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
- ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
- pg->pref ? "preferred" : "non-preferred",
- pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ if (group_id_old != pg->group_id || state_old != pg->state ||
+ pref_old != pg->pref || valid_states_old != pg->valid_states)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
+ ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+ pg->pref ? "preferred" : "non-preferred",
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
@@ -705,7 +726,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
pg->expiry = 0;
rcu_read_lock();
list_for_each_entry_rcu(h, &pg->dh_list, node) {
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state =
(pg->state & SCSI_ACCESS_STATE_MASK);
if (pg->pref)
@@ -773,11 +795,11 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
if (retval) {
- if (!scsi_sense_valid(&sense_hdr)) {
+ if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: stpg failed, result %d",
ALUA_DH_NAME, retval);
- if (driver_byte(retval) == DRIVER_ERROR)
+ if (retval < 0)
return SCSI_DH_DEV_TEMP_BUSY;
} else {
sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
@@ -789,6 +811,51 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
return SCSI_DH_RETRY;
}
+static bool alua_rtpg_select_sdev(struct alua_port_group *pg)
+{
+ struct alua_dh_data *h;
+ struct scsi_device *sdev = NULL;
+
+ lockdep_assert_held(&pg->lock);
+ if (WARN_ON(!pg->rtpg_sdev))
+ return false;
+
+ /*
+	 * RCU protection isn't necessary for dh_list here, as we
+	 * hold pg->lock, but it is needed for access to h->pg.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &pg->dh_list, node) {
+ if (!h->sdev)
+ continue;
+ if (h->sdev == pg->rtpg_sdev) {
+ h->disabled = true;
+ continue;
+ }
+ if (rcu_dereference(h->pg) == pg &&
+ !h->disabled &&
+ !scsi_device_get(h->sdev)) {
+ sdev = h->sdev;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sdev) {
+ pr_warn("%s: no device found for rtpg\n",
+ (pg->device_id_len ?
+ (char *)pg->device_id_str : "(nameless PG)"));
+ return false;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");
+
+ scsi_device_put(pg->rtpg_sdev);
+ pg->rtpg_sdev = sdev;
+
+ return true;
+}
+
static void alua_rtpg_work(struct work_struct *work)
{
struct alua_port_group *pg =
@@ -797,6 +864,7 @@ static void alua_rtpg_work(struct work_struct *work)
LIST_HEAD(qdata_list);
int err = SCSI_DH_OK;
struct alua_queue_data *qdata, *tmp;
+ struct alua_dh_data *h;
unsigned long flags;
spin_lock_irqsave(&pg->lock, flags);
@@ -830,9 +898,18 @@ static void alua_rtpg_work(struct work_struct *work)
}
err = alua_rtpg(sdev, pg);
spin_lock_irqsave(&pg->lock, flags);
- if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+
+ /* If RTPG failed on the current device, try using another */
+ if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
+ alua_rtpg_select_sdev(pg))
+ err = SCSI_DH_IMM_RETRY;
+
+ if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY ||
+ pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
- if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ if (err == SCSI_DH_IMM_RETRY)
+ pg->interval = 0;
+ else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
@@ -860,6 +937,12 @@ static void alua_rtpg_work(struct work_struct *work)
}
list_splice_init(&pg->rtpg_list, &qdata_list);
+ /*
+ * We went through an RTPG, for good or bad.
+ * Re-enable all devices for the next attempt.
+ */
+ list_for_each_entry(h, &pg->dh_list, node)
+ h->disabled = false;
pg->rtpg_sdev = NULL;
spin_unlock_irqrestore(&pg->lock, flags);
@@ -944,6 +1027,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
int err = SCSI_DH_DEV_UNSUPP, tpgs;
mutex_lock(&h->init_mutex);
+ h->disabled = false;
tpgs = alua_check_tpgs(sdev);
if (tpgs != TPGS_MODE_NONE)
err = alua_check_vpd(sdev, h, tpgs);
@@ -1062,7 +1146,6 @@ static void alua_check(struct scsi_device *sdev, bool force)
return;
}
rcu_read_unlock();
-
alua_rtpg_queue(pg, sdev, NULL, force);
kref_put(&pg->kref, release_port_group);
}
@@ -1089,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
- return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_RESOURCE;
+ return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
@@ -1147,7 +1229,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
spin_lock(&h->pg_lock);
pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
- h->sdev = NULL;
spin_unlock(&h->pg_lock);
if (pg) {
spin_lock_irq(&pg->lock);
@@ -1156,6 +1237,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
kref_put(&pg->kref, release_port_group);
}
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index caa685cfe3d4..2e21ab447873 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -239,7 +239,7 @@ static int send_trespass_cmd(struct scsi_device *sdev,
unsigned char cdb[MAX_COMMAND_SIZE];
int err, res = SCSI_DH_OK, len;
struct scsi_sense_hdr sshdr;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
@@ -280,8 +280,8 @@ static int send_trespass_cmd(struct scsi_device *sdev,
return res;
}
-static int clariion_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static enum scsi_disposition clariion_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
{
switch (sense_hdr->sense_key) {
case NOT_READY:
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 8acd4bb9fefb..0d2cfa60aa06 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -60,7 +60,7 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
ret = SCSI_DH_OK;
break;
}
- /* Fallthrough */
+ fallthrough;
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, sense %x/%x/%x\n",
@@ -83,7 +83,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
int ret = SCSI_DH_OK, res;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
@@ -121,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_device *sdev = h->sdev;
int res, rc = SCSI_DH_OK;
int retry_cnt = HP_SW_RETRIES;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
@@ -147,7 +147,7 @@ retry:
rc = SCSI_DH_RETRY;
break;
}
- /* fall through */
+ fallthrough;
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending start_stop_unit failed, "
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5efc959493ec..bf8754741f85 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
else {
- list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev;
+ list_add_rcu(&h->node, &h->ctlr->dh_list);
}
spin_unlock(&list_lock);
err = SCSI_DH_OK;
@@ -536,7 +536,7 @@ static void send_mode_select(struct work_struct *work)
unsigned char cdb[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int data_size;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
spin_lock(&ctlr->ms_lock);
@@ -656,8 +656,8 @@ static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
return BLK_STS_OK;
}
-static int rdac_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
{
struct rdac_dh_data *h = sdev->handler_data;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock);
if (h->ctlr) {
list_del_rcu(&h->node);
- h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
}
spin_unlock(&list_lock);
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 6df60b31ecb0..a171ce6b70b2 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -52,7 +52,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
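
With .cmd_size set, the midlayer reserves that many bytes of per-command
private data directly behind each scsi_cmnd, reachable through scsi_cmd_priv()
from <scsi/scsi_cmnd.h>:

	static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
	{
		return cmd + 1;	/* driver data follows the command */
	}

	/* e.g. in the 5380 core:
	 * struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
	 */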
static int dmx3191d_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
deleted file mode 100644
index bf0daeeb50a9..000000000000
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _SCSI_I2O_H
-#define _SCSI_I2O_H
-
-/* I2O kernel space accessible structures/APIs
- *
- * (c) Copyright 1999, 2000 Red Hat Software
- *
- *************************************************************************
- *
- * This header file defined the I2O APIs/structures for use by
- * the I2O kernel modules.
- */
-
-#ifdef __KERNEL__ /* This file to be included by kernel only */
-
-#include <linux/i2o-dev.h>
-
-#include <linux/notifier.h>
-#include <linux/atomic.h>
-
-
-/*
- * Tunable parameters first
- */
-
-/* How many different OSM's are we allowing */
-#define MAX_I2O_MODULES 64
-
-#define I2O_EVT_CAPABILITY_OTHER 0x01
-#define I2O_EVT_CAPABILITY_CHANGED 0x02
-
-#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
-
-//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
-
-#define I2O_MAX_MANAGERS 4
-
-/*
- * I2O Interface Objects
- */
-
-#include <linux/wait.h>
-typedef wait_queue_head_t adpt_wait_queue_head_t;
-#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_entry_t adpt_wait_queue_entry_t;
-
-/*
- * message structures
- */
-
-struct i2o_message
-{
- u8 version_offset;
- u8 flags;
- u16 size;
- u32 target_tid:12;
- u32 init_tid:12;
- u32 function:8;
- u32 initiator_context;
- /* List follows */
-};
-
-struct adpt_device;
-struct _adpt_hba;
-struct i2o_device
-{
- struct i2o_device *next; /* Chain */
- struct i2o_device *prev;
-
- char dev_name[8]; /* linux /dev name if available */
- i2o_lct_entry lct_data;/* Device LCT information */
- u32 flags;
- struct proc_dir_entry* proc_entry; /* /proc dir */
- struct adpt_device *owner;
- struct _adpt_hba *controller; /* Controlling IOP */
-};
-
-/*
- * Each I2O controller has one of these objects
- */
-
-struct i2o_controller
-{
- char name[16];
- int unit;
- int type;
- int enabled;
-
- struct notifier_block *event_notifer; /* Events */
- atomic_t users;
- struct i2o_device *devices; /* I2O device chain */
- struct i2o_controller *next; /* Controller chain */
-
-};
-
-/*
- * I2O System table entry
- */
-struct i2o_sys_tbl_entry
-{
- u16 org_id;
- u16 reserved1;
- u32 iop_id:12;
- u32 reserved2:20;
- u16 seg_num:12;
- u16 i2o_version:4;
- u8 iop_state;
- u8 msg_type;
- u16 frame_size;
- u16 reserved3;
- u32 last_changed;
- u32 iop_capabilities;
- u32 inbound_low;
- u32 inbound_high;
-};
-
-struct i2o_sys_tbl
-{
- u8 num_entries;
- u8 version;
- u16 reserved1;
- u32 change_ind;
- u32 reserved2;
- u32 reserved3;
- struct i2o_sys_tbl_entry iops[0];
-};
-
-/*
- * I2O classes / subclasses
- */
-
-/* Class ID and Code Assignments
- * (LCT.ClassID.Version field)
- */
-#define I2O_CLASS_VERSION_10 0x00
-#define I2O_CLASS_VERSION_11 0x01
-
-/* Class code names
- * (from v1.5 Table 6-1 Class Code Assignments.)
- */
-
-#define I2O_CLASS_EXECUTIVE 0x000
-#define I2O_CLASS_DDM 0x001
-#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
-#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
-#define I2O_CLASS_LAN 0x020
-#define I2O_CLASS_WAN 0x030
-#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
-#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
-#define I2O_CLASS_SCSI_PERIPHERAL 0x051
-#define I2O_CLASS_ATE_PORT 0x060
-#define I2O_CLASS_ATE_PERIPHERAL 0x061
-#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
-#define I2O_CLASS_FLOPPY_DEVICE 0x071
-#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
-#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
-#define I2O_CLASS_PEER_TRANSPORT 0x091
-
-/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
- */
-
-#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
-
-/* Subclasses
- */
-
-#define I2O_SUBCLASS_i960 0x001
-#define I2O_SUBCLASS_HDM 0x020
-#define I2O_SUBCLASS_ISM 0x021
-
-/* Operation functions */
-
-#define I2O_PARAMS_FIELD_GET 0x0001
-#define I2O_PARAMS_LIST_GET 0x0002
-#define I2O_PARAMS_MORE_GET 0x0003
-#define I2O_PARAMS_SIZE_GET 0x0004
-#define I2O_PARAMS_TABLE_GET 0x0005
-#define I2O_PARAMS_FIELD_SET 0x0006
-#define I2O_PARAMS_LIST_SET 0x0007
-#define I2O_PARAMS_ROW_ADD 0x0008
-#define I2O_PARAMS_ROW_DELETE 0x0009
-#define I2O_PARAMS_TABLE_CLEAR 0x000A
-
-/*
- * I2O serial number conventions / formats
- * (circa v1.5)
- */
-
-#define I2O_SNFORMAT_UNKNOWN 0
-#define I2O_SNFORMAT_BINARY 1
-#define I2O_SNFORMAT_ASCII 2
-#define I2O_SNFORMAT_UNICODE 3
-#define I2O_SNFORMAT_LAN48_MAC 4
-#define I2O_SNFORMAT_WAN 5
-
-/* Plus new in v2.0 (Yellowstone pdf doc)
- */
-
-#define I2O_SNFORMAT_LAN64_MAC 6
-#define I2O_SNFORMAT_DDM 7
-#define I2O_SNFORMAT_IEEE_REG64 8
-#define I2O_SNFORMAT_IEEE_REG128 9
-#define I2O_SNFORMAT_UNKNOWN2 0xff
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
-/*
- * Messaging API values
- */
-
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-
-#define I2O_PRIVATE_MSG 0xFF
-
-/*
- * Init Outbound Q status
- */
-
-#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
-#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
-#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
-#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
-
-/*
- * I2O Get Status State values
- */
-
-#define ADAPTER_STATE_INITIALIZING 0x01
-#define ADAPTER_STATE_RESET 0x02
-#define ADAPTER_STATE_HOLD 0x04
-#define ADAPTER_STATE_READY 0x05
-#define ADAPTER_STATE_OPERATIONAL 0x08
-#define ADAPTER_STATE_FAILED 0x10
-#define ADAPTER_STATE_FAULTED 0x11
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
-#define I2OVERSION I2OVER15
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-
-#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
-#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_LAST 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-
-/* Special TID Assignments */
-
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-#define MSG_FRAME_SIZE 128
-#define NMBR_MSG_FRAMES 128
-
-#define MSG_POOL_SIZE 16384
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _SCSI_I2O_H */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
deleted file mode 100644
index 6bc33f4f020d..000000000000
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti_ioctl.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.txt for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-/***************************************************************************
- * This file is generated from osd_unix.h *
- * *************************************************************************/
-
-#ifndef _dpti_ioctl_h
-#define _dpti_ioctl_h
-
-// IOCTL interface commands
-
-#ifndef _IOWR
-# define _IOWR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOW
-# define _IOW(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOR
-# define _IOR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IO
-# define _IO(x,y) (((x)<<8)|y)
-#endif
-/* EATA PassThrough Command */
-#define EATAUSRCMD _IOWR('D',65,EATA_CP)
-/* Set Debug Level If Enabled */
-#define DPT_DEBUG _IOW('D',66,int)
-/* Get Signature Structure */
-#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S)
-#if defined __bsdi__
-#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed)
-#endif
-/* Get Number Of DPT Adapters */
-#define DPT_NUMCTRLS _IOR('D',68,int)
-/* Get Adapter Info Structure */
-#define DPT_CTRLINFO _IOR('D',69,CtrlInfo)
-/* Get Statistics If Enabled */
-#define DPT_STATINFO _IO('D',70)
-/* Clear Stats If Enabled */
-#define DPT_CLRSTAT _IO('D',71)
-/* Get System Info Structure */
-#define DPT_SYSINFO _IOR('D',72,sysInfo_S)
-/* Set Timeout Value */
-#define DPT_TIMEOUT _IO('D',73)
-/* Get config Data */
-#define DPT_CONFIG _IO('D',74)
-/* Get Blink LED Code */
-#define DPT_BLINKLED _IOR('D',75,int)
-/* Get Statistical information (if available) */
-#define DPT_STATS_INFO _IOR('D',80,STATS_DATA)
-/* Clear the statistical information */
-#define DPT_STATS_CLEAR _IO('D',81)
-/* Get Performance metrics */
-#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t)
-/* Send an I2O command */
-#define I2OUSRCMD _IO('D',76)
-/* Inform driver to re-acquire LCT information */
-#define I2ORESCANCMD _IO('D',77)
-/* Inform driver to reset adapter */
-#define I2ORESETCMD _IO('D',78)
-/* See if the target is mounted */
-#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T)
-
-
- /* Structure Returned From Get Controller Info */
-
-typedef struct {
- uCHAR state; /* Operational state */
- uCHAR id; /* Host adapter SCSI id */
- int vect; /* Interrupt vector number */
- int base; /* Base I/O address */
- int njobs; /* # of jobs sent to HA */
- int qdepth; /* Controller queue depth. */
- int wakebase; /* mpx wakeup base index. */
- uINT SGsize; /* Scatter/Gather list size. */
- unsigned heads; /* heads for drives on cntlr. */
- unsigned sectors; /* sectors for drives on cntlr. */
- uCHAR do_drive32; /* Flag for Above 16 MB Ability */
- uCHAR BusQuiet; /* SCSI Bus Quiet Flag */
- char idPAL[4]; /* 4 Bytes Of The ID Pal */
- uCHAR primary; /* 1 For Primary, 0 For Secondary */
- uCHAR eataVersion; /* EATA Version */
- uINT cpLength; /* EATA Command Packet Length */
- uINT spLength; /* EATA Status Packet Length */
- uCHAR drqNum; /* DRQ Index (0,5,6,7) */
- uCHAR flag1; /* EATA Flags 1 (Byte 9) */
- uCHAR flag2; /* EATA Flags 2 (Byte 30) */
-} CtrlInfo;
-
-typedef struct {
- uSHORT length; // Remaining length of this
- uSHORT drvrHBAnum; // Relative HBA # used by the driver
- uINT baseAddr; // Base I/O address
- uSHORT blinkState; // Blink LED state (0=Not in blink LED)
- uCHAR pciBusNum; // PCI Bus # (Optional)
- uCHAR pciDeviceNum; // PCI Device # (Optional)
- uSHORT hbaFlags; // Miscellaneous HBA flags
- uSHORT Interrupt; // Interrupt set for this device.
-# if (defined(_DPT_ARC))
- uINT baseLength;
- ADAPTER_OBJECT *AdapterObject;
- LARGE_INTEGER DmaLogicalAddress;
- PVOID DmaVirtualAddress;
- LARGE_INTEGER ReplyLogicalAddress;
- PVOID ReplyVirtualAddress;
-# else
- uINT reserved1; // Reserved for future expansion
- uINT reserved2; // Reserved for future expansion
- uINT reserved3; // Reserved for future expansion
-# endif
-} drvrHBAinfo_S;
-
-typedef struct TARGET_BUSY
-{
- uLONG channel;
- uLONG id;
- uLONG lun;
- uLONG isBusy;
-} TARGET_BUSY_T;
-
-#endif
-
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
deleted file mode 100644
index a6644b332b53..000000000000
--- a/drivers/scsi/dpt/dptsig.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __DPTSIG_H_
-#define __DPTSIG_H_
-#ifdef _SINIX_ADDON
-#include "dpt.h"
-#endif
-/* DPT SIGNATURE SPEC AND HEADER FILE */
-/* Signature Version 1 (sorry no 'A') */
-
-/* to make sure we are talking the same size under all OS's */
-typedef unsigned char sigBYTE;
-typedef unsigned short sigWORD;
-typedef unsigned int sigINT;
-
-/*
- * use sigWORDLittleEndian for:
- * dsCapabilities
- * dsDeviceSupp
- * dsAdapterSupp
- * dsApplication
- * use sigLONGLittleEndian for:
- * dsOS
- * so that the sig can be standardised to Little Endian
- */
-#if (defined(_DPT_BIG_ENDIAN))
-# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF))
-# define sigLONGLittleEndian(x) \
- ((((x)&0xFF)<<24) | \
- (((x)&0xFF00)<<8) | \
- (((x)&0xFF0000L)>>8) | \
- (((x)&0xFF000000L)>>24))
-#else
-# define sigWORDLittleEndian(x) (x)
-# define sigLONGLittleEndian(x) (x)
-#endif
-
-/* must make sure the structure is not word or double-word aligned */
-/* --------------------------------------------------------------- */
-/* Borland will ignore the following pragma: */
-/* Word alignment is OFF by default. If in the, IDE make */
-/* sure that Options | Compiler | Code Generation | Word Alignment */
-/* is not checked. If using BCC, do not use the -a option. */
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=mac68k
-#endif
-
-
-/* Current Signature Version - sigBYTE dsSigVersion; */
-/* ------------------------------------------------------------------ */
-#define SIG_VERSION 1
-
-/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-/* What type of processor the file is meant to run on. */
-/* This will let us know whether to read sigWORDs as high/low or low/high. */
-#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */
-#define PROC_MOTOROLA 0x01 /* Motorola 68K */
-#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
-#define PROC_ALPHA 0x03 /* DEC Alpha */
-#define PROC_POWERPC 0x04 /* IBM Power PC */
-#define PROC_i960 0x05 /* Intel i960 */
-#define PROC_ULTRASPARC 0x06 /* SPARC processor */
-
-/* Specific Minimim Processor - sigBYTE dsProcessor; FLAG BITS */
-/* ------------------------------------------------------------------ */
-/* Different bit definitions dependent on processor_family */
-
-/* PROC_INTEL: */
-#define PROC_8086 0x01 /* Intel 8086 */
-#define PROC_286 0x02 /* Intel 80286 */
-#define PROC_386 0x04 /* Intel 80386 */
-#define PROC_486 0x08 /* Intel 80486 */
-#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
-#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
-#define PROC_IA64 0x40 /* Intel IA64 processor */
-
-/* PROC_i960: */
-#define PROC_960RX 0x01 /* Intel 80960RC/RD */
-#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
-
-/* PROC_MOTOROLA: */
-#define PROC_68000 0x01 /* Motorola 68000 */
-#define PROC_68010 0x02 /* Motorola 68010 */
-#define PROC_68020 0x04 /* Motorola 68020 */
-#define PROC_68030 0x08 /* Motorola 68030 */
-#define PROC_68040 0x10 /* Motorola 68040 */
-
-/* PROC_POWERPC */
-#define PROC_PPC601 0x01 /* PowerPC 601 */
-#define PROC_PPC603 0x02 /* PowerPC 603 */
-#define PROC_PPC604 0x04 /* PowerPC 604 */
-
-/* PROC_MIPS4000: */
-#define PROC_R4000 0x01 /* MIPS R4000 */
-
-/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define FT_EXECUTABLE 0 /* Executable Program */
-#define FT_SCRIPT 1 /* Script/Batch File??? */
-#define FT_HBADRVR 2 /* HBA Driver */
-#define FT_OTHERDRVR 3 /* Other Driver */
-#define FT_IFS 4 /* Installable Filesystem Driver */
-#define FT_ENGINE 5 /* DPT Engine */
-#define FT_COMPDRVR 6 /* Compressed Driver Disk */
-#define FT_LANGUAGE 7 /* Foreign Language file */
-#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */
-#define FT_COMMMODL 9 /* Communications Module */
-#define FT_INT13 10 /* INT 13 style HBA Driver */
-#define FT_HELPFILE 11 /* Help file */
-#define FT_LOGGER 12 /* Event Logger */
-#define FT_INSTALL 13 /* An Install Program */
-#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
-#define FT_RESOURCE 15 /* Storage Manager Resource File */
-#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
-
-/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define FTF_DLL 0x01 /* Dynamic Link Library */
-#define FTF_NLM 0x02 /* Netware Loadable Module */
-#define FTF_OVERLAYS 0x04 /* Uses overlays */
-#define FTF_DEBUG 0x08 /* Debug version */
-#define FTF_TSR 0x10 /* TSR */
-#define FTF_SYS 0x20 /* DOS Loadable driver */
-#define FTF_PROTECTED 0x40 /* Runs in protected mode */
-#define FTF_APP_SPEC 0x80 /* Application Specific */
-#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */
-
-/* OEM - sigBYTE dsOEM; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define OEM_DPT 0 /* DPT */
-#define OEM_ATT 1 /* ATT */
-#define OEM_NEC 2 /* NEC */
-#define OEM_ALPHA 3 /* Alphatronix */
-#define OEM_AST 4 /* AST */
-#define OEM_OLIVETTI 5 /* Olivetti */
-#define OEM_SNI 6 /* Siemens/Nixdorf */
-#define OEM_SUN 7 /* SUN Microsystems */
-
-/* Operating System - sigLONG dsOS; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define OS_DOS 0x00000001 /* PC/MS-DOS */
-#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */
-#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */
-#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */
-#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */
-#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */
-#define OS_NW286 0x00000040 /* Novell NetWare 286 */
-#define OS_NW386 0x00000080 /* Novell NetWare 386 */
-#define OS_GEN_UNIX 0x00000100 /* Generic Unix */
-#define OS_SCO_UNIX 0x00000200 /* SCO Unix */
-#define OS_ATT_UNIX 0x00000400 /* ATT Unix */
-#define OS_UNIXWARE 0x00000800 /* USL Unix */
-#define OS_INT_UNIX 0x00001000 /* Interactive Unix */
-#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */
-#define OS_QNX 0x00004000 /* QNX for Tom Moch */
-#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */
-#define OS_BANYAN 0x00010000 /* Banyan Vines */
-#define OS_OLIVETTI_UNIX 0x00020000/* Olivetti Unix */
-#define OS_MAC_OS 0x00040000 /* Mac OS */
-#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */
-#define OS_NW4x 0x00100000 /* Novell Netware 4.x */
-#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */
-#define OS_AIX_UNIX 0x00400000 /* AIX Unix */
-#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */
-#define OS_LINUX 0x01000000 /* Linux */
-#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */
-#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
-#define OS_PLAN9 0x08000000 /* ATT Plan 9 */
-#define OS_TSX 0x10000000 /* SNH TSX-32 */
-
-#define OS_OTHER 0x80000000 /* Other */
-
-/* Capabilities - sigWORD dsCapabilities; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define CAP_RAID0 0x0001 /* RAID-0 */
-#define CAP_RAID1 0x0002 /* RAID-1 */
-#define CAP_RAID3 0x0004 /* RAID-3 */
-#define CAP_RAID5 0x0008 /* RAID-5 */
-#define CAP_SPAN 0x0010 /* Spanning */
-#define CAP_PASS 0x0020 /* Provides passthrough */
-#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */
-#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */
-#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */
-#define CAP_EXTEND 0x8000 /* Extended info appears after description */
-#ifdef SNI_MIPS
-#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */
-#endif
-
-/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define DEV_DASD 0x0001 /* DASD (hard drives) */
-#define DEV_TAPE 0x0002 /* Tape drives */
-#define DEV_PRINTER 0x0004 /* Printers */
-#define DEV_PROC 0x0008 /* Processors */
-#define DEV_WORM 0x0010 /* WORM drives */
-#define DEV_CDROM 0x0020 /* CD-ROM drives */
-#define DEV_SCANNER 0x0040 /* Scanners */
-#define DEV_OPTICAL 0x0080 /* Optical Drives */
-#define DEV_JUKEBOX 0x0100 /* Jukebox */
-#define DEV_COMM 0x0200 /* Communications Devices */
-#define DEV_OTHER 0x0400 /* Other Devices */
-#define DEV_ALL 0xFFFF /* All SCSI Devices */
-
-/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define ADF_2001 0x0001 /* PM2001 */
-#define ADF_2012A 0x0002 /* PM2012A */
-#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */
-#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */
-#define ADF_SC3_ISA 0x0010 /* PM2021 */
-#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */
-#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */
-#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */
-#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */
-#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */
-#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */
-/*
- * Combinations of products
- */
-#define ADF_ALL_2000 (ADF_2001|ADF_2012A)
-#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA)
-#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI)
-#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI)
-#define ADF_ALL_SC5 (ADF_SC5_PCI)
-/* All EATA Cacheing Products */
-#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4)
-/* All EATA Bus Mastering Products */
-#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE)
-/* All EATA Adapter Products */
-#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER)
-#define ADF_ALL ADF_ALL_EATA
-
-/* Application - sigWORD dsApplication; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define APP_DPTMGR 0x0001 /* DPT Storage Manager */
-#define APP_ENGINE 0x0002 /* DPT Engine */
-#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */
-#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */
-#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */
-#define APP_NOVABACK 0x0020 /* NovaStor Novaback */
-#define APP_AIM 0x0040 /* Archive Information Manager */
-
-/* Requirements - sigBYTE dsRequirements; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */
-#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */
-#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */
-#define REQ_ASPI_TRAN 0x08 /* Requires an ASPI Transport Modules */
-#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
-#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
-
-/*
- * You may adjust dsDescription_size with an override to a value less than
- * 50 so that the structure allocates less real space.
- */
-#if (!defined(dsDescription_size))
-# define dsDescription_size 50
-#endif
-
-typedef struct dpt_sig {
- char dsSignature[6]; /* ALWAYS "dPtSiG" */
- sigBYTE dsSigVersion; /* signature version (currently 1) */
- sigBYTE dsProcessorFamily; /* what type of processor */
- sigBYTE dsProcessor; /* precise processor */
- sigBYTE dsFiletype; /* type of file */
- sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
- sigBYTE dsOEM; /* OEM file was created for */
- sigINT dsOS; /* which Operating systems */
- sigWORD dsCapabilities; /* RAID levels, etc. */
- sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
- sigWORD dsAdapterSupp; /* DPT adapter families supported */
- sigWORD dsApplication; /* applications file is for */
- sigBYTE dsRequirements; /* Other driver dependencies */
- sigBYTE dsVersion; /* 1 */
- sigBYTE dsRevision; /* 'J' */
- sigBYTE dsSubRevision; /* '9' ' ' if N/A */
- sigBYTE dsMonth; /* creation month */
- sigBYTE dsDay; /* creation day */
- sigBYTE dsYear; /* creation year since 1980 (1993=13) */
- /* description (NULL terminated) */
- char dsDescription[dsDescription_size];
-} dpt_sig_S;
-/* 32 bytes minimum - with no description. Put NULL at description[0] */
-/* 81 bytes maximum - with 49 character description plus NULL. */
-
-/* This line added at Roycroft's request */
-/* Microsoft's NT compiler gets confused if you do a pack and don't */
-/* restore it. */
-
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=reset
-#endif
-
-#endif
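
For reference, the dptsig.h file deleted above stored every multi-byte signature field in little-endian order regardless of host byte order; on big-endian builds its sigWORDLittleEndian/sigLONGLittleEndian macros byte-reversed values before storing. A hypothetical standalone re-creation of the 32-bit case, runnable as plain userspace C:

#include <stdint.h>
#include <stdio.h>

/* Reverse all four bytes of a 32-bit value, as sigLONGLittleEndian
 * did when _DPT_BIG_ENDIAN was defined. */
static uint32_t sig_le32(uint32_t x)
{
	return ((x & 0x000000FFu) << 24) |
	       ((x & 0x0000FF00u) <<  8) |
	       ((x & 0x00FF0000u) >>  8) |
	       ((x & 0xFF000000u) >> 24);
}

int main(void)
{
	/* 0x12345678 stored little-endian becomes 0x78563412 */
	printf("0x%08X\n", sig_le32(0x12345678u));
	return 0;
}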
diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h
deleted file mode 100644
index de3ae5722982..000000000000
--- a/drivers/scsi/dpt/osd_defs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef _OSD_DEFS_H
-#define _OSD_DEFS_H
-
-/*File - OSD_DEFS.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains the OS dependent defines. This file is included
- *in osd_util.h and provides the OS specific defines for that file.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/31/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
- /* Define the operating system */
-#if (defined(__linux__))
-# define _DPT_LINUX
-#elif (defined(__bsdi__))
-# define _DPT_BSDI
-#elif (defined(__FreeBSD__))
-# define _DPT_FREE_BSD
-#else
-# define _DPT_SCO
-#endif
-
-#if defined (ZIL_CURSES)
-#define _DPT_CURSES
-#else
-#define _DPT_MOTIF
-#endif
-
- /* Redefine 'far' to nothing - no far pointer type required in UNIX */
-#define far
-
- /* Define the mutually exclusive semaphore type */
-#define SEMAPHORE_T unsigned int *
- /* Define a handle to a DLL */
-#define DLL_HANDLE_T unsigned int *
-
-#endif
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
deleted file mode 100644
index b2613c2eaac7..000000000000
--- a/drivers/scsi/dpt/osd_util.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __OSD_UTIL_H
-#define __OSD_UTIL_H
-
-/*File - OSD_UTIL.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains defines and function prototypes that are
- *operating system dependent. The resources defined in this file
- *are not specific to any particular application.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/7/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
-/*----------------------------- */
-/* Operating system selections: */
-/*----------------------------- */
-
-/*#define _DPT_MSDOS */
-/*#define _DPT_WIN_3X */
-/*#define _DPT_WIN_4X */
-/*#define _DPT_WIN_NT */
-/*#define _DPT_NETWARE */
-/*#define _DPT_OS2 */
-/*#define _DPT_SCO */
-/*#define _DPT_UNIXWARE */
-/*#define _DPT_SOLARIS */
-/*#define _DPT_NEXTSTEP */
-/*#define _DPT_BANYAN */
-
-/*-------------------------------- */
-/* Include the OS specific defines */
-/*-------------------------------- */
-
-/*#define OS_SELECTION From Above List */
-/*#define SEMAPHORE_T ??? */
-/*#define DLL_HANDLE_T ??? */
-
-#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
-# include "i386/isa/dpt_osd_defs.h"
-#else
-# include "osd_defs.h"
-#endif
-
-#ifndef DPT_UNALIGNED
- #define DPT_UNALIGNED
-#endif
-
-#ifndef DPT_EXPORT
- #define DPT_EXPORT
-#endif
-
-#ifndef DPT_IMPORT
- #define DPT_IMPORT
-#endif
-
-#ifndef DPT_RUNTIME_IMPORT
- #define DPT_RUNTIME_IMPORT DPT_IMPORT
-#endif
-
-/*--------------------- */
-/* OS dependent defines */
-/*--------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
- #define _DPT_16_BIT
-#else
- #define _DPT_32_BIT
-#endif
-
-#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
- #define _DPT_UNIX
-#endif
-
-#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
- || defined (_DPT_OS2)
- #define _DPT_DLL_SUPPORT
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
- #define _DPT_PREEMPTIVE
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
- #define _DPT_MULTI_THREADED
-#endif
-
-#if !defined (_DPT_MSDOS)
- #define _DPT_MULTI_TASKING
-#endif
-
- /* These exist for platforms that */
- /* chunk when accessing mis-aligned */
- /* data */
-#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
- #if defined (_DPT_BIG_ENDIAN)
- #if !defined (_DPT_STRICT_ALIGN)
- #define _DPT_STRICT_ALIGN
- #endif
- #endif
-#endif
-
- /* Determine if in C or C++ mode */
-#ifdef __cplusplus
- #define _DPT_CPP
-#else
- #define _DPT_C
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Under Solaris the compiler refuses to accept code like: */
-/* { {"DPT"}, 0, NULL .... }, */
-/* and complains about the {"DPT"} part by saying "cannot use { } */
-/* to initialize char*". */
-/* */
-/* By defining these ugly macros we can get around this and also */
-/* not have to copy and #ifdef large sections of code. I know that */
-/* these macros are *really* ugly, but they should help reduce */
-/* maintenance in the long run. */
-/* */
-/*-------------------------------------------------------------------*/
-#if !defined (DPTSQO)
- #if defined (_DPT_SOLARIS)
- #define DPTSQO
- #define DPTSQC
- #else
- #define DPTSQO {
- #define DPTSQC }
- #endif /* solaris */
-#endif /* DPTSQO */
-
-
-/*---------------------- */
-/* OS dependent typedefs */
-/*---------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
- #define BYTE unsigned char
- #define WORD unsigned short
-#endif
-
-#ifndef _DPT_TYPEDEFS
- #define _DPT_TYPEDEFS
- typedef unsigned char uCHAR;
- typedef unsigned short uSHORT;
- typedef unsigned int uINT;
- typedef unsigned long uLONG;
-
- typedef union {
- uCHAR u8[4];
- uSHORT u16[2];
- uLONG u32;
- } access_U;
-#endif
-
-#if !defined (NULL)
- #define NULL 0
-#endif
-
-
-/*Prototypes - function ----------------------------------------------------- */
-
-#ifdef __cplusplus
- extern "C" { /* Declare all these functions as "C" functions */
-#endif
-
-/*------------------------ */
-/* Byte reversal functions */
-/*------------------------ */
-
- /* Reverses the byte ordering of a 2 byte variable */
-#if (!defined(osdSwap2))
- uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
-#endif // !osdSwap2
-
- /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
-#if (!defined(osdSwap3))
- uLONG osdSwap3(DPT_UNALIGNED uLONG *);
-#endif // !osdSwap3
-
-
-#ifdef _DPT_NETWARE
- #include "novpass.h" /* For DPT_Bswapl() prototype */
- /* Inline the byte swap */
- #ifdef __cplusplus
- inline uLONG osdSwap4(uLONG *inLong) {
- return *inLong = DPT_Bswapl(*inLong);
- }
- #else
- #define osdSwap4(inLong) DPT_Bswapl(inLong)
- #endif // cplusplus
-#else
- /* Reverses the byte ordering of a 4 byte variable */
-# if (!defined(osdSwap4))
- uLONG osdSwap4(DPT_UNALIGNED uLONG *);
-# endif // !osdSwap4
-
- /* The following functions ALWAYS swap regardless of the *
- * presence of DPT_BIG_ENDIAN */
-
- uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
- uLONG trueSwap4(DPT_UNALIGNED uLONG *);
-
-#endif // netware
-
-
-/*-------------------------------------*
- * Network order swap functions *
- * *
- * These functions/macros will be used *
- * by the structure insert()/extract() *
- * functions. *
- *
- * We will enclose all structure *
- * portability modifications inside *
- * #ifdefs. When we are ready, we *
- * will #define DPT_PORTABLE to begin *
- * using the modifications. *
- *-------------------------------------*/
-uLONG netSwap4(uLONG val);
-
-#if defined (_DPT_BIG_ENDIAN)
-
-// for big-endian we need to swap
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) netSwap4((x))
-#endif // NET_SWAP_4
-
-#else
-
-// for little-endian we don't need to do anything
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (x)
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) (x)
-#endif // NET_SWAP_4
-
-#endif // big endian
-
-
-
-/*----------------------------------- */
-/* Run-time loadable module functions */
-/*----------------------------------- */
-
- /* Loads the specified run-time loadable DLL */
-DLL_HANDLE_T osdLoadModule(uCHAR *);
- /* Unloads the specified run-time loadable DLL */
-uSHORT osdUnloadModule(DLL_HANDLE_T);
- /* Returns a pointer to a function inside a run-time loadable DLL */
-void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
-
-/*--------------------------------------- */
-/* Mutually exclusive semaphore functions */
-/*--------------------------------------- */
-
- /* Create a named semaphore */
-SEMAPHORE_T osdCreateNamedSemaphore(char *);
- /* Create a mutually exlusive semaphore */
-SEMAPHORE_T osdCreateSemaphore(void);
- /* create an event semaphore */
-SEMAPHORE_T osdCreateEventSemaphore(void);
- /* create a named event semaphore */
-SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
-
- /* Destroy the specified mutually exclusive semaphore object */
-uSHORT osdDestroySemaphore(SEMAPHORE_T);
- /* Request access to the specified mutually exclusive semaphore */
-uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
- /* Release access to the specified mutually exclusive semaphore */
-uSHORT osdReleaseSemaphore(SEMAPHORE_T);
- /* wait for a event to happen */
-uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
- /* signal an event */
-uLONG osdSignalEventSemaphore(SEMAPHORE_T);
- /* reset the event */
-uLONG osdResetEventSemaphore(SEMAPHORE_T);
-
-/*----------------- */
-/* Thread functions */
-/*----------------- */
-
- /* Releases control to the task switcher in non-preemptive */
- /* multitasking operating systems. */
-void osdSwitchThreads(void);
-
- /* Starts a thread function */
-uLONG osdStartThread(void *,void *);
-
-/* what is my thread id */
-uLONG osdGetThreadID(void);
-
-/* wakes up the specifed thread */
-void osdWakeThread(uLONG);
-
-/* osd sleep for x milliseconds */
-void osdSleep(uLONG);
-
-#define DPT_THREAD_PRIORITY_LOWEST 0x00
-#define DPT_THREAD_PRIORITY_NORMAL 0x01
-#define DPT_THREAD_PRIORITY_HIGHEST 0x02
-
-uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
-
-#ifdef __cplusplus
- } /* end the xtern "C" declaration */
-#endif
-
-#endif /* osd_util_h */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
deleted file mode 100644
index a4aa1c31ff72..000000000000
--- a/drivers/scsi/dpt/sys_info.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __SYS_INFO_H
-#define __SYS_INFO_H
-
-/*File - SYS_INFO.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains structure definitions for the OS dependent
- *layer system information buffers.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Don Kemper
- *Date: 5/10/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Include Files ------------------------------------------------------------- */
-
-#include "osd_util.h"
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif // no unpack
-
-
-/*struct - driveParam_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the drive parameters seen during
- *booting.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct driveParam_S {
-#else
- typedef struct {
-#endif
-
- uSHORT cylinders; /* Up to 1024 */
- uCHAR heads; /* Up to 255 */
- uCHAR sectors; /* Up to 63 */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } driveParam_S;
-#endif
-/*driveParam_S - end */
-
-
-/*struct - sysInfo_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the command system information that
- *should be returned by every OS dependent layer.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define SI_CMOS_Valid 0x0001
-#define SI_NumDrivesValid 0x0002
-#define SI_ProcessorValid 0x0004
-#define SI_MemorySizeValid 0x0008
-#define SI_DriveParamsValid 0x0010
-#define SI_SmartROMverValid 0x0020
-#define SI_OSversionValid 0x0040
-#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */
-#define SI_BusTypeValid 0x0100
-
-#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */
-#define SI_NO_SmartROM 0x8000
-
-/*busType - definitions */
-#define SI_ISA_BUS 0x00
-#define SI_MCA_BUS 0x01
-#define SI_EISA_BUS 0x02
-#define SI_PCI_BUS 0x04
-
-#ifdef __cplusplus
- struct sysInfo_S {
-#else
- typedef struct {
-#endif
-
- uCHAR drive0CMOS; /* CMOS Drive 0 Type */
- uCHAR drive1CMOS; /* CMOS Drive 1 Type */
- uCHAR numDrives; /* 0040:0075 contents */
- uCHAR processorFamily; /* Same as DPTSIG's definition */
- uCHAR processorType; /* Same as DPTSIG's definition */
- uCHAR smartROMMajorVersion;
- uCHAR smartROMMinorVersion; /* SmartROM version */
- uCHAR smartROMRevision;
- uSHORT flags; /* See bit definitions above */
- uSHORT conventionalMemSize; /* in KB */
- uINT extendedMemSize; /* in KB */
- uINT osType; /* Same as DPTSIG's definition */
- uCHAR osMajorVersion;
- uCHAR osMinorVersion; /* The OS version */
- uCHAR osRevision;
-#ifdef _SINIX_ADDON
- uCHAR busType; /* See defininitions above */
- uSHORT osSubRevision;
- uCHAR pad[2]; /* For alignment */
-#else
- uCHAR osSubRevision;
- uCHAR busType; /* See defininitions above */
- uCHAR pad[3]; /* For alignment */
-#endif
- driveParam_S drives[16]; /* SmartROM Logical Drives */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } sysInfo_S;
-#endif
-/*sysInfo_S - end */
-
-
-/*struct - DOS_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *DOS workstation.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define DI_DOS_HIGH 0x01 /* DOS is loaded high */
-#define DI_DPMI_VALID 0x02 /* DPMI version is valid */
-
-#ifdef __cplusplus
- struct DOS_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR flags; /* See bit definitions above */
- uSHORT driverLocation; /* SmartROM BIOS address */
- uSHORT DOS_version;
- uSHORT DPMI_version;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } DOS_Info_S;
-#endif
-/*DOS_Info_S - end */
-
-
-/*struct - Netware_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Netware machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct Netware_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR driverName[13]; /* ie PM12NW31.DSK */
- uCHAR serverName[48];
- uCHAR netwareVersion; /* The Netware OS version */
- uCHAR netwareSubVersion;
- uCHAR netwareRevision;
- uSHORT maxConnections; /* Probably 250 or 1000 */
- uSHORT connectionsInUse;
- uSHORT maxVolumes;
- uCHAR unused;
- uCHAR SFTlevel;
- uCHAR TTSlevel;
-
- uCHAR clibMajorVersion; /* The CLIB.NLM version */
- uCHAR clibMinorVersion;
- uCHAR clibRevision;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } Netware_Info_S;
-#endif
-/*Netware_Info_S - end */
-
-
-/*struct - OS2_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *OS/2 machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct OS2_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } OS2_Info_S;
-#endif
-/*OS2_Info_S - end */
-
-
-/*struct - WinNT_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Windows NT machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct WinNT_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } WinNT_Info_S;
-#endif
-/*WinNT_Info_S - end */
-
-
-/*struct - SCO_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *SCO UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct SCO_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } SCO_Info_S;
-#endif
-/*SCO_Info_S - end */
-
-
-/*struct - USL_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *USL UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct USL_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } USL_Info_S;
-#endif
-/*USL_Info_S - end */
-
-
- /* Restore default structure packing */
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif // no unpack
-
-#endif // __SYS_INFO_H
-
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
deleted file mode 100644
index abc74fd474dc..000000000000
--- a/drivers/scsi/dpt_i2o.c
+++ /dev/null
@@ -1,3575 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/***************************************************************************
- dpti.c - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2000 by Adaptec
-
- July 30, 2001 First version being submitted
- for inclusion in the kernel. V2.4
-
- See Documentation/scsi/dpti.txt for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-/***************************************************************************
- * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
- - Support 2.6 kernel and DMA-mapping
- - ioctl fix for raid tools
- - use schedule_timeout in long long loop
- **************************************************************************/
-
-/*#define DEBUG 1 */
-/*#define UARTDELAY 1 */
-
-#include <linux/module.h>
-
-MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
-MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
-
-////////////////////////////////////////////////////////////////
-
-#include <linux/ioctl.h> /* For SCSI-Passthrough */
-#include <linux/uaccess.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h> /* for kmalloc() */
-#include <linux/pci.h> /* for PCI support */
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h> /* for udelay */
-#include <linux/interrupt.h>
-#include <linux/kernel.h> /* for printk */
-#include <linux/sched.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/mutex.h>
-
-#include <asm/processor.h> /* for boot_cpu_data */
-#include <asm/pgtable.h>
-#include <asm/io.h> /* for virt_to_bus, etc. */
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-
-#include "dpt/dptsig.h"
-#include "dpti.h"
-
-/*============================================================================
- * Create a binary signature - this is read by dptsig
- * Needed for our management apps
- *============================================================================
- */
-static DEFINE_MUTEX(adpt_mutex);
-static dpt_sig_S DPTI_sig = {
- {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
-#ifdef __i386__
- PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
-#elif defined(__ia64__)
- PROC_INTEL, PROC_IA64,
-#elif defined(__sparc__)
- PROC_ULTRASPARC, PROC_ULTRASPARC,
-#elif defined(__alpha__)
- PROC_ALPHA, PROC_ALPHA,
-#else
- (-1),(-1),
-#endif
- FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
- ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
- DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
-};
-
-
-
-
-/*============================================================================
- * Globals
- *============================================================================
- */
-
-static DEFINE_MUTEX(adpt_configuration_lock);
-
-static struct i2o_sys_tbl *sys_tbl;
-static dma_addr_t sys_tbl_pa;
-static int sys_tbl_ind;
-static int sys_tbl_len;
-
-static adpt_hba* hba_chain = NULL;
-static int hba_count = 0;
-
-static struct class *adpt_sysfs_class;
-
-static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
-static const struct file_operations adpt_fops = {
- .unlocked_ioctl = adpt_unlocked_ioctl,
- .open = adpt_open,
- .release = adpt_close,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_adpt_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-/* Structures and definitions for synchronous message posting.
- * See adpt_i2o_post_wait() for description
- * */
-struct adpt_i2o_post_wait_data
-{
- int status;
- u32 id;
- adpt_wait_queue_head_t *wq;
- struct adpt_i2o_post_wait_data *next;
-};
-
-static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
-static u32 adpt_post_wait_id = 0;
-static DEFINE_SPINLOCK(adpt_post_wait_lock);
-
-
-/*============================================================================
- * Functions
- *============================================================================
- */
-
-static inline int dpt_dma64(adpt_hba *pHba)
-{
- return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
-}
-
-static inline u32 dma_high(dma_addr_t addr)
-{
- return upper_32_bits(addr);
-}
-
-static inline u32 dma_low(dma_addr_t addr)
-{
- return (u32)addr;
-}
-
-static u8 adpt_read_blink_led(adpt_hba* host)
-{
- if (host->FwDebugBLEDflag_P) {
- if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
- return readb(host->FwDebugBLEDvalue_P);
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Scsi host template interface functions
- *============================================================================
- */
-
-#ifdef MODULE
-static struct pci_device_id dptids[] = {
- { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { 0, }
-};
-#endif
-
-MODULE_DEVICE_TABLE(pci,dptids);
-
-static int adpt_detect(struct scsi_host_template* sht)
-{
- struct pci_dev *pDev = NULL;
- adpt_hba *pHba;
- adpt_hba *next;
-
- PINFO("Detecting Adaptec I2O RAID controllers...\n");
-
- /* search for all Adaptec I2O RAID cards */
- while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
- if(pDev->device == PCI_DPT_DEVICE_ID ||
- pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
- if(adpt_install_hba(sht, pDev) ){
- PERROR("Could not Init an I2O RAID device\n");
- PERROR("Will not try to detect others.\n");
- return hba_count-1;
- }
- pci_dev_get(pDev);
- }
- }
-
- /* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- // Activate gets the status, inits the outbound queue, and gets the HRT
- if (adpt_i2o_activate_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- }
- }
-
-
- /* Active IOPs now in HOLD state */
-
-rebuild_sys_tab:
- if (hba_chain == NULL)
- return 0;
-
- /*
- * If build_sys_table fails, we kill everything and bail
- * as we can't init the IOPs w/o a system table
- */
- if (adpt_i2o_build_sys_table() < 0) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
-
- PDEBUG("HBA's in HOLD state\n");
-
- /* If an IOP doesn't come online, we need to rebuild the system table */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (adpt_i2o_online_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- goto rebuild_sys_tab;
- }
- }
-
- /* Active IOPs now in OPERATIONAL state */
- PDEBUG("HBA's in OPERATIONAL state\n");
-
- printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
- if (adpt_i2o_lct_get(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
-
- if (adpt_i2o_parse_lct(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- adpt_inquiry(pHba);
- }
-
- adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
- if (IS_ERR(adpt_sysfs_class)) {
- printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
- adpt_sysfs_class = NULL;
- }
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- if (adpt_scsi_host_alloc(pHba, sht) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- pHba->initialized = TRUE;
- pHba->state &= ~DPTI_STATE_RESET;
- if (adpt_sysfs_class) {
- struct device *dev = device_create(adpt_sysfs_class,
- NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
- "dpti%d", pHba->unit);
- if (IS_ERR(dev)) {
- printk(KERN_WARNING"dpti%d: unable to "
- "create device in dpt_i2o class\n",
- pHba->unit);
- }
- }
- }
-
- // Register our control device node
- // Nodes will need to be created in /dev to access this;
- // they cannot be created from within the driver.
- if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
- return hba_count;
-}
-
-
-static void adpt_release(adpt_hba *pHba)
-{
- struct Scsi_Host *shost = pHba->host;
-
- scsi_remove_host(shost);
-// adpt_i2o_quiesce_hba(pHba);
- adpt_i2o_delete_hba(pHba);
- scsi_host_put(shost);
-}
-
-
-static void adpt_inquiry(adpt_hba* pHba)
-{
- u32 msg[17];
- u32 *mptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- u32 len;
- u32 reqlen;
- u8* buf;
- dma_addr_t addr;
- u8 scb[16];
- s32 rcode;
-
- memset(msg, 0, sizeof(msg));
- buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
- if(!buf){
- printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
- return;
- }
- memset((void*)buf, 0, 36);
-
- len = 36;
- direction = 0x00000000;
- scsidir =0x40000000; // DATA IN (iop<--dev)
-
- if (dpt_dma64(pHba))
- reqlen = 17; // SINGLE SGE, 64 bit
- else
- reqlen = 14; // SINGLE SGE, 32 bit
- /* Stick the headers on */
- msg[0] = reqlen<<16 | SGL_OFFSET_12;
- msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
- msg[2] = 0;
- msg[3] = 0;
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
- msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
- /* Direction, disconnect ok | sense data | simple queue, CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
-
- mptr=msg+7;
-
- memset(scb, 0, sizeof(scb));
- // Write SCSI command into the message - always 16 byte block
- scb[0] = INQUIRY;
- scb[1] = 0;
- scb[2] = 0;
- scb[3] = 0;
- scb[4] = 36;
- scb[5] = 0;
- // Don't care about the rest of scb
-
- memcpy(mptr, scb, sizeof(scb));
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
-
- /* Now fill in the SGList and command */
- *lenptr = len;
- if (dpt_dma64(pHba)) {
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = dma_low(addr);
- *mptr++ = dma_high(addr);
- } else {
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = addr;
- }
-
- // Send it on its way
- rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
- if (rcode != 0) {
- sprintf(pHba->detail, "Adaptec I2O RAID");
- printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
- if (rcode != -ETIME && rcode != -EINTR)
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- } else {
- memset(pHba->detail, 0, sizeof(pHba->detail));
- memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
- memcpy(&(pHba->detail[16]), " Model: ", 8);
- memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
- memcpy(&(pHba->detail[40]), " FW: ", 4);
- memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
- pHba->detail[48] = '\0'; /* precautionary */
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- }
- adpt_i2o_status_get(pHba);
- return ;
-}
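-
-/*
- * Sketch of the private I2O message built above (layout inferred from
- * the code; 0xff in msg[1] appears to be the I2O private-command
- * function code):
- *
- *	msg[0]     request size in u32s << 16 | SGL offset code
- *	msg[1]     function (0xff) << 24 | HOST_TID << 12 | ADAPTER_TID
- *	msg[4]     DPT_ORGANIZATION_ID << 16 | I2O_CMD_SCSI_EXEC
- *	msg[5]     target TID, bit 16 = "interpret" by the IOP
- *	msg[6]     SCB flags | data direction | CDB length
- *	msg[7..10] the 16-byte CDB block
- *	msg[11]    byte count, followed by the SG list
- */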
-
-
-static int adpt_slave_configure(struct scsi_device * device)
-{
- struct Scsi_Host *host = device->host;
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
-
- if (host->can_queue && device->tagged_supported) {
- scsi_change_queue_depth(device,
- host->can_queue - 1);
- }
- return 0;
-}
-
-static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
-{
- adpt_hba* pHba = NULL;
- struct adpt_device* pDev = NULL; /* dpt per device information */
-
- cmd->scsi_done = done;
- /*
- * SCSI REQUEST_SENSE commands will be executed automatically by the
- * Host Adapter for any errors, so they should not be executed
- * explicitly unless the Sense Data is zero indicating that no error
- * occurred.
- */
-
- if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
- cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
- return 0;
- }
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- if (!pHba) {
- return FAILED;
- }
-
- rmb();
- if ((pHba->state) & DPTI_STATE_RESET)
- return SCSI_MLQUEUE_HOST_BUSY;
-
- // TODO: if cmd->device is offline then we may need to issue a bus rescan
- // followed by a get_lct to see if the device is still there
- if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
- /*
- * First command request for this device. Set up a pointer
- * to the device structure. This should be a TEST_UNIT_READY
- * command from scan_scsis_single.
- */
- if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
- // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
- // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
- cmd->result = (DID_NO_CONNECT << 16);
- cmd->scsi_done(cmd);
- return 0;
- }
- cmd->device->hostdata = pDev;
- }
- pDev->pScsi_dev = cmd->device;
-
- /*
- * If we are being called from when the device is being reset,
- * delay processing of the command until later.
- */
- if (pDev->state & DPTI_DEV_RESET ) {
- return FAILED;
- }
- return adpt_scsi_to_i2o(pHba, cmd, pDev);
-}
-
-static DEF_SCSI_QCMD(adpt_queue)
-
-static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int geom[])
-{
- int heads=-1;
- int sectors=-1;
- int cylinders=-1;
-
- // *** First let's set the default geometry ***
-
- // If the capacity is less than 0x2000
- if (capacity < 0x2000 ) { // floppy
- heads = 18;
- sectors = 2;
- }
- // else if between 0x2000 and 0x20000
- else if (capacity < 0x20000) {
- heads = 64;
- sectors = 32;
- }
- // else if between 0x20000 and 0x40000
- else if (capacity < 0x40000) {
- heads = 65;
- sectors = 63;
- }
- // else if between 0x40000 and 0x80000
- else if (capacity < 0x80000) {
- heads = 128;
- sectors = 63;
- }
- // else if greater than 0x80000
- else {
- heads = 255;
- sectors = 63;
- }
- sector_div(capacity, heads * sectors); /* quotient is left in capacity */
- cylinders = (int)capacity;
-
- // Special case if CDROM
- if(sdev->type == 5) { // CDROM
- heads = 252;
- sectors = 63;
- cylinders = 1111;
- }
-
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- PDEBUG("adpt_bios_param: exit\n");
- return 0;
-}
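-
-/*
- * Worked example (illustrative): a disk of 0x200000 (2097152) 512-byte
- * sectors falls in the top bracket, so heads = 255 and sectors = 63,
- * giving cylinders = 2097152 / (255 * 63) = 130.
- */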
-
-
-static const char *adpt_info(struct Scsi_Host *host)
-{
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
- return (char *) (pHba->detail);
-}
-
-static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- struct adpt_device* d;
- int id;
- int chan;
- adpt_hba* pHba;
- int unit;
-
- // Find HBA (host bus adapter) we are looking for
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->host == host) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return 0;
- }
- host = pHba->host;
-
- seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
- seq_printf(m, "%s\n", pHba->detail);
- seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
- pHba->host->host_no, pHba->name, host->irq);
- seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
- host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
-
- seq_puts(m, "Devices:\n");
- for(chan = 0; chan < MAX_CHANNEL; chan++) {
- for(id = 0; id < MAX_ID; id++) {
- d = pHba->channel[chan].device[id];
- while(d) {
- seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
- seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
-
- unit = d->pI2o_dev->lct_data.tid;
- seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
- unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
- scsi_device_online(d->pScsi_dev)? "online":"offline");
- d = d->next_lun;
- }
- }
- }
- return 0;
-}
-
-/*
- * Turn a pointer to ioctl reply data into an u32 'context'
- */
-static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
-{
-#if BITS_PER_LONG == 32
- return (u32)(unsigned long)reply;
-#else
- ulong flags = 0;
- u32 nr, i;
-
- spin_lock_irqsave(pHba->host->host_lock, flags);
- nr = ARRAY_SIZE(pHba->ioctl_reply_context);
- for (i = 0; i < nr; i++) {
- if (pHba->ioctl_reply_context[i] == NULL) {
- pHba->ioctl_reply_context[i] = reply;
- break;
- }
- }
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- if (i >= nr) {
- printk(KERN_WARNING"%s: Too many outstanding "
- "ioctl commands\n", pHba->name);
- return (u32)-1;
- }
-
- return i;
-#endif
-}
-
-/*
- * Go from a u32 'context' to a pointer to ioctl reply data.
- */
-static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
-{
-#if BITS_PER_LONG == 32
- return (void *)(unsigned long)context;
-#else
- void *p = pHba->ioctl_reply_context[context];
- pHba->ioctl_reply_context[context] = NULL;
-
- return p;
-#endif
-}
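-
-/*
- * Round-trip sketch (illustrative): on 64-bit kernels a reply pointer
- * does not fit in the 32-bit I2O transaction context, so the pointer
- * is parked in ioctl_reply_context[] and only the slot index travels
- * with the message:
- *
- *	msg[3] = adpt_ioctl_to_context(pHba, reply);
- *	...	// later, in the interrupt handler:
- *	p = adpt_ioctl_from_context(pHba, readl(reply + 12));
- */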
-
-/*===========================================================================
- * Error Handling routines
- *===========================================================================
- */
-
-static int adpt_abort(struct scsi_cmnd * cmd)
-{
- adpt_hba* pHba = NULL; /* host bus adapter structure */
- struct adpt_device* dptdevice; /* dpt per device information */
- u32 msg[5];
- int rcode;
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
- if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
- printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
- return FAILED;
- }
-
- memset(msg, 0, sizeof(msg));
- msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
- msg[2] = 0;
- msg[3]= 0;
- /* Add 1 to avoid firmware treating it as invalid command */
- msg[4] = cmd->request->tag + 1;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
- return SUCCESS;
-}
-
-
-#define I2O_DEVICE_RESET 0x27
-// This is the same for BLK and SCSI devices
-// NOTE this is wrong in the i2o.h definitions
-// This is not currently supported by our adapter but we issue it anyway
-static int adpt_device_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
- int old_state;
- struct adpt_device* d = cmd->device->hostdata;
-
- pHba = (void*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
- if (!d) {
- printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
- return FAILED;
- }
- memset(msg, 0, sizeof(msg));
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
- msg[2] = 0;
- msg[3] = 0;
-
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- old_state = d->state;
- d->state |= DPTI_DEV_RESET;
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- d->state = old_state;
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
- return SUCCESS;
- }
-}
-
-
-#define I2O_HBA_BUS_RESET 0x87
-// This version of bus reset is called by the eh_error_handler
-static int adpt_bus_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- memset(msg, 0, sizeof(msg));
- printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
- msg[2] = 0;
- msg[3] = 0;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
- return SUCCESS;
- }
-}
-
-// This version of reset is called by the eh_error_handler
-static int __adpt_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- int rcode;
- char name[32];
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- strncpy(name, pHba->name, sizeof(name));
- printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
- rcode = adpt_hba_reset(pHba);
- if(rcode == 0){
- printk(KERN_WARNING"%s: HBA reset complete\n", name);
- return SUCCESS;
- } else {
- printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
- return FAILED;
- }
-}
-
-static int adpt_reset(struct scsi_cmnd* cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __adpt_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
-static int adpt_hba_reset(adpt_hba* pHba)
-{
- int rcode;
-
- pHba->state |= DPTI_STATE_RESET;
-
- // Activate gets the status, inits the outbound queue, and gets the HRT
- if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
- printk(KERN_ERR "%s: Could not activate\n", pHba->name);
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_build_sys_table()) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in HOLD state\n",pHba->name);
-
- if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
-
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- pHba->state &= ~DPTI_STATE_RESET;
-
- adpt_fail_posted_scbs(pHba);
- return 0; /* return success */
-}
-
-/*===========================================================================
- *
- *===========================================================================
- */
-
-
-static void adpt_i2o_sys_shutdown(void)
-{
- adpt_hba *pHba, *pNext;
- struct adpt_i2o_post_wait_data *p1, *old;
-
- printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
- printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
- /* Delete all IOPs from the controller chain */
- /* They should have already been released by the
- * scsi-core
- */
- for (pHba = hba_chain; pHba; pHba = pNext) {
- pNext = pHba->next;
- adpt_i2o_delete_hba(pHba);
- }
-
- /* Remove any timed-out entries from the wait queue. */
-// spin_lock_irqsave(&adpt_post_wait_lock, flags);
- /* Nothing should be outstanding at this point so just
- * free them
- */
- for(p1 = adpt_post_wait_queue; p1;) {
- old = p1;
- p1 = p1->next;
- kfree(old);
- }
-// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
- adpt_post_wait_queue = NULL;
-
- printk(KERN_INFO "Adaptec I2O controllers down.\n");
-}
-
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
-{
-
- adpt_hba* pHba = NULL;
- adpt_hba* p = NULL;
- ulong base_addr0_phys = 0;
- ulong base_addr1_phys = 0;
- u32 hba_map0_area_size = 0;
- u32 hba_map1_area_size = 0;
- void __iomem *base_addr_virt = NULL;
- void __iomem *msg_addr_virt = NULL;
- int dma64 = 0;
-
- int raptorFlag = FALSE;
-
- if(pci_enable_device(pDev)) {
- return -EINVAL;
- }
-
- if (pci_request_regions(pDev, "dpt_i2o")) {
- PERROR("dpti: adpt_config_hba: pci request region failed\n");
- return -EINVAL;
- }
-
- pci_set_master(pDev);
-
- /*
- * See if we should enable dma64 mode.
- */
- if (sizeof(dma_addr_t) > 4 &&
- dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
- dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
- dma64 = 1;
-
- if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
- return -EINVAL;
-
- /* adapter only supports message blocks below 4GB */
- dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
-
- base_addr0_phys = pci_resource_start(pDev,0);
- hba_map0_area_size = pci_resource_len(pDev,0);
-
- // Check if standard PCI card or single BAR Raptor
- if(pDev->device == PCI_DPT_DEVICE_ID){
- if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
- // Raptor card with this device id needs 4M
- hba_map0_area_size = 0x400000;
- } else { // Not Raptor - it is a PCI card
- if(hba_map0_area_size > 0x100000 ){
- hba_map0_area_size = 0x100000;
- }
- }
- } else {// Raptor split BAR config
- // Use BAR1 in this configuration
- base_addr1_phys = pci_resource_start(pDev,1);
- hba_map1_area_size = pci_resource_len(pDev,1);
- raptorFlag = TRUE;
- }
-
-#if BITS_PER_LONG == 64
- /*
- * The original Adaptec 64 bit driver has this comment here:
- * "x86_64 machines need more optimal mappings"
- *
- * I assume some HBAs report ridiculously large mappings
- * and we need to limit them on platforms with IOMMUs.
- */
- if (raptorFlag == TRUE) {
- if (hba_map0_area_size > 128)
- hba_map0_area_size = 128;
- if (hba_map1_area_size > 524288)
- hba_map1_area_size = 524288;
- } else {
- if (hba_map0_area_size > 524288)
- hba_map0_area_size = 524288;
- }
-#endif
-
- base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
- if (!base_addr_virt) {
- pci_release_regions(pDev);
- PERROR("dpti: adpt_config_hba: io remap failed\n");
- return -EINVAL;
- }
-
- if(raptorFlag == TRUE) {
- msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
- if (!msg_addr_virt) {
- PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -EINVAL;
- }
- } else {
- msg_addr_virt = base_addr_virt;
- }
-
- // Allocate and zero the data structure
- pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
- if (!pHba) {
- if (msg_addr_virt != base_addr_virt)
- iounmap(msg_addr_virt);
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -ENOMEM;
- }
-
- mutex_lock(&adpt_configuration_lock);
-
- if(hba_chain != NULL){
- for(p = hba_chain; p->next; p = p->next);
- p->next = pHba;
- } else {
- hba_chain = pHba;
- }
- pHba->next = NULL;
- pHba->unit = hba_count;
- sprintf(pHba->name, "dpti%d", hba_count);
- hba_count++;
-
- mutex_unlock(&adpt_configuration_lock);
-
- pHba->pDev = pDev;
- pHba->base_addr_phys = base_addr0_phys;
-
- // Set up the Virtual Base Address of the I2O Device
- pHba->base_addr_virt = base_addr_virt;
- pHba->msg_addr_virt = msg_addr_virt;
- pHba->irq_mask = base_addr_virt+0x30;
- pHba->post_port = base_addr_virt+0x40;
- pHba->reply_port = base_addr_virt+0x44;
-
- pHba->hrt = NULL;
- pHba->lct = NULL;
- pHba->lct_size = 0;
- pHba->status_block = NULL;
- pHba->post_count = 0;
- pHba->state = DPTI_STATE_RESET;
- pHba->pDev = pDev;
- pHba->devices = NULL;
- pHba->dma64 = dma64;
-
- // Initializing the spinlocks
- spin_lock_init(&pHba->state_lock);
- spin_lock_init(&adpt_post_wait_lock);
-
- if(raptorFlag == 0){
- printk(KERN_INFO "Adaptec I2O RAID controller"
- " %d at %p size=%x irq=%d%s\n",
- hba_count-1, base_addr_virt,
- hba_map0_area_size, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- } else {
- printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
- hba_count-1, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
- printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
- }
-
- if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
- printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
- adpt_i2o_delete_hba(pHba);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static void adpt_i2o_delete_hba(adpt_hba* pHba)
-{
- adpt_hba* p1;
- adpt_hba* p2;
- struct i2o_device* d;
- struct i2o_device* next;
- int i;
- int j;
- struct adpt_device* pDev;
- struct adpt_device* pNext;
-
-
- mutex_lock(&adpt_configuration_lock);
- if(pHba->host){
- free_irq(pHba->host->irq, pHba);
- }
- p2 = NULL;
- for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
- if(p1 == pHba) {
- if(p2) {
- p2->next = p1->next;
- } else {
- hba_chain = p1->next;
- }
- break;
- }
- }
-
- hba_count--;
- mutex_unlock(&adpt_configuration_lock);
-
- iounmap(pHba->base_addr_virt);
- pci_release_regions(pHba->pDev);
- if(pHba->msg_addr_virt != pHba->base_addr_virt){
- iounmap(pHba->msg_addr_virt);
- }
- if(pHba->FwDebugBuffer_P)
- iounmap(pHba->FwDebugBuffer_P);
- if(pHba->hrt) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
- pHba->hrt, pHba->hrt_pa);
- }
- if(pHba->lct) {
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- }
- if(pHba->status_block) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
- pHba->status_block, pHba->status_block_pa);
- }
- if(pHba->reply_pool) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- for(d = pHba->devices; d ; d = next){
- next = d->next;
- kfree(d);
- }
- for(i = 0 ; i < pHba->top_scsi_channel ; i++){
- for(j = 0; j < MAX_ID; j++){
- if(pHba->channel[i].device[j] != NULL){
- for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
- pNext = pDev->next_lun;
- kfree(pDev);
- }
- }
- }
- }
- pci_dev_put(pHba->pDev);
- if (adpt_sysfs_class)
- device_destroy(adpt_sysfs_class,
- MKDEV(DPTI_I2O_MAJOR, pHba->unit));
- kfree(pHba);
-
- if(hba_count <= 0){
- unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
- if (adpt_sysfs_class) {
- class_destroy(adpt_sysfs_class);
- adpt_sysfs_class = NULL;
- }
- }
-}
-
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
-{
- struct adpt_device* d;
-
- if(chan < 0 || chan >= MAX_CHANNEL)
- return NULL;
-
- d = pHba->channel[chan].device[id];
- if(!d || d->tid == 0) {
- return NULL;
- }
-
- /* If it is the only lun at that address then this should match*/
- if(d->scsi_lun == lun){
- return d;
- }
-
- /* else we need to look through all the luns */
- for(d=d->next_lun ; d ; d = d->next_lun){
- if(d->scsi_lun == lun){
- return d;
- }
- }
- return NULL;
-}
-
-
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
-{
- // We use our own version of the WAIT_QUEUE_HEAD to handle
- // some kernel version differences; when embedded in the
- // kernel this could go back to the vanilla one.
- ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
- int status = 0;
- ulong flags = 0;
- struct adpt_i2o_post_wait_data *p1, *p2;
- struct adpt_i2o_post_wait_data *wait_data =
- kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
- DECLARE_WAITQUEUE(wait, current);
-
- if (!wait_data)
- return -ENOMEM;
-
- /*
- * The spin locking is needed to keep anyone from playing
- * with the queue pointers and id while we do the same
- */
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- // TODO we need a MORE unique way of getting ids
- // to support async LCT get
- wait_data->next = adpt_post_wait_queue;
- adpt_post_wait_queue = wait_data;
- adpt_post_wait_id++;
- adpt_post_wait_id &= 0x7fff;
- wait_data->id = adpt_post_wait_id;
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- wait_data->wq = &adpt_wq_i2o_post;
- wait_data->status = -ETIMEDOUT;
-
- add_wait_queue(&adpt_wq_i2o_post, &wait);
-
- msg[2] |= 0x80000000 | ((u32)wait_data->id);
- timeout *= HZ;
- if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
- set_current_state(TASK_INTERRUPTIBLE);
- if(pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (!timeout)
- schedule();
- else{
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- // I/O issued, but cannot get result in
- // specified time. Freeing resources is
- // dangerous.
- status = -ETIME;
- }
- }
- if(pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- }
- remove_wait_queue(&adpt_wq_i2o_post, &wait);
-
- if(status == -ETIMEDOUT){
- printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
- // We will have to free the wait_data memory during shutdown
- return status;
- }
-
- /* Remove the entry from the queue. */
- p2 = NULL;
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
- if(p1 == wait_data) {
- if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
- status = -EOPNOTSUPP;
- }
- if(p2) {
- p2->next = p1->next;
- } else {
- adpt_post_wait_queue = p1->next;
- }
- break;
- }
- }
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- kfree(wait_data);
-
- return status;
-}
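-
-/*
- * Usage sketch (illustrative, mirroring adpt_device_reset() above):
- * build a message, post it, and sleep until the reply arrives or the
- * timeout expires.  adpt_i2o_post_wait() stamps its own id into
- * msg[2], so callers can leave the transaction context zeroed:
- *
- *	u32 msg[4];
- *	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
- *	msg[1] = I2O_DEVICE_RESET << 24 | HOST_TID << 12 | tid;
- *	msg[2] = msg[3] = 0;
- *	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
- */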
-
-
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
-{
-
- u32 m = EMPTY_QUEUE;
- u32 __iomem *msg;
- ulong timeout = jiffies + 30*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg = pHba->msg_addr_virt + m;
- memcpy_toio(msg, data, len);
- wmb();
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- return 0;
-}
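-
-/*
- * The handshake above, in brief: reading the post port pops a free
- * message frame address (MFA) from the IOP's inbound queue
- * (EMPTY_QUEUE means none is available yet), the request is copied
- * into that frame through msg_addr_virt, and writing the same MFA
- * back to the post port hands the frame to the IOP for execution.
- */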
-
-
-static void adpt_i2o_post_wait_complete(u32 context, int status)
-{
- struct adpt_i2o_post_wait_data *p1 = NULL;
- /*
- * We need to search through the adpt_post_wait
- * queue to see if the given message is still
- * outstanding. If not, it means that the IOP
- * took longer to respond to the message than we
- * had allowed and timer has already expired.
- * Not much we can do about that except log it for debug
- * purposes, increase the timeout, and recompile.
- *
- * Lock needed to keep anyone from moving queue pointers
- * around while we're looking through them.
- */
-
- context &= 0x7fff;
-
- spin_lock(&adpt_post_wait_lock);
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- if(p1->id == context) {
- p1->status = status;
- spin_unlock(&adpt_post_wait_lock);
- wake_up_interruptible(p1->wq);
- return;
- }
- }
- spin_unlock(&adpt_post_wait_lock);
- // If this happens we lose commands that probably really completed
- printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
- printk(KERN_DEBUG" Tasks in wait queue:\n");
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- printk(KERN_DEBUG" %d\n",p1->id);
- }
- return;
-}
-
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
-{
- u32 msg[8];
- u8* status;
- dma_addr_t addr;
- u32 m = EMPTY_QUEUE ;
- ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
-
- if(pHba->initialized == FALSE) { // First time reset should be quick
- timeout = jiffies + (25*HZ);
- } else {
- adpt_i2o_quiesce_hba(pHba);
- }
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"Timeout waiting for message!\n");
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if(status == NULL) {
- adpt_send_nop(pHba, m);
- printk(KERN_ERR"IOP reset failed - no free memory.\n");
- return -ENOMEM;
- }
- memset(status,0,4);
-
- msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]=0;
- msg[3]=0;
- msg[4]=0;
- msg[5]=0;
- msg[6]=dma_low(addr);
- msg[7]=dma_high(addr);
-
- memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
- wmb();
- writel(m, pHba->post_port);
- wmb();
-
- while(*status == 0){
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we cannot
- free them because the controller may wake up and
- corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
- PDEBUG("%s: Reset in progress...\n", pHba->name);
- // Here we wait for a message frame to become available,
- // indicating that the reset has finished
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free them because the controller may
- wake up and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
- // Flush the offset
- adpt_send_nop(pHba, m);
- }
- adpt_i2o_status_get(pHba);
- if(*status == 0x02 ||
- pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
- pHba->name);
- } else {
- PDEBUG("%s: Reset completed.\n", pHba->name);
- }
-
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-#ifdef UARTDELAY
- // This delay is to allow someone attached to the card through the debug UART to
- // set up the dump levels that they want before the rest of the initialization sequence
- adpt_delay(20000);
-#endif
- return 0;
-}
-
-
-static int adpt_i2o_parse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // must hold at least 8 u32s (32 bytes) for the scalar queries below
- struct adpt_device* pDev;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- /*
- * If we have hidden devices, we need to inform the upper layers about
- * the possible maximum id reference to handle device access when
- * an array is disassembled. This code has no other purpose but to
- * allow us future access to devices that are currently hidden
- * behind arrays, hotspares or have not been configured (JBOD mode).
- */
- if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
- lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
- lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- continue;
- }
- tid = lct->lct_entry[i].tid;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- continue;
- }
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
- continue;
- }
- if (scsi_id >= MAX_ID){
- printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
- continue;
- }
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- }
- d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
- if(d==NULL)
- {
- printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- tid = d->lct_data.tid;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
- }
- bus_no = 0;
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
- tid = d->lct_data.tid;
- // TODO get the bus_no from hrt-but for now they are in order
- //bus_no =
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- pHba->channel[bus_no].type = d->lct_data.class_id;
- pHba->channel[bus_no].tid = tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
- {
- pHba->channel[bus_no].scsi_id = buf[1];
- PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
- }
- // TODO remove - this is just until we get from hrt
- bus_no++;
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
- break;
- }
- }
- }
-
- // Setup adpt_device table
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
-
- tid = d->lct_data.tid;
- scsi_id = -1;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- continue;
- }
- if (scsi_id >= MAX_ID) {
- continue;
- }
- if( pHba->channel[bus_no].device[scsi_id] == NULL){
- pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- for( pDev = pHba->channel[bus_no].device[scsi_id];
- pDev->next_lun; pDev = pDev->next_lun){
- }
- pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev->next_lun == NULL) {
- return -ENOMEM;
- }
- pDev = pDev->next_lun;
- }
- pDev->tid = tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- }
- if(scsi_id == -1){
- printk(KERN_WARNING"Could not find SCSI ID for %s\n",
- d->lct_data.identity_tag);
- }
- }
- }
- return 0;
-}
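-
-/*
- * Layout of the DPT device-info scalar (group 0x8000) as decoded
- * above -- inferred from the field extraction, not from a spec:
- *
- *	buf[0]     bits 0..7 device type, bits 8..15 flags,
- *	           bits 16..31 bus (channel) number
- *	buf[1]     SCSI id
- *	buf[2..3]  eight-byte SCSI LUN, decoded via scsilun_to_int()
- */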
-
-
-/*
- * Each I2O controller has a chain of devices on it - these match
- * the useful parts of the LCT of the board.
- */
-
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
-{
- mutex_lock(&adpt_configuration_lock);
- d->controller=pHba;
- d->owner=NULL;
- d->next=pHba->devices;
- d->prev=NULL;
- if (pHba->devices != NULL){
- pHba->devices->prev=d;
- }
- pHba->devices=d;
- *d->dev_name = 0;
-
- mutex_unlock(&adpt_configuration_lock);
- return 0;
-}
-
-static int adpt_open(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- mutex_lock(&adpt_mutex);
- //TODO check for root access
- //
- minor = iminor(inode);
- if (minor >= hba_count) {
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- if (pHba == NULL) {
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
-
-// if(pHba->in_use){
- // mutex_unlock(&adpt_configuration_lock);
-// return -EBUSY;
-// }
-
- pHba->in_use = 1;
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
-
- return 0;
-}
-
-static int adpt_close(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- minor = iminor(inode);
- if (minor >= hba_count) {
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return -ENXIO;
- }
-
- pHba->in_use = 0;
-
- return 0;
-}
-
-
-static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
-{
- u32 msg[MAX_MESSAGE_SIZE];
- u32* reply = NULL;
- u32 size = 0;
- u32 reply_size = 0;
- u32 __user *user_msg = arg;
- u32 __user * user_reply = NULL;
- void **sg_list = NULL;
- u32 sg_offset = 0;
- u32 sg_count = 0;
- int sg_index = 0;
- u32 i = 0;
- u32 rcode = 0;
- void *p = NULL;
- dma_addr_t addr;
- ulong flags = 0;
-
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- return -EFAULT;
- }
- size = size>>16;
-
- user_reply = &user_msg[size];
- if(size > MAX_MESSAGE_SIZE){
- return -EFAULT;
- }
- size *= 4; // Convert to bytes
-
- /* Copy in the user's I2O command */
- if(copy_from_user(msg, user_msg, size)) {
- return -EFAULT;
- }
- get_user(reply_size, &user_reply[0]);
- reply_size = reply_size>>16;
- if(reply_size > REPLY_FRAME_SIZE){
- reply_size = REPLY_FRAME_SIZE;
- }
- reply_size *= 4;
- reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
- if(reply == NULL) {
- printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
- return -ENOMEM;
- }
- sg_offset = (msg[0]>>4)&0xf;
- msg[2] = 0x40000000; // IOCTL context
- msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1) {
- rcode = -EBUSY;
- goto free;
- }
-
- sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
- if (!sg_list) {
- rcode = -ENOMEM;
- goto free;
- }
- if(sg_offset) {
- // TODO add 64 bit API
- struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- if (sg_count > pHba->sg_tablesize){
- printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- rcode = -EINVAL;
- goto free;
- }
-
- for(i = 0; i < sg_count; i++) {
- int sg_size;
-
- if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
- printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
- rcode = -EINVAL;
- goto cleanup;
- }
- sg_size = sg[i].flag_count & 0xffffff;
- /* Allocate memory for the transfer */
- p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
- if(!p) {
- printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- pHba->name,sg_size,i,sg_count);
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
- /* Copy in the user's SG buffer if necessary */
- if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // sg_simple_element API is 32 bit
- if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
- printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- /* sg_simple_element API is 32 bit, but addr < 4GB */
- sg[i].addr_bus = addr;
- }
- }
-
- do {
- /*
- * Stop any new commands from entering the
- * controller while processing the ioctl
- */
- if (pHba->host) {
- scsi_block_requests(pHba->host);
- spin_lock_irqsave(pHba->host->host_lock, flags);
- }
- rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
- if (rcode != 0)
- printk("adpt_i2o_passthru: post wait failed %d %p\n",
- rcode, reply);
- if (pHba->host) {
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- scsi_unblock_requests(pHba->host);
- }
- } while (rcode == -ETIMEDOUT);
-
- if(rcode){
- goto cleanup;
- }
-
- if(sg_offset) {
- /* Copy back the Scatter Gather buffers back to user space */
- u32 j;
- // TODO add 64 bit API
- struct sg_simple_element* sg;
- int sg_size;
-
- // Re-read the original message so the SG copy-back is handled correctly
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- rcode = -EFAULT;
- goto cleanup;
- }
- size = size>>16;
- size *= 4;
- if (size > MAX_MESSAGE_SIZE) {
- rcode = -EINVAL;
- goto cleanup;
- }
- /* Copy in the user's I2O command */
- if (copy_from_user (msg, user_msg, size)) {
- rcode = -EFAULT;
- goto cleanup;
- }
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
-
- // TODO add 64 bit API
- sg = (struct sg_simple_element*)(msg + sg_offset);
- for (j = 0; j < sg_count; j++) {
- /* Copy out the SG list to user's buffer if necessary */
- if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
- sg_size = sg[j].flag_count & 0xffffff;
- // sg_simple_element API is 32 bit
- if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- }
- }
-
- /* Copy back the reply to user space */
- if (reply_size) {
- // we wrote our own values for context - now restore the user supplied ones
- if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
- printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
- rcode = -EFAULT;
- }
- if(copy_to_user(user_reply, reply, reply_size)) {
- printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
- rcode = -EFAULT;
- }
- }
-
-
-cleanup:
- if (rcode != -ETIME && rcode != -EINTR) {
- struct sg_simple_element *sg =
- (struct sg_simple_element*) (msg +sg_offset);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- dma_free_coherent(&pHba->pDev->dev,
- sg[sg_index].flag_count & 0xffffff,
- sg_list[sg_index],
- sg[sg_index].addr_bus);
- }
- }
- }
-
-free:
- kfree(sg_list);
- kfree(reply);
- return rcode;
-}
-
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_IA64;
-}
-#endif
-
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_ULTRASPARC;
-}
-#endif
-#if defined __alpha__
-static void adpt_alpha_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_ALPHA;
-}
-#endif
-
-#if defined __i386__
-
-#include <uapi/asm/vm86.h>
-
-static void adpt_i386_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- switch (boot_cpu_data.x86) {
- case CPU_386:
- si->processorType = PROC_386;
- break;
- case CPU_486:
- si->processorType = PROC_486;
- break;
- case CPU_586:
- si->processorType = PROC_PENTIUM;
- break;
- default: // Just in case
- si->processorType = PROC_PENTIUM;
- break;
- }
-}
-#endif
-
-/*
- * This routine returns information about the system. This does not affect
- * any logic and, if the info is wrong, it doesn't matter.
- */
-
-/* Get all the info we can not get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
- sysInfo_S si;
-
- memset(&si, 0, sizeof(si));
-
- si.osType = OS_LINUX;
- si.osMajorVersion = 0;
- si.osMinorVersion = 0;
- si.osRevision = 0;
- si.busType = SI_PCI_BUS;
- si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
- adpt_i386_info(&si);
-#elif defined (__ia64__)
- adpt_ia64_info(&si);
-#elif defined(__sparc__)
- adpt_sparc_info(&si);
-#elif defined (__alpha__)
- adpt_alpha_info(&si);
-#else
- si.processorType = 0xff ;
-#endif
- if (copy_to_user(buffer, &si, sizeof(si))){
- printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
-{
- int minor;
- int error = 0;
- adpt_hba* pHba;
- ulong flags = 0;
- void __user *argp = (void __user *)arg;
-
- minor = iminor(inode);
- if (minor >= DPTI_MAX_HBA){
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if(pHba == NULL){
- return -ENXIO;
- }
-
- while((volatile u32) pHba->state & DPTI_STATE_RESET )
- schedule_timeout_uninterruptible(2);
-
- switch (cmd) {
- // TODO: handle 3 cases
- case DPT_SIGNATURE:
- if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
- return -EFAULT;
- }
- break;
- case I2OUSRCMD:
- return adpt_i2o_passthru(pHba, argp);
-
- case DPT_CTRLINFO:{
- drvrHBAinfo_S HbaInfo;
-
-#define FLG_OSD_PCI_VALID 0x0001
-#define FLG_OSD_DMA 0x0002
-#define FLG_OSD_I2O 0x0004
- memset(&HbaInfo, 0, sizeof(HbaInfo));
- HbaInfo.drvrHBAnum = pHba->unit;
- HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
- HbaInfo.blinkState = adpt_read_blink_led(pHba);
- HbaInfo.pciBusNum = pHba->pDev->bus->number;
- HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
- HbaInfo.Interrupt = pHba->pDev->irq;
- HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
- if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
- printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
- return -EFAULT;
- }
- break;
- }
- case DPT_SYSINFO:
- return adpt_system_info(argp);
- case DPT_BLINKLED:{
- u32 value;
- value = (u32)adpt_read_blink_led(pHba);
- if (copy_to_user(argp, &value, sizeof(value))) {
- return -EFAULT;
- }
- break;
- }
- case I2ORESETCMD: {
- struct Scsi_Host *shost = pHba->host;
-
- if (shost)
- spin_lock_irqsave(shost->host_lock, flags);
- adpt_hba_reset(pHba);
- if (shost)
- spin_unlock_irqrestore(shost->host_lock, flags);
- break;
- }
- case I2ORESCANCMD:
- adpt_rescan(pHba);
- break;
- default:
- return -EINVAL;
- }
-
- return error;
-}
-
-static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
- ret = adpt_ioctl(inode, file, cmd, arg);
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
-
- switch(cmd) {
- case DPT_SIGNATURE:
- case I2OUSRCMD:
- case DPT_CTRLINFO:
- case DPT_SYSINFO:
- case DPT_BLINKLED:
- case I2ORESETCMD:
- case I2ORESCANCMD:
- case (DPT_TARGET_BUSY & 0xFFFF):
- case DPT_TARGET_BUSY:
- ret = adpt_ioctl(inode, file, cmd, arg);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
-
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-#endif
-
-static irqreturn_t adpt_isr(int irq, void *dev_id)
-{
- struct scsi_cmnd* cmd;
- adpt_hba* pHba = dev_id;
- u32 m;
- void __iomem *reply;
- u32 status=0;
- u32 context;
- ulong flags = 0;
- int handled = 0;
-
- if (pHba == NULL){
- printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
- return IRQ_NONE;
- }
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
-
- while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // Try twice then give up
- rmb();
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // This really should not happen
- printk(KERN_ERR"dpti: Could not get reply frame\n");
- goto out;
- }
- }
- if (pHba->reply_pool_pa <= m &&
- m < pHba->reply_pool_pa +
- (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
- reply = (u8 *)pHba->reply_pool +
- (m - pHba->reply_pool_pa);
- } else {
- /* Ick, we should *never* be here */
- printk(KERN_ERR "dpti: reply frame not from pool\n");
- reply = (u8 *)bus_to_virt(m);
- }
-
- if (readl(reply) & MSG_FAIL) {
- u32 old_m = readl(reply+28);
- void __iomem *msg;
- u32 old_context;
- PDEBUG("%s: Failed message\n",pHba->name);
- if(old_m >= 0x100000){
- printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
- writel(m,pHba->reply_port);
- continue;
- }
- // Transaction context is 0 in failed reply frame
- msg = pHba->msg_addr_virt + old_m;
- old_context = readl(msg+12);
- writel(old_context, reply+12);
- adpt_send_nop(pHba, old_m);
- }
- context = readl(reply+8);
- if(context & 0x40000000){ // IOCTL
- void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
- if( p != NULL) {
- memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
- }
- // All IOCTLs will also be post wait
- }
- if(context & 0x80000000){ // Post wait message
- status = readl(reply+16);
- if(status >> 24){
- status &= 0xffff; /* Get detail status */
- } else {
- status = I2O_POST_WAIT_OK;
- }
- if(!(context & 0x40000000)) {
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL) {
- printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
- }
- }
- adpt_i2o_post_wait_complete(context, status);
- } else { // SCSI message
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL){
- scsi_dma_unmap(cmd);
- adpt_i2o_to_scsi(reply, cmd);
- }
- }
- writel(m, pHba->reply_port);
- wmb();
- rmb();
- }
- handled = 1;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return IRQ_RETVAL(handled);
-}
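-
-/*
- * Transaction-context bits as consumed by adpt_isr() above:
- *
- *	bit 31 (0x80000000)	post-wait reply; the low 15 bits carry
- *				the adpt_post_wait_id to wake the sleeper
- *	bit 30 (0x40000000)	ioctl pass-through reply; reply word 3
- *				indexes the saved reply buffer
- *	neither			normal SCSI completion; reply word 3
- *				holds the request tag + 1 for
- *				scsi_host_find_tag()
- */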
-
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
-{
- int i;
- u32 msg[MAX_MESSAGE_SIZE];
- u32* mptr;
- u32* lptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- int nseg;
- u32 len;
- u32 reqlen;
- s32 rcode;
- dma_addr_t addr;
-
- memset(msg, 0 , sizeof(msg));
- len = scsi_bufflen(cmd);
- direction = 0x00000000;
-
- scsidir = 0x00000000; // DATA NO XFER
- if(len) {
- /*
- * Set SCBFlags to indicate if data is being transferred
- * in or out, or no data transfer
- * Note: Do not have to verify index is less than 0 since
- * cmd->cmnd[0] is an unsigned char
- */
- switch(cmd->sc_data_direction){
- case DMA_FROM_DEVICE:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- break;
- case DMA_TO_DEVICE:
- direction=0x04000000; // SGL OUT
- scsidir =0x80000000; // DATA OUT (iop-->dev)
- break;
- case DMA_NONE:
- break;
- case DMA_BIDIRECTIONAL:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- // Assume In - and continue;
- break;
- default:
- printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
- pHba->name, cmd->cmnd[0]);
- cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
- cmd->scsi_done(cmd);
- return 0;
- }
- }
- // msg[0] is set later
- // I2O_CMD_SCSI_EXEC
- msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
- msg[2] = 0;
- /* Add 1 to avoid firmware treating it as invalid command */
- msg[3] = cmd->request->tag + 1;
- // Our cards use the transaction context as the tag for queueing
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
- msg[5] = d->tid;
- /* Direction, disconnect ok | sense data | simple queue, CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000|cmd->cmd_len;
-
- mptr=msg+7;
-
- // Write SCSI command into the message - always 16 byte block
- memset(mptr, 0, 16);
- memcpy(mptr, cmd->cmnd, cmd->cmd_len);
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
- if (dpt_dma64(pHba)) {
- reqlen = 16; // SINGLE SGE
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- } else {
- reqlen = 14; // SINGLE SGE
- }
- /* Now fill in the SGList and command */
-
- nseg = scsi_dma_map(cmd);
- BUG_ON(nseg < 0);
- if (nseg) {
- struct scatterlist *sg;
-
- len = 0;
- scsi_for_each_sg(cmd, sg, nseg, i) {
- lptr = mptr;
- *mptr++ = direction|0x10000000|sg_dma_len(sg);
- len+=sg_dma_len(sg);
- addr = sg_dma_address(sg);
- *mptr++ = dma_low(addr);
- if (dpt_dma64(pHba))
- *mptr++ = dma_high(addr);
- /* Make this an end of list */
- if (i == nseg - 1)
- *lptr = direction|0xD0000000|sg_dma_len(sg);
- }
- reqlen = mptr - msg;
- *lenptr = len;
-
- if(cmd->underflow && len != cmd->underflow){
- printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
- len, cmd->underflow);
- }
- } else {
- *lenptr = len = 0;
- reqlen = 12;
- }
-
- /* Stick the headers on */
- msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
-
- // Send it on its way
- rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
- if (rcode == 0) {
- return 0;
- }
- return rcode;
-}
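-
-/*
- * Shape of one scatter-gather element as emitted above (flag bits per
- * the I2O simple-SGE convention, inferred from the constants used):
- *
- *	word 0	flags | byte count
- *		0x10000000  simple element
- *		0x04000000  direction out (host -> device)
- *		0xD0000000  simple | last element | end of buffer
- *	word 1	DMA address, low 32 bits
- *	word 2	DMA address, high 32 bits (64-bit mode only)
- */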
-
-
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
-{
- struct Scsi_Host *host;
-
- host = scsi_host_alloc(sht, sizeof(adpt_hba*));
- if (host == NULL) {
- printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
- return -1;
- }
- host->hostdata[0] = (unsigned long)pHba;
- pHba->host = host;
-
- host->irq = pHba->pDev->irq;
- /* no IO ports, so don't have to set host->io_port and
- * host->n_io_port
- */
- host->io_port = 0;
- host->n_io_port = 0;
- /* see comments in scsi_host.h */
- host->max_id = 16;
- host->max_lun = 256;
- host->max_channel = pHba->top_scsi_channel + 1;
- host->cmd_per_lun = 1;
- host->unique_id = (u32)sys_tbl_pa + pHba->unit;
- host->sg_tablesize = pHba->sg_tablesize;
- host->can_queue = pHba->post_fifo_size;
- host->use_cmd_list = 1;
-
- return 0;
-}
-
-
-static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 hba_status;
- u32 dev_status;
- u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
- // This would look cleaner if we just read bytes, but the
- // model used for all the rest of the I/O here is 4-byte
- // words, so we keep that model
- u16 detailed_status = readl(reply+16) &0xffff;
- dev_status = (detailed_status & 0xff);
- hba_status = detailed_status >> 8;
-
- // calculate resid for sg
- scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
-
- cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
-
- if(!(reply_flags & MSG_FAIL)) {
- switch(detailed_status & I2O_SCSI_DSC_MASK) {
- case I2O_SCSI_DSC_SUCCESS:
- cmd->result = (DID_OK << 16);
- // handle underflow
- if (readl(reply+20) < cmd->underflow) {
- cmd->result = (DID_ERROR <<16);
- printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
- }
- break;
- case I2O_SCSI_DSC_REQUEST_ABORTED:
- cmd->result = (DID_ABORT << 16);
- break;
- case I2O_SCSI_DSC_PATH_INVALID:
- case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
- case I2O_SCSI_DSC_SELECTION_TIMEOUT:
- case I2O_SCSI_DSC_COMMAND_TIMEOUT:
- case I2O_SCSI_DSC_NO_ADAPTER:
- case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
- printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_TIME_OUT << 16);
- break;
- case I2O_SCSI_DSC_ADAPTER_BUSY:
- case I2O_SCSI_DSC_BUS_BUSY:
- cmd->result = (DID_BUS_BUSY << 16);
- break;
- case I2O_SCSI_DSC_SCSI_BUS_RESET:
- case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
- cmd->result = (DID_RESET << 16);
- break;
- case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
- printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
- cmd->result = (DID_PARITY << 16);
- break;
- case I2O_SCSI_DSC_UNABLE_TO_ABORT:
- case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
- case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
- case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_AUTOSENSE_FAILED:
- case I2O_SCSI_DSC_DATA_OVERRUN:
- case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
- case I2O_SCSI_DSC_SEQUENCE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
- case I2O_SCSI_DSC_PROVIDE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_TERMINATED:
- case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
- case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
- case I2O_SCSI_DSC_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_INVALID_CDB:
- case I2O_SCSI_DSC_LUN_INVALID:
- case I2O_SCSI_DSC_SCSI_TID_INVALID:
- case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
- case I2O_SCSI_DSC_NO_NEXUS:
- case I2O_SCSI_DSC_CDB_RECEIVED:
- case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
- case I2O_SCSI_DSC_QUEUE_FROZEN:
- case I2O_SCSI_DSC_REQUEST_INVALID:
- default:
-			printk(KERN_WARNING"%s: SCSI error 0x%x - Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_ERROR << 16);
- break;
- }
-
- // copy over the request sense data if it was a check
- // condition status
- if (dev_status == SAM_STAT_CHECK_CONDITION) {
- u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
- // Copy over the sense data
- memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
- if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
- cmd->sense_buffer[2] == DATA_PROTECT ){
- /* This is to handle an array failed */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
-
- }
- }
- } else {
-		/* In this condition we could not talk to the tid;
-		 * the card rejected it. We should signal a retry
-		 * for a limited number of retries.
-		 */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
- }
-
- cmd->result |= (dev_status);
-
- if(cmd->scsi_done != NULL){
- cmd->scsi_done(cmd);
- }
- return cmd->result;
-}
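
One detail worth noting in the completion path above: the 16-bit word read at reply+16 carries two status bytes at once, which is why the DSC constants in dpti.h are all multiples of 0x100. A small worked illustration, with an example value:

	u16 detailed_status = 0x0802;		 /* example value only */
	u32 dev_status = detailed_status & 0xff; /* 0x02 = SAM_STAT_CHECK_CONDITION */
	u32 hba_status = detailed_status >> 8;	 /* 0x08 = I2O_SCSI_DSC_DEVICE_NOT_PRESENT >> 8 */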
-
-
-static s32 adpt_rescan(adpt_hba* pHba)
-{
- s32 rcode;
- ulong flags = 0;
-
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
- goto out;
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
- goto out;
- rcode = 0;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return rcode;
-}
-
-
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // at least 8 u32's
- struct adpt_device* pDev = NULL;
- struct i2o_device* pI2o_dev = NULL;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
-	max = lct->table_size;
-	max -= 3;		/* subtract the 3-word LCT header */
-	max /= 9;		/* each LCT entry is 9 32-bit words */
-
- // Mark each drive as unscanned
- for (d = pHba->devices; d; d = d->next) {
- pDev =(struct adpt_device*) d->owner;
- if(!pDev){
- continue;
- }
- pDev->state |= DPTI_DEV_UNSCANNED;
- }
-
- printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- continue;
- }
-
- if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- tid = lct->lct_entry[i].tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- printk(KERN_ERR"%s: Could not query device\n",pHba->name);
- continue;
- }
- bus_no = buf[0]>>16;
-			if (bus_no >= MAX_CHANNEL) {	/* Something is wrong - skip it */
- printk(KERN_WARNING
- "%s: Channel number %d out of range\n",
- pHba->name, bus_no);
- continue;
- }
-
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- pDev = pHba->channel[bus_no].device[scsi_id];
-			/* walk the LUN chain for a match */
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- break;
- }
- pDev = pDev->next_lun;
- }
-			if(!pDev ) { // Something new - add it
- d = kmalloc(sizeof(struct i2o_device),
- GFP_ATOMIC);
- if(d==NULL)
- {
- printk(KERN_CRIT "Out of memory for I2O device data.\n");
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
-
- pDev = pHba->channel[bus_no].device[scsi_id];
- if( pDev == NULL){
- pDev =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- while (pDev->next_lun) {
- pDev = pDev->next_lun;
- }
- pDev = pDev->next_lun =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- }
- pDev->tid = d->lct_data.tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
-				// Too late, the SCSI system has made up its mind, but what the hey ...
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- } // end of new i2o device
-
- // We found an old device - check it
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- if(!scsi_device_online(pDev->pScsi_dev)) {
- printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
- pHba->name,bus_no,scsi_id,scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
- }
- }
- d = pDev->pI2o_dev;
- if(d->lct_data.tid != tid) { // something changed
- pDev->tid = tid;
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
- if (pDev->pScsi_dev) {
- pDev->pScsi_dev->changed = TRUE;
- pDev->pScsi_dev->removable = TRUE;
- }
- }
- // Found it - mark it scanned
- pDev->state = DPTI_DEV_ONLINE;
- break;
- }
- pDev = pDev->next_lun;
- }
- }
- }
- for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
- pDev =(struct adpt_device*) pI2o_dev->owner;
- if(!pDev){
- continue;
- }
-		// Take offline any drives that previously existed but could
-		// not be found in the LCT table
- if (pDev->state & DPTI_DEV_UNSCANNED){
- pDev->state = DPTI_DEV_OFFLINE;
- printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
- }
- }
- }
- return 0;
-}
-
-static void adpt_fail_posted_scbs(adpt_hba* pHba)
-{
- struct scsi_cmnd* cmd = NULL;
- struct scsi_device* d = NULL;
-
- shost_for_each_device(d, pHba->host) {
- unsigned long flags;
- spin_lock_irqsave(&d->list_lock, flags);
- list_for_each_entry(cmd, &d->cmd_list, list) {
- cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
- cmd->scsi_done(cmd);
- }
- spin_unlock_irqrestore(&d->list_lock, flags);
- }
-}
-
-
-/*============================================================================
- * Routines from i2o subsystem
- *============================================================================
- */
-
-
-
-/*
- * Bring an I2O controller into HOLD state. See the spec.
- */
-static int adpt_i2o_activate_hba(adpt_hba* pHba)
-{
- int rcode;
-
- if(pHba->initialized ) {
- if (adpt_i2o_status_get(pHba) < 0) {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
- if (adpt_i2o_status_get(pHba) < 0) {
- printk(KERN_INFO "HBA not responding.\n");
- return -1;
- }
- }
-
- if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
- printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
- return -1;
- }
-
- if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
- pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
- pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
- pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
- adpt_i2o_reset_hba(pHba);
- if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
- return -1;
- }
- }
- } else {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
-
- }
-
- if (adpt_i2o_init_outbound_q(pHba) < 0) {
- return -1;
- }
-
- /* In HOLD state */
-
- if (adpt_i2o_hrt_get(pHba) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Bring a controller online into OPERATIONAL state.
- */
-
-static int adpt_i2o_online_hba(adpt_hba* pHba)
-{
- if (adpt_i2o_systab_send(pHba) < 0)
- return -1;
- /* In READY state */
-
- if (adpt_i2o_enable_hba(pHba) < 0)
- return -1;
-
- /* In OPERATIONAL state */
- return 0;
-}
-
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
-{
- u32 __iomem *msg;
- ulong timeout = jiffies + 5*HZ;
-
- while(m == EMPTY_QUEUE){
- rmb();
- m = readl(pHba->post_port);
- if(m != EMPTY_QUEUE){
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
- return 2;
- }
- schedule_timeout_uninterruptible(1);
- }
- msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
- writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
- writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
- writel( 0,&msg[2]);
- wmb();
-
- writel(m, pHba->post_port);
- wmb();
- return 0;
-}
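
adpt_send_nop() is the smallest instance of the inbound-message protocol every sender in this file follows: poll the post port until the IOP returns a free frame offset (EMPTY_QUEUE means none available), build the message in that frame with MMIO writes, then write the offset back to the post port to submit it. A hedged sketch of that shared skeleton, with an illustrative function name:

	/* Sketch only - not a helper in this driver.  Claims one message
	 * frame, fills it, and posts it; real callers retry with a timeout
	 * while the post port keeps returning EMPTY_QUEUE. */
	static int adpt_post_frame_sketch(adpt_hba *pHba, const u32 *words, int n)
	{
		u32 m = readl(pHba->post_port);	/* frame offset, or EMPTY_QUEUE */
		u32 __iomem *msg;
		int i;

		if (m == EMPTY_QUEUE)
			return -EBUSY;
		msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
		for (i = 0; i < n; i++)
			writel(words[i], &msg[i]);
		wmb();				/* frame contents before the doorbell */
		writel(m, pHba->post_port);	/* hand the frame to the IOP */
		return 0;
	}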
-
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
-{
- u8 *status;
- dma_addr_t addr;
- u32 __iomem *msg = NULL;
- int i;
- ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
- u32 m;
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
-
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if (!status) {
- adpt_send_nop(pHba, m);
- printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- memset(status, 0, 4);
-
- writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
- writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
- writel(0, &msg[2]);
- writel(0x0106, &msg[3]); /* Transaction context */
- writel(4096, &msg[4]); /* Host page frame size */
- writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
- writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
- writel((u32)addr, &msg[7]);
-
- writel(m, pHba->post_port);
- wmb();
-
- // Wait for the reply status to come back
- do {
- if (*status) {
- if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
- break;
- }
- }
- rmb();
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
-			/* We lose 4 bytes of "status" here, but we
-			   cannot free them because the controller may
-			   wake up and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (1);
-
- // If the command was successful, fill the fifo with our reply
- // message packets
- if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
- return -2;
- }
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-
- if(pHba->reply_pool != NULL) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- &pHba->reply_pool_pa, GFP_KERNEL);
- if (!pHba->reply_pool) {
- printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
- return -ENOMEM;
- }
- memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
-
- for(i = 0; i < pHba->reply_fifo_size; i++) {
- writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
- pHba->reply_port);
- wmb();
- }
- adpt_i2o_status_get(pHba);
- return 0;
-}
-
-
-/*
- * I2O System Table. Contains information about
- * all the IOPs in the system. Used to inform IOPs
- * about each other's existence.
- *
- * sys_tbl_ver is the CurrentChangeIndicator that is
- * used by IOPs to track changes.
- */
-
-
-
-static s32 adpt_i2o_status_get(adpt_hba* pHba)
-{
- ulong timeout;
- u32 m;
- u32 __iomem *msg;
- u8 *status_block=NULL;
-
- if(pHba->status_block == NULL) {
- pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(i2o_status_block),
- &pHba->status_block_pa, GFP_KERNEL);
- if(pHba->status_block == NULL) {
- printk(KERN_ERR
-			"dpti%d: Get Status Block failed; Out of memory.\n",
- pHba->unit);
- return -ENOMEM;
- }
- }
- memset(pHba->status_block, 0, sizeof(i2o_status_block));
- status_block = (u8*)(pHba->status_block);
- timeout = jiffies+TMOUT_GETSTATUS*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message !\n",
- pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m==EMPTY_QUEUE);
-
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
- writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
- writel(1, &msg[2]);
- writel(0, &msg[3]);
- writel(0, &msg[4]);
- writel(0, &msg[5]);
- writel( dma_low(pHba->status_block_pa), &msg[6]);
- writel( dma_high(pHba->status_block_pa), &msg[7]);
- writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- while(status_block[87]!=0xff){
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR"dpti%d: Get status timeout.\n",
- pHba->unit);
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- // Set up our number of outbound and inbound messages
- pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
- if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
- pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
- }
-
- pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
- if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
- pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
- }
-
- // Calculate the Scatter Gather list size
- if (dpt_dma64(pHba)) {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 14 * sizeof(u32))
- / (sizeof(struct sg_simple_element) + sizeof(u32)));
- } else {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 12 * sizeof(u32))
- / sizeof(struct sg_simple_element));
- }
- if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
- pHba->sg_tablesize = SG_LIST_ELEMENTS;
- }
-
-
-#ifdef DEBUG
- printk("dpti%d: State = ",pHba->unit);
- switch(pHba->status_block->iop_state) {
- case 0x01:
- printk("INIT\n");
- break;
- case 0x02:
- printk("RESET\n");
- break;
- case 0x04:
- printk("HOLD\n");
- break;
- case 0x05:
- printk("READY\n");
- break;
- case 0x08:
- printk("OPERATIONAL\n");
- break;
- case 0x10:
- printk("FAILED\n");
- break;
- case 0x11:
- printk("FAULTED\n");
- break;
- default:
- printk("%x (unknown!!)\n",pHba->status_block->iop_state);
- }
-#endif
- return 0;
-}
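
The scatter-gather sizing above is plain arithmetic on the inbound frame: total frame bytes, minus the fixed message header (12 words for 32-bit SG lists, 14 for 64-bit), divided by the per-element cost (each 64-bit element carries an extra u32 for the high address word). A worked example:

	/* Assumed frame size of 32 words (128 bytes); the real value comes
	 * from the status block.
	 *   32-bit SGEs: (32*4 - 12*4) / 8       = (128 - 48) / 8  = 10 elements
	 *   64-bit SGEs: (32*4 - 14*4) / (8 + 4) = (128 - 56) / 12 = 6 elements
	 * Either result is then clamped to SG_LIST_ELEMENTS (56). */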
-
-/*
- * Get the IOP's Logical Configuration Table
- */
-static int adpt_i2o_lct_get(adpt_hba* pHba)
-{
- u32 msg[8];
- int ret;
- u32 buf[16];
-
- if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
- pHba->lct_size = pHba->status_block->expected_lct_size;
- }
- do {
- if (pHba->lct == NULL) {
- pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->lct_size, &pHba->lct_pa,
- GFP_ATOMIC);
- if(pHba->lct == NULL) {
- printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- }
- memset(pHba->lct, 0, pHba->lct_size);
-
- msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
- msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0xFFFFFFFF; /* All devices */
- msg[5] = 0x00000000; /* Report now */
- msg[6] = 0xD0000000|pHba->lct_size;
- msg[7] = (u32)pHba->lct_pa;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
-			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
- pHba->name, ret);
- printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
- return ret;
- }
-
- if ((pHba->lct->table_size << 2) > pHba->lct_size) {
- pHba->lct_size = pHba->lct->table_size << 2;
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- pHba->lct = NULL;
- }
- } while (pHba->lct == NULL);
-
- PDEBUG("%s: Hardware resource table read.\n", pHba->name);
-
-
- // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
- pHba->FwDebugBufferSize = buf[1];
- pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
- pHba->FwDebugBufferSize);
- if (pHba->FwDebugBuffer_P) {
- pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_FLAGS_OFFSET;
- pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_BLED_OFFSET;
- pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
- pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_STR_LENGTH_OFFSET;
- pHba->FwDebugBuffer_P += buf[2];
- pHba->FwDebugFlags = 0;
- }
- }
-
- return 0;
-}
-
-static int adpt_i2o_build_sys_table(void)
-{
- adpt_hba* pHba = hba_chain;
- int count = 0;
-
- if (sys_tbl)
- dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
- sys_tbl, sys_tbl_pa);
-
- sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
- (hba_count) * sizeof(struct i2o_sys_tbl_entry);
-
- sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
- sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
- if (!sys_tbl) {
- printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
- return -ENOMEM;
- }
- memset(sys_tbl, 0, sys_tbl_len);
-
- sys_tbl->num_entries = hba_count;
- sys_tbl->version = I2OVERSION;
- sys_tbl->change_ind = sys_tbl_ind++;
-
- for(pHba = hba_chain; pHba; pHba = pHba->next) {
- u64 addr;
- // Get updated Status Block so we have the latest information
- if (adpt_i2o_status_get(pHba)) {
- sys_tbl->num_entries--;
- continue; // try next one
- }
-
- sys_tbl->iops[count].org_id = pHba->status_block->org_id;
- sys_tbl->iops[count].iop_id = pHba->unit + 2;
- sys_tbl->iops[count].seg_num = 0;
- sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
- sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
- sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
- sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
- sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
- sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
- addr = pHba->base_addr_phys + 0x40;
- sys_tbl->iops[count].inbound_low = dma_low(addr);
- sys_tbl->iops[count].inbound_high = dma_high(addr);
-
- count++;
- }
-
-#ifdef DEBUG
-{
- u32 *table = (u32*)sys_tbl;
- printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
- for(count = 0; count < (sys_tbl_len >>2); count++) {
- printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
- count, table[count]);
- }
-}
-#endif
-
- return 0;
-}
-
-
-/*
- * Dump the information block associated with a given unit (TID)
- */
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
-{
- char buf[64];
- int unit = d->lct_data.tid;
-
- printk(KERN_INFO "TID %3.3d ", unit);
-
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Vendor: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Device: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
- {
- buf[8]=0;
- printk(" Rev: %-12.12s\n", buf);
- }
-#ifdef DEBUG
- printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
- printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
- printk(KERN_INFO "\tFlags: ");
-
- if(d->lct_data.device_flags&(1<<0))
- printk("C"); // ConfigDialog requested
- if(d->lct_data.device_flags&(1<<1))
- printk("U"); // Multi-user capable
- if(!(d->lct_data.device_flags&(1<<4)))
- printk("P"); // Peer service enabled!
- if(!(d->lct_data.device_flags&(1<<5)))
- printk("M"); // Mgmt service enabled!
- printk("\n");
-#endif
-}
-
-#ifdef DEBUG
-/*
- * Do i2o class name lookup
- */
-static const char *adpt_i2o_get_class_name(int class)
-{
- int idx = 16;
- static char *i2o_class_name[] = {
- "Executive",
- "Device Driver Module",
- "Block Device",
- "Tape Device",
- "LAN Interface",
- "WAN Interface",
- "Fibre Channel Port",
- "Fibre Channel Device",
- "SCSI Device",
- "ATE Port",
- "ATE Device",
- "Floppy Controller",
- "Floppy Device",
- "Secondary Bus Port",
- "Peer Transport Agent",
- "Peer Transport",
- "Unknown"
- };
-
- switch(class&0xFFF) {
- case I2O_CLASS_EXECUTIVE:
- idx = 0; break;
- case I2O_CLASS_DDM:
- idx = 1; break;
- case I2O_CLASS_RANDOM_BLOCK_STORAGE:
- idx = 2; break;
- case I2O_CLASS_SEQUENTIAL_STORAGE:
- idx = 3; break;
- case I2O_CLASS_LAN:
- idx = 4; break;
- case I2O_CLASS_WAN:
- idx = 5; break;
- case I2O_CLASS_FIBRE_CHANNEL_PORT:
- idx = 6; break;
- case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
- idx = 7; break;
- case I2O_CLASS_SCSI_PERIPHERAL:
- idx = 8; break;
- case I2O_CLASS_ATE_PORT:
- idx = 9; break;
- case I2O_CLASS_ATE_PERIPHERAL:
- idx = 10; break;
- case I2O_CLASS_FLOPPY_CONTROLLER:
- idx = 11; break;
- case I2O_CLASS_FLOPPY_DEVICE:
- idx = 12; break;
- case I2O_CLASS_BUS_ADAPTER_PORT:
- idx = 13; break;
- case I2O_CLASS_PEER_TRANSPORT_AGENT:
- idx = 14; break;
- case I2O_CLASS_PEER_TRANSPORT:
- idx = 15; break;
- }
- return i2o_class_name[idx];
-}
-#endif
-
-
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
-{
- u32 msg[6];
- int ret, size = sizeof(i2o_hrt);
-
- do {
- if (pHba->hrt == NULL) {
- pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
- size, &pHba->hrt_pa, GFP_KERNEL);
- if (pHba->hrt == NULL) {
- printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
- }
-
- msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
- msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
- msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
- printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
- return ret;
- }
-
- if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
- int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
- dma_free_coherent(&pHba->pDev->dev, size,
- pHba->hrt, pHba->hrt_pa);
- size = newsize;
- pHba->hrt = NULL;
- }
- } while(pHba->hrt == NULL);
- return 0;
-}
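
adpt_i2o_hrt_get() here and adpt_i2o_lct_get() above share the same grow-and-retry shape: allocate a buffer at the current size guess, ask the IOP to fill it, and if the returned table reports a larger real size, free the buffer, adopt the new size, and loop. A distilled sketch; fetch_table(), reported_size() and initial_guess are hypothetical stand-ins:

	void *buf = NULL;
	dma_addr_t pa;
	int size = initial_guess, needed, rc;

	do {
		if (!buf) {
			buf = dma_alloc_coherent(dev, size, &pa, GFP_KERNEL);
			if (!buf)
				return -ENOMEM;
		}
		rc = fetch_table(buf, size);	/* ask the IOP to fill the buffer */
		if (rc)
			return rc;
		needed = reported_size(buf);	/* size the IOP says it needs */
		if (needed > size) {
			dma_free_coherent(dev, size, buf, pa);
			size = needed;		/* grow and go around again */
			buf = NULL;
		}
	} while (!buf);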
-
-/*
- * Query one scalar group value or a whole scalar group.
- */
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen)
-{
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 *opblk_va;
- dma_addr_t opblk_pa;
- u8 *resblk_va;
- dma_addr_t resblk_pa;
-
- int size;
-
- /* 8 bytes for header */
- resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
- if (resblk_va == NULL) {
- printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
-
- opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(opblk), &opblk_pa, GFP_KERNEL);
- if (opblk_va == NULL) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- if (field == -1) /* whole group */
- opblk[4] = -1;
-
- memcpy(opblk_va, opblk, sizeof(opblk));
- size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk_va, opblk_pa, sizeof(opblk),
- resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
- dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
- if (size == -ETIME) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
- return -ETIME;
- } else if (size == -EINTR) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
- return -EINTR;
- }
-
- memcpy(buf, resblk_va+8, buflen); /* cut off header */
-
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- if (size < 0)
- return size;
-
- return buflen;
-}
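
Callers use adpt_i2o_query_scalar() as a small synchronous RPC: pass a field index for one value, or -1 to fetch the whole group. The vendor-string lookup in adpt_i2o_report_hba_unit() above is typical; restated as a standalone example:

	/* Fetch the 16-byte vendor string (group 0xF100, field 3) for a TID,
	 * mirroring adpt_i2o_report_hba_unit() above. */
	char vendor[17];

	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, vendor, 16) >= 0) {
		vendor[16] = '\0';	/* the result is not NUL-terminated */
		printk(KERN_INFO "vendor: %s\n", vendor);
	}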
-
-
-/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
- *
- * This function can be used for all UtilParamsGet/Set operations.
- * The OperationBlock is given in opblk-buffer,
- * and results are returned in resblk-buffer.
- * Note that the minimum sized resblk is 8 bytes and contains
- * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
- */
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk_va, dma_addr_t opblk_pa, int oplen,
- void *resblk_va, dma_addr_t resblk_pa, int reslen)
-{
- u32 msg[9];
- u32 *res = (u32 *)resblk_va;
- int wait_status;
-
- msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
- msg[1] = cmd << 24 | HOST_TID << 12 | tid;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0;
- msg[5] = 0x54000000 | oplen; /* OperationBlock */
- msg[6] = (u32)opblk_pa;
- msg[7] = 0xD0000000 | reslen; /* ResultBlock */
- msg[8] = (u32)resblk_pa;
-
- if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
-		printk(KERN_ERR "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
- return wait_status; /* -DetailedStatus */
- }
-
- if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- pHba->name,
- (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
- : "PARAMS_GET",
- res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
- return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
- }
-
- return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
-}
-
-
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
-
- /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
-
- if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
- (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
- return 0;
- }
-
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
-
- if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
- pHba->unit, -ret);
- } else {
- printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-/*
- * Enable IOP. Allows the IOP to resume external operations.
- */
-static int adpt_i2o_enable_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
- if(!pHba->status_block){
- return -ENOMEM;
- }
- /* Enable only allowed on READY state */
- if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
- return 0;
-
- if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
- return -EINVAL;
-
- msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
- pHba->name, ret);
- } else {
- PDEBUG("%s: Enabled.\n", pHba->name);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-static int adpt_i2o_systab_send(adpt_hba* pHba)
-{
- u32 msg[12];
- int ret;
-
- msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
- msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
- msg[5] = 0; /* Segment 0 */
-
- /*
- * Provide three SGL-elements:
- * System table (SysTab), Private memory space declaration and
- * Private i/o space declaration
- */
- msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = (u32)sys_tbl_pa;
- msg[8] = 0x54000000 | 0;
- msg[9] = 0;
- msg[10] = 0xD4000000 | 0;
- msg[11] = 0;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
- printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
- pHba->name, ret);
- }
-#ifdef DEBUG
- else {
- PINFO("%s: SysTab set.\n", pHba->name);
- }
-#endif
-
- return ret;
-}
-
-
-/*============================================================================
- *
- *============================================================================
- */
-
-
-#ifdef UARTDELAY
-
-static void adpt_delay(int millisec)
-{
- int i;
- for (i = 0; i < millisec; i++) {
- udelay(1000); /* delay for one millisecond */
- }
-}
-
-#endif
-
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = "dpt_i2o",
- .proc_name = "dpt_i2o",
- .show_info = adpt_show_info,
- .info = adpt_info,
- .queuecommand = adpt_queue,
- .eh_abort_handler = adpt_abort,
- .eh_device_reset_handler = adpt_device_reset,
- .eh_bus_reset_handler = adpt_bus_reset,
- .eh_host_reset_handler = adpt_reset,
- .bios_param = adpt_bios_param,
- .slave_configure = adpt_slave_configure,
- .can_queue = MAX_TO_IOP_MESSAGES,
- .this_id = 7,
-};
-
-static int __init adpt_init(void)
-{
- int error;
- adpt_hba *pHba, *next;
-
-	printk(KERN_INFO "Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-
- error = adpt_detect(&driver_template);
- if (error < 0)
- return error;
- if (hba_chain == NULL)
- return -ENODEV;
-
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- error = scsi_add_host(pHba->host, &pHba->pDev->dev);
- if (error)
- goto fail;
- scsi_scan_host(pHba->host);
- }
- return 0;
-fail:
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- scsi_remove_host(pHba->host);
- }
- return error;
-}
-
-static void __exit adpt_exit(void)
-{
- adpt_hba *pHba, *next;
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- adpt_release(pHba);
- }
-}
-
-module_init(adpt_init);
-module_exit(adpt_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
deleted file mode 100644
index 42b1e28b5884..000000000000
--- a/drivers/scsi/dpti.h
+++ /dev/null
@@ -1,332 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.txt for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-#ifndef _DPT_H
-#define _DPT_H
-
-#define MAX_TO_IOP_MESSAGES (255)
-#define MAX_FROM_IOP_MESSAGES (255)
-
-
-/*
- * SCSI interface function Prototypes
- */
-
-static int adpt_detect(struct scsi_host_template * sht);
-static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
-static int adpt_abort(struct scsi_cmnd * cmd);
-static int adpt_reset(struct scsi_cmnd* cmd);
-static int adpt_slave_configure(struct scsi_device *);
-
-static const char *adpt_info(struct Scsi_Host *pSHost);
-static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
- sector_t, int geom[]);
-
-static int adpt_bus_reset(struct scsi_cmnd* cmd);
-static int adpt_device_reset(struct scsi_cmnd* cmd);
-
-
-/*
- * struct scsi_host_template (see scsi/scsi_host.h)
- */
-
-#define DPT_DRIVER_NAME "Adaptec I2O RAID"
-
-#ifndef HOSTS_C
-
-#include "dpt/sys_info.h"
-#include <linux/wait.h>
-#include "dpt/dpti_i2o.h"
-#include "dpt/dpti_ioctl.h"
-
-#define DPT_I2O_VERSION "2.4 Build 5go"
-#define DPT_VERSION 2
-#define DPT_REVISION '4'
-#define DPT_SUBREVISION '5'
-#define DPT_BETA ""
-#define DPT_MONTH 8
-#define DPT_DAY 7
-#define DPT_YEAR (2001-1980)
-
-#define DPT_DRIVER "dpt_i2o"
-#define DPTI_I2O_MAJOR (151)
-#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */
-#define DPTI_MAX_HBA (16)
-#define MAX_CHANNEL (5) // Maximum Channel # Supported
-#define MAX_ID (128) // Maximum Target ID Supported
-
-/* Sizes in 4 byte words */
-#define REPLY_FRAME_SIZE (17)
-#define MAX_MESSAGE_SIZE (128)
-#define SG_LIST_ELEMENTS (56)
-
-#define EMPTY_QUEUE 0xffffffff
-#define I2O_INTERRUPT_PENDING_B (0x08)
-
-#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID
-#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
-#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
-
-/* Debugging macro from Linux Device Drivers - Rubini */
-#undef PDEBUG
-#ifdef DEBUG
-//TODO add debug level switch
-# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-#else
-# define PDEBUG(fmt, args...) /* not debugging: nothing */
-# define PDEBUGV(fmt, args...) /* not debugging: nothing */
-#endif
-
-#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args)
-#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args)
-#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args)
-#define PCRIT(fmt, args...) printk(KERN_CRIT fmt, ##args)
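
These are the classic variadic printk wrappers from Rubini's Linux Device Drivers: with DEBUG defined they expand to real printk() calls carrying a "dpti: " prefix, otherwise they compile away entirely, so call sites cost nothing in production builds. For example:

	/* With DEBUG defined this expands to
	 *   printk(KERN_DEBUG "dpti: posting frame at offset 0x%x\n", m);
	 * without DEBUG it expands to nothing. */
	PDEBUG("posting frame at offset 0x%x\n", m);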
-
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-
-// Command timeouts
-#define FOREVER (0)
-#define TMOUT_INQUIRY (20)
-#define TMOUT_FLUSH (360/45)
-#define TMOUT_ABORT (30)
-#define TMOUT_SCSI (300)
-#define TMOUT_IOPRESET (360)
-#define TMOUT_GETSTATUS (15)
-#define TMOUT_INITOUTBOUND (15)
-#define TMOUT_LCT (360)
-
-
-#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF
-
-#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A
-
-#define I2O_SCSI_DSC_MASK 0xFF00
-#define I2O_SCSI_DSC_SUCCESS 0x0000
-#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200
-#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300
-#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400
-#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500
-#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600
-#define I2O_SCSI_DSC_PATH_INVALID 0x0700
-#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800
-#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900
-#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00
-#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00
-#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00
-#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00
-#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00
-#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000
-#define I2O_SCSI_DSC_NO_ADAPTER 0x1100
-#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200
-#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300
-#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400
-#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500
-#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600
-#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700
-#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800
-#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300
-#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400
-#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500
-#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600
-#define I2O_SCSI_DSC_INVALID_CDB 0x3700
-#define I2O_SCSI_DSC_LUN_INVALID 0x3800
-#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900
-#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00
-#define I2O_SCSI_DSC_NO_NEXUS 0x3B00
-#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00
-#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00
-#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00
-#define I2O_SCSI_DSC_BUS_BUSY 0x3F00
-#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000
-
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed
-#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State
-#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */
-#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */
-
-
-// Device state flags
-#define DPTI_DEV_ONLINE 0x00
-#define DPTI_DEV_UNSCANNED 0x01
-#define DPTI_DEV_RESET 0x02
-#define DPTI_DEV_OFFLINE 0x04
-
-
-struct adpt_device {
- struct adpt_device* next_lun;
- u32 flags;
- u32 type;
- u32 capacity;
- u32 block_size;
- u8 scsi_channel;
- u8 scsi_id;
- u64 scsi_lun;
- u8 state;
- u16 tid;
- struct i2o_device* pI2o_dev;
- struct scsi_device *pScsi_dev;
-};
-
-struct adpt_channel {
- struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */
- u8 scsi_id;
- u8 type;
- u16 tid;
- u32 state;
- struct i2o_device* pI2o_dev;
-};
-
-// HBA state flags
-#define DPTI_STATE_RESET (0x01)
-
-typedef struct _adpt_hba {
- struct _adpt_hba *next;
- struct pci_dev *pDev;
- struct Scsi_Host *host;
- u32 state;
- spinlock_t state_lock;
- int unit;
- int host_no; /* SCSI host number */
- u8 initialized;
- u8 in_use; /* is the management node open*/
-
- char name[32];
- char detail[55];
-
- void __iomem *base_addr_virt;
- void __iomem *msg_addr_virt;
- ulong base_addr_phys;
- void __iomem *post_port;
- void __iomem *reply_port;
- void __iomem *irq_mask;
- u16 post_count;
- u32 post_fifo_size;
- u32 reply_fifo_size;
- u32* reply_pool;
- dma_addr_t reply_pool_pa;
- u32 sg_tablesize; // Scatter/Gather List Size.
- u8 top_scsi_channel;
- u8 top_scsi_id;
- u64 top_scsi_lun;
- u8 dma64;
-
- i2o_status_block* status_block;
- dma_addr_t status_block_pa;
- i2o_hrt* hrt;
- dma_addr_t hrt_pa;
- i2o_lct* lct;
- dma_addr_t lct_pa;
- uint lct_size;
- struct i2o_device* devices;
- struct adpt_channel channel[MAX_CHANNEL];
- struct proc_dir_entry* proc_entry; /* /proc dir */
-
- void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
- u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
- void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len
- void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags
-	void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED flag
-	void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED value
- u32 FwDebugFlags;
- u32 *ioctl_reply_context[4];
-} adpt_hba;
-
-struct sg_simple_element {
- u32 flag_count;
- u32 addr_bus;
-};
-
-/*
- * Function Prototypes
- */
-
-static void adpt_i2o_sys_shutdown(void);
-static int adpt_init(void);
-static int adpt_i2o_build_sys_table(void);
-static irqreturn_t adpt_isr(int irq, void *dev_id);
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen);
-#ifdef DEBUG
-static const char *adpt_i2o_get_class_name(int class);
-#endif
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, dma_addr_t opblk_pa, int oplen,
- void *resblk, dma_addr_t resblk_pa, int reslen);
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
-static int adpt_i2o_lct_get(adpt_hba* pHba);
-static int adpt_i2o_parse_lct(adpt_hba* pHba);
-static int adpt_i2o_activate_hba(adpt_hba* pHba);
-static int adpt_i2o_enable_hba(adpt_hba* pHba);
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d);
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len);
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba);
-static s32 adpt_i2o_status_get(adpt_hba* pHba);
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
-static s32 adpt_hba_reset(adpt_hba* pHba);
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
-static s32 adpt_rescan(adpt_hba* pHba);
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
-static void adpt_i2o_delete_hba(adpt_hba* pHba);
-static void adpt_inquiry(adpt_hba* pHba);
-static void adpt_fail_posted_scbs(adpt_hba* pHba);
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
-static int adpt_i2o_online_hba(adpt_hba* pHba);
-static void adpt_i2o_post_wait_complete(u32, int);
-static int adpt_i2o_systab_send(adpt_hba* pHba);
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg);
-static int adpt_open(struct inode *inode, struct file *file);
-static int adpt_close(struct inode *inode, struct file *file);
-
-
-#ifdef UARTDELAY
-static void adpt_delay(int millisec);
-#endif
-
-#define PRINT_BUFFER_SIZE 512
-
-#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
-#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print
-#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print
-#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point
-#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit
-#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions
-#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints
-#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info
-#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan
-
-#define FW_DEBUG_STR_LENGTH_OFFSET 0
-#define FW_DEBUG_FLAGS_OFFSET 4
-#define FW_DEBUG_BLED_OFFSET 8
-
-#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
-#endif /* !HOSTS_C */
-#endif /* _DPT_H */
diff --git a/drivers/scsi/elx/Kconfig b/drivers/scsi/elx/Kconfig
new file mode 100644
index 000000000000..831daea7a951
--- /dev/null
+++ b/drivers/scsi/elx/Kconfig
@@ -0,0 +1,9 @@
+config SCSI_EFCT
+ tristate "Emulex Fibre Channel Target"
+ depends on PCI && SCSI
+ depends on TARGET_CORE
+ depends on SCSI_FC_ATTRS
+ select CRC_T10DIF
+ help
+ The efct driver provides enhanced SCSI Target Mode
+ support for specific SLI-4 adapters.
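
Enabling the new driver requires the dependencies listed above to be satisfied first; a typical configuration fragment (illustrative, modular builds assumed) would be:

	CONFIG_TARGET_CORE=m
	CONFIG_SCSI_FC_ATTRS=m
	CONFIG_SCSI_EFCT=m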
diff --git a/drivers/scsi/elx/Makefile b/drivers/scsi/elx/Makefile
new file mode 100644
index 000000000000..a8537d7a2a6e
--- /dev/null
+++ b/drivers/scsi/elx/Makefile
@@ -0,0 +1,18 @@
+#// SPDX-License-Identifier: GPL-2.0
+#/*
+# * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+# */
+
+
+obj-$(CONFIG_SCSI_EFCT) := efct.o
+
+efct-objs := efct/efct_driver.o efct/efct_io.o efct/efct_scsi.o \
+ efct/efct_xport.o efct/efct_hw.o efct/efct_hw_queues.o \
+ efct/efct_lio.o efct/efct_unsol.o
+
+efct-objs += libefc/efc_cmds.o libefc/efc_domain.o libefc/efc_fabric.o \
+ libefc/efc_node.o libefc/efc_nport.o libefc/efc_device.o \
+ libefc/efclib.o libefc/efc_sm.o libefc/efc_els.o
+
+efct-objs += libefc_sli/sli4.o
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
new file mode 100644
index 000000000000..b08fc8839808
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+
+#include "efct_hw.h"
+#include "efct_unsol.h"
+#include "efct_scsi.h"
+
+LIST_HEAD(efct_devices);
+
+static int logmask;
+module_param(logmask, int, 0444);
+MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
+
+static struct libefc_function_template efct_libefc_templ = {
+ .issue_mbox_rqst = efct_issue_mbox_rqst,
+ .send_els = efct_els_hw_srrs_send,
+ .send_bls = efct_efc_bls_send,
+
+ .new_nport = efct_scsi_tgt_new_nport,
+ .del_nport = efct_scsi_tgt_del_nport,
+ .scsi_new_node = efct_scsi_new_initiator,
+ .scsi_del_node = efct_scsi_del_initiator,
+ .hw_seq_free = efct_efc_hw_sequence_free,
+};
+
+static int
+efct_device_init(void)
+{
+ int rc;
+
+ /* driver-wide init for target-server */
+ rc = efct_scsi_tgt_driver_init();
+ if (rc) {
+ pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = efct_scsi_reg_fc_transport();
+ if (rc) {
+ pr_err("failed to register to FC host\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void
+efct_device_shutdown(void)
+{
+ efct_scsi_release_fc_transport();
+
+ efct_scsi_tgt_driver_exit();
+}
+
+static void *
+efct_device_alloc(u32 nid)
+{
+ struct efct *efct = NULL;
+
+ efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
+ if (!efct)
+ return efct;
+
+ INIT_LIST_HEAD(&efct->list_entry);
+ list_add_tail(&efct->list_entry, &efct_devices);
+
+ return efct;
+}
+
+static void
+efct_teardown_msix(struct efct *efct)
+{
+ u32 i;
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+ }
+
+ pci_free_irq_vectors(efct->pci);
+}
+
+static int
+efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
+{
+ struct efc *efc;
+ struct sli4 *sli;
+ int rc = 0;
+
+ efc = kzalloc(sizeof(*efc), GFP_KERNEL);
+ if (!efc)
+ return -ENOMEM;
+
+ efct->efcport = efc;
+
+ memcpy(&efc->tt, tt, sizeof(*tt));
+ efc->base = efct;
+ efc->pci = efct->pci;
+
+ efc->def_wwnn = efct_get_wwnn(&efct->hw);
+ efc->def_wwpn = efct_get_wwpn(&efct->hw);
+ efc->enable_tgt = 1;
+ efc->log_level = EFC_LOG_LIB;
+
+ sli = &efct->hw.sli;
+ efc->max_xfer_size = sli->sge_supported_length *
+ sli_get_max_sgl(&efct->hw.sli);
+ efc->sli = sli;
+ efc->fcfi = efct->hw.fcf_indicator;
+
+ rc = efcport_init(efc);
+ if (rc)
+ efc_log_err(efc, "efcport_init failed\n");
+
+ return rc;
+}
+
+static int efct_request_firmware_update(struct efct *efct);
+
+static const char*
+efct_pci_model(u16 device)
+{
+ switch (device) {
+ case EFCT_DEVICE_LANCER_G6: return "LPE31004";
+ case EFCT_DEVICE_LANCER_G7: return "LPE36000";
+ default: return "unknown";
+ }
+}
+
+static int
+efct_device_attach(struct efct *efct)
+{
+ u32 rc = 0, i = 0;
+
+ if (efct->attached) {
+ efc_log_err(efct, "Device is already attached\n");
+ return -EIO;
+ }
+
+ snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
+ efct->instance_index);
+
+ efct->logmask = logmask;
+ efct->filter_def = EFCT_DEFAULT_FILTER;
+ efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
+
+ efct->model = efct_pci_model(efct->pci->device);
+
+ efct->efct_req_fw_upgrade = true;
+
+ /* Allocate transport object and bring online */
+ efct->xport = efct_xport_alloc(efct);
+ if (!efct->xport) {
+ efc_log_err(efct, "failed to allocate transport object\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = efct_xport_attach(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to attach transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_xport_initialize(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_efclib_config(efct, &efct_libefc_templ);
+ if (rc) {
+ efc_log_err(efct, "failed to init efclib\n");
+ goto efclib_out;
+ }
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d enabled\n", i);
+ enable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ efct->attached = true;
+
+ if (efct->efct_req_fw_upgrade)
+ efct_request_firmware_update(efct);
+
+ return rc;
+
+efclib_out:
+ efct_xport_detach(efct->xport);
+xport_out:
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+out:
+ return rc;
+}
+
+static int
+efct_device_detach(struct efct *efct)
+{
+ int i;
+
+ if (!efct || !efct->attached) {
+ pr_err("Device is not attached\n");
+ return -EIO;
+ }
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
+ efc_log_err(efct, "Transport Shutdown timed out\n");
+
+ for (i = 0; i < efct->n_msix_vec; i++)
+ disable_irq(pci_irq_vector(efct->pci, i));
+
+ efct_xport_detach(efct->xport);
+
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+
+ efcport_destroy(efct->efcport);
+ kfree(efct->efcport);
+
+ efct->attached = false;
+
+ return 0;
+}
+
+static void
+efct_fw_write_cb(int status, u32 actual_write_length,
+ u32 change_status, void *arg)
+{
+ struct efct_fw_write_result *result = arg;
+
+ result->status = status;
+ result->actual_xfer = actual_write_length;
+ result->change_status = change_status;
+
+ complete(&result->done);
+}
+
+static int
+efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
+ u8 *change_status)
+{
+ int rc = 0;
+ u32 bytes_left;
+ u32 xfer_size;
+ u32 offset;
+ struct efc_dma dma;
+ int last = 0;
+ struct efct_fw_write_result result;
+
+ init_completion(&result.done);
+
+ bytes_left = buf_len;
+ offset = 0;
+
+ dma.size = FW_WRITE_BUFSIZE;
+ dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ dma.size, &dma.phys, GFP_KERNEL);
+ if (!dma.virt)
+ return -ENOMEM;
+
+ while (bytes_left > 0) {
+ if (bytes_left > FW_WRITE_BUFSIZE)
+ xfer_size = FW_WRITE_BUFSIZE;
+ else
+ xfer_size = bytes_left;
+
+ memcpy(dma.virt, buf + offset, xfer_size);
+
+ if (bytes_left == xfer_size)
+ last = 1;
+
+ efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
+ last, efct_fw_write_cb, &result);
+
+ if (wait_for_completion_interruptible(&result.done) != 0) {
+ rc = -ENXIO;
+ break;
+ }
+
+ if (result.actual_xfer == 0 || result.status != 0) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (last)
+ *change_status = result.change_status;
+
+ bytes_left -= result.actual_xfer;
+ offset += result.actual_xfer;
+ }
+
+ dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
+ return rc;
+}
+
+static int
+efct_fw_reset(struct efct *efct)
+{
+ /*
+ * Firmware reset to activate the new firmware.
+ * Function 0 will update and load the new firmware
+ * during attach.
+ */
+ if (timer_pending(&efct->xport->stats_timer))
+ del_timer(&efct->xport->stats_timer);
+
+ if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
+ efc_log_info(efct, "failed to reset firmware\n");
+ return -EIO;
+ }
+
+	efc_log_info(efct, "successfully reset firmware. Now resetting port\n");
+
+ efct_device_detach(efct);
+ return efct_device_attach(efct);
+}
+
+static int
+efct_request_firmware_update(struct efct *efct)
+{
+ int rc = 0;
+ u8 file_name[256], fw_change_status = 0;
+ const struct firmware *fw;
+ struct efct_hw_grp_hdr *fw_image;
+
+ snprintf(file_name, 256, "%s.grp", efct->model);
+
+ rc = request_firmware(&fw, file_name, &efct->pci->dev);
+ if (rc) {
+ efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
+ return rc;
+ }
+
+ fw_image = (struct efct_hw_grp_hdr *)fw->data;
+
+ if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
+ strnlen(fw_image->revision, 16))) {
+ efc_log_debug(efct,
+ "Skip update. Firmware is already up to date.\n");
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
+ efct->hw.sli.fw_name[0], fw_image->revision);
+
+ rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
+ if (rc) {
+ efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware updated successfully\n");
+ switch (fw_change_status) {
+ case 0x00:
+ efc_log_info(efct, "New firmware is active.\n");
+ break;
+ case 0x01:
+ efc_log_info(efct,
+ "System reboot needed to activate the new firmware\n");
+ break;
+ case 0x02:
+ case 0x03:
+ efc_log_info(efct,
+ "firmware reset to activate the new firmware\n");
+ efct_fw_reset(efct);
+ break;
+ default:
+ efc_log_info(efct, "Unexpected value change_status:%d\n",
+ fw_change_status);
+ break;
+ }
+
+exit:
+ release_firmware(fw);
+
+ return rc;
+}
+
+static void
+efct_device_free(struct efct *efct)
+{
+ if (efct) {
+ list_del(&efct->list_entry);
+ kfree(efct);
+ }
+}
+
+static int
+efct_device_interrupts_required(struct efct *efct)
+{
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc < 0)
+ return rc;
+
+ return efct->hw.config.n_eq;
+}
+
+static irqreturn_t
+efct_intr_thread(int irq, void *handle)
+{
+ struct efct_intr_context *intr_ctx = handle;
+ struct efct *efct = intr_ctx->efct;
+
+ efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+efct_intr_msix(int irq, void *handle)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static int
+efct_setup_msix(struct efct *efct, u32 num_intrs)
+{
+ int rc = 0, i;
+
+ if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
+ dev_err(&efct->pci->dev,
+ "%s : MSI-X not available\n", __func__);
+ return -EIO;
+ }
+
+ efct->n_msix_vec = num_intrs;
+
+ rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+
+ if (rc < 0) {
+ dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < num_intrs; i++) {
+ struct efct_intr_context *intr_ctx = NULL;
+
+ intr_ctx = &efct->intr_context[i];
+ intr_ctx->efct = efct;
+ intr_ctx->index = i;
+
+ rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
+ efct_intr_msix, efct_intr_thread, 0,
+ EFCT_DRIVER_NAME, intr_ctx);
+ if (rc) {
+ dev_err(&efct->pci->dev,
+ "Failed to register %d vector: %d\n", i, rc);
+ goto out;
+ }
+ }
+
+ return rc;
+
+out:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+
+ pci_free_irq_vectors(efct->pci);
+ return rc;
+}
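
The MSI-X setup above uses the kernel's split-handler model: request_threaded_irq() registers a hard handler (efct_intr_msix) that only returns IRQ_WAKE_THREAD, deferring all event-queue processing to the sleepable threaded handler (efct_intr_thread). A self-contained sketch of the same pattern with illustrative names:

	#include <linux/interrupt.h>

	/* Hard handler: runs in interrupt context and does no work itself. */
	static irqreturn_t demo_hardirq(int irq, void *data)
	{
		return IRQ_WAKE_THREAD;		/* wake the threaded half */
	}

	/* Threaded handler: runs in process context and may sleep. */
	static irqreturn_t demo_thread(int irq, void *data)
	{
		/* heavy per-vector processing goes here */
		return IRQ_HANDLED;
	}

	/* registration:
	 *   request_threaded_irq(irq, demo_hardirq, demo_thread, 0,
	 *			  "demo", ctx); */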
+
+static struct pci_device_id efct_pci_table[] = {
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
+ {} /* terminate list */
+};
+
+static int
+efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct efct *efct = NULL;
+ int rc;
+ u32 i, r;
+ int num_interrupts = 0;
+ int nid;
+
+ dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = pci_set_mwi(pdev);
+ if (rc) {
+ dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
+ goto mwi_out;
+ }
+
+ rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
+ goto req_regions_out;
+ }
+
+ /* Fetch the Numa node id for this device */
+ nid = dev_to_node(&pdev->dev);
+ if (nid < 0) {
+		dev_err(&pdev->dev, "Warning: NUMA node ID is %d\n", nid);
+ nid = 0;
+ }
+
+ /* Allocate efct */
+ efct = efct_device_alloc(nid);
+ if (!efct) {
+ dev_err(&pdev->dev, "Failed to allocate efct\n");
+ rc = -ENOMEM;
+ goto alloc_out;
+ }
+
+ efct->pci = pdev;
+ efct->numa_node = nid;
+
+ /* Map all memory BARs */
+ for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ efct->reg[r] = ioremap(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ r++;
+ }
+
+ /*
+ * If the 64-bit attribute is set, both this BAR and the
+ * next form the complete address. Skip processing the
+ * next BAR.
+ */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
+ i++;
+ }
+
+ pci_set_drvdata(pdev, efct);
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ goto dma_mask_out;
+ }
+
+ num_interrupts = efct_device_interrupts_required(efct);
+ if (num_interrupts < 0) {
+ efc_log_err(efct, "efct_device_interrupts_required failed\n");
+ rc = -1;
+ goto dma_mask_out;
+ }
+
+ /*
+ * Initialize MSIX interrupts, note,
+ * efct_setup_msix() enables the interrupt
+ */
+ rc = efct_setup_msix(efct, num_interrupts);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't setup msix\n");
+ goto dma_mask_out;
+ }
+ /* Disable interrupt for now */
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d disabled\n", i);
+ disable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ rc = efct_device_attach(efct);
+ if (rc)
+ goto attach_out;
+
+ return 0;
+
+attach_out:
+ efct_teardown_msix(efct);
+dma_mask_out:
+ pci_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+ efct_device_free(efct);
+alloc_out:
+ pci_release_regions(pdev);
+req_regions_out:
+ pci_clear_mwi(pdev);
+mwi_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void
+efct_pci_remove(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ u32 i;
+
+ if (!efct)
+ return;
+
+ efct_device_detach(efct);
+
+ efct_teardown_msix(efct);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+
+ efct_device_free(efct);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
+{
+ if (efct) {
+ efc_log_debug(efct,
+ "PCI channel disabled, preparing for reset\n");
+ efct_device_detach(efct);
+ /* Disable interrupt and pci device */
+ efct_teardown_msix(efct);
+ }
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_recover(struct efct *efct)
+{
+ if (efct) {
+ efc_log_debug(efct, "PCI channel preparing for recovery\n");
+ efct_hw_io_abort_all(&efct->hw);
+ }
+}
+
+/**
+ * efct_pci_io_error_detected - method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * device error detected handling routine, which will perform the proper
+ * error detected operation.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ pci_ers_result_t rc;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ efct_device_prep_for_recover(efct);
+ rc = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
+ case pci_channel_io_frozen:
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ case pci_channel_io_perm_failure:
+ efct_device_detach(efct);
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ }
+
+ return rc;
+}
+
+static pci_ers_result_t
+efct_pci_io_slot_reset(struct pci_dev *pdev)
+{
+ int rc;
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ efc_log_err(efct, "failed to enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /*
+ * Newer kernels' pci_restore_state() clears the device's
+ * saved_state flag, so the restored state must be saved again.
+ */
+
+ pci_save_state(pdev);
+
+ pci_set_master(pdev);
+
+ rc = efct_setup_msix(efct, efct->n_msix_vec);
+ if (rc)
+ efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
+ rc);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+ /* Bring device back online */
+ efct_device_attach(efct);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void
+efct_pci_io_resume(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+ /* Bring device back online */
+ efct_device_attach(efct);
+}
+
+MODULE_DEVICE_TABLE(pci, efct_pci_table);
+
+static struct pci_error_handlers efct_pci_err_handler = {
+ .error_detected = efct_pci_io_error_detected,
+ .slot_reset = efct_pci_io_slot_reset,
+ .resume = efct_pci_io_resume,
+};
+
+static struct pci_driver efct_pci_driver = {
+ .name = EFCT_DRIVER_NAME,
+ .id_table = efct_pci_table,
+ .probe = efct_pci_probe,
+ .remove = efct_pci_remove,
+ .err_handler = &efct_pci_err_handler,
+};
+
+static int __init efct_init(void)
+{
+ int rc;
+
+ rc = efct_device_init();
+ if (rc) {
+ pr_err("efct_device_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = pci_register_driver(&efct_pci_driver);
+ if (rc) {
+ pr_err("pci_register_driver failed rc=%d\n", rc);
+ efct_device_shutdown();
+ }
+
+ return rc;
+}
+
+static void __exit efct_exit(void)
+{
+ pci_unregister_driver(&efct_pci_driver);
+ efct_device_shutdown();
+}
+
+module_init(efct_init);
+module_exit(efct_exit);
+MODULE_VERSION(EFCT_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h
new file mode 100644
index 000000000000..0e3c931db7c2
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_DRIVER_H__)
+#define __EFCT_DRIVER_H__
+
+/***************************************************************************
+ * OS specific includes
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include "../include/efc_common.h"
+#include "../libefc/efclib.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+#include "efct_xport.h"
+
+#define EFCT_DRIVER_NAME "efct"
+#define EFCT_DRIVER_VERSION "1.0.0.0"
+
+/* EFCT_DEFAULT_FILTER-
+ * MRQ filter to segregate the IO flow.
+ */
+#define EFCT_DEFAULT_FILTER "0x01ff22ff,0,0,0"
+
+/* EFCT_OS_MAX_ISR_TIME_MSEC -
+ * maximum time driver code should spend in an interrupt
+ * or kernel thread context without yielding
+ */
+#define EFCT_OS_MAX_ISR_TIME_MSEC 1000
+
+#define EFCT_FC_MAX_SGL 64
+#define EFCT_FC_DIF_SEED 0
+
+/* Watermark */
+#define EFCT_WATERMARK_HIGH_PCT 90
+#define EFCT_WATERMARK_LOW_PCT 80
+#define EFCT_IO_WATERMARK_PER_INITIATOR 8
+
+#define EFCT_PCI_MAX_REGS 6
+#define MAX_PCI_INTERRUPTS 16
+
+struct efct_intr_context {
+ struct efct *efct;
+ u32 index;
+};
+
+struct efct {
+ struct pci_dev *pci;
+ void __iomem *reg[EFCT_PCI_MAX_REGS];
+
+ u32 n_msix_vec;
+ bool attached;
+ bool soft_wwn_enable;
+ u8 efct_req_fw_upgrade;
+ struct efct_intr_context intr_context[MAX_PCI_INTERRUPTS];
+ u32 numa_node;
+
+ char name[EFC_NAME_LENGTH];
+ u32 instance_index;
+ struct list_head list_entry;
+ struct efct_scsi_tgt tgt_efct;
+ struct efct_xport *xport;
+ struct efc *efcport;
+ struct Scsi_Host *shost;
+ int logmask;
+ u32 max_isr_time_msec;
+
+ const char *desc;
+
+ const char *model;
+
+ struct efct_hw hw;
+
+ u32 rq_selection_policy;
+ char *filter_def;
+ int topology;
+
+ /* Look up for target node */
+ struct xarray lookup;
+
+ /*
+ * Target IO timer value:
+ * Zero: target command timeout disabled.
+ * Non-zero: Timeout value, in seconds, for target commands
+ */
+ u32 target_io_timer_sec;
+
+ int speed;
+ struct dentry *sess_debugfs_dir;
+};
+
+#define FW_WRITE_BUFSIZE (64 * 1024)
+
+struct efct_fw_write_result {
+ struct completion done;
+ int status;
+ u32 actual_xfer;
+ u32 change_status;
+};
+
+extern struct list_head efct_devices;
+
+#endif /* __EFCT_DRIVER_H__ */
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
new file mode 100644
index 000000000000..5a5525054d71
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -0,0 +1,3580 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+struct efct_hw_link_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_host_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_fw_wr_cb_arg {
+ void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
+ void *arg;
+};
+
+struct efct_mbox_rqst_ctx {
+ int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
+ void *arg;
+};
+
+static int
+efct_hw_link_event_init(struct efct_hw *hw)
+{
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ hw->link.topology = SLI4_LINK_TOPO_NONE;
+ hw->link.medium = SLI4_LINK_MEDIUM_MAX;
+ hw->link.speed = 0;
+ hw->link.loop_map = NULL;
+ hw->link.fc_id = U32_MAX;
+
+ return 0;
+}
+
+static int
+efct_hw_read_max_dump_size(struct efct_hw *hw)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ struct efct *efct = hw->os;
+ int rc = 0;
+ struct sli4_rsp_cmn_set_dump_location *rsp;
+
+ /* attempt to determine the dump size for function 0 only. */
+ if (PCI_FUNC(efct->pci->devfn) != 0)
+ return rc;
+
+ if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
+ return -EIO;
+
+ rsp = (struct sli4_rsp_cmn_set_dump_location *)
+ (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc != 0) {
+ efc_log_debug(hw->os, "set dump location cmd failed\n");
+ return rc;
+ }
+
+ hw->dump_size =
+ le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+
+ efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
+
+ return rc;
+}
+
+static int
+__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_topology *read_topo =
+ (struct sli4_cmd_read_topology *)mqe;
+ u8 speed;
+ struct efc_domain_record drec = {0};
+ struct efct *efct = hw->os;
+
+ if (status || le16_to_cpu(read_topo->hdr.status)) {
+ efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(read_topo->hdr.status));
+ return -EIO;
+ }
+
+ switch (le32_to_cpu(read_topo->dw2_attentype) &
+ SLI4_READTOPO_ATTEN_TYPE) {
+ case SLI4_READ_TOPOLOGY_LINK_UP:
+ hw->link.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_DOWN:
+ hw->link.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
+ hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ break;
+ }
+
+ switch (read_topo->topology) {
+ case SLI4_READ_TOPO_NON_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_READ_TOPO_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_FC_AL;
+ if (hw->link.status == SLI4_LINK_STATUS_UP)
+ hw->link.loop_map = hw->loop_map.virt;
+ hw->link.fc_id = read_topo->acquired_al_pa;
+ break;
+ default:
+ hw->link.topology = SLI4_LINK_TOPO_MAX;
+ break;
+ }
+
+ hw->link.medium = SLI4_LINK_MEDIUM_FC;
+
+ speed = (le32_to_cpu(read_topo->currlink_state) &
+ SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
+ switch (speed) {
+ case SLI4_READ_TOPOLOGY_SPEED_1G:
+ hw->link.speed = 1 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_2G:
+ hw->link.speed = 2 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_4G:
+ hw->link.speed = 4 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_8G:
+ hw->link.speed = 8 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_16G:
+ hw->link.speed = 16 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_32G:
+ hw->link.speed = 32 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_64G:
+ hw->link.speed = 64 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_128G:
+ hw->link.speed = 128 * 1000;
+ break;
+ }
+
+ drec.speed = hw->link.speed;
+ drec.fc_id = hw->link.fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link(void *ctx, void *e)
+{
+ struct efct_hw *hw = ctx;
+ struct sli4_link_event *event = e;
+ struct efc_domain *d = NULL;
+ int rc = 0;
+ struct efct *efct = hw->os;
+
+ efct_hw_link_event_init(hw);
+
+ switch (event->status) {
+ case SLI4_LINK_STATUS_UP:
+
+ hw->link = *event;
+ efct->efcport->link_status = EFC_LINK_STATUS_UP;
+
+ if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
+ struct efc_domain_record drec = {0};
+
+ efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
+ event->speed);
+ drec.speed = event->speed;
+ drec.fc_id = event->fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
+ &drec);
+ } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
+ event->speed);
+
+ if (!sli_cmd_read_topology(&hw->sli, buf,
+ &hw->loop_map)) {
+ rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
+ __efct_read_topology_cb, NULL);
+ }
+
+ if (rc)
+ efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
+ } else {
+ efc_log_info(hw->os, "%s(%#x), speed is %d\n",
+ "Link Up, unsupported topology ",
+ event->topology, event->speed);
+ }
+ break;
+ case SLI4_LINK_STATUS_DOWN:
+ efc_log_info(hw->os, "Link down\n");
+
+ hw->link.status = event->status;
+ efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
+
+ d = efct->efcport->domain;
+ if (d)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
+ break;
+ default:
+ efc_log_debug(hw->os, "unhandled link status %#x\n",
+ event->status);
+ break;
+ }
+
+ return 0;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
+{
+ u32 i, max_sgl, cpus;
+
+ if (hw->hw_setup_called)
+ return 0;
+
+ /*
+ * efct_hw_init() relies on NULL pointers indicating that a structure
+ * needs allocation. If a structure is non-NULL, efct_hw_init() won't
+ * free/realloc that memory
+ */
+ memset(hw, 0, sizeof(struct efct_hw));
+
+ hw->hw_setup_called = true;
+
+ hw->os = os;
+
+ mutex_init(&hw->bmbx_lock);
+ spin_lock_init(&hw->cmd_lock);
+ INIT_LIST_HEAD(&hw->cmd_head);
+ INIT_LIST_HEAD(&hw->cmd_pending);
+ hw->cmd_head_count = 0;
+
+ /* Create mailbox command ctx pool */
+ hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_command_ctx));
+ if (!hw->cmd_ctx_pool) {
+ efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
+ return -EIO;
+ }
+
+ /* Create mailbox request ctx pool for library callback */
+ hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_mbox_rqst_ctx));
+ if (!hw->mbox_rqst_pool) {
+ efc_log_err(hw->os, "failed to allocate mbox request pool\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&hw->io_lock);
+ INIT_LIST_HEAD(&hw->io_inuse);
+ INIT_LIST_HEAD(&hw->io_free);
+ INIT_LIST_HEAD(&hw->io_wait_free);
+
+ atomic_set(&hw->io_alloc_failed_count, 0);
+
+ hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
+ if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
+ efc_log_err(hw->os, "SLI setup failed\n");
+ return -EIO;
+ }
+
+ efct_hw_link_event_init(hw);
+
+ sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
+
+ /*
+ * Set all the queue sizes to the maximum allowed.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
+ hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
+ /*
+ * Adjust the size of the WQs so that the CQ is twice as big as
+ * the WQ to allow for 2 completions per IO. This allows us to
+ * handle multi-phase as well as aborts.
+ */
+ hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
+
+ /*
+ * The RQ assignment for RQ pair mode.
+ */
+
+ hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
+ hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
+
+ cpus = num_possible_cpus();
+ hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
+
+ max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
+ max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
+ hw->config.n_sgl = max_sgl;
+
+ (void)efct_hw_read_max_dump_size(hw);
+
+ return 0;
+}
+
+static void
+efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
+{
+ efc_log_info(hw->os,
+ "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
+ j, hw->config.filter_def[j], i, id);
+}
+
+static inline void
+efct_hw_init_free_io(struct efct_hw_io *io)
+{
+ /*
+ * Set io->done to NULL, to avoid any callbacks, should
+ * a completion be received for one of these IOs
+ */
+ io->done = NULL;
+ io->abort_done = NULL;
+ io->status_saved = false;
+ io->abort_in_progress = false;
+ io->type = 0xFFFF;
+ io->wq = NULL;
+}
+
+static bool efct_hw_iotype_is_originator(u16 io_type)
+{
+ switch (io_type) {
+ case EFCT_HW_FC_CT:
+ case EFCT_HW_ELS_REQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* Restore the default */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+}
+
+static void
+efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 len = 0;
+ u32 ext = 0;
+
+ /* clear xbusy flag if WCQE[XB] is clear */
+ if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
+ io->xbusy = false;
+
+ /* get extended CQE status */
+ switch (io->type) {
+ case EFCT_HW_BLS_ACC:
+ case EFCT_HW_BLS_RJT:
+ break;
+ case EFCT_HW_ELS_REQ:
+ sli_fc_els_did(&hw->sli, cqe, &ext);
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_ELS_RSP:
+ case EFCT_HW_FC_CT_RSP:
+ break;
+ case EFCT_HW_FC_CT:
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_WRITE:
+ case EFCT_HW_IO_TARGET_READ:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ break;
+ case EFCT_HW_IO_DNRX_REQUEUE:
+ /* release the count for re-posting the buffer */
+ /* efct_hw_io_free(hw, io); */
+ break;
+ default:
+ efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
+ io->type, io->indicator);
+ break;
+ }
+ if (status) {
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ /*
+ * If this is an originator IO and XB is set, then issue
+ * an abort for the IO from within the HW
+ */
+ if (efct_hw_iotype_is_originator(io->type) &&
+ wcqe->flags & SLI4_WCQE_XB) {
+ int rc;
+
+ efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+
+ /*
+ * Because targets may send a response when the IO
+ * completes using the same XRI, we must wait for the
+ * XRI_ABORTED CQE to issue the IO callback
+ */
+ rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
+ if (rc == 0) {
+ /*
+ * latch status to return after abort is
+ * complete
+ */
+ io->status_saved = true;
+ io->saved_status = status;
+ io->saved_ext = ext;
+ io->saved_len = len;
+ goto exit_efct_hw_wq_process_io;
+ } else if (rc == -EINPROGRESS) {
+ /*
+ * Already being aborted by someone else (ABTS
+ * perhaps). Just return original
+ * error.
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x\n",
+ "abort in progress xri=",
+ io->indicator, io->reqtag);
+
+ } else {
+ /* Failed to abort for some other reason, log
+ * error
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
+ "Failed to abort xri=",
+ io->indicator, io->reqtag, rc);
+ }
+ }
+ }
+
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ if (io->status_saved) {
+ /* use latched status if exists */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ }
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+ done(io, len, status, ext, io->arg);
+ }
+
+exit_efct_hw_wq_process_io:
+ return;
+}
+
+static int
+efct_hw_setup_io(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct efct_hw_io *io = NULL;
+ uintptr_t xfer_virt = 0;
+ uintptr_t xfer_phys = 0;
+ u32 index;
+ bool new_alloc = true;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ if (!hw->io) {
+ hw->io = kcalloc(hw->config.n_io, sizeof(io), GFP_KERNEL);
+ if (!hw->io)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->config.n_io; i++) {
+ hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!hw->io[i])
+ goto error;
+ }
+
+ /* Create WQE buffs for IO */
+ hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
+ GFP_KERNEL);
+ if (!hw->wqe_buffs)
+ goto error;
+
+ } else {
+ /* re-use existing IOs, including SGLs */
+ new_alloc = false;
+ }
+
+ if (new_alloc) {
+ dma = &hw->xfer_rdy;
+ dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys, GFP_KERNEL);
+ if (!dma->virt)
+ return -ENOMEM;
+ }
+ xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
+ xfer_phys = hw->xfer_rdy.phys;
+
+ /* Initialize the pool of HW IO objects */
+ for (i = 0; i < hw->config.n_io; i++) {
+ struct hw_wq_callback *wqcb;
+
+ io = hw->io[i];
+
+ /* initialize IO fields */
+ io->hw = hw;
+
+ /* Assign a WQE buff */
+ io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
+
+ /* Allocate the request tag for this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+ io->reqtag = wqcb->instance_index;
+
+ /* Now for the fields that are initialized on each free */
+ efct_hw_init_free_io(io);
+
+ /* The XB flag isn't cleared on IO free, so init to zero */
+ io->xbusy = 0;
+
+ if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
+ &io->indicator, &index)) {
+ efc_log_err(hw->os,
+ "sli_resource_alloc failed @ %d\n", i);
+ return -ENOMEM;
+ }
+
+ if (new_alloc) {
+ dma = &io->def_sgl;
+ dma->size = hw->config.n_sgl *
+ sizeof(struct sli4_sge);
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys,
+ GFP_KERNEL);
+ if (!dma->virt) {
+ efc_log_err(hw->os, "dma_alloc fail %d\n", i);
+ memset(&io->def_sgl, 0,
+ sizeof(struct efc_dma));
+ return -ENOMEM;
+ }
+ }
+ io->def_sgl_count = hw->config.n_sgl;
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+
+ if (hw->xfer_rdy.size) {
+ io->xfer_rdy.virt = (void *)xfer_virt;
+ io->xfer_rdy.phys = xfer_phys;
+ io->xfer_rdy.size = sizeof(struct fcp_txrdy);
+
+ xfer_virt += sizeof(struct fcp_txrdy);
+ xfer_phys += sizeof(struct fcp_txrdy);
+ }
+ }
+
+ return 0;
+error:
+ for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+
+ kfree(hw->io);
+ hw->io = NULL;
+
+ return -ENOMEM;
+}
+
+static int
+efct_hw_init_prereg_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ struct efct_hw_io *io = NULL;
+ u8 cmd[SLI4_BMBX_SIZE];
+ int rc = 0;
+ u32 n_rem;
+ u32 n = 0;
+ u32 sgls_per_request = 256;
+ struct efc_dma **sgls = NULL;
+ struct efc_dma req;
+ struct efct *efct = hw->os;
+
+ sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
+ if (!sgls)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(struct efc_dma));
+ req.size = 32 + sgls_per_request * 16;
+ req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
+ GFP_KERNEL);
+ if (!req.virt) {
+ kfree(sgls);
+ return -ENOMEM;
+ }
+
+ for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
+ /* Copy addresses of the SGLs into the local sgls[] array;
+ * break out if the xri values are not contiguous.
+ */
+ u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
+
+ for (n = 0; n < min; n++) {
+ /* Check that we have contiguous xri values */
+ if (n > 0) {
+ if (hw->io[idx + n]->indicator !=
+ hw->io[idx + n - 1]->indicator + 1)
+ break;
+ }
+
+ sgls[n] = hw->io[idx + n]->sgl;
+ }
+
+ if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
+ hw->io[idx]->indicator, n, sgls, NULL, &req)) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
+ if (rc) {
+ efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
+ break;
+ }
+
+ /* Add to tail if successful */
+ for (i = 0; i < n; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+ }
+
+ dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
+ memset(&req, 0, sizeof(struct efc_dma));
+ kfree(sgls);
+
+ return rc;
+}
+
+static int
+efct_hw_init_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ bool prereg = false;
+ struct efct_hw_io *io = NULL;
+ int rc = 0;
+
+ prereg = hw->sli.params.sgl_pre_registered;
+
+ if (prereg)
+ return efct_hw_init_prereg_io(hw);
+
+ for (i = 0; i < hw->config.n_io; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
+
+ memset(&param, 0, sizeof(param));
+ param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
+ fdt_xfer_hint, rc);
+ else
+ efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
+ le32_to_cpu(param.fdt_xfer_hint));
+
+ return rc;
+}
+
+static int
+efct_hw_config_rq(struct efct_hw *hw)
+{
+ u32 min_rq_count, i, rc;
+ struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "using REG_FCFI standard\n");
+
+ /*
+ * Set the filter match/mask values from hw's
+ * filter_def values
+ */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_cfg[i].rq_id = cpu_to_le16(0xffff);
+ rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
+ rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ /*
+ * Update the rq_id's of the FCF configuration
+ * (don't update more than the number of rq_cfg
+ * elements)
+ */
+ min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
+ hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
+ for (i = 0; i < min_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+ u32 j;
+
+ for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
+ u32 mask = (rq->filter_mask != 0) ?
+ rq->filter_mask : 1;
+
+ if (!(mask & (1U << j)))
+ continue;
+
+ rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
+ efct_logfcfi(hw, j, i, rq->hdr->id);
+ }
+ }
+
+ rc = -EIO;
+ if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "FCFI registration failed\n");
+ return rc;
+ }
+ hw->fcf_indicator =
+ le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
+
+ return rc;
+}
+
+static int
+efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
+{
+ u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
+ struct hw_rq *rq;
+ struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
+ struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ u32 rc, i;
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ goto issue_cmd;
+
+ /* Set the filter match/mask values from hw's filter_def values */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_filter[i].rq_id = cpu_to_le16(0xffff);
+ rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
+ rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ rq = hw->hw_rq[0];
+ rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
+ rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
+
+ mrq_bitmask = 0x2;
+issue_cmd:
+ efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
+ hw->hw_rq_count, hw->config.rq_selection_policy, mode);
+ /* Invoke REG_FCFI_MRQ */
+ rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
+ hw->config.rq_selection_policy, mrq_bitmask,
+ hw->hw_mrq_count, rq_filter);
+ if (rc) {
+ efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
+
+ if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
+ efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
+ rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
+ return -EIO;
+ }
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
+
+ return 0;
+}
+
+static void
+efct_hw_queue_hash_add(struct efct_queue_hash *hash,
+ u16 id, u16 index)
+{
+ u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the number of queues, we
+ * never have to worry about an infinite loop.
+ */
+ while (hash[hash_index].in_use)
+ hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /* not used, claim the entry */
+ hash[hash_index].id = id;
+ hash[hash_index].in_use = true;
+ hash[hash_index].index = index;
+}
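+
+/*
+ * Illustrative walk-through (not part of the driver): with the open
+ * addressing above, two ids that collide on a slot occupy consecutive
+ * entries. Assuming, hypothetically, EFCT_HW_Q_HASH_SIZE == 128:
+ *
+ *	efct_hw_queue_hash_add(hash, 0x005, 0);	lands in slot 5
+ *	efct_hw_queue_hash_add(hash, 0x085, 1);	0x085 & 127 == 5, in use,
+ *						so it probes to slot 6
+ *
+ * efct_hw_queue_hash_find() replays the same probe sequence, so both
+ * ids still resolve to their queue indexes.
+ */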
+
+static int
+efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_health_check param;
+ u32 health_check_flag = 0;
+
+ memset(&param, 0, sizeof(param));
+
+ if (enable)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
+
+ if (query)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
+
+ param.health_check_dword = cpu_to_le32(health_check_flag);
+
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
+ else
+ efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
+
+ return rc;
+}
+
+int
+efct_hw_init(struct efct_hw *hw)
+{
+ int rc;
+ u32 i = 0;
+ int rem_count;
+ unsigned long flags = 0;
+ struct efct_hw_io *temp;
+ struct efc_dma *dma;
+
+ /*
+ * Make sure the command lists are empty. If this is start-of-day,
+ * they'll be empty since they were just initialized in efct_hw_setup.
+ * If we've just gone through a reset, the command and command pending
+ * lists should have been cleaned up as part of the reset
+ * (efct_hw_reset()).
+ */
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on cmd list\n");
+ return -EIO;
+ }
+ if (!list_empty(&hw->cmd_pending)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on pending list\n");
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ /* Free RQ buffers if previously allocated */
+ efct_hw_rx_free(hw);
+
+ /*
+ * The IO queues must be initialized here for the reset case. The
+ * efct_hw_init_io() function will re-add the IOs to the free list.
+ * The cmd_head list should be OK since we free all entries in
+ * efct_hw_command_cancel() that is called in the efct_hw_reset().
+ */
+
+ /* If we are in this function due to a reset, there may be stale items
+ * on lists that need to be removed. Clean them up.
+ */
+ rem_count = 0;
+ while ((!list_empty(&hw->io_wait_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_inuse))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
+ rem_count);
+
+ /* If MRQ is not required, make sure we don't request the feature. */
+ if (hw->config.n_rq == 1)
+ hw->sli.features &= (~SLI4_REQFEAT_MRQP);
+
+ if (sli_init(&hw->sli)) {
+ efc_log_err(hw->os, "SLI failed to initialize\n");
+ return -EIO;
+ }
+
+ if (hw->sliport_healthcheck) {
+ rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
+ if (rc != 0) {
+ efc_log_err(hw->os, "Enable port Health check fail\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Set FDT transfer hint, only works on Lancer
+ */
+ if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
+ /*
+ * Non-fatal error. In particular, we can disregard failure to
+ * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
+ * that do not support EFCT_HW_FDT_XFER_HINT feature.
+ */
+ efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
+ }
+
+ /* zero the hashes */
+ memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
+ efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
+ efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
+ efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
+
+ rc = efct_hw_init_queues(hw);
+ if (rc)
+ return rc;
+
+ rc = efct_hw_map_wq_cpu(hw);
+ if (rc)
+ return rc;
+
+ /* Allocate and post RQ buffers */
+ rc = efct_hw_rx_allocate(hw);
+ if (rc) {
+ efc_log_err(hw->os, "rx_allocate failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_rx_post(hw);
+ if (rc) {
+ efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
+ return rc;
+ }
+
+ if (hw->config.n_eq == 1) {
+ rc = efct_hw_config_rq(hw);
+ if (rc) {
+ efc_log_err(hw->os, "config rq failed %d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Allocate the WQ request tag pool, if not previously allocated
+ * (the request tag value is 16 bits, thus the pool allocation size
+ * of 64k)
+ */
+ hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
+ if (!hw->wq_reqtag_pool) {
+ efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = efct_hw_setup_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO allocation failure\n");
+ return rc;
+ }
+
+ rc = efct_hw_init_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO initialization failure\n");
+ return rc;
+ }
+
+ dma = &hw->loop_map;
+ dma->size = SLI4_MIN_LOOP_MAP_BYTES;
+ dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
+ GFP_KERNEL);
+ if (!dma->virt)
+ return -EIO;
+
+ /*
+ * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
+ * entries
+ */
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_arm(&hw->sli, &hw->eq[i], true);
+
+ /*
+ * Initialize RQ hash
+ */
+ for (i = 0; i < hw->rq_count; i++)
+ efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
+
+ /*
+ * Initialize WQ hash
+ */
+ for (i = 0; i < hw->wq_count; i++)
+ efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
+
+ /*
+ * Arming the CQ allows (e.g.) MQ completions to write CQ entries
+ */
+ for (i = 0; i < hw->cq_count; i++) {
+ efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
+ sli_queue_arm(&hw->sli, &hw->cq[i], true);
+ }
+
+ /* Set RQ process limit*/
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
+ }
+
+ /* record the fact that the queues are functional */
+ hw->state = EFCT_HW_STATE_ACTIVE;
+ /*
+ * Allocate a HW IO for send frame.
+ */
+ hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
+ if (!hw->hw_wq[0]->send_frame_io)
+ efc_log_err(hw->os, "alloc for send_frame_io failed\n");
+
+ /* Initialize send frame sequence id */
+ atomic_set(&hw->send_frame_seq_id, 0);
+
+ return 0;
+}
+
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value)
+{
+ int rc = 0;
+ char *p = NULL;
+ char *token;
+ u32 idx = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
+ hw->config.filter_def[idx] = 0;
+
+ p = kstrdup(value, GFP_KERNEL);
+ if (!p || !*p) {
+ efc_log_err(hw->os, "filter string is empty or alloc failed\n");
+ kfree(p);
+ return -ENOMEM;
+ }
+
+ idx = 0;
+ while ((token = strsep(&p, ",")) && *token) {
+ if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
+ efc_log_err(hw->os, "kstrtoint failed\n");
+
+ if (!p || !*p)
+ break;
+
+ if (idx == ARRAY_SIZE(hw->config.filter_def))
+ break;
+ }
+ kfree(p);
+
+ return rc;
+}
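+
+/*
+ * Worked example (illustrative): with the default filter string
+ * EFCT_DEFAULT_FILTER = "0x01ff22ff,0,0,0", efct_hw_parse_filter()
+ * yields filter_def[0] = 0x01ff22ff and filter_def[1..3] = 0.
+ * efct_hw_config_rq() then unpacks that word byte-wise:
+ *
+ *	r_ctl_mask  = 0xff	(bits  7:0)
+ *	r_ctl_match = 0x22	(bits 15:8)
+ *	type_mask   = 0xff	(bits 23:16)
+ *	type_match  = 0x01	(bits 31:24)
+ *
+ * so only frames whose R_CTL and TYPE match that pattern are steered
+ * to the corresponding RQ.
+ */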
+
+u64
+efct_get_wwnn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwnn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+u64
+efct_get_wwpn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwpn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+static struct efc_hw_rq_buffer *
+efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
+ u32 size)
+{
+ struct efct *efct = hw->os;
+ struct efc_hw_rq_buffer *rq_buf = NULL;
+ struct efc_hw_rq_buffer *prq;
+ u32 i;
+
+ if (!count)
+ return NULL;
+
+ rq_buf = kcalloc(count, sizeof(*rq_buf), GFP_KERNEL);
+ if (!rq_buf)
+ return NULL;
+
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ prq->rqindex = rqindex;
+ prq->dma.size = size;
+ prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ prq->dma.size,
+ &prq->dma.phys,
+ GFP_KERNEL);
+ if (!prq->dma.virt) {
+ efc_log_err(hw->os, "DMA allocation failed\n");
+ /* unwind the buffers allocated so far */
+ while (i--)
+ dma_free_coherent(&efct->pci->dev, rq_buf[i].dma.size,
+ rq_buf[i].dma.virt, rq_buf[i].dma.phys);
+ kfree(rq_buf);
+ return NULL;
+ }
+ }
+ return rq_buf;
+}
+
+static void
+efct_hw_rx_buffer_free(struct efct_hw *hw,
+ struct efc_hw_rq_buffer *rq_buf,
+ u32 count)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ struct efc_hw_rq_buffer *prq;
+
+ if (rq_buf) {
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ dma_free_coherent(&efct->pci->dev,
+ prq->dma.size, prq->dma.virt,
+ prq->dma.phys);
+ memset(&prq->dma, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(rq_buf);
+ }
+}
+
+int
+efct_hw_rx_allocate(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ int rc = 0;
+ u32 rqindex = 0;
+ u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
+ u32 payload_size = hw->config.rq_default_buffer_size;
+
+ rqindex = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ /* Allocate header buffers */
+ rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ hdr_size);
+ if (!rq->hdr_buf) {
+ efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
+ i, rq->hdr->id, rq->entry_count, hdr_size);
+
+ rqindex++;
+
+ /* Allocate payload buffers */
+ rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ payload_size);
+ if (!rq->payload_buf) {
+ efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
+ i, rq->data->id, rq->entry_count, payload_size);
+ rqindex++;
+ }
+
+ return rc ? -EIO : 0;
+}
+
+int
+efct_hw_rx_post(struct efct_hw *hw)
+{
+ u32 i;
+ u32 idx;
+ u32 rq_idx;
+ int rc = 0;
+
+ if (!hw->seq_pool) {
+ u32 count = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++)
+ count += hw->hw_rq[i]->entry_count;
+
+ hw->seq_pool = kmalloc_array(count,
+ sizeof(struct efc_hw_sequence), GFP_KERNEL);
+ if (!hw->seq_pool)
+ return -ENOMEM;
+ }
+
+ /*
+ * In RQ pair mode, we MUST post the header and payload buffer at the
+ * same time.
+ */
+ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
+ struct hw_rq *rq = hw->hw_rq[rq_idx];
+
+ for (i = 0; i < rq->entry_count - 1; i++) {
+ struct efc_hw_sequence *seq;
+
+ seq = hw->seq_pool + idx;
+ idx++;
+ seq->header = &rq->hdr_buf[i];
+ seq->payload = &rq->payload_buf[i];
+ rc = efct_hw_sequence_free(hw, seq);
+ if (rc)
+ break;
+ }
+ if (rc)
+ break;
+ }
+
+ if (rc) {
+ kfree(hw->seq_pool);
+ hw->seq_pool = NULL;
+ }
+
+ return rc;
+}
+
+void
+efct_hw_rx_free(struct efct_hw *hw)
+{
+ u32 i;
+
+ /* Free hw_rq buffers */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ if (rq) {
+ efct_hw_rx_buffer_free(hw, rq->hdr_buf,
+ rq->entry_count);
+ rq->hdr_buf = NULL;
+ efct_hw_rx_buffer_free(hw, rq->payload_buf,
+ rq->entry_count);
+ rq->payload_buf = NULL;
+ }
+ }
+}
+
+static int
+efct_hw_cmd_submit_pending(struct efct_hw *hw)
+{
+ int rc = 0;
+
+ /* Assumes lock held */
+
+ /* Only submit MQE if there's room */
+ while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
+ !list_empty(&hw->cmd_pending)) {
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_pending,
+ struct efct_command_ctx, list_entry);
+ if (!ctx)
+ break;
+
+ list_del_init(&ctx->list_entry);
+
+ list_add_tail(&ctx->list_entry, &hw->cmd_head);
+ hw->cmd_head_count++;
+ if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
+ efc_log_debug(hw->os, "sli_mq_write failed\n");
+ rc = -EIO;
+ break;
+ }
+ }
+ return rc;
+}
+
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
+{
+ int rc = -EIO;
+ unsigned long flags = 0;
+ void *bmbx = NULL;
+
+ /*
+ * If the chip is in an error state (UE'd) then reject this mailbox
+ * command.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
+ efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(&hw->sli),
+ sli_reg_read_err1(&hw->sli),
+ sli_reg_read_err2(&hw->sli));
+
+ return -EIO;
+ }
+
+ /*
+ * Send a mailbox command to the hardware, and either wait for
+ * a completion (EFCT_CMD_POLL) or get an optional asynchronous
+ * completion (EFCT_CMD_NOWAIT).
+ */
+
+ if (opts == EFCT_CMD_POLL) {
+ mutex_lock(&hw->bmbx_lock);
+ bmbx = hw->sli.bmbx.virt;
+
+ memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
+
+ if (sli_bmbx_command(&hw->sli) == 0) {
+ rc = 0;
+ memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
+ }
+ mutex_unlock(&hw->bmbx_lock);
+ } else if (opts == EFCT_CMD_NOWAIT) {
+ struct efct_command_ctx *ctx = NULL;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "Can't send command, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -ENOSPC;
+
+ memset(ctx, 0, sizeof(struct efct_command_ctx));
+
+ if (cb) {
+ ctx->cb = cb;
+ ctx->arg = arg;
+ }
+
+ memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
+ ctx->ctx = hw;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /* Add to pending list */
+ INIT_LIST_HEAD(&ctx->list_entry);
+ list_add_tail(&ctx->list_entry, &hw->cmd_pending);
+
+ /* Submit as much of the pending list as we can */
+ rc = efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ }
+
+ return rc;
+}
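+
+/*
+ * Usage sketch (illustrative; my_topo_done is a hypothetical caller
+ * callback): an EFCT_CMD_POLL command blocks on the bootstrap mailbox
+ * and the response is copied back into the caller's buffer, while an
+ * EFCT_CMD_NOWAIT command is queued on the MQ and completes through
+ * the callback:
+ *
+ *	static int my_topo_done(struct efct_hw *hw, int status, u8 *mqe,
+ *				void *arg)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	u8 buf[SLI4_BMBX_SIZE];
+ *
+ *	sli_cmd_read_topology(&hw->sli, buf, &hw->loop_map);
+ *	efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ *	- or -
+ *	efct_hw_command(hw, buf, EFCT_CMD_NOWAIT, my_topo_done, NULL);
+ */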
+
+static int
+efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
+ size_t size)
+{
+ struct efct_command_ctx *ctx = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+ list_del_init(&ctx->list_entry);
+ }
+ if (!ctx) {
+ efc_log_err(hw->os, "no command context\n");
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ return -EIO;
+ }
+
+ hw->cmd_head_count--;
+
+ /* Post any pending requests */
+ efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ if (ctx->cb) {
+ memcpy(ctx->buf, mqe, size);
+ ctx->cb(hw, status, ctx->buf, ctx->arg);
+ }
+
+ mempool_free(ctx, hw->cmd_ctx_pool);
+
+ return 0;
+}
+
+static int
+efct_hw_mq_process(struct efct_hw *hw,
+ int status, struct sli4_queue *mq)
+{
+ u8 mqe[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_mq_read(&hw->sli, mq, mqe);
+ if (!rc)
+ rc = efct_hw_command_process(hw, status, mqe, mq->size);
+
+ return rc;
+}
+
+static int
+efct_hw_command_cancel(struct efct_hw *hw)
+{
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /*
+ * Manually clean up remaining commands. Note: since this calls
+ * efct_hw_command_process(), we'll also process the cmd_pending
+ * list, so no need to manually clean that out.
+ */
+ while (!list_empty(&hw->cmd_head)) {
+ u8 mqe[SLI4_BMBX_SIZE] = { 0 };
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+
+ efc_log_debug(hw->os, "hung command %08x\n",
+ !ctx ? U32_MAX : *((u32 *)ctx->buf));
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ return rc;
+}
+
+static void
+efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw->os->efcport, status, mqe,
+ ctx->arg);
+
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ }
+}
+
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx;
+ struct efct *efct = base;
+ struct efct_hw *hw = &efct->hw;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd
+ * buffer); it must stay allocated until the command completes,
+ * since the mbox cmd submission may be queued and executed later.
+ */
+ ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -EIO;
+
+ ctx->callback = cb;
+ ctx->arg = arg;
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
+ if (rc) {
+ efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline struct efct_hw_io *
+_efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+
+ if (!list_empty(&hw->io_free)) {
+ io = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del(&io->list_entry);
+ }
+ if (io) {
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_inuse);
+ io->state = EFCT_HW_IO_STATE_INUSE;
+ io->abort_reqtag = U32_MAX;
+ io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
+ if (!io->wq) {
+ efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
+ raw_smp_processor_id());
+ io->wq = hw->hw_wq[0];
+ }
+ kref_init(&io->ref);
+ io->release = efct_hw_io_free_internal;
+ } else {
+ atomic_add(1, &hw->io_alloc_failed_count);
+ }
+
+ return io;
+}
+
+struct efct_hw_io *
+efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ io = _efct_hw_io_alloc(hw);
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+
+ return io;
+}
+
+static void
+efct_hw_io_free_move_correct_list(struct efct_hw *hw,
+ struct efct_hw_io *io)
+{
+ /*
+ * When an IO is freed, depending on the exchange busy flag,
+ * move it to the correct list.
+ */
+ if (io->xbusy) {
+ /*
+ * add to wait_free list and wait for XRI_ABORTED CQEs to clean
+ * up
+ */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_wait_free);
+ io->state = EFCT_HW_IO_STATE_WAIT_FREE;
+ } else {
+ /* IO not busy, add to free list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ io->state = EFCT_HW_IO_STATE_FREE;
+ }
+}
+
+static inline void
+efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* initialize IO fields */
+ efct_hw_init_free_io(io);
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+}
+
+void
+efct_hw_io_free_internal(struct kref *arg)
+{
+ unsigned long flags = 0;
+ struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
+ struct efct_hw *hw = io->hw;
+
+ /* perform common cleanup */
+ efct_hw_io_free_common(hw, io);
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ /* remove from in-use list */
+ if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+int
+efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ return kref_put(&io->ref, io->release);
+}
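+
+/*
+ * Lifecycle sketch (illustrative): an HW IO carries one reference from
+ * efct_hw_io_alloc(); efct_hw_io_free() drops it, and the kref release
+ * moves the IO back to io_free, or to io_wait_free when the exchange
+ * is still busy (xbusy) and an XRI_ABORTED CQE is outstanding:
+ *
+ *	struct efct_hw_io *io = efct_hw_io_alloc(hw);
+ *
+ *	if (io) {
+ *		... build SGL, submit WQE, handle completion ...
+ *		efct_hw_io_free(hw, io);
+ *	}
+ */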
+
+struct efct_hw_io *
+efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
+{
+ u32 ioindex;
+
+ ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
+ return hw->io[ioindex];
+}
+
+int
+efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
+ enum efct_hw_io_type type)
+{
+ struct sli4_sge *data = NULL;
+ u32 i = 0;
+ u32 skips = 0;
+ u32 sge_flags = 0;
+
+ if (!io) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ /* Clear / reset the scatter-gather list */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+ io->first_data_sge = 0;
+
+ memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+ io->n_sge = 0;
+ io->sge_offset = 0;
+
+ io->type = type;
+
+ data = io->sgl->virt;
+
+ /*
+ * Some IO types have underlying hardware requirements on the order
+ * of SGEs. Process all special entries here.
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE:
+
+ /* populate host resident XFER_RDY buffer */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ data->buffer_address_high =
+ cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
+ data->buffer_address_low =
+ cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
+ data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+
+ skips = EFCT_TARGET_WRITE_SKIPS;
+
+ io->n_sge = 1;
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ /*
+ * For FCP_TSEND64, the first 2 entries are SKIP SGE's
+ */
+ skips = EFCT_TARGET_READ_SKIPS;
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ /*
+ * No skips, etc. for FCP_TRSP64
+ */
+ break;
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ return -EIO;
+ }
+
+ /*
+ * Write skip entries
+ */
+ for (i = 0; i < skips; i++) {
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+ }
+
+ io->n_sge += skips;
+
+ /*
+ * Set last
+ */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ return 0;
+}
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length)
+{
+ struct sli4_sge *data = NULL;
+ u32 sge_flags = 0;
+
+ if (!io || !addr || !length) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p addr=%lx length=%u\n",
+ hw, io, addr, length);
+ return -EIO;
+ }
+
+ if (length > hw->sli.sge_supported_length) {
+ efc_log_err(hw->os,
+ "length of SGE %d bigger than allowed %d\n",
+ length, hw->sli.sge_supported_length);
+ return -EIO;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
+ sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
+ sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
+
+ data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
+ data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
+ data->buffer_length = cpu_to_le32(length);
+
+ /*
+ * Always assume this is the last entry and mark as such.
+ * If this is not the first entry unset the "last SGE"
+ * indication for the previous entry
+ */
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ if (io->n_sge) {
+ sge_flags = le32_to_cpu(data[-1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_LAST;
+ data[-1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ /* Set first_data_bde if not previously set */
+ if (io->first_data_sge == 0)
+ io->first_data_sge = io->n_sge;
+
+ io->sge_offset += length;
+ io->n_sge++;
+
+ return 0;
+}
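+
+/*
+ * Usage sketch (illustrative; the dma[] buffers are assumed to exist):
+ * a target read SGL is built by initializing the SGL for the IO type,
+ * which writes the required leading SKIP entries, then appending one
+ * data SGE per buffer; each add marks the new entry LAST and clears
+ * LAST on its predecessor:
+ *
+ *	efct_hw_io_init_sges(hw, io, EFCT_HW_IO_TARGET_READ);
+ *	efct_hw_io_add_sge(hw, io, dma[0].phys, dma[0].size);
+ *	efct_hw_io_add_sge(hw, io, dma[1].phys, dma[1].size);
+ */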
+
+void
+efct_hw_io_abort_all(struct efct_hw *hw)
+{
+ struct efct_hw_io *io_to_abort = NULL;
+ struct efct_hw_io *next_io = NULL;
+
+ list_for_each_entry_safe(io_to_abort, next_io,
+ &hw->io_inuse, list_entry) {
+ efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
+ }
+}
+
+static void
+efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ u32 ext = 0;
+ u32 len = 0;
+ struct hw_wq_callback *wqcb;
+
+ /*
+ * For IOs that were aborted internally, we may need to issue the
+ * callback here, depending on whether an XRI_ABORTED CQE is
+ * expected or not. If the status is Local Reject/No XRI, then
+ * issue the callback now.
+ */
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
+ ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ /*
+ * Use the latched status, as it is always saved for an internal
+ * abort. Note: we won't have both a done and an abort_done
+ * function, so don't worry about clobbering the len, status,
+ * and ext fields.
+ */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ done(io, len, status, ext, io->arg);
+ }
+
+ if (io->abort_done) {
+ efct_hw_done_t done = io->abort_done;
+
+ io->abort_done = NULL;
+ done(io, len, status, ext, io->abort_arg);
+ }
+
+ /* clear abort bit to indicate abort is complete */
+ io->abort_in_progress = false;
+
+ /* Free the WQ callback */
+ if (io->abort_reqtag == U32_MAX) {
+ efc_log_err(hw->os, "HW IO already freed\n");
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
+ efct_hw_reqtag_free(hw, wqcb);
+
+ /*
+ * Call efct_hw_io_free() because this releases the WQ reservation as
+ * well as doing the refcount put. Don't duplicate the code here.
+ */
+ (void)efct_hw_io_free(hw, io);
+}
+
+static void
+efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
+{
+ struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
+
+ memset(abort, 0, hw->sli.wqe_size);
+
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_tag = cpu_to_le32(wqe->id);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+}
+
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg)
+{
+ struct hw_wq_callback *wqcb;
+ unsigned long flags = 0;
+
+ if (!io_to_abort) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
+ hw, io_to_abort);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
+ /* command no longer active */
+ efc_log_debug(hw->os,
+ "io not active xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -ENOENT;
+ }
+
+ /* Must have a valid WQ reference */
+ if (!io_to_abort->wq) {
+ efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
+ io_to_abort->indicator);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -ENOENT;
+ }
+
+ /*
+ * Validation checks complete; now check to see if already being
+ * aborted, if not set the flag.
+ */
+ if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ efc_log_debug(hw->os,
+ "io already being aborted xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -EINPROGRESS;
+ }
+
+ /*
+ * If we got here, the possibilities are:
+ * - host owned xri
+ * - io_to_abort->wq_index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - port owned xri:
+ * - rxri: io_to_abort->wq_index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ * - non-rxri
+ * - io_to_abort->index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - io_to_abort->index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ */
+ io_to_abort->abort_done = cb;
+ io_to_abort->abort_arg = arg;
+
+ /* Allocate a request tag for the abort portion of this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ io_to_abort->abort_in_progress = false;
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -ENOSPC;
+ }
+
+ io_to_abort->abort_reqtag = wqcb->instance_index;
+ io_to_abort->wqe.send_abts = send_abts;
+ io_to_abort->wqe.id = io_to_abort->indicator;
+ io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
+
+ /*
+ * If the wqe is on the pending list, then set this wqe to be
+ * aborted when the IO's wqe is removed from the list.
+ */
+ if (io_to_abort->wq) {
+ spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
+ if (io_to_abort->wqe.list_entry.next) {
+ io_to_abort->wqe.abort_wqe_submit_needed = true;
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
+ }
+
+ efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
+
+ /* ABORT_WQE does not actually utilize an XRI on the Port,
+ * therefore, keep xbusy as-is to track the exchange's state,
+ * not the ABORT_WQE's state
+ */
+ if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
+ io_to_abort->abort_in_progress = false;
+ /* drop the reference taken by kref_get_unless_zero() above */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -EIO;
+ }
+
+ return 0;
+}
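+
+/*
+ * Caller sketch (illustrative only, not part of this patch; names are
+ * hypothetical). The completion callback must match efct_hw_done_t:
+ *
+ *	static int example_abort_done(struct efct_hw_io *hio, u32 len,
+ *				      int status, u32 ext, void *arg)
+ *	{
+ *		return 0;	// exchange abort completed
+ *	}
+ *
+ *	rc = efct_hw_io_abort(hw, io, true, example_abort_done, ctx);
+ *	if (rc == -EINPROGRESS)
+ *		;	// another abort already owns this exchange
+ */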
+
+void
+efct_hw_reqtag_pool_free(struct efct_hw *hw)
+{
+ u32 i;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ struct hw_wq_callback *wqcb = NULL;
+
+ if (reqtag_pool) {
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = reqtag_pool->tags[i];
+ if (!wqcb)
+ continue;
+
+ kfree(wqcb);
+ }
+ kfree(reqtag_pool);
+ hw->wq_reqtag_pool = NULL;
+ }
+}
+
+struct reqtag_pool *
+efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct reqtag_pool *reqtag_pool;
+ struct hw_wq_callback *wqcb;
+
+ reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
+ if (!reqtag_pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&reqtag_pool->freelist);
+ /* initialize reqtag pool lock */
+ spin_lock_init(&reqtag_pool->lock);
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
+ if (!wqcb)
+ break;
+
+ reqtag_pool->tags[i] = wqcb;
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
+ }
+
+ return reqtag_pool;
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ struct hw_wq_callback *wqcb = NULL;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ unsigned long flags = 0;
+
+ if (!callback)
+ return wqcb;
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+
+ if (!list_empty(&reqtag_pool->freelist)) {
+ wqcb = list_first_entry(&reqtag_pool->freelist,
+ struct hw_wq_callback, list_entry);
+ }
+
+ if (wqcb) {
+ list_del_init(&wqcb->list_entry);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ wqcb->callback = callback;
+ wqcb->arg = arg;
+ } else {
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ }
+
+ return wqcb;
+}
+
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
+{
+ unsigned long flags = 0;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+
+ if (!wqcb->callback) {
+ efc_log_err(hw->os, "WQCB is already freed\n");
+ return;
+ }
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+}
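+
+/*
+ * Request-tag lifecycle, as a minimal sketch (illustrative, with
+ * hypothetical names): a tag is allocated together with its completion
+ * handler, its instance_index travels in the WQE request tag, and the
+ * WQ completion path resolves it back to the handler before the tag is
+ * returned to the pool.
+ *
+ *	wqcb = efct_hw_reqtag_alloc(hw, my_done, my_arg);
+ *	wqe->request_tag = cpu_to_le16(wqcb->instance_index);
+ *	// ... submit WQE; later, in CQE processing ...
+ *	wqcb = efct_hw_reqtag_get_instance(hw, rid);
+ *	wqcb->callback(wqcb->arg, cqe, status);
+ *	efct_hw_reqtag_free(hw, wqcb);
+ */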
+
+struct hw_wq_callback *
+efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
+{
+ struct hw_wq_callback *wqcb;
+
+ wqcb = hw->wq_reqtag_pool->tags[instance_index];
+ if (!wqcb)
+ efc_log_err(hw->os, "wqcb for instance %d is null\n",
+ instance_index);
+
+ return wqcb;
+}
+
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
+{
+ int index = -1;
+ int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the maximum number of Qs, then
+ * we never have to worry about an infinite loop. We will always find
+ * an unused entry.
+ */
+ do {
+ if (hash[i].in_use && hash[i].id == id)
+ index = hash[i].index;
+ else
+ i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+ } while (index == -1 && hash[i].in_use);
+
+ return index;
+}
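+
+/*
+ * For reference, a matching insert would probe the same way for the
+ * first unused slot (a sketch under the same masking scheme; the
+ * driver's actual add helper may differ):
+ *
+ *	i = id & (EFCT_HW_Q_HASH_SIZE - 1);
+ *	while (hash[i].in_use)
+ *		i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+ *	hash[i].id = id;
+ *	hash[i].index = index;
+ *	hash[i].in_use = true;
+ */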
+
+int
+efct_hw_process(struct efct_hw *hw, u32 vector,
+ u32 max_isr_time_msec)
+{
+ struct hw_eq *eq;
+
+ /*
+ * The caller should disable interrupts if they wish to prevent us
+ * from processing during a shutdown. The following states are defined:
+ * EFCT_HW_STATE_UNINITIALIZED - No queues allocated
+ * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
+ * queues are cleared.
+ * EFCT_HW_STATE_ACTIVE - Chip and queues are operational
+ * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
+ * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
+ * completions.
+ */
+ if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
+ return 0;
+
+ /* Get pointer to struct hw_eq */
+ eq = hw->hw_eq[vector];
+ if (!eq)
+ return 0;
+
+ eq->use_count++;
+
+ return efct_hw_eq_process(hw, eq, max_isr_time_msec);
+}
+
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec)
+{
+ u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
+ u32 tcheck_count;
+ u64 tstart;
+ u64 telapsed;
+ bool done = false;
+
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
+ u16 cq_id = 0;
+ int rc;
+
+ rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
+ if (unlikely(rc)) {
+ if (rc == SLI4_EQE_STATUS_EQ_FULL) {
+ u32 i;
+
+ /*
+ * Received a sentinel EQE indicating the
+ * EQ is full. Process all CQs
+ */
+ for (i = 0; i < hw->cq_count; i++)
+ efct_hw_cq_process(hw, hw->hw_cq[i]);
+ continue;
+ } else {
+ return rc;
+ }
+ } else {
+ int index;
+
+ index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
+
+ if (likely(index >= 0))
+ efct_hw_cq_process(hw, hw->hw_cq[index]);
+ else
+ efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
+ }
+
+ if (eq->queue->n_posted > eq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, eq->queue, false);
+
+ if (tcheck_count && (--tcheck_count == 0)) {
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed >= max_isr_time_msec)
+ done = true;
+ }
+ }
+ sli_queue_eq_arm(&hw->sli, eq->queue, true);
+
+ return 0;
+}
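+
+/*
+ * Interrupt-path sketch (illustrative; the handler name, context struct,
+ * and time-budget constant are hypothetical). An MSI-X handler only
+ * needs to hand its vector to efct_hw_process(), which maps it to an EQ
+ * and runs efct_hw_eq_process() above:
+ *
+ *	static irqreturn_t example_isr(int irq, void *data)
+ *	{
+ *		struct example_intr_ctx *intr = data;
+ *
+ *		efct_hw_process(intr->hw, intr->vector, EXAMPLE_ISR_MSEC);
+ *		return IRQ_HANDLED;
+ *	}
+ */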
+
+static int
+_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int queue_rc;
+
+ /* Every so often, set the wqec bit to generate consumed completions */
+ if (wq->wqec_count)
+ wq->wqec_count--;
+
+ if (wq->wqec_count == 0) {
+ struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
+
+ genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
+ wq->wqec_count = wq->wqec_set_count;
+ }
+
+ /* Decrement WQ free count */
+ wq->free_count--;
+
+ queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
+
+ return (queue_rc < 0) ? -EIO : 0;
+}
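+
+/*
+ * Worked example of the accounting above: with wqec_set_count == 32
+ * (EFCT_HW_WQEC_SET_COUNT), every 32nd WQE is posted with the WQEC bit
+ * set, so the hardware emits one WQ-release completion per 32 entries
+ * consumed; hw_wq_submit_pending() below then credits free_count back
+ * by 32 and drains the pending list.
+ */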
+
+static void
+hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
+{
+ struct efct_hw_wqe *wqe;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+
+ /* Update free count with value passed in */
+ wq->free_count += update_free_count;
+
+ while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
+ wqe = list_first_entry(&wq->pending_list,
+ struct efct_hw_wqe, list_entry);
+ list_del_init(&wqe->list_entry);
+ _efct_hw_wq_write(wq, wqe);
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+}
+
+void
+efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
+{
+ u8 cqe[sizeof(struct sli4_mcqe)];
+ u16 rid = U16_MAX;
+ /* completion type */
+ enum sli4_qentry ctype;
+ u32 n_processed = 0;
+ u32 tstart, telapsed;
+
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
+ int status;
+
+ status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
+ /*
+ * The sign of status is significant. If status is:
+ * == 0 : call completed correctly and
+ * the CQE indicated success
+ * > 0 : call completed correctly and
+ * the CQE indicated an error
+ * < 0 : call failed and no information is available about the
+ * CQE
+ */
+ if (status < 0) {
+ if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
+ /*
+ * Notification that an entry was consumed,
+ * but not completed
+ */
+ continue;
+
+ break;
+ }
+
+ switch (ctype) {
+ case SLI4_QENTRY_ASYNC:
+ sli_cqe_async(&hw->sli, cqe);
+ break;
+ case SLI4_QENTRY_MQ:
+ /*
+ * Process MQ entry. Note there is no way to determine
+ * the MQ_ID from the completion entry.
+ */
+ efct_hw_mq_process(hw, status, hw->mq);
+ break;
+ case SLI4_QENTRY_WQ:
+ efct_hw_wq_process(hw, cq, cqe, status, rid);
+ break;
+ case SLI4_QENTRY_WQ_RELEASE: {
+ u32 wq_id = rid;
+ int index;
+ struct hw_wq *wq = NULL;
+
+ index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
+
+ if (likely(index >= 0)) {
+ wq = hw->hw_wq[index];
+ } else {
+ efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
+ break;
+ }
+ /* Submit any HW IOs that are on the WQ pending list */
+ hw_wq_submit_pending(wq, wq->wqec_set_count);
+
+ break;
+ }
+
+ case SLI4_QENTRY_RQ:
+ efct_hw_rqpair_process_rq(hw, cq, cqe);
+ break;
+ case SLI4_QENTRY_XABT: {
+ efct_hw_xabt_process(hw, cq, cqe, rid);
+ break;
+ }
+ default:
+ efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
+ ctype, rid);
+ break;
+ }
+
+ n_processed++;
+ if (n_processed == cq->queue->proc_limit)
+ break;
+
+ if (cq->queue->n_posted >= cq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, cq->queue, false);
+ }
+
+ sli_queue_arm(&hw->sli, cq->queue, true);
+
+ if (n_processed > cq->queue->max_num_processed)
+ cq->queue->max_num_processed = n_processed;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed > cq->queue->max_process_time)
+ cq->queue->max_process_time = telapsed;
+}
+
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid)
+{
+ struct hw_wq_callback *wqcb;
+
+ if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
+ if (status)
+ efc_log_err(hw->os, "reque xri failed, status = %d\n",
+ status);
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, rid);
+ if (!wqcb) {
+ efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
+ return;
+ }
+
+ if (!wqcb->callback) {
+ efc_log_err(hw->os, "wqcb callback is NULL\n");
+ return;
+ }
+
+ (*wqcb->callback)(wqcb->arg, cqe, status);
+}
+
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid)
+{
+ /* look up the IO; it may be on the wait_free list */
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ io = efct_hw_io_lookup(hw, rid);
+ if (!io) {
+ /* IO lookup failure should never happen */
+ efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
+ return;
+ }
+
+ if (!io->xbusy)
+ efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
+ else
+ /* mark IO as no longer busy */
+ io->xbusy = false;
+
+ /*
+ * For IOs that were aborted internally, we need to issue any pending
+ * callback here.
+ */
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ /*
+ * Use latched status as this is always saved for an internal
+ * abort
+ */
+ int status = io->saved_status;
+ u32 len = io->saved_len;
+ u32 ext = io->saved_ext;
+
+ io->done = NULL;
+ io->status_saved = false;
+
+ done(io, len, status, ext, arg);
+ }
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ if (io->state == EFCT_HW_IO_STATE_INUSE ||
+ io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ /* if on wait_free list, caller has already freed IO;
+ * remove from wait_free list and add to free list.
+ * if on in-use list, already marked as no longer busy;
+ * just leave there and wait for caller to free.
+ */
+ if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ io->state = EFCT_HW_IO_STATE_FREE;
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+static int
+efct_hw_flush(struct efct_hw *hw)
+{
+ u32 i = 0;
+
+ /* Process any remaining completions */
+ for (i = 0; i < hw->eq_count; i++)
+ efct_hw_process(hw, i, ~0);
+
+ return 0;
+}
+
+int
+efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int rc = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+ if (list_empty(&wq->pending_list)) {
+ if (wq->free_count > 0) {
+ rc = _efct_hw_wq_write(wq, wqe);
+ } else {
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ while (wq->free_count > 0 && !list_empty(&wq->pending_list)) {
+ wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
+ list_entry);
+ list_del_init(&wqe->list_entry);
+ rc = _efct_hw_wq_write(wq, wqe);
+ if (rc)
+ break;
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+
+ return rc;
+}
+
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_bls_send(efct, type, bls, NULL, NULL);
+}
+
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg)
+{
+ struct efct_hw *hw = &efct->hw;
+ struct efct_hw_io *hio;
+ struct sli_bls_payload bls;
+ int rc;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os,
+ "cannot send BLS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ efc_log_err(hw->os, "HIO allocation failed\n");
+ return -EIO;
+ }
+
+ hio->done = cb;
+ hio->arg = arg;
+
+ bls_params->xri = hio->indicator;
+ bls_params->tag = hio->reqtag;
+
+ if (type == FC_RCTL_BA_ACC) {
+ hio->type = EFCT_HW_BLS_ACC;
+ bls.type = SLI4_SLI_BLS_ACC;
+ memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
+ } else {
+ hio->type = EFCT_HW_BLS_RJT;
+ bls.type = SLI4_SLI_BLS_RJT;
+ memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
+ }
+
+ bls.ox_id = cpu_to_le16(bls_params->ox_id);
+ bls.rx_id = cpu_to_le16(bls_params->rx_id);
+
+ if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
+ &bls, bls_params)) {
+ efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
+ return -EIO;
+ }
+
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+
+ return rc;
+}
+
+static int
+efct_els_srrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *arg)
+{
+ struct efc_disc_io *io = arg;
+
+ efc_disc_io_complete(io, length, status, ext_status);
+ return 0;
+}
+
+static inline void
+efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
+{
+ u8 *cmd = io->req.virt;
+
+ params->cmd = *cmd;
+ params->s_id = io->s_id;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.els.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.els.timeout;
+}
+
+static inline void
+efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
+{
+ params->r_ctl = io->iparam.ct.r_ctl;
+ params->type = io->iparam.ct.type;
+ params->df_ctl = io->iparam.ct.df_ctl;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.ct.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.ct.timeout;
+}
+
+/**
+ * efct_els_hw_srrs_send() - Send a single request and response cmd.
+ * @efc: efc library structure
+ * @io: Discovery IO used to hold els and ct cmd context.
+ *
+ * This routine supports communication sequences consisting of a single
+ * request and single response between two endpoints. Examples include:
+ * - Sending an ELS request.
+ * - Sending an ELS response - To send an ELS response, the caller must provide
+ * the OX_ID from the received request.
+ * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
+ * the caller must provide the R_CTL, TYPE, and DF_CTL
+ * values to place in the FC frame header.
+ *
+ * Return: Status of the request.
+ */
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
+{
+ struct efct *efct = efc->base;
+ struct efct_hw_io *hio;
+ struct efct_hw *hw = &efct->hw;
+ struct efc_dma *send = &io->req;
+ struct efc_dma *receive = &io->rsp;
+ struct sli4_sge *sge = NULL;
+ int rc = 0;
+ u32 len = io->xmit_len;
+ u32 sge0_flags;
+ u32 sge1_flags;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_debug(hw->os,
+ "cannot send SRRS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ efc_log_err(hw->os, "HIO alloc failed\n");
+ return -EIO;
+ }
+
+ hio->done = efct_els_srrs_send_cb;
+ hio->arg = io;
+
+ sge = hio->sgl->virt;
+
+ /* clear both SGE */
+ memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+
+ sge0_flags = le32_to_cpu(sge[0].dw2_flags);
+ sge1_flags = le32_to_cpu(sge[1].dw2_flags);
+ if (send->size) {
+ sge[0].buffer_address_high =
+ cpu_to_le32(upper_32_bits(send->phys));
+ sge[0].buffer_address_low =
+ cpu_to_le32(lower_32_bits(send->phys));
+
+ sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+
+ sge[0].buffer_length = cpu_to_le32(len);
+ }
+
+ if (io->io_type == EFC_DISC_IO_ELS_REQ ||
+ io->io_type == EFC_DISC_IO_CT_REQ) {
+ sge[1].buffer_address_high =
+ cpu_to_le32(upper_32_bits(receive->phys));
+ sge[1].buffer_address_low =
+ cpu_to_le32(lower_32_bits(receive->phys));
+
+ sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ sge1_flags |= SLI4_SGE_LAST;
+
+ sge[1].buffer_length = cpu_to_le32(receive->size);
+ } else {
+ sge0_flags |= SLI4_SGE_LAST;
+ }
+
+ sge[0].dw2_flags = cpu_to_le32(sge0_flags);
+ sge[1].dw2_flags = cpu_to_le32(sge1_flags);
+
+ switch (io->io_type) {
+ case EFC_DISC_IO_ELS_REQ: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_REQ;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+
+ if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &els_params)) {
+ efc_log_err(hw->os, "REQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_ELS_RESP: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_RSP;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+ if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
+ &els_params)) {
+ efc_log_err(hw->os, "RSP WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_REQ: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)) {
+ efc_log_err(hw->os, "GEN WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_RESP: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT_RSP;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)) {
+ efc_log_err(hw->os, "XMIT SEQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
+ rc = -EIO;
+ }
+
+ if (rc == 0) {
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+ }
+
+ return rc;
+}
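+
+/*
+ * For reference, the two-entry SGL built above: sge[0] always describes
+ * the transmit buffer; for request-type IOs (ELS/CT requests) sge[1]
+ * describes the response buffer and carries the LAST flag, while for
+ * responses sge[0] itself is marked LAST and sge[1] stays unused.
+ */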
+
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg)
+{
+ int rc = 0;
+ bool send_wqe = true;
+
+ if (!io) {
+ pr_err("bad parm hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ /*
+ * Save state needed during later stages
+ */
+ io->type = type;
+ io->done = cb;
+ io->arg = arg;
+
+ /*
+ * Format the work queue entry used to send the IO
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+ struct fcp_txrdy *xfer = io->xfer_rdy.virt;
+
+ /*
+ * Fill in the XFER_RDY for IF_TYPE 0 devices
+ */
+ xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
+ xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRECEIVE WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_READ: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TSEND WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_RSP: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, SLI4_CQ_DEFAULT,
+ 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRSP WQE error\n");
+ rc = -EIO;
+ }
+
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ rc = -EIO;
+ }
+
+ if (send_wqe && rc == 0) {
+ io->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hw->tcmd_wq_submit[io->wq->instance]++;
+ io->wq->use_count++;
+ rc = efct_hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ io->xbusy = false;
+ }
+ }
+
+ return rc;
+}
+
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ int rc;
+ struct efct_hw_wqe *wqe;
+ u32 xri;
+ struct hw_wq *wq;
+
+ wqe = &ctx->wqe;
+
+ /* populate the callback object */
+ ctx->hw = hw;
+
+ /* Fetch and populate request tag */
+ ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
+ if (!ctx->wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ wq = hw->hw_wq[0];
+
+ /* Set XRI and RX_ID in the header based on which WQ, and which
+ * send_frame_io we are using
+ */
+ xri = wq->send_frame_io->indicator;
+
+ /* Build the send frame WQE */
+ rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
+ sof, eof, (u32 *)hdr, payload, payload->len,
+ EFCT_HW_SEND_FRAME_TIMEOUT, xri,
+ ctx->wqcb->instance_index);
+ if (rc) {
+ efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Write to WQ */
+ rc = efct_hw_wq_write(wq, wqe);
+ if (rc) {
+ efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
+ return -EIO;
+ }
+
+ wq->use_count++;
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link_stat(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_link_stats *mbox_rsp;
+ struct efct_hw_link_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
+ u32 num_counters, i;
+ u32 mbox_rsp_flags = 0;
+
+ mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
+ mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
+ num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
+ memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
+ EFCT_HW_LINK_STAT_MAX);
+
+ /* Fill overflow counts; mask starts from SLI4_READ_LNKSTAT_W02OF */
+ for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
+ counts[i].overflow = !!(mbox_rsp_flags & (1 << (i + 2)));
+
+ counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->linkfail_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssync_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssignal_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_errcnt);
+ counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
+ le32_to_cpu(mbox_rsp->inval_txword_errcnt);
+ counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->crc_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
+ le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
+ counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofa_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofni_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_soff_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
+ u8 clear_overflow_flags, u8 clear_all_counters,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_link_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command */
+ if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
+ clear_overflow_flags, clear_all_counters))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_link_stat, cb_arg);
+
+ if (rc)
+ kfree(cb_arg);
+
+ return rc;
+}
+
+static int
+efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_status *mbox_rsp =
+ (struct sli4_cmd_read_status *)mqe;
+ struct efct_hw_host_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
+ u32 num_counters = EFCT_HW_HOST_STAT_MAX;
+
+ memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
+ EFCT_HW_HOST_STAT_MAX);
+
+ counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_orig);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_resp);
+ counts[EFCT_HW_HOST_STAT_RX_P_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_host_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command to get the host stats */
+ if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_host_stat, cb_arg);
+
+ if (rc) {
+ efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+struct efct_hw_async_call_ctx {
+ efct_hw_async_cb_t callback;
+ void *arg;
+ u8 cmd[SLI4_BMBX_SIZE];
+};
+
+static void
+efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw, status, mqe, ctx->arg);
+
+ kfree(ctx);
+ }
+}
+
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer);
+ * it must be persistent because the mbox cmd submission may be
+ * queued and executed later.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->callback = callback;
+ ctx->arg = arg;
+
+ /* Build and send a NOP mailbox command */
+ if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
+ efc_log_err(hw->os, "COMMON_NOP format failure\n");
+ kfree(ctx);
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
+ ctx);
+ if (rc) {
+ efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
+ kfree(ctx);
+ return -EIO;
+ }
+ return 0;
+}
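+
+/*
+ * Usage sketch (hypothetical names): since mailbox commands complete in
+ * order, efct_hw_async_call()'s NOP round-trip can serve as a flush
+ * point for previously submitted work.
+ *
+ *	static void example_flush_done(struct efct_hw *hw, int status,
+ *				       u8 *mqe, void *arg)
+ *	{
+ *		complete(arg);	// wake the waiting thread
+ *	}
+ *
+ *	efct_hw_async_call(hw, example_flush_done, &flush_done);
+ *	wait_for_completion(&flush_done);
+ */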
+
+static int
+efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_sli_config *mbox_rsp =
+ (struct sli4_cmd_sli_config *)mqe;
+ struct sli4_rsp_cmn_write_object *wr_obj_rsp;
+ struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
+ u32 bytes_written;
+ u16 mbox_status;
+ u32 change_status;
+
+ wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
+ &mbox_rsp->payload.embed;
+ bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
+ mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
+ change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
+ RSP_CHANGE_STATUS);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (!status && mbox_status)
+ status = mbox_status;
+ cb_arg->cb(status, bytes_written, change_status,
+ cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
+ u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+ struct efct_hw_fw_wr_cb_arg *cb_arg;
+ int noc = 0;
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Write a portion of a firmware image to the device */
+ if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
+ noc, last, size, offset, "/prg/",
+ dma))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_fw_write, cb_arg);
+
+ if (rc != 0) {
+ efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
+ void *arg)
+{
+ return 0;
+}
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 link[SLI4_BMBX_SIZE];
+ u32 speed = 0;
+ u8 reset_alpa = 0;
+
+ switch (ctrl) {
+ case EFCT_HW_PORT_INIT:
+ if (!sli_cmd_config_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "CONFIG_LINK failed\n");
+ break;
+ }
+ speed = hw->config.speed;
+ reset_alpa = (u8)(value & 0xff);
+
+ rc = -EIO;
+ if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "INIT_LINK failed\n");
+ break;
+
+ case EFCT_HW_PORT_SHUTDOWN:
+ if (!sli_cmd_down_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "DOWN_LINK failed\n");
+ break;
+
+ default:
+ efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
+ break;
+ }
+
+ return rc;
+}
+
+void
+efct_hw_teardown(struct efct_hw *hw)
+{
+ u32 i = 0;
+ u32 destroy_queues;
+ u32 free_memory;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
+ free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
+
+ /* Cancel Sliport Healthcheck */
+ if (hw->sliport_healthcheck) {
+ hw->sliport_healthcheck = 0;
+ efct_hw_config_sli_port_health_check(hw, 0, 0);
+ }
+
+ if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_debug(hw->os,
+ "Some cmds still pending on MQ queue\n");
+
+ /* Cancel any remaining commands */
+ efct_hw_command_cancel(hw);
+ } else {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+ }
+
+ dma_free_coherent(&efct->pci->dev,
+ hw->rnode_mem.size, hw->rnode_mem.virt,
+ hw->rnode_mem.phys);
+ memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
+
+ if (hw->io) {
+ for (i = 0; i < hw->config.n_io; i++) {
+ if (hw->io[i] && hw->io[i]->sgl &&
+ hw->io[i]->sgl->virt) {
+ dma_free_coherent(&efct->pci->dev,
+ hw->io[i]->sgl->size,
+ hw->io[i]->sgl->virt,
+ hw->io[i]->sgl->phys);
+ }
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+ kfree(hw->io);
+ hw->io = NULL;
+ kfree(hw->wqe_buffs);
+ hw->wqe_buffs = NULL;
+ }
+
+ dma = &hw->xfer_rdy;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ dma = &hw->loop_map;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ for (i = 0; i < hw->wq_count; i++)
+ sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->rq_count; i++)
+ sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->mq_count; i++)
+ sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->cq_count; i++)
+ sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
+ free_memory);
+
+ /* Free rq buffers */
+ efct_hw_rx_free(hw);
+
+ efct_hw_queue_teardown(hw);
+
+ kfree(hw->wq_cpu_array);
+
+ sli_teardown(&hw->sli);
+
+ /* record the fact that the queues are non-functional */
+ hw->state = EFCT_HW_STATE_UNINITIALIZED;
+
+ /* free sequence free pool */
+ kfree(hw->seq_pool);
+ hw->seq_pool = NULL;
+
+ /* free hw_wq_callback pool */
+ efct_hw_reqtag_pool_free(hw);
+
+ mempool_destroy(hw->cmd_ctx_pool);
+ mempool_destroy(hw->mbox_rqst_pool);
+
+ /* Mark HW setup as not having been called */
+ hw->hw_setup_called = false;
+}
+
+static int
+efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
+ enum efct_hw_state prev_state)
+{
+ int rc = 0;
+
+ switch (reset) {
+ case EFCT_HW_RESET_FUNCTION:
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ case EFCT_HW_RESET_FIRMWARE:
+ efc_log_debug(hw->os, "issuing firmware reset\n");
+ if (sli_fw_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_soft_reset failed\n");
+ rc = -EIO;
+ }
+ /*
+ * Because the FW reset leaves the FW in a non-running state,
+ * follow that with a regular reset.
+ */
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ default:
+ efc_log_err(hw->os, "unknown type - no reset performed\n");
+ hw->state = prev_state;
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
+{
+ int rc = 0;
+ enum efct_hw_state prev_state = hw->state;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE)
+ efc_log_debug(hw->os,
+ "HW state %d is not active\n", hw->state);
+
+ hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
+
+ /*
+ * If the prev_state is already reset/teardown in progress,
+ * don't continue further
+ */
+ if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
+ prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
+ return efct_hw_sli_reset(hw, reset, prev_state);
+
+ if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_err(hw->os,
+ "Some commands still pending on MQ queue\n");
+ }
+
+ /* Reset the chip */
+ rc = efct_hw_sli_reset(hw, reset, prev_state);
+ if (rc == -EINVAL)
+ return -EIO;
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
new file mode 100644
index 000000000000..f3f4aa78dce9
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -0,0 +1,764 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef _EFCT_HW_H
+#define _EFCT_HW_H
+
+#include "../libefc_sli/sli4.h"
+
+/*
+ * EFCT PCI IDs
+ */
+#define EFCT_VENDOR_ID 0x10df
+/* LightPulse 16Gb x 4 FC (lancer-g6) */
+#define EFCT_DEVICE_LANCER_G6 0xe307
+/* LightPulse 32Gb x 4 FC (lancer-g7) */
+#define EFCT_DEVICE_LANCER_G7 0xf407
+
+/* Default RQ entry counts used by the driver */
+#define EFCT_HW_RQ_ENTRIES_MIN 512
+#define EFCT_HW_RQ_ENTRIES_DEF 1024
+#define EFCT_HW_RQ_ENTRIES_MAX 4096
+
+/* Sizes of the RQ buffers used for each RQ */
+#define EFCT_HW_RQ_SIZE_HDR 128
+#define EFCT_HW_RQ_SIZE_PAYLOAD 1024
+
+/* Maximum number of multi-receive queues */
+#define EFCT_HW_MAX_MRQS 8
+
+/*
+ * Count of WQEs between settings of the WQEC bit in a submitted
+ * WQE, causing a consumed/released completion to be posted.
+ */
+#define EFCT_HW_WQEC_SET_COUNT 32
+
+/* Send frame timeout in seconds */
+#define EFCT_HW_SEND_FRAME_TIMEOUT 10
+
+/*
+ * FDT Transfer Hint value; reads greater than this value
+ * will be segmented to implement fairness. A value of zero disables
+ * the feature.
+ */
+#define EFCT_HW_FDT_XFER_HINT 8192
+
+#define EFCT_HW_TIMECHECK_ITERATIONS 100
+#define EFCT_HW_MAX_NUM_MQ 1
+#define EFCT_HW_MAX_NUM_RQ 32
+#define EFCT_HW_MAX_NUM_EQ 16
+#define EFCT_HW_MAX_NUM_WQ 32
+#define EFCT_HW_DEF_NUM_EQ 1
+
+#define OCE_HW_MAX_NUM_MRQ_PAIRS 16
+
+#define EFCT_HW_MQ_DEPTH 128
+#define EFCT_HW_EQ_DEPTH 1024
+
+/*
+ * A CQ will be assigned to each WQ
+ * (the CQ must have 2x the WQ entries for abort
+ * processing), plus a separate one for each RQ pair and one for the MQ
+ */
+#define EFCT_HW_MAX_NUM_CQ \
+ ((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))
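+/* with the limits above: (32 * 2) + 1 + (16 * 2) = 97 CQs */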
+
+#define EFCT_HW_Q_HASH_SIZE 128
+#define EFCT_HW_RQ_HEADER_SIZE 128
+#define EFCT_HW_RQ_HEADER_INDEX 0
+
+#define EFCT_HW_REQUE_XRI_REGTAG 65534
+
+/* Options for efct_hw_command() */
+enum efct_cmd_opts {
+ /* command executes synchronously and busy-waits for completion */
+ EFCT_CMD_POLL,
+ /* command executes asynchronously. Uses callback */
+ EFCT_CMD_NOWAIT,
+};
+
+enum efct_hw_reset {
+ EFCT_HW_RESET_FUNCTION,
+ EFCT_HW_RESET_FIRMWARE,
+ EFCT_HW_RESET_MAX
+};
+
+enum efct_hw_topo {
+ EFCT_HW_TOPOLOGY_AUTO,
+ EFCT_HW_TOPOLOGY_NPORT,
+ EFCT_HW_TOPOLOGY_LOOP,
+ EFCT_HW_TOPOLOGY_NONE,
+ EFCT_HW_TOPOLOGY_MAX
+};
+
+/* pack fw revision values into a single uint64_t */
+#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \
+ | ((uint64_t)(c) << 16) | ((uint64_t)(d)))
+
+#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)
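+/*
+ * e.g. HW_FWREV(1, 2, 3, 4) packs to 0x0001000200030004 and
+ * EFCT_FW_VER_STR(1, 2, 3, 4) expands to "1.2.3.4"
+ */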
+
+enum efct_hw_io_type {
+ EFCT_HW_ELS_REQ,
+ EFCT_HW_ELS_RSP,
+ EFCT_HW_FC_CT,
+ EFCT_HW_FC_CT_RSP,
+ EFCT_HW_BLS_ACC,
+ EFCT_HW_BLS_RJT,
+ EFCT_HW_IO_TARGET_READ,
+ EFCT_HW_IO_TARGET_WRITE,
+ EFCT_HW_IO_TARGET_RSP,
+ EFCT_HW_IO_DNRX_REQUEUE,
+ EFCT_HW_IO_MAX,
+};
+
+enum efct_hw_io_state {
+ EFCT_HW_IO_STATE_FREE,
+ EFCT_HW_IO_STATE_INUSE,
+ EFCT_HW_IO_STATE_WAIT_FREE,
+ EFCT_HW_IO_STATE_WAIT_SEC_HIO,
+};
+
+#define EFCT_TARGET_WRITE_SKIPS 1
+#define EFCT_TARGET_READ_SKIPS 2
+
+struct efct_hw;
+struct efct_io;
+
+#define EFCT_CMD_CTX_POOL_SZ 32
+/**
+ * HW command context.
+ * Stores the state for the asynchronous commands sent to the hardware.
+ */
+struct efct_command_ctx {
+ struct list_head list_entry;
+ int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
+ void *arg; /* Argument for callback */
+ /* buffer holding command / results */
+ u8 buf[SLI4_BMBX_SIZE];
+ void *ctx; /* upper layer context */
+};
+
+struct efct_hw_sgl {
+ uintptr_t addr;
+ size_t len;
+};
+
+union efct_hw_io_param_u {
+ struct sli_bls_params bls;
+ struct sli_els_params els;
+ struct sli_ct_params fc_ct;
+ struct sli_fcp_tgt_params fcp_tgt;
+};
+
+/* WQ steering mode */
+enum efct_hw_wq_steering {
+ EFCT_HW_WQ_STEERING_CLASS,
+ EFCT_HW_WQ_STEERING_REQUEST,
+ EFCT_HW_WQ_STEERING_CPU,
+};
+
+/* HW wqe object */
+struct efct_hw_wqe {
+ struct list_head list_entry;
+ bool abort_wqe_submit_needed;
+ bool send_abts;
+ u32 id;
+ u32 abort_reqtag;
+ u8 *wqebuf;
+};
+
+struct efct_hw_io;
+/* Typedef for HW "done" callback */
+typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status,
+ u32 ext, void *ul_arg);
+
+/**
+ * HW IO object.
+ *
+ * Stores the per-IO information necessary
+ * for both SLI and efct.
+ * @ref: reference counter for hw io object
+ * @state: state of IO: free, busy, wait_free
+ * @release: release function for the hw io object's kref
+ * @list_entry: used for busy, wait_free, free lists
+ * @wqe: work queue object, with link for pending
+ * @hw: pointer back to hardware context
+ * @xfer_rdy: transfer ready data
+ * @type: IO type
+ * @xbusy: exchange is active in FW
+ * @abort_in_progress: if TRUE, abort is in progress
+ * @status_saved: if TRUE, latched status should be returned
+ * @wq_class: WQ class if steering mode is Class
+ * @reqtag: request tag for this HW IO
+ * @wq: WQ assigned to the exchange
+ * @done: function called on IO completion
+ * @arg: argument passed to IO done callback
+ * @abort_done: function called on abort completion
+ * @abort_arg: argument passed to abort done callback
+ * @wq_steering: WQ steering mode request
+ * @saved_status: saved status
+ * @saved_len: saved length
+ * @saved_ext: saved extended status
+ * @eq: EQ on which this HIO came up
+ * @sge_offset: SGE data offset
+ * @def_sgl_count: count of SGEs in default SGL
+ * @abort_reqtag: request tag for an abort of this HW IO
+ * @indicator: exchange indicator
+ * @def_sgl: default SGL
+ * @sgl: pointer to current active SGL
+ * @sgl_count: count of SGEs in io->sgl
+ * @first_data_sge: index of first data SGE
+ * @n_sge: number of active SGEs
+ */
+struct efct_hw_io {
+ struct kref ref;
+ enum efct_hw_io_state state;
+ void (*release)(struct kref *arg);
+ struct list_head list_entry;
+ struct efct_hw_wqe wqe;
+
+ struct efct_hw *hw;
+ struct efc_dma xfer_rdy;
+ u16 type;
+ bool xbusy;
+ int abort_in_progress;
+ bool status_saved;
+ u8 wq_class;
+ u16 reqtag;
+
+ struct hw_wq *wq;
+ efct_hw_done_t done;
+ void *arg;
+ efct_hw_done_t abort_done;
+ void *abort_arg;
+
+ enum efct_hw_wq_steering wq_steering;
+
+ u32 saved_status;
+ u32 saved_len;
+ u32 saved_ext;
+
+ struct hw_eq *eq;
+ u32 sge_offset;
+ u32 def_sgl_count;
+ u32 abort_reqtag;
+ u32 indicator;
+ struct efc_dma def_sgl;
+ struct efc_dma *sgl;
+ u32 sgl_count;
+ u32 first_data_sge;
+ u32 n_sge;
+};
+
+enum efct_hw_port {
+ EFCT_HW_PORT_INIT,
+ EFCT_HW_PORT_SHUTDOWN,
+};
+
+/* Node group rpi reference */
+struct efct_hw_rpi_ref {
+ atomic_t rpi_count;
+ atomic_t rpi_attached;
+};
+
+enum efct_hw_link_stat {
+ EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
+ EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
+ EFCT_HW_LINK_STAT_CRC_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
+ EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
+ EFCT_HW_LINK_STAT_MAX,
+};
+
+enum efct_hw_host_stat {
+ EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
+ EFCT_HW_HOST_STAT_RX_P_BSY_COUNT,
+ EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
+ EFCT_HW_HOST_STAT_MAX,
+};
+
+enum efct_hw_state {
+ EFCT_HW_STATE_UNINITIALIZED,
+ EFCT_HW_STATE_QUEUES_ALLOCATED,
+ EFCT_HW_STATE_ACTIVE,
+ EFCT_HW_STATE_RESET_IN_PROGRESS,
+ EFCT_HW_STATE_TEARDOWN_IN_PROGRESS,
+};
+
+struct efct_hw_link_stat_counts {
+ u8 overflow;
+ u32 counter;
+};
+
+struct efct_hw_host_stat_counts {
+ u32 counter;
+};
+
+/* Structure used for the hash lookup of queue IDs */
+struct efct_queue_hash {
+ bool in_use;
+ u16 id;
+ u16 index;
+};
+
+/* WQ callback object */
+struct hw_wq_callback {
+ u16 instance_index; /* use for request tag */
+ void (*callback)(void *arg, u8 *cqe, int status);
+ void *arg;
+ struct list_head list_entry;
+};
+
+struct reqtag_pool {
+ spinlock_t lock; /* pool lock */
+ struct hw_wq_callback *tags[U16_MAX];
+ struct list_head freelist;
+};
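+
+/*
+ * Note: with U16_MAX (65535) slots, the tags[] pointer table alone is
+ * roughly 512 KiB on 64-bit hosts (65535 * 8 bytes).
+ */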
+
+struct efct_hw_config {
+ u32 n_eq;
+ u32 n_cq;
+ u32 n_mq;
+ u32 n_rq;
+ u32 n_wq;
+ u32 n_io;
+ u32 n_sgl;
+ u32 speed;
+ u32 topology;
+ /* size of the buffers for first burst */
+ u32 rq_default_buffer_size;
+ u8 esoc;
+ /* MRQ RQ selection policy */
+ u8 rq_selection_policy;
+ /* RQ quanta if rq_selection_policy == 2 */
+ u8 rr_quanta;
+ u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+};
+
+struct efct_hw {
+ struct efct *os;
+ struct sli4 sli;
+ u16 ulp_start;
+ u16 ulp_max;
+ u32 dump_size;
+ enum efct_hw_state state;
+ bool hw_setup_called;
+ u8 sliport_healthcheck;
+ u16 fcf_indicator;
+
+ /* HW configuration */
+ struct efct_hw_config config;
+
+ /* calculated queue sizes for each type */
+ u32 num_qentries[SLI4_QTYPE_MAX];
+
+ /* Storage for SLI queue objects */
+ struct sli4_queue wq[EFCT_HW_MAX_NUM_WQ];
+ struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ];
+ u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
+ struct sli4_queue mq[EFCT_HW_MAX_NUM_MQ];
+ struct sli4_queue cq[EFCT_HW_MAX_NUM_CQ];
+ struct sli4_queue eq[EFCT_HW_MAX_NUM_EQ];
+
+ /* HW queue */
+ u32 eq_count;
+ u32 cq_count;
+ u32 mq_count;
+ u32 wq_count;
+ u32 rq_count;
+ u32 cmd_head_count;
+ struct list_head eq_list;
+
+ struct efct_queue_hash cq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash rq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash wq_hash[EFCT_HW_Q_HASH_SIZE];
+
+ /* Storage for HW queue objects */
+ struct hw_wq *hw_wq[EFCT_HW_MAX_NUM_WQ];
+ struct hw_rq *hw_rq[EFCT_HW_MAX_NUM_RQ];
+ struct hw_mq *hw_mq[EFCT_HW_MAX_NUM_MQ];
+ struct hw_cq *hw_cq[EFCT_HW_MAX_NUM_CQ];
+ struct hw_eq *hw_eq[EFCT_HW_MAX_NUM_EQ];
+ /* count of hw_rq[] entries */
+ u32 hw_rq_count;
+ /* count of multirq RQs */
+ u32 hw_mrq_count;
+
+ struct hw_wq **wq_cpu_array;
+
+ /* Sequence objects used in incoming frame processing */
+ struct efc_hw_sequence *seq_pool;
+
+ /* Maintain an ordered, linked list of outstanding HW commands. */
+ struct mutex bmbx_lock;
+ spinlock_t cmd_lock;
+ struct list_head cmd_head;
+ struct list_head cmd_pending;
+ mempool_t *cmd_ctx_pool;
+ mempool_t *mbox_rqst_pool;
+
+ struct sli4_link_event link;
+
+ /* pointer array of IO objects */
+ struct efct_hw_io **io;
+ /* array of WQE buffs mapped to IO objects */
+ u8 *wqe_buffs;
+
+ /* IO lock to synchronize list access */
+ spinlock_t io_lock;
+ /* List of IO objects in use */
+ struct list_head io_inuse;
+ /* List of IO objects waiting to be freed */
+ struct list_head io_wait_free;
+ /* List of IO objects available for allocation */
+ struct list_head io_free;
+
+ struct efc_dma loop_map;
+
+ struct efc_dma xfer_rdy;
+
+ struct efc_dma rnode_mem;
+
+ atomic_t io_alloc_failed_count;
+
+ /* stat: wq submit count */
+ u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
+ /* stat: wq complete count */
+ u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];
+
+ atomic_t send_frame_seq_id;
+ struct reqtag_pool *wq_reqtag_pool;
+};
+
+enum efct_hw_io_count_type {
+ EFCT_HW_IO_INUSE_COUNT,
+ EFCT_HW_IO_FREE_COUNT,
+ EFCT_HW_IO_WAIT_FREE_COUNT,
+ EFCT_HW_IO_N_TOTAL_IO_COUNT,
+};
+
+/* HW queue data structures */
+struct hw_eq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct efct_hw *hw;
+ struct sli4_queue *queue;
+ struct list_head cq_list;
+ u32 use_count;
+};
+
+struct hw_cq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_eq *eq;
+ struct sli4_queue *queue;
+ struct list_head q_list;
+ u32 use_count;
+};
+
+struct hw_q {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+};
+
+struct hw_mq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+
+ u32 use_count;
+};
+
+struct hw_wq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ struct efct_hw *hw;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+ u32 class;
+
+ /* WQ consumed */
+ u32 wqec_set_count;
+ u32 wqec_count;
+ u32 free_count;
+ u32 total_submit_count;
+ struct list_head pending_list;
+
+ /* HW IO allocated for use with Send Frame */
+ struct efct_hw_io *send_frame_io;
+
+ /* Stats */
+ u32 use_count;
+ u32 wq_pending_count;
+};
+
+struct hw_rq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 use_count;
+ u32 hdr_entry_size;
+ u32 first_burst_entry_size;
+ u32 data_entry_size;
+ bool is_mrq;
+ u32 base_mrq_id;
+
+ struct hw_cq *cq;
+
+ u8 filter_mask;
+ struct sli4_queue *hdr;
+ struct sli4_queue *first_burst;
+ struct sli4_queue *data;
+
+ struct efc_hw_rq_buffer *hdr_buf;
+ struct efc_hw_rq_buffer *fb_buf;
+ struct efc_hw_rq_buffer *payload_buf;
+ /* RQ tracker for this RQ */
+ struct efc_hw_sequence **rq_tracker;
+};
+
+struct efct_hw_send_frame_context {
+ struct efct_hw *hw;
+ struct hw_wq_callback *wqcb;
+ struct efct_hw_wqe wqe;
+ void (*callback)(int status, void *arg);
+ void *arg;
+
+ /* General purpose elements */
+ struct efc_hw_sequence *seq;
+ struct efc_dma payload;
+};
+
+struct efct_hw_grp_hdr {
+ u32 size;
+ __be32 magic_number;
+ u32 word2;
+ u8 rev_name[128];
+ u8 date[12];
+ u8 revision[32];
+};
+
+static inline int
+efct_hw_get_link_speed(struct efct_hw *hw)
+{
+ return hw->link.speed;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
+int efct_hw_init(struct efct_hw *hw);
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value);
+int
+efct_hw_init_queues(struct efct_hw *hw);
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw);
+uint64_t
+efct_get_wwnn(struct efct_hw *hw);
+uint64_t
+efct_get_wwpn(struct efct_hw *hw);
+
+int efct_hw_rx_allocate(struct efct_hw *hw);
+int efct_hw_rx_post(struct efct_hw *hw);
+void efct_hw_rx_free(struct efct_hw *hw);
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
+ void *arg);
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);
+
+struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
+int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
+u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg);
+int
+efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
+ struct efc_dma *sgl,
+ u32 sgl_count);
+int
+efct_hw_io_init_sges(struct efct_hw *hw,
+ struct efct_hw_io *io, enum efct_hw_io_type type);
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length);
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg);
+u32
+efct_hw_io_get_count(struct efct_hw *hw,
+ enum efct_hw_io_count_type io_count_type);
+struct efct_hw_io
+*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
+void efct_hw_io_abort_all(struct efct_hw *hw);
+void efct_hw_io_free_internal(struct kref *arg);
+
+/* HW WQ request tag API */
+struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
+void efct_hw_reqtag_pool_free(struct efct_hw *hw);
+struct hw_wq_callback
+*efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe,
+ int status), void *arg);
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
+struct hw_wq_callback
+*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);
+
+/* RQ completion handlers for RQ pair mode */
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw,
+ struct hw_cq *cq, u8 *cqe);
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq);
+static inline void
+efct_hw_sequence_copy(struct efc_hw_sequence *dst,
+ struct efc_hw_sequence *src)
+{
+ /* shallow copy src to dst; the embedded list linkage is copied as-is */
+ *dst = *src;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq);
+
+static inline int
+efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ /* Only RQ pair mode is supported */
+ return efct_hw_rqpair_sequence_free(hw, seq);
+}
+
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec);
+void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq);
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid);
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid);
+int
+efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
+int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg);
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io);
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls);
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg);
+
+/* Function for retrieving link statistics */
+int
+efct_hw_get_link_stats(struct efct_hw *hw,
+ u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters,
+ void (*efct_hw_link_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg),
+ void *arg);
+/* Function for retrieving host statistics */
+int
+efct_hw_get_host_stats(struct efct_hw *hw,
+ u8 cc,
+ void (*efct_hw_host_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg),
+ void *arg);
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma,
+ u32 size, u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg);
+typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg);
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg);
+
+struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count);
+struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count);
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count);
+struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count);
+struct hw_wq
+*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count);
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count);
+void efct_hw_del_eq(struct hw_eq *eq);
+void efct_hw_del_cq(struct hw_cq *cq);
+void efct_hw_del_mq(struct hw_mq *mq);
+void efct_hw_del_wq(struct hw_wq *wq);
+void efct_hw_del_rq(struct hw_rq *rq);
+void efct_hw_queue_teardown(struct efct_hw *hw);
+void efct_hw_teardown(struct efct_hw *hw);
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset);
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg);
+
+#endif /* __EFCT_H__ */
diff --git a/drivers/scsi/elx/efct/efct_hw_queues.c b/drivers/scsi/elx/efct/efct_hw_queues.c
new file mode 100644
index 000000000000..3a1d1a5864a3
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw_queues.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+int
+efct_hw_init_queues(struct efct_hw *hw)
+{
+ struct hw_eq *eq = NULL;
+ struct hw_cq *cq = NULL;
+ struct hw_wq *wq = NULL;
+ struct hw_mq *mq = NULL;
+
+ struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
+ u32 i = 0, j;
+
+ hw->eq_count = 0;
+ hw->cq_count = 0;
+ hw->mq_count = 0;
+ hw->wq_count = 0;
+ hw->rq_count = 0;
+ hw->hw_rq_count = 0;
+ INIT_LIST_HEAD(&hw->eq_list);
+
+ for (i = 0; i < hw->config.n_eq; i++) {
+ /* Create EQ */
+ eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
+ if (!eq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ eqs[i] = eq;
+
+ /* Create one MQ */
+ if (!i) {
+ cq = efct_hw_new_cq(eq,
+ hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
+ if (!mq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create WQ */
+ cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
+ if (!wq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create CQ set */
+ if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ /* Create RQ set */
+ if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ for (j = 0; j < i ; j++) {
+ rqs[j]->filter_mask = 0;
+ rqs[j]->is_mrq = true;
+ rqs[j]->base_mrq_id = rqs[0]->hdr->id;
+ }
+
+ hw->hw_mrq_count = i;
+
+ return 0;
+}
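+
+/*
+ * Illustrative queue topology created by efct_hw_init_queues() (names are
+ * descriptive only, not structures in this driver):
+ *
+ *   EQ[0] --> CQ --> MQ                 (mailbox completions, first EQ only)
+ *   EQ[i] --> CQ --> WQ[i]              (one work queue per EQ)
+ *   EQ[i] --> CQ-set[i] --> RQ-pair[i]  (header + data receive queues)
+ *
+ * All RQ pairs are marked MRQ, with rqs[0]'s header queue id as the base.
+ */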
+
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 cpu = 0, i;
+
+ /* Init cpu_map array */
+ hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
+ GFP_KERNEL);
+ if (!hw->wq_cpu_array)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->config.n_eq; i++) {
+ const struct cpumask *maskp;
+
+ /* Get a CPU mask for all CPUs affinitized to this vector */
+ maskp = pci_irq_get_affinity(efct->pci, i);
+ if (!maskp) {
+ efc_log_debug(efct, "maskp null for vector:%d\n", i);
+ continue;
+ }
+
+ /* Loop through all CPUs associated with vector idx */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
+ hw->wq_cpu_array[cpu] = hw->hw_wq[i];
+ }
+ }
+
+ return 0;
+}
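+
+/*
+ * Example (hypothetical affinity layout): with n_eq = 2 on a 4-CPU system,
+ * where pci_irq_get_affinity() reports CPUs {0,1} for vector 0 and {2,3}
+ * for vector 1, efct_hw_map_wq_cpu() yields:
+ *
+ *   wq_cpu_array[0] = wq_cpu_array[1] = hw_wq[0]
+ *   wq_cpu_array[2] = wq_cpu_array[3] = hw_wq[1]
+ *
+ * CPUs absent from every reported mask keep a NULL entry (kcalloc zeroes
+ * the array).
+ */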
+
+struct hw_eq *
+efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
+{
+ struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+
+ if (!eq)
+ return NULL;
+
+ eq->type = SLI4_QTYPE_EQ;
+ eq->hw = hw;
+ eq->entry_count = entry_count;
+ eq->instance = hw->eq_count++;
+ eq->queue = &hw->eq[eq->instance];
+ INIT_LIST_HEAD(&eq->cq_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
+ NULL)) {
+ efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
+ kfree(eq);
+ return NULL;
+ }
+
+ sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
+ hw->hw_eq[eq->instance] = eq;
+ INIT_LIST_HEAD(&eq->list_entry);
+ list_add_tail(&eq->list_entry, &hw->eq_list);
+ efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
+ eq->queue->id, eq->entry_count);
+ return eq;
+}
+
+struct hw_cq *
+efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
+{
+ struct efct_hw *hw = eq->hw;
+ struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+
+ if (!cq)
+ return NULL;
+
+ cq->eq = eq;
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = eq->hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+
+ INIT_LIST_HEAD(&cq->q_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
+ cq->entry_count, eq->queue)) {
+ efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
+ eq->instance, eq->entry_count);
+ kfree(cq);
+ return NULL;
+ }
+
+ hw->hw_cq[cq->instance] = cq;
+ INIT_LIST_HEAD(&cq->list_entry);
+ list_add_tail(&cq->list_entry, &eq->cq_list);
+ efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
+ cq->queue->id, cq->entry_count);
+ return cq;
+}
+
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count)
+{
+ u32 i;
+ struct efct_hw *hw = eqs[0]->hw;
+ struct sli4 *sli4 = &hw->sli;
+ struct hw_cq *cq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
+ struct sli4_queue *assoc_qs[SLI4_MAX_CQ_SET_COUNT];
+
+ /* Initialise CQS pointers to NULL */
+ for (i = 0; i < num_cqs; i++)
+ cqs[i] = NULL;
+
+ for (i = 0; i < num_cqs; i++) {
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ goto error;
+
+ cqs[i] = cq;
+ cq->eq = eqs[i];
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+ qs[i] = cq->queue;
+ assoc_qs[i] = eqs[i]->queue;
+ INIT_LIST_HEAD(&cq->q_list);
+ }
+
+ if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assoc_qs)) {
+ efc_log_err(hw->os, "Failed to create CQ Set.\n");
+ goto error;
+ }
+
+ for (i = 0; i < num_cqs; i++) {
+ hw->hw_cq[cqs[i]->instance] = cqs[i];
+ INIT_LIST_HEAD(&cqs[i]->list_entry);
+ list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++) {
+ kfree(cqs[i]);
+ cqs[i] = NULL;
+ }
+ return -EIO;
+}
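+
+/*
+ * Note: efct_hw_new_cq_set() returns u32, so the -EIO error path yields a
+ * large nonzero value; callers such as efct_hw_init_queues() only test for
+ * nonzero, treating 0 as success.
+ */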
+
+struct hw_mq *
+efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ mq->cq = cq;
+ mq->type = SLI4_QTYPE_MQ;
+ mq->instance = cq->eq->hw->mq_count++;
+ mq->entry_count = entry_count;
+ mq->entry_size = EFCT_HW_MQ_DEPTH;
+ mq->queue = &hw->mq[mq->instance];
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
+ cq->queue)) {
+ efc_log_err(hw->os, "MQ allocation failure\n");
+ kfree(mq);
+ return NULL;
+ }
+
+ hw->hw_mq[mq->instance] = mq;
+ INIT_LIST_HEAD(&mq->list_entry);
+ list_add_tail(&mq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
+ mq->queue->id, mq->entry_count);
+ return mq;
+}
+
+struct hw_wq *
+efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+
+ if (!wq)
+ return NULL;
+
+ wq->hw = cq->eq->hw;
+ wq->cq = cq;
+ wq->type = SLI4_QTYPE_WQ;
+ wq->instance = cq->eq->hw->wq_count++;
+ wq->entry_count = entry_count;
+ wq->queue = &hw->wq[wq->instance];
+ wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
+ wq->wqec_count = wq->wqec_set_count;
+ wq->free_count = wq->entry_count - 1;
+ INIT_LIST_HEAD(&wq->pending_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
+ wq->entry_count, cq->queue)) {
+ efc_log_err(hw->os, "WQ allocation failure\n");
+ kfree(wq);
+ return NULL;
+ }
+
+ hw->hw_wq[wq->instance] = wq;
+ INIT_LIST_HEAD(&wq->list_entry);
+ list_add_tail(&wq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
+ wq->instance, wq->queue->id, wq->entry_count, wq->class);
+ return wq;
+}
+
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count)
+{
+ struct efct_hw *hw = cqs[0]->eq->hw;
+ struct hw_rq *rq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
+ u32 i, q_count, size;
+
+ /* Initialise RQS pointers */
+ for (i = 0; i < num_rq_pairs; i++)
+ rqs[i] = NULL;
+
+ /*
+ * Allocate an RQ object set, where each element in the set
+ * encapsulates 2 SLI queues (an RQ pair)
+ */
+ for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ goto error;
+
+ rqs[i] = rq;
+ rq->instance = hw->hw_rq_count++;
+ rq->cq = cqs[i];
+ rq->type = SLI4_QTYPE_RQ;
+ rq->entry_count = entry_count;
+
+ /* Header RQ */
+ rq->hdr = &hw->rq[hw->rq_count];
+ rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count] = rq->hdr;
+
+ /* Data RQ */
+ rq->data = &hw->rq[hw->rq_count];
+ rq->data_entry_size = hw->config.rq_default_buffer_size;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count + 1] = rq->data;
+
+ rq->rq_tracker = NULL;
+ }
+
+ if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
+ cqs[0]->queue->id,
+ rqs[0]->entry_count,
+ rqs[0]->hdr_entry_size,
+ rqs[0]->data_entry_size)) {
+ efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
+ cqs[0]->queue->id);
+ goto error;
+ }
+
+ for (i = 0; i < num_rq_pairs; i++) {
+ hw->hw_rq[rqs[i]->instance] = rqs[i];
+ INIT_LIST_HEAD(&rqs[i]->list_entry);
+ list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
+ size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
+ rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
+ if (!rqs[i]->rq_tracker)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rq_pairs; i++) {
+ if (rqs[i]) {
+ kfree(rqs[i]->rq_tracker);
+ kfree(rqs[i]);
+ }
+ }
+
+ return -EIO;
+}
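+
+/*
+ * The qs[] array above interleaves each pair's SLI queues for
+ * sli_fc_rq_set_alloc(): qs[2i] is pair i's header queue and qs[2i + 1]
+ * its data queue, so num_rq_pairs pairs consume 2 * num_rq_pairs slots.
+ */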
+
+void
+efct_hw_del_eq(struct hw_eq *eq)
+{
+ struct hw_cq *cq;
+ struct hw_cq *cq_next;
+
+ if (!eq)
+ return;
+
+ list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
+ efct_hw_del_cq(cq);
+ list_del(&eq->list_entry);
+ eq->hw->hw_eq[eq->instance] = NULL;
+ kfree(eq);
+}
+
+void
+efct_hw_del_cq(struct hw_cq *cq)
+{
+ struct hw_q *q;
+ struct hw_q *q_next;
+
+ if (!cq)
+ return;
+
+ list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
+ switch (q->type) {
+ case SLI4_QTYPE_MQ:
+ efct_hw_del_mq((struct hw_mq *)q);
+ break;
+ case SLI4_QTYPE_WQ:
+ efct_hw_del_wq((struct hw_wq *)q);
+ break;
+ case SLI4_QTYPE_RQ:
+ efct_hw_del_rq((struct hw_rq *)q);
+ break;
+ default:
+ break;
+ }
+ }
+ list_del(&cq->list_entry);
+ cq->eq->hw->hw_cq[cq->instance] = NULL;
+ kfree(cq);
+}
+
+void
+efct_hw_del_mq(struct hw_mq *mq)
+{
+ if (!mq)
+ return;
+
+ list_del(&mq->list_entry);
+ mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
+ kfree(mq);
+}
+
+void
+efct_hw_del_wq(struct hw_wq *wq)
+{
+ if (!wq)
+ return;
+
+ list_del(&wq->list_entry);
+ wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
+ kfree(wq);
+}
+
+void
+efct_hw_del_rq(struct hw_rq *rq)
+{
+ struct efct_hw *hw = NULL;
+
+ if (!rq)
+ return;
+ /* Free RQ tracker */
+ kfree(rq->rq_tracker);
+ rq->rq_tracker = NULL;
+ list_del(&rq->list_entry);
+ hw = rq->cq->eq->hw;
+ hw->hw_rq[rq->instance] = NULL;
+ kfree(rq);
+}
+
+void
+efct_hw_queue_teardown(struct efct_hw *hw)
+{
+ struct hw_eq *eq;
+ struct hw_eq *eq_next;
+
+ if (!hw->eq_list.next)
+ return;
+
+ list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
+ efct_hw_del_eq(eq);
+}
+
+static inline int
+efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
+{
+ return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
+}
+
+static struct efc_hw_sequence *
+efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[rqindex];
+ struct efc_hw_sequence *seq = NULL;
+ struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ unsigned long flags = 0;
+
+ if (bufindex >= rq_hdr->length) {
+ efc_log_err(hw->os,
+ "RQidx %d bufidx %d exceed ring len %d for id %d\n",
+ rqindex, bufindex, rq_hdr->length, rq_hdr->id);
+ return NULL;
+ }
+
+ /* rq_hdr lock also covers rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ seq = rq->rq_tracker[bufindex];
+ rq->rq_tracker[bufindex] = NULL;
+
+ if (!seq) {
+ efc_log_err(hw->os,
+ "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
+ rqindex, bufindex, rq_hdr->index);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return seq;
+}
+
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe)
+{
+ u16 rq_id;
+ u32 index;
+ int rqindex;
+ int rq_status;
+ u32 h_len;
+ u32 p_len;
+ struct efc_hw_sequence *seq;
+ struct hw_rq *rq;
+
+ rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
+ &rq_id, &index);
+ if (rq_status != 0) {
+ switch (rq_status) {
+ case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
+ case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
+ /* just get RQ buffer then return to chip */
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os,
+ "status=%#x: lookup fail id=%#x\n",
+ rq_status, rq_id);
+ break;
+ }
+
+ /* get RQ buffer */
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+
+ /* return to chip */
+ if (efct_hw_rqpair_sequence_free(hw, seq)) {
+ efc_log_debug(hw->os,
+ "status=%#x,fail rtrn buf to RQ\n",
+ rq_status);
+ break;
+ }
+ break;
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
+ /*
+ * since RQ buffers were not consumed, cannot return
+ * them to chip
+ */
+ efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
+ rq_status);
+ fallthrough;
+ default:
+ break;
+ }
+ return -EIO;
+ }
+
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
+ rq_id);
+ return -EIO;
+ }
+
+ rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ rq->use_count++;
+
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+ if (WARN_ON(!seq))
+ return -EIO;
+
+ seq->hw = hw;
+
+ sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
+ seq->header->dma.len = h_len;
+ seq->payload->dma.len = p_len;
+ seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
+ seq->hw_priv = cq->eq;
+
+ efct_unsolicited_cb(hw->os, seq);
+
+ return 0;
+}
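+
+/*
+ * Summary of the RQ-pair completion path above: the RQE is decoded into an
+ * rq_id/index pair, the id is hashed back to an RQ, the posted sequence is
+ * pulled from rq_tracker[], header/payload lengths and the FCFI are filled
+ * in from the CQE, and the sequence is handed to efct_unsolicited_cb().
+ * Error statuses either recycle the buffer to the chip or, when the chip
+ * never consumed a buffer, simply drop the event.
+ */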
+
+static int
+efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
+ struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
+ u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
+ struct hw_rq *rq = hw->hw_rq[hw_rq_index];
+ u32 phys_hdr[2];
+ u32 phys_payload[2];
+ int qindex_hdr;
+ int qindex_payload;
+ unsigned long flags = 0;
+
+ /* Update the RQ verification lookup tables */
+ phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
+ phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
+ phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
+ phys_payload[1] = lower_32_bits(seq->payload->dma.phys);
+
+ /* rq_hdr lock also covers payload / header->rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ /*
+ * Note: The header must be posted last for buffer pair mode because
+ * posting on the header queue posts the payload queue as well.
+ * We do not ring the payload queue independently in RQ pair mode.
+ */
+ qindex_payload = sli_rq_write(&hw->sli, rq_payload,
+ (void *)phys_payload);
+ qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
+ if (qindex_hdr < 0 ||
+ qindex_payload < 0) {
+ efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return -EIO;
+ }
+
+ /* ensure the indexes are the same */
+ WARN_ON(qindex_hdr != qindex_payload);
+
+ /* Update the lookup table */
+ if (!rq->rq_tracker[qindex_hdr]) {
+ rq->rq_tracker[qindex_hdr] = seq;
+ } else {
+ efc_log_debug(hw->os,
+ "expected rq_tracker[%d][%d] buffer to be NULL\n",
+ hw_rq_index, qindex_hdr);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return 0;
+}
+
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ int rc = 0;
+
+ /*
+ * Post the data buffer first because, in RQ pair mode, ringing the
+ * doorbell of the header ring posts the data buffer as well.
+ */
+ if (efct_hw_rqpair_put(hw, seq)) {
+ efc_log_err(hw->os, "error writing buffers\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_rqpair_sequence_free(&efct->hw, seq);
+}
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
new file mode 100644
index 000000000000..c612f0a48839
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+
+struct efct_io_pool {
+ struct efct *efct;
+ spinlock_t lock; /* IO pool lock */
+ u32 io_num_ios; /* Total IOs allocated */
+ struct efct_io *ios[EFCT_NUM_SCSI_IOS];
+ struct list_head freelist;
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl)
+{
+ u32 i = 0;
+ struct efct_io_pool *io_pool;
+ struct efct_io *io;
+
+ /* Allocate the IO pool */
+ io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
+ if (!io_pool)
+ return NULL;
+
+ io_pool->efct = efct;
+ INIT_LIST_HEAD(&io_pool->freelist);
+ /* initialize IO pool lock */
+ spin_lock_init(&io_pool->lock);
+
+ for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io)
+ break;
+
+ io_pool->io_num_ios++;
+ io_pool->ios[i] = io;
+ io->tag = i;
+ io->instance_index = i;
+
+ /* Allocate a response buffer */
+ io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
+ io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
+ io->rspbuf.size,
+ &io->rspbuf.phys, GFP_KERNEL);
+ if (!io->rspbuf.virt) {
+ efc_log_err(efct, "dma_alloc rspbuf failed\n");
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ /* Allocate SGL */
+ io->sgl = kzalloc(sizeof(*io->sgl) * num_sgl, GFP_KERNEL);
+ if (!io->sgl) {
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ io->sgl_allocated = num_sgl;
+ io->sgl_count = 0;
+
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &io_pool->freelist);
+ }
+
+ return io_pool;
+}
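+
+/*
+ * Minimal usage sketch (assumed caller context, not a call site copied
+ * from this driver):
+ *
+ *	struct efct_io_pool *pool;
+ *	struct efct_io *io;
+ *
+ *	pool = efct_io_pool_create(efct, num_sgl);
+ *	if (!pool)
+ *		return -ENOMEM;
+ *	io = efct_io_pool_io_alloc(pool);
+ *	...
+ *	efct_io_pool_io_free(pool, io);
+ */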
+
+int
+efct_io_pool_free(struct efct_io_pool *io_pool)
+{
+ struct efct *efct;
+ u32 i;
+ struct efct_io *io;
+
+ if (io_pool) {
+ efct = io_pool->efct;
+
+ for (i = 0; i < io_pool->io_num_ios; i++) {
+ io = io_pool->ios[i];
+ if (!io)
+ continue;
+
+ kfree(io->sgl);
+ dma_free_coherent(&efct->pci->dev,
+ io->rspbuf.size, io->rspbuf.virt,
+ io->rspbuf.phys);
+ memset(&io->rspbuf, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(io_pool);
+ efct->xport->io_pool = NULL;
+ }
+
+ return 0;
+}
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
+{
+ struct efct_io *io = NULL;
+ struct efct *efct;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+
+ if (!list_empty(&io_pool->freelist)) {
+ io = list_first_entry(&io_pool->freelist, struct efct_io,
+ list_entry);
+ list_del_init(&io->list_entry);
+ }
+
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (!io)
+ return NULL;
+
+ io->io_type = EFCT_IO_TYPE_MAX;
+ io->hio_type = EFCT_HW_IO_MAX;
+ io->hio = NULL;
+ io->transferred = 0;
+ io->efct = efct;
+ io->timeout = 0;
+ io->sgl_count = 0;
+ io->tgt_task_tag = 0;
+ io->init_task_tag = 0;
+ io->hw_tag = 0;
+ io->display_name = "pending";
+ io->seq_init = 0;
+ io->io_free = 0;
+ io->release = NULL;
+ atomic_add_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_alloc);
+ return io;
+}
+
+/* Free an object used to track an IO */
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
+{
+ struct efct *efct;
+ struct efct_hw_io *hio = NULL;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+ hio = io->hio;
+ io->hio = NULL;
+ io->io_free = 1;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add(&io->list_entry, &io_pool->freelist);
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (hio)
+ efct_hw_io_free(&efct->hw, hio);
+
+ atomic_sub_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_free);
+}
+
+/* Find an I/O given its node and ox_id */
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id)
+{
+ struct efct_io *io = NULL;
+ unsigned long flags = 0;
+ bool found = false;
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_for_each_entry(io, &node->active_ios, list_entry) {
+ if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
+ (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
+ if (kref_get_unless_zero(&io->ref))
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return found ? io : NULL;
+}
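+
+/*
+ * efct_io_find_tgt_io() returns the matching IO with a reference taken via
+ * kref_get_unless_zero(); a non-NULL result must be balanced with a
+ * kref_put() by the caller once it is done with the IO.
+ */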
diff --git a/drivers/scsi/elx/efct/efct_io.h b/drivers/scsi/elx/efct/efct_io.h
new file mode 100644
index 000000000000..bb0f51811a7c
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_IO_H__)
+#define __EFCT_IO_H__
+
+#include "efct_lio.h"
+
+#define EFCT_LOG_ENABLE_IO_ERRORS(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 6)) != 0) : 0)
+
+#define io_error_log(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_IO_ERRORS(io->efct)) \
+ efc_log_warn(io->efct, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define SCSI_CMD_BUF_LENGTH 48
+#define SCSI_RSP_BUF_LENGTH (FCP_RESP_WITH_EXT + SCSI_SENSE_BUFFERSIZE)
+#define EFCT_NUM_SCSI_IOS 8192
+
+enum efct_io_type {
+ EFCT_IO_TYPE_IO = 0,
+ EFCT_IO_TYPE_ELS,
+ EFCT_IO_TYPE_CT,
+ EFCT_IO_TYPE_CT_RESP,
+ EFCT_IO_TYPE_BLS_RESP,
+ EFCT_IO_TYPE_ABORT,
+
+ EFCT_IO_TYPE_MAX,
+};
+
+enum efct_els_state {
+ EFCT_ELS_REQUEST = 0,
+ EFCT_ELS_REQUEST_DELAYED,
+ EFCT_ELS_REQUEST_DELAY_ABORT,
+ EFCT_ELS_REQ_ABORT,
+ EFCT_ELS_REQ_ABORTED,
+ EFCT_ELS_ABORT_IO_COMPL,
+};
+
+/**
+ * struct efct_io - SCSI target IO object
+ * @efct: pointer back to efct
+ * @instance_index: unique instance index value
+ * @display_name: IO display name
+ * @node: pointer to node
+ * @list_entry: io list entry
+ * @io_pending_link: io pending list entry
+ * @ref: reference counter
+ * @release: release callback function
+ * @init_task_tag: initiator task tag (OX_ID) for back-end and SCSI logging
+ * @tgt_task_tag: target task tag (RX_ID) for back-end and SCSI logging
+ * @hw_tag: HW layer unique IO id
+ * @tag: unique IO identifier
+ * @sgl: SGL
+ * @sgl_allocated: Number of allocated SGEs
+ * @sgl_count: Number of SGEs in this SGL
+ * @tgt_io: backend target private IO data
+ * @exp_xfer_len: expected data transfer length, based on FC header
+ * @hw_priv: Declarations private to HW/SLI
+ * @io_type: indicates what this struct efct_io structure is used for
+ * @hio: hw io object
+ * @transferred: Number of bytes transferred
+ * @auto_resp: set if the automatic good response (auto_trsp) was set
+ * @low_latency: set if low latency request
+ * @wq_steering: selected WQ steering request
+ * @wq_class: selected WQ class if steering is class
+ * @xfer_req: transfer size for current request
+ * @scsi_tgt_cb: target callback function
+ * @scsi_tgt_cb_arg: target callback function argument
+ * @abort_cb: abort callback function
+ * @abort_cb_arg: abort callback function argument
+ * @bls_cb: BLS callback function
+ * @bls_cb_arg: BLS callback function argument
+ * @tmf_cmd: TMF command being processed
+ * @abort_rx_id: rx_id from the ABTS that initiated the command abort
+ * @cmd_tgt: True if this is a Target command
+ * @send_abts: when aborting, indicates ABTS is to be sent
+ * @cmd_ini: True if this is an Initiator command
+ * @seq_init: True if local node has sequence initiative
+ * @iparam: iparams for hw io send call
+ * @hio_type: HW IO type
+ * @wire_len: wire length
+ * @hw_cb: saved HW callback
+ * @io_to_abort: for abort handling, pointer to IO to abort
+ * @rspbuf: SCSI Response buffer
+ * @timeout: Timeout value in seconds for this IO
+ * @cs_ctl: CS_CTL priority for this IO
+ * @io_free: Is io object in freelist
+ * @app_id: application id
+ */
+struct efct_io {
+ struct efct *efct;
+ u32 instance_index;
+ const char *display_name;
+ struct efct_node *node;
+
+ struct list_head list_entry;
+ struct list_head io_pending_link;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ u32 init_task_tag;
+ u32 tgt_task_tag;
+ u32 hw_tag;
+ u32 tag;
+ struct efct_scsi_sgl *sgl;
+ u32 sgl_allocated;
+ u32 sgl_count;
+ struct efct_scsi_tgt_io tgt_io;
+ u32 exp_xfer_len;
+
+ void *hw_priv;
+
+ enum efct_io_type io_type;
+ struct efct_hw_io *hio;
+ size_t transferred;
+
+ bool auto_resp;
+ bool low_latency;
+ u8 wq_steering;
+ u8 wq_class;
+ u64 xfer_req;
+ efct_scsi_io_cb_t scsi_tgt_cb;
+ void *scsi_tgt_cb_arg;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+ efct_scsi_io_cb_t bls_cb;
+ void *bls_cb_arg;
+ enum efct_scsi_tmf_cmd tmf_cmd;
+ u16 abort_rx_id;
+
+ bool cmd_tgt;
+ bool send_abts;
+ bool cmd_ini;
+ bool seq_init;
+ union efct_hw_io_param_u iparam;
+ enum efct_hw_io_type hio_type;
+ u64 wire_len;
+ void *hw_cb;
+
+ struct efct_io *io_to_abort;
+
+ struct efc_dma rspbuf;
+ u32 timeout;
+ u8 cs_ctl;
+ u8 io_free;
+ u32 app_id;
+};
+
+struct efct_io_cb_arg {
+ int status;
+ int ext_status;
+ void *app;
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl);
+int
+efct_io_pool_free(struct efct_io_pool *io_pool);
+u32
+efct_io_pool_allocated(struct efct_io_pool *io_pool);
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool);
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io);
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id);
+#endif /* __EFCT_IO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
new file mode 100644
index 000000000000..be4b5c1ee32d
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -0,0 +1,1695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "efct_driver.h"
+#include "efct_lio.h"
+
+/*
+ * lio_wq is used to call into the LIO backend during creation or deletion of
+ * sessions. This serializes session management, as we create a
+ * single-threaded work queue.
+ */
+static struct workqueue_struct *lio_wq;
+
+static int
+efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
+{
+ u8 a[8];
+
+ put_unaligned_be64(wwn, a);
+ return snprintf(str, len, "%s%8phC", pre, a);
+}
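+
+/*
+ * Example: efct_format_wwn(buf, sizeof(buf), "naa.", 0x10000090fa942779ULL)
+ * writes "naa.10:00:00:90:fa:94:27:79" (%8phC prints the 8 bytes as
+ * colon-separated hex).
+ */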
+
+static int
+efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
+{
+ int num;
+ u8 b[8];
+
+ if (npiv) {
+ num = sscanf(name,
+ "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ } else {
+ num = sscanf(name,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ }
+
+ if (num != 8)
+ return -EINVAL;
+
+ *wwp = get_unaligned_be64(b);
+ return 0;
+}
+
+static int
+efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
+{
+ unsigned int cnt = size;
+ int rc;
+
+ *wwpn = *wwnn = 0;
+ if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
+ if (rc)
+ return rc;
+
+ rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
+ if (rc)
+ return rc;
+
+ return 0;
+}
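+
+/*
+ * Accepted WWN string formats, as derived from the parsers above
+ * (illustrative values):
+ *   non-NPIV: "10:00:00:90:fa:94:27:79"          (colon separated)
+ *   NPIV:     "10000090fa942779:20000090fa942779" (wwpn:wwnn, 16 hex digits
+ *             each, single ':' at offset 16; a trailing newline is allowed)
+ */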
+
+static ssize_t
+efct_lio_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (!tpg->nport || !tpg->nport->efct) {
+ pr_err("%s: Unable to find EFCT device\n", __func__);
+ return -EINVAL;
+ }
+
+ efct = tpg->nport->efct;
+ efc = efct->efcport;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (op == 1) {
+ int ret;
+
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
+ if (ret) {
+ efct->tgt_efct.lio_nport = NULL;
+ efc_log_debug(efct, "cannot bring port online\n");
+ return ret;
+ }
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain && efc->domain->nport)
+ efct_scsi_tgt_del_nport(efc, efc->domain->nport);
+
+ tpg->enabled = false;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
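+
+/*
+ * This attribute is driven through configfs; a typical invocation from user
+ * space would look like (illustrative path; the fabric directory name
+ * depends on how the fabric module registers):
+ *
+ *   echo 1 > /sys/kernel/config/target/efct/naa.<wwpn>/tpgt_1/enable
+ */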
+
+static ssize_t
+efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct_lio_vport *lio_vport = tpg->vport;
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (!lio_vport) {
+ pr_err("Unable to find vport\n");
+ return -EINVAL;
+ }
+
+ efct = lio_vport->efct;
+ efc = efct->efcport;
+
+ if (op == 1) {
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain) {
+ int ret;
+
+ ret = efc_nport_vport_new(efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn,
+ U32_MAX, false, true,
+ NULL, NULL);
+ if (ret != 0) {
+ efc_log_err(efct, "Failed to create Vport\n");
+ return ret;
+ }
+ return count;
+ }
+
+ if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
+ lio_vport->npiv_wwpn, U32_MAX,
+ false, true, NULL, NULL)))
+ return -ENOMEM;
+
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ tpg->enabled = false;
+ /* if a domain exists, tear down the vport that was
+ * created when this portal group was enabled
+ */
+ if (efc->domain) {
+ efc_nport_vport_del(efct->efcport, efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn);
+ return count;
+ }
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->nport->wwpn_str;
+}
+
+static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->vport->wwpn_str;
+}
+
+static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
+ return target_put_sess_cmd(se_cmd);
+}
+
+static int
+efct_lio_abort_tgt_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
+ return 0;
+}
+
+static void
+efct_lio_aborted_task(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);
+
+ if (ocp->rsp_sent)
+ return;
+
+ /* command has been aborted, cleanup here */
+ ocp->aborting = true;
+ ocp->err = EFCT_SCSI_STATUS_ABORTED;
+ /* terminate the exchange */
+ efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
+}
+
+static void efct_lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct *efct = io->efct;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
+ efct_scsi_io_complete(io);
+ atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
+}
+
+static void efct_lio_close_session(struct se_session *se_sess)
+{
+ struct efc_node *node = se_sess->fabric_sess_ptr;
+
+ pr_debug("se_sess=%p node=%p", se_sess, node);
+
+ if (!node) {
+ pr_debug("node is NULL");
+ return;
+ }
+
+ efc_node_post_shutdown(node, NULL);
+}
+
+static u32 efct_lio_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static int efct_lio_get_cmd_state(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ return io->tgt_io.state;
+}
+
+static int
+efct_lio_sg_map(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
+ cmd->t_data_nents, cmd->data_direction);
+ if (ocp->seg_map_cnt == 0)
+ return -EFAULT;
+ return 0;
+}
+
+static void
+efct_lio_sg_unmap(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
+ return;
+
+ dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
+ ocp->seg_map_cnt, cmd->data_direction);
+ ocp->seg_map_cnt = 0;
+}
+
+static int
+efct_lio_status_done(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ if (ocp->seg_map_cnt)
+ efct_lio_sg_unmap(io);
+
+ efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
+ scsi_status, ocp->err, flags, ocp->ddir);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg);
+
+static int
+efct_lio_write_pending(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg;
+ u32 flags = 0, cnt, curcnt;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
+ efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
+ cmd->transport_state, cmd->se_cmd_flags);
+
+ if (ocp->seg_cnt == 0) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
+ return -EFAULT;
+ }
+ }
+ curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);
+ /* find current sg */
+ for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
+ sg = sg_next(sg))
+ ;/* do nothing */
+
+ for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ sgl[cnt].len = sg_dma_len(sg);
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ }
+
+ if (ocp->cur_seg == ocp->seg_cnt)
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+
+ return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
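+
+/*
+ * Data is moved in passes of at most sgl_allocated SGEs: when fewer
+ * segments fit than remain, efct_lio_datamove_done() calls back into
+ * efct_lio_write_pending() or efct_lio_queue_data_in() to continue from
+ * ocp->cur_seg until all ocp->seg_cnt segments have been posted.
+ */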
+
+static int
+efct_lio_queue_data_in(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg = NULL;
+ uint flags = 0, cnt = 0, curcnt = 0;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);
+
+ if (ocp->seg_cnt == 0) {
+ if (cmd->data_length) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io,
+ "efct_lio_sg_map failed\n");
+ return -EAGAIN;
+ }
+ } else {
+ /* If command length is 0, send the response status */
+ struct efct_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ efct_lio_io_printf(io,
+ "cmd : %p length 0, send status\n",
+ cmd);
+ return efct_scsi_send_resp(io, 0, &rsp,
+ efct_lio_status_done, NULL);
+ }
+ }
+ curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);
+
+ while (cnt < curcnt) {
+ sg = &cmd->t_data_sg[ocp->cur_seg];
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
+ sgl[cnt].len = cmd->data_length - ocp->transferred_len;
+ else
+ sgl[cnt].len = sg_dma_len(sg);
+
+ ocp->transferred_len += sgl[cnt].len;
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ cnt++;
+ if (ocp->transferred_len == cmd->data_length)
+ break;
+ }
+
+ if (ocp->transferred_len == cmd->data_length) {
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+ ocp->seg_cnt = ocp->cur_seg;
+ }
+
+ /* If there is residual, disable Auto Good Response */
+ if (cmd->residual_count)
+ flags |= EFCT_SCSI_NO_AUTO_RESPONSE;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);
+
+ return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
+static void
+efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &io->tgt_io.cmd;
+ int rc;
+
+ if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
+ ocp->rsp_sent = true;
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return;
+ }
+
+ /* send check condition if an error occurred */
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc != 0) {
+ efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ } else {
+ ocp->rsp_sent = true;
+ }
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
+ if (ocp->seg_map_cnt) {
+ if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
+ ocp->cur_seg < ocp->seg_cnt) {
+ int rc;
+
+ efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
+ ocp->cur_seg);
+ if (ocp->ddir == DMA_TO_DEVICE)
+ rc = efct_lio_write_pending(&ocp->cmd);
+ else
+ rc = efct_lio_queue_data_in(&ocp->cmd);
+ if (!rc)
+ return 0;
+
+ ocp->err = EFCT_SCSI_STATUS_ERROR;
+ efct_lio_io_printf(io, "could not continue command\n");
+ }
+ efct_lio_sg_unmap(io);
+ }
+
+ if (io->tgt_io.aborting) {
+ efct_lio_io_printf(io, "IO done aborted\n");
+ return 0;
+ }
+
+ if (ocp->ddir == DMA_TO_DEVICE) {
+ efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
+ io->tgt_io.cmd.transport_state);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ transport_generic_request_failure(&io->tgt_io.cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
+ } else {
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_EXECUTE_CMD);
+ target_execute_cmd(&io->tgt_io.cmd);
+ }
+ } else {
+ efct_lio_send_resp(io, scsi_status, flags);
+ }
+ return 0;
+}
+
+static int
+efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
+ &io->tgt_io.cmd, scsi_status, flags);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_null_tmf_done(struct efct_io *tmfio,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
+ &tmfio->tgt_io.cmd, scsi_status, flags);
+
+ /* free struct efct_io only, no active se_cmd */
+ efct_scsi_io_complete(tmfio);
+ return 0;
+}
+
+static int
+efct_lio_queue_status(struct se_cmd *cmd)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ int rc = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
+ efct_lio_io_printf(io,
+ "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
+ cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
+ cmd->scsi_sense_length);
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun; mark a negative value
+ * for overrun so the HW can recognize it
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc == 0)
+ ocp->rsp_sent = true;
+ return rc;
+}
+
+static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
+ struct se_tmr_req *se_tmr = cmd->se_tmr_req;
+ u8 rspcode;
+
+ efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
+ cmd, se_tmr->function, se_tmr->response);
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
+ break;
+ }
+ efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
+}
+
+static struct efct *efct_find_wwpn(u64 wwpn)
+{
+ struct efct *efct;
+
+ /* Search for the HBA that has this WWPN */
+ list_for_each_entry(efct, &efct_devices, list_entry) {
+ if (wwpn == efct_get_wwpn(&efct->hw))
+ return efct;
+ }
+
+ return NULL;
+}
+
+static struct se_wwn *
+efct_lio_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_nport *lio_nport;
+ struct efct *efct;
+ int ret;
+ u64 wwpn;
+
+ ret = efct_lio_parse_wwn(name, &wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
+ if (!lio_nport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_nport->efct = efct;
+ lio_nport->wwpn = wwpn;
+ efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
+ "naa.", wwpn);
+ efct->tgt_efct.lio_nport = lio_nport;
+
+ return &lio_nport->nport_wwn;
+}
+
+static struct se_wwn *
+efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_vport *lio_vport;
+ struct efct *efct;
+ int ret;
+ u64 p_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, *pbuf, tmp[128];
+ struct efct_lio_vport_list_t *vport_list;
+ struct fc_vport *new_fc_vport;
+ struct fc_vport_identifiers vport_id;
+ unsigned long flags = 0;
+
+ snprintf(tmp, sizeof(tmp), "%s", name);
+ pbuf = &tmp[0];
+
+ p = strsep(&pbuf, "@");
+
+ if (!p || !pbuf) {
+ pr_err("Unable to find separator operator(@)\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
+ &npiv_wwnn);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(p_wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
+ if (!lio_vport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_vport->efct = efct;
+ lio_vport->wwpn = p_wwpn;
+ lio_vport->npiv_wwpn = npiv_wwpn;
+ lio_vport->npiv_wwnn = npiv_wwnn;
+
+ efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
+ "naa.", npiv_wwpn);
+
+ vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
+ if (!vport_list) {
+ kfree(lio_vport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vport_list->lio_vport = lio_vport;
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
+ if (!new_fc_vport) {
+ efc_log_err(efct, "fc_vport_create failed\n");
+ kfree(lio_vport);
+ kfree(vport_list);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lio_vport->fc_vport = new_fc_vport;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ INIT_LIST_HEAD(&vport_list->list_entry);
+ list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+
+ return &lio_vport->vport_wwn;
+}
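+
+/*
+ * The NPIV configfs name combines the physical and virtual WWNs as
+ * "<physical wwpn>@<npiv wwpn>:<npiv wwnn>", split on '@' above, e.g.
+ * (illustrative values):
+ *
+ *   "10:00:00:90:fa:94:27:79@20000090fa942779:20000090fa94277a"
+ */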
+
+static void
+efct_lio_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct *efct = lio_nport->efct;
+
+ /* only physical nport should exist, free lio_nport allocated
+ * in efct_lio_make_nport.
+ */
+ kfree(efct->tgt_efct.lio_nport);
+ efct->tgt_efct.lio_nport = NULL;
+}
+
+static void
+efct_lio_npiv_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_vport_list_t *vport, *next_vport;
+ struct efct *efct = lio_vport->efct;
+ unsigned long flags = 0;
+
+ if (lio_vport->fc_vport)
+ fc_vport_terminate(lio_vport->fc_vport);
+
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
+ list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
+ list_entry) {
+ if (vport->lio_vport == lio_vport) {
+ list_del(&vport->list_entry);
+ kfree(vport->lio_vport);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+}
+
+static struct se_portal_group *
+efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->nport = lio_nport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ efct = lio_nport->efct;
+ efct->tgt_efct.tpg = tpg;
+ efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
+
+ xa_init(&efct->lookup);
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ struct efct *efct = tpg->nport->efct;
+
+ efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
+ tpg->nport->efct->tgt_efct.tpg = NULL;
+ core_tpg_deregister(se_tpg);
+ xa_destroy(&efct->lookup);
+ kfree(tpg);
+}
+
+static struct se_portal_group *
+efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ efct = lio_vport->efct;
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (n != 1) {
+ efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->vport = lio_vport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ lio_vport->tpg = tpg;
+ efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
+
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
+ tpg->tpgt);
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static int
+efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
+{
+ struct efct_lio_nacl *nacl;
+ u64 wwnn;
+
+ if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
+ return -EINVAL;
+
+ nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+
+ efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
+ return 0;
+}
+
+static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static int
+efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static struct efct_lio_tpg *
+efct_get_vport_tpg(struct efc_node *node)
+{
+ struct efct *efct;
+ u64 wwpn = node->nport->wwpn;
+ struct efct_lio_vport_list_t *vport, *next;
+ struct efct_lio_vport *lio_vport = NULL;
+ struct efct_lio_tpg *tpg = NULL;
+ unsigned long flags = 0;
+
+ efct = node->efc->base;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
+ list_entry) {
+ lio_vport = vport->lio_vport;
+ if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
+ efc_log_debug(efct, "found tpg on vport\n");
+ tpg = lio_vport->tpg;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+ return tpg;
+}
+
+static void
+_efct_tgt_node_free(struct kref *arg)
+{
+ struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
+ struct efc_node *node = tgt_node->node;
+
+ efc_scsi_del_initiator_complete(node->efc, node);
+ kfree(tgt_node);
+}
+
+static int efct_session_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *private)
+{
+ struct efc_node *node = private;
+ struct efct_node *tgt_node;
+ struct efct *efct = node->efc->base;
+
+ tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
+ if (!tgt_node)
+ return -ENOMEM;
+
+ kref_init(&tgt_node->ref);
+ tgt_node->release = _efct_tgt_node_free;
+
+ tgt_node->session = se_sess;
+ node->tgt_node = tgt_node;
+ tgt_node->efct = efct;
+
+ tgt_node->node = node;
+
+ tgt_node->node_fc_id = node->rnode.fc_id;
+ tgt_node->port_fc_id = node->nport->fc_id;
+ tgt_node->vpi = node->nport->indicator;
+ tgt_node->rpi = node->rnode.indicator;
+
+ spin_lock_init(&tgt_node->active_ios_lock);
+ INIT_LIST_HEAD(&tgt_node->active_ios);
+
+ return 0;
+}
+
+int efct_scsi_tgt_new_device(struct efct *efct)
+{
+ u32 total_ios;
+
+ /* Get the max settings */
+ efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
+ efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
+
+ /* initialize IO watermark fields */
+ atomic_set(&efct->tgt_efct.ios_in_use, 0);
+ total_ios = efct->hw.config.n_io;
+ efc_log_debug(efct, "total_ios=%d\n", total_ios);
+ efct->tgt_efct.watermark_min =
+ (total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
+ efct->tgt_efct.watermark_max =
+ (total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
+ atomic_set(&efct->tgt_efct.io_high_watermark,
+ efct->tgt_efct.watermark_max);
+ atomic_set(&efct->tgt_efct.watermark_hit, 0);
+ atomic_set(&efct->tgt_efct.initiator_count, 0);
+
+ lio_wq = create_singlethread_workqueue("efct_lio_worker");
+ if (!lio_wq) {
+ efc_log_err(efct, "workqueue create failed\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&efct->tgt_efct.efct_lio_lock);
+ INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
+
+ return 0;
+}
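
The watermark bounds are plain integer percentages of the adapter's total IO count. A minimal sketch of the same arithmetic, with hypothetical EX_* names and percentage values standing in for the driver's real EFCT_WATERMARK_*_PCT constants:

/* Hypothetical values for illustration only. */
#define EX_WATERMARK_LOW_PCT	15
#define EX_WATERMARK_HIGH_PCT	90

static void ex_compute_watermarks(u32 total_ios, u32 *min, u32 *max)
{
	*min = (total_ios * EX_WATERMARK_LOW_PCT) / 100;	/* 1024 -> 153 */
	*max = (total_ios * EX_WATERMARK_HIGH_PCT) / 100;	/* 1024 -> 921 */
}

The high watermark is then lowered as initiators log in (see efct_lio_setup_session() below), clamping at watermark_min.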
+
+int efct_scsi_tgt_del_device(struct efct *efct)
+{
+ flush_workqueue(lio_wq);
+
+ return 0;
+}
+
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
+{
+ struct efct *efct = nport->efc->base;
+
+ efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
+ efct->tgt_efct.lio_nport->wwpn_str);
+
+ return 0;
+}
+
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
+{
+ efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
+}
+
+static void efct_lio_setup_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ char wwpn[WWN_NAME_LEN];
+ struct efct_lio_tpg *tpg;
+ struct efct_node *tgt_node;
+ struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ /* Check to see if it belongs to a vport;
+ * if not, use the physical port.
+ */
+ tpg = efct_get_vport_tpg(node);
+ if (tpg) {
+ se_tpg = &tpg->tpg;
+ } else if (efct->tgt_efct.tpg) {
+ tpg = efct->tgt_efct.tpg;
+ se_tpg = &tpg->tpg;
+ } else {
+ efc_log_err(efct, "failed to init session\n");
+ kfree(wq_data);
+ return;
+ }
+
+ /*
+ * Format the FCP initiator port_name into colon-separated
+ * values to match the format used by our explicit ConfigFS
+ * NodeACLs.
+ */
+ efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));
+
+ se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
+ node, efct_session_cb);
+ if (IS_ERR(se_sess)) {
+ efc_log_err(efct, "failed to setup session\n");
+ kfree(wq_data);
+ efc_scsi_sess_reg_complete(node, -EIO);
+ return;
+ }
+
+ tgt_node = node->tgt_node;
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+
+ efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
+ se_sess, node, id);
+
+ if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
+ efc_log_err(efct, "Node lookup store failed\n");
+
+ efc_scsi_sess_reg_complete(node, 0);
+
+ /* update IO watermark: increment initiator count */
+ ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ kfree(wq_data);
+}
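
The lookup key built above packs the local port FC_ID into the upper 32 bits and the remote node FC_ID into the lower 32; efct_node_find() in efct_unsol.c rebuilds the identical key for xa_load(). As a small sketch (ex_make_lookup_id() is a hypothetical helper, not part of the driver):

static inline u64 ex_make_lookup_id(u32 port_fc_id, u32 node_fc_id)
{
	/* local port FC_ID on top, remote node FC_ID below */
	return (u64)port_fc_id << 32 | node_fc_id;
}

/* ex_make_lookup_id(0x010200, 0x010300) == 0x0001020000010300 */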
+
+int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_lio_wq_data *wq_data;
+
+ /*
+ * LIO can only validate an initiator in process context,
+ * so accept all callers here and let the worker thread
+ * complete the session setup.
+ */
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_setup_session);
+ queue_work(lio_wq, &wq_data->work);
+ return EFC_SCSI_CALL_ASYNC;
+}
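
The shape used here (allocate a small context with GFP_ATOMIC, stash the pointers, defer to a dedicated workqueue) is the usual way to move from atomic context to a sleepable worker. A minimal self-contained sketch with hypothetical ex_* names:

struct ex_wq_data {
	struct work_struct work;
	void *payload;
};

static void ex_worker(struct work_struct *work)
{
	struct ex_wq_data *d = container_of(work, struct ex_wq_data, work);

	/* may sleep here; the context is freed by the worker */
	kfree(d);
}

static int ex_defer(struct workqueue_struct *wq, void *payload)
{
	struct ex_wq_data *d = kzalloc(sizeof(*d), GFP_ATOMIC);

	if (!d)
		return -ENOMEM;
	d->payload = payload;
	INIT_WORK(&d->work, ex_worker);
	queue_work(wq, &d->work);
	return 0;
}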
+
+static void efct_lio_remove_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+
+ tgt_node = node->tgt_node;
+ if (!tgt_node) {
+ /* the base driver sent back-to-back unregister
+ * requests for this session with no intervening
+ * register
+ */
+ efc_log_err(efct, "unreg session for NULL session\n");
+ efc_scsi_del_initiator_complete(node->efc, node);
+ return;
+ }
+
+ se_sess = tgt_node->session;
+ efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
+ se_sess, node);
+
+ /* first flag all session commands to complete */
+ target_stop_session(se_sess);
+
+ /* now wait for session commands to complete */
+ target_wait_for_sess_cmds(se_sess);
+ target_remove_session(se_sess);
+ tgt_node->session = NULL;
+ node->tgt_node = NULL;
+ kref_put(&tgt_node->ref, tgt_node->release);
+
+ kfree(wq_data);
+}
+
+int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_node *tgt_node = node->tgt_node;
+ struct efct_lio_wq_data *wq_data;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ if (reason == EFCT_SCSI_INITIATOR_MISSING)
+ return EFC_SCSI_CALL_COMPLETE;
+
+ if (!tgt_node) {
+ efc_log_err(efct, "tgt_node is NULL\n");
+ return -EIO;
+ }
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+ xa_erase(&efct->lookup, id);
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_remove_session);
+ queue_work(lio_wq, &wq_data->work);
+
+ /*
+ * update IO watermark: decrement initiator count
+ */
+ ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
+
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ return EFC_SCSI_CALL_ASYNC;
+}
+
+void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
+ u32 cdb_len, u32 flags)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *se_cmd = &io->tgt_io.cmd;
+ struct efct *efct = io->efct;
+ char *ddir;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc = 0;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+
+ /* set target timeout */
+ io->timeout = efct->target_io_timer_sec;
+
+ if (flags & EFCT_SCSI_CMD_SIMPLE)
+ ocp->task_attr = TCM_SIMPLE_TAG;
+ else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
+ ocp->task_attr = TCM_HEAD_TAG;
+ else if (flags & EFCT_SCSI_CMD_ORDERED)
+ ocp->task_attr = TCM_ORDERED_TAG;
+ else if (flags & EFCT_SCSI_CMD_ACA)
+ ocp->task_attr = TCM_ACA_TAG;
+
+ switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
+ case EFCT_SCSI_CMD_DIR_IN:
+ ddir = "FROM_INITIATOR";
+ ocp->ddir = DMA_TO_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "TO_INITIATOR";
+ ocp->ddir = DMA_FROM_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "BIDIR";
+ ocp->ddir = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ ddir = "NONE";
+ ocp->ddir = DMA_NONE;
+ break;
+ }
+
+ ocp->lun = lun;
+ efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
+ cdb[0], ddir, io->exp_xfer_len);
+
+ tgt_node = io->node;
+ se_sess = tgt_node->session;
+ if (!se_sess) {
+ efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
+ &ocp->cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
+ rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
+ ocp->lun, io->exp_xfer_len, ocp->task_attr,
+ ocp->ddir, TARGET_SCF_ACK_KREF);
+ if (rc) {
+ efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_ATOMIC))
+ return;
+
+ target_submit(se_cmd);
+}
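
The direction switch above reads from the target's point of view: data arriving IN from the initiator is a write toward the device (DMA_TO_DEVICE), and data going OUT to the initiator is a read (DMA_FROM_DEVICE). Condensed into a hypothetical helper for clarity:

static enum dma_data_direction ex_flags_to_ddir(u32 flags)
{
	bool in = flags & EFCT_SCSI_CMD_DIR_IN;
	bool out = flags & EFCT_SCSI_CMD_DIR_OUT;

	if (in && out)
		return DMA_BIDIRECTIONAL;
	if (in)
		return DMA_TO_DEVICE;	/* initiator -> target: write */
	if (out)
		return DMA_FROM_DEVICE;	/* target -> initiator: read */
	return DMA_NONE;
}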
+
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *io_to_abort, u32 flags)
+{
+ unsigned char tmr_func;
+ struct efct *efct = tmfio->efct;
+ struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+ efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
+ tmfio->display_name, cmd, lun);
+
+ switch (cmd) {
+ case EFCT_SCSI_TMF_ABORT_TASK:
+ tmr_func = TMR_ABORT_TASK;
+ break;
+ case EFCT_SCSI_TMF_ABORT_TASK_SET:
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_TASK_SET:
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
+ tmr_func = TMR_LUN_RESET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_ACA:
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+ case EFCT_SCSI_TMF_TARGET_RESET:
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+ case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
+ case EFCT_SCSI_TMF_QUERY_TASK_SET:
+ default:
+ goto tmf_fail;
+ }
+
+ tmfio->tgt_io.tmf = tmr_func;
+ tmfio->tgt_io.lun = lun;
+ tmfio->tgt_io.io_to_abort = io_to_abort;
+
+ tgt_node = tmfio->node;
+
+ se_sess = tgt_node->session;
+ if (!se_sess)
+ return 0;
+
+ rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
+ GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);
+
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
+ if (rc)
+ goto tmf_fail;
+
+ return 0;
+
+tmf_fail:
+ efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_lio_null_tmf_done, NULL);
+ return 0;
+}
+
+/* Start items for efct_lio_tpg_attrib_cit */
+
+#define DEF_EFCT_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)
+
+DEF_EFCT_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
+ &efct_lio_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
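
Each DEF_EFCT_TPG_ATTRIB() invocation generates a show/store pair and, through CONFIGFS_ATTR(), the attribute object named in the table above. In outline, DEF_EFCT_TPG_ATTRIB(generate_node_acls) expands to:

static ssize_t efct_lio_tpg_attrib_generate_node_acls_show(
		struct config_item *item, char *page);
static ssize_t efct_lio_tpg_attrib_generate_node_acls_store(
		struct config_item *item, const char *page, size_t count);
/* CONFIGFS_ATTR(efct_lio_tpg_attrib_, generate_node_acls) then emits
 * efct_lio_tpg_attrib_attr_generate_node_acls, wired to the pair above.
 */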
+
+#define DEF_EFCT_NPIV_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)
+
+DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
+ &efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+CONFIGFS_ATTR(efct_lio_tpg_, enable);
+static struct configfs_attribute *efct_lio_tpg_attrs[] = {
+ &efct_lio_tpg_attr_enable, NULL };
+CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
+static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
+ &efct_lio_npiv_tpg_attr_enable, NULL };
+
+static const struct target_core_fabric_ops efct_lio_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_make_nport,
+ .fabric_drop_wwn = efct_lio_drop_nport,
+ .fabric_make_tpg = efct_lio_make_tpg,
+ .fabric_drop_tpg = efct_lio_drop_tpg,
+ .tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
+};
+
+static const struct target_core_fabric_ops efct_lio_npiv_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct_npiv",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_npiv_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_npiv_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ efct_lio_npiv_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ efct_lio_npiv_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_npiv_make_nport,
+ .fabric_drop_wwn = efct_lio_npiv_drop_nport,
+ .fabric_make_tpg = efct_lio_npiv_make_tpg,
+ .fabric_drop_tpg = efct_lio_npiv_drop_tpg,
+ .tpg_check_demo_mode_login_only =
+ efct_lio_npiv_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
+};
+
+int efct_scsi_tgt_driver_init(void)
+{
+ int rc;
+
+ /* Register the top level struct config_item_type with TCM core */
+ rc = target_register_template(&efct_lio_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ return rc;
+ }
+ rc = target_register_template(&efct_lio_npiv_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ target_unregister_template(&efct_lio_ops);
+ return rc;
+ }
+ return 0;
+}
+
+int efct_scsi_tgt_driver_exit(void)
+{
+ target_unregister_template(&efct_lio_ops);
+ target_unregister_template(&efct_lio_npiv_ops);
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_lio.h b/drivers/scsi/elx/efct/efct_lio.h
new file mode 100644
index 000000000000..569a0d4b1894
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCT_LIO_H__
+#define __EFCT_LIO_H__
+
+#include "efct_scsi.h"
+#include <target/target_core_base.h>
+
+#define efct_lio_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ ##__VA_ARGS__)
+
+#define efct_lio_tmfio_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x][f:%02x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ io->tgt_io.tmf, ##__VA_ARGS__)
+
+#define efct_set_lio_io_state(io, value) (io->tgt_io.state |= value)
+
+struct efct_lio_wq_data {
+ struct efct *efct;
+ void *ptr;
+ struct work_struct work;
+};
+
+/* Target private efct structure */
+struct efct_scsi_tgt {
+ u32 max_sge;
+ u32 max_sgl;
+
+ /*
+ * Variables used to send task set full. We are using a high watermark
+ * method to send task set full. We will reserve a fixed number of IOs
+ * per initiator plus a fudge factor. Once we reach this number,
+ * then the target will start sending task set full/busy responses.
+ */
+ atomic_t initiator_count;
+ atomic_t ios_in_use;
+ atomic_t io_high_watermark;
+
+ atomic_t watermark_hit;
+ int watermark_min;
+ int watermark_max;
+
+ struct efct_lio_nport *lio_nport;
+ struct efct_lio_tpg *tpg;
+
+ struct list_head vport_list;
+ /* Protects vport_list */
+ spinlock_t efct_lio_lock;
+
+ u64 wwnn;
+};
+
+struct efct_scsi_tgt_nport {
+ struct efct_lio_nport *lio_nport;
+};
+
+struct efct_node {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efct *efct;
+ struct efc_node *node;
+ struct se_session *session;
+ spinlock_t active_ios_lock;
+ struct list_head active_ios;
+ char display_name[EFC_NAME_LENGTH];
+ u32 port_fc_id;
+ u32 node_fc_id;
+ u32 vpi;
+ u32 rpi;
+ u32 abort_cnt;
+};
+
+#define EFCT_LIO_STATE_SCSI_RECV_CMD (1 << 0)
+#define EFCT_LIO_STATE_TGT_SUBMIT_CMD (1 << 1)
+#define EFCT_LIO_STATE_TFO_QUEUE_DATA_IN (1 << 2)
+#define EFCT_LIO_STATE_TFO_WRITE_PENDING (1 << 3)
+#define EFCT_LIO_STATE_TGT_EXECUTE_CMD (1 << 4)
+#define EFCT_LIO_STATE_SCSI_SEND_RD_DATA (1 << 5)
+#define EFCT_LIO_STATE_TFO_CHK_STOP_FREE (1 << 6)
+#define EFCT_LIO_STATE_SCSI_DATA_DONE (1 << 7)
+#define EFCT_LIO_STATE_TFO_QUEUE_STATUS (1 << 8)
+#define EFCT_LIO_STATE_SCSI_SEND_RSP (1 << 9)
+#define EFCT_LIO_STATE_SCSI_RSP_DONE (1 << 10)
+#define EFCT_LIO_STATE_TGT_GENERIC_FREE (1 << 11)
+#define EFCT_LIO_STATE_SCSI_RECV_TMF (1 << 12)
+#define EFCT_LIO_STATE_TGT_SUBMIT_TMR (1 << 13)
+#define EFCT_LIO_STATE_TFO_WRITE_PEND_STATUS (1 << 14)
+#define EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE (1 << 15)
+
+#define EFCT_LIO_STATE_TFO_ABORTED_TASK (1 << 29)
+#define EFCT_LIO_STATE_TFO_RELEASE_CMD (1 << 30)
+#define EFCT_LIO_STATE_SCSI_CMPL_CMD (1u << 31)
+
+struct efct_scsi_tgt_io {
+ struct se_cmd cmd;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ enum dma_data_direction ddir;
+ int task_attr;
+ u64 lun;
+
+ u32 state;
+ u8 tmf;
+ struct efct_io *io_to_abort;
+ u32 seg_map_cnt;
+ u32 seg_cnt;
+ u32 cur_seg;
+ enum efct_scsi_io_status err;
+ bool aborting;
+ bool rsp_sent;
+ u32 transferred_len;
+};
+
+/* Handler return codes */
+enum {
+ SCSI_HANDLER_DATAPHASE_STARTED = 1,
+ SCSI_HANDLER_RESP_STARTED,
+ SCSI_HANDLER_VALIDATED_DATAPHASE_STARTED,
+ SCSI_CMD_NOT_SUPPORTED,
+};
+
+#define WWN_NAME_LEN 32
+struct efct_lio_vport {
+ u64 wwpn;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn vport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ atomic_t enable;
+};
+
+struct efct_lio_nport {
+ u64 wwpn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn nport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ atomic_t enable;
+};
+
+struct efct_lio_tpg_attrib {
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ u32 demo_mode_login_only;
+ bool session_deletion_wait;
+};
+
+struct efct_lio_tpg {
+ struct se_portal_group tpg;
+ struct efct_lio_nport *nport;
+ struct efct_lio_vport *vport;
+ struct efct_lio_tpg_attrib tpg_attrib;
+ unsigned short tpgt;
+ bool enabled;
+};
+
+struct efct_lio_nacl {
+ u64 nport_wwnn;
+ char nport_name[WWN_NAME_LEN];
+ struct se_session *session;
+ struct se_node_acl se_node_acl;
+};
+
+struct efct_lio_vport_list_t {
+ struct list_head list_entry;
+ struct efct_lio_vport *lio_vport;
+};
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+
+#endif /* __EFCT_LIO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
new file mode 100644
index 000000000000..afb154992053
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+
+#define enable_tsend_auto_resp(efct) 1
+#define enable_treceive_auto_resp(efct) 0
+
+#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
+ io->node->display_name, io->instance_index,\
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)
+
+#define scsi_io_trace(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
+ scsi_io_printf(io, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ struct efct_io *io;
+ unsigned long flags;
+
+ efct = node->efct;
+
+ xport = efct->xport;
+
+ io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!io) {
+ efc_log_err(efct, "IO alloc Failed\n");
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&io->ref);
+ io->release = _efct_scsi_io_free;
+
+ /* set generic fields */
+ io->efct = efct;
+ io->node = node;
+ kref_get(&node->ref);
+
+ /* set type and name */
+ io->io_type = EFCT_IO_TYPE_IO;
+ io->display_name = "scsi_io";
+
+ io->cmd_ini = false;
+ io->cmd_tgt = true;
+
+ /* Add to node's active_ios list */
+ INIT_LIST_HEAD(&io->list_entry);
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_add(&io->list_entry, &node->active_ios);
+
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ return io;
+}
+
+void
+_efct_scsi_io_free(struct kref *arg)
+{
+ struct efct_io *io = container_of(arg, struct efct_io, ref);
+ struct efct *efct = io->efct;
+ struct efct_node *node = io->node;
+ unsigned long flags = 0;
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+
+ if (io->io_free) {
+ efc_log_err(efct, "IO already freed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_del_init(&io->list_entry);
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ kref_put(&node->ref, node->release);
+ io->node = NULL;
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+}
+
+void
+efct_scsi_io_free(struct efct_io *io)
+{
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ WARN_ON(!refcount_read(&io->ref.refcount));
+ kref_put(&io->ref, io->release);
+}
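
The two helpers above follow the standard kref pattern: efct_scsi_io_free() is the public kref_put() wrapper, and _efct_scsi_io_free() is the release callback that runs exactly once, when the refcount hits zero. The general shape, as a sketch with hypothetical ex_* names:

struct ex_obj {
	struct kref ref;
};

static void ex_release(struct kref *kref)
{
	struct ex_obj *obj = container_of(kref, struct ex_obj, ref);

	kfree(obj);	/* runs once, at refcount zero */
}

/* kref_init(&obj->ref);             first reference
 * kref_get(&obj->ref);              one per additional borrower
 * kref_put(&obj->ref, ex_release);  last put invokes ex_release()
 */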
+
+static void
+efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ u32 flags = 0;
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ efct_scsi_io_cb_t cb;
+
+ if (!io || !io->efct) {
+ pr_err("%s: IO can not be NULL\n", __func__);
+ return;
+ }
+
+ scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
+
+ efct = io->efct;
+
+ io->transferred += length;
+
+ if (!io->scsi_tgt_cb) {
+ efct_scsi_check_pending(efct);
+ return;
+ }
+
+ /* Call target server completion */
+ cb = io->scsi_tgt_cb;
+
+ /* Clear the callback before invoking the callback */
+ io->scsi_tgt_cb = NULL;
+
+ /* if status was good, and auto-good-response was set,
+ * then callback target-server with IO_CMPL_RSP_SENT,
+ * otherwise send IO_CMPL
+ */
+ if (status == 0 && io->auto_resp)
+ flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
+ else
+ flags |= EFCT_SCSI_IO_CMPL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ if (ext_status & SLI4_FC_DI_ERROR_GE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
+ else if (ext_status & SLI4_FC_DI_ERROR_AE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ else if (ext_status & SLI4_FC_DI_ERROR_RE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ else
+ scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
+ scsi_stat = EFCT_SCSI_STATUS_ABORTED;
+ break;
+ case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
+ scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
+ break;
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_stat = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ default:
+ /* we have seen 0x0d (TX_DMA_FAILED err) */
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
+ /* target IO timed out */
+ scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
+ break;
+
+ case SLI4_FC_WCQE_STATUS_SHUTDOWN:
+ /* Target IO cancelled by HW */
+ scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
+ break;
+
+ default:
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);
+
+ efct_scsi_check_pending(efct);
+}
+
+static int
+efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
+ struct efct_scsi_sgl *sgl, u32 sgl_count,
+ enum efct_hw_io_type type)
+{
+ int rc;
+ u32 i;
+ struct efct *efct = hw->os;
+
+ /* Initialize HW SGL */
+ rc = efct_hw_io_init_sges(hw, hio, type);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
+ return -EIO;
+ }
+
+ for (i = 0; i < sgl_count; i++) {
+ /* Add data SGE */
+ rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
+ if (rc) {
+ efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
+ sgl_count, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void efc_log_sgl(struct efct_io *io)
+{
+ struct efct_hw_io *hio = io->hio;
+ struct sli4_sge *data = NULL;
+ u32 *dword = NULL;
+ u32 i;
+ u32 n_sge;
+
+ scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
+ upper_32_bits(hio->def_sgl.phys),
+ lower_32_bits(hio->def_sgl.phys));
+ n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
+ for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
+ dword = (u32 *)data;
+
+ scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, dword[0], dword[1], dword[2], dword[3]);
+
+ if (dword[2] & (1U << 31))
+ break;
+ }
+}
+
+static void
+efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct efct_io *io = arg;
+
+ if (io) {
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (!io->hw_cb)
+ return;
+
+ io->hw_cb = NULL;
+ (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
+ }
+}
+
+static int
+efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
+{
+ int rc = 0;
+ struct efct *efct = io->efct;
+
+ /* Got a HW IO;
+ * update ini/tgt_task_tag with HW IO info and dispatch
+ */
+ io->hio = hio;
+ if (io->cmd_tgt)
+ io->tgt_task_tag = hio->indicator;
+ else if (io->cmd_ini)
+ io->init_task_tag = hio->indicator;
+ io->hw_tag = hio->reqtag;
+
+ hio->eq = io->hw_priv;
+
+ /* Copy WQ steering */
+ switch (io->wq_steering) {
+ case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
+ break;
+ case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
+ break;
+ case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
+ break;
+ }
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_IO:
+ rc = efct_scsi_build_sgls(&efct->hw, io->hio,
+ io->sgl, io->sgl_count, io->hio_type);
+ if (rc)
+ break;
+
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
+ efc_log_sgl(io);
+
+ if (io->app_id)
+ io->iparam.fcp_tgt.app_id = io->app_id;
+
+ io->iparam.fcp_tgt.vpi = io->node->vpi;
+ io->iparam.fcp_tgt.rpi = io->node->rpi;
+ io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
+ io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
+ io->iparam.fcp_tgt.xmit_len = io->wire_len;
+
+ rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
+ &io->iparam, io->hw_cb, io);
+ break;
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static int
+efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
+{
+ int rc;
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_ABORT: {
+ struct efct_hw_io *hio_to_abort = NULL;
+
+ hio_to_abort = io->io_to_abort->hio;
+
+ if (!hio_to_abort) {
+ /*
+ * If "IO to abort" does not have an
+ * associated HW IO, immediately make callback with
+ * success. The command must have been sent to
+ * the backend, but the data phase has not yet
+ * started, so we don't have a HW IO.
+ *
+ * Note: since the backend shims should be
+ * taking a reference on io_to_abort, it should not
+ * be possible to have been completed and freed by
+ * the backend before the abort got here.
+ */
+ scsi_io_printf(io, "IO: not active\n");
+ ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
+ SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
+ rc = 0;
+ break;
+ }
+
+ /* HW IO is valid, abort it */
+ scsi_io_printf(io, "aborting\n");
+ rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
+ io->send_abts, io->hw_cb, io);
+ if (rc) {
+ int status = SLI4_FC_WCQE_STATUS_SUCCESS;
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (rc != -ENOENT && rc != -EINPROGRESS) {
+ status = -1;
+ scsi_io_printf(io, "Failed to abort IO rc=%d\n",
+ rc);
+ }
+ cb(io->hio, 0, status, 0, io);
+ rc = 0;
+ }
+
+ break;
+ }
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static struct efct_io *
+efct_scsi_dispatch_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ struct efct_hw_io *hio;
+ unsigned long flags = 0;
+ int status;
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ if (!list_empty(&xport->io_pending_list)) {
+ io = list_first_entry(&xport->io_pending_list, struct efct_io,
+ io_pending_link);
+ list_del_init(&io->io_pending_link);
+ }
+
+ if (!io) {
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ return NULL;
+ }
+
+ if (io->io_type == EFCT_IO_TYPE_ABORT) {
+ hio = NULL;
+ } else {
+ hio = efct_hw_io_alloc(&efct->hw);
+ if (!hio) {
+ /*
+ * No HW IO available. Put the IO back on
+ * the front of the pending list.
+ */
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ io = NULL;
+ } else {
+ hio->eq = io->hw_priv;
+ }
+ }
+
+ /* Must drop the lock before dispatching the IO */
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (!io)
+ return NULL;
+
+ /*
+ * We pulled an IO off the pending list,
+ * and either got an HW IO or don't need one
+ */
+ atomic_sub_return(1, &xport->io_pending_count);
+ if (!hio)
+ status = efct_scsi_io_dispatch_no_hw_io(io);
+ else
+ status = efct_scsi_io_dispatch_hw_io(io, hio);
+ if (status) {
+ /*
+ * Invoke the HW callback, but do so in a separate
+ * execution context: the NOP mailbox completion
+ * processing context, via efct_hw_async_call().
+ */
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "call hw async failed\n");
+ }
+ }
+
+ return io;
+}
+
+void
+efct_scsi_check_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ int count = 0;
+ unsigned long flags = 0;
+ int dispatch = 0;
+
+ /* Guard against recursion: atomic_add_return() yields the
+ * post-increment value, so only the entrant that raises the
+ * counter from zero to one proceeds.
+ */
+ if (atomic_add_return(1, &xport->io_pending_recursing) > 1) {
+ /* This function is already running. Decrement and return. */
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ while (efct_scsi_dispatch_pending(efct))
+ count++;
+
+ if (count) {
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ /*
+ * If nothing was removed from the list,
+ * we might be in a case where we need to abort an
+ * active IO and the abort is on the pending list.
+ * Look for an abort we can dispatch.
+ */
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
+ if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
+ /* This IO has a HW IO, so it is
+ * active. Dispatch the abort.
+ */
+ dispatch = 1;
+ list_del_init(&io->io_pending_link);
+ atomic_sub_return(1, &xport->io_pending_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (dispatch) {
+ if (efct_scsi_io_dispatch_no_hw_io(io)) {
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "hw async failed\n");
+ }
+ }
+ }
+
+ atomic_sub_return(1, &xport->io_pending_recursing);
+}
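
The guard at the top of efct_scsi_check_pending() is a bare atomic counter; only the entrant that takes the counter from zero to one runs the body. The idiom in isolation (hypothetical ex_* names):

static atomic_t ex_running = ATOMIC_INIT(0);

static void ex_check_pending(void)
{
	if (atomic_add_return(1, &ex_running) > 1) {
		/* someone else is already draining; back out */
		atomic_sub_return(1, &ex_running);
		return;
	}

	/* ... drain the pending list ... */

	atomic_sub_return(1, &ex_running);
}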
+
+int
+efct_scsi_io_dispatch(struct efct_io *io, void *cb)
+{
+ struct efct_hw_io *hio;
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * If this IO already has a HW IO, this is not the first
+ * phase of the IO; send it straight to the HW.
+ */
+ if (io->hio)
+ return efct_scsi_io_dispatch_hw_io(io, io->hio);
+
+ /*
+ * We don't already have a HW IO associated with the IO. First check
+ * the pending list. If not empty, add IO to the tail and process the
+ * pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ /*
+ * If this is a low-latency request, put it at the
+ * front of the IO pending queue; otherwise put it
+ * at the end of the queue.
+ */
+ if (io->low_latency) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ } else {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link,
+ &xport->io_pending_list);
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /*
+ * We don't have a HW IO associated with the IO and there's nothing
+ * on the pending list. Attempt to allocate a HW IO and dispatch it.
+ */
+ hio = efct_hw_io_alloc(&io->efct->hw);
+ if (!hio) {
+ /* Couldn't get a HW IO. Save this IO on the pending list */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ atomic_add_return(1, &xport->io_total_pending);
+ atomic_add_return(1, &xport->io_pending_count);
+ return 0;
+ }
+
+ /* We successfully allocated a HW IO; dispatch to HW */
+ return efct_scsi_io_dispatch_hw_io(io, hio);
+}
+
+int
+efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
+{
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * For aborts, we don't need a HW IO, but we still want
+ * to pass through the pending list to preserve ordering.
+ * Thus, if the pending list is not empty, add this abort
+ * to the pending list and process the pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /* nothing on pending list, dispatch abort */
+ return efct_scsi_io_dispatch_no_hw_io(io);
+}
+
+static inline int
+efct_scsi_xfer_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
+ enum efct_hw_io_type type, int enable_ar,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ size_t residual = 0;
+
+ io->sgl_count = sgl_count;
+
+ efct = io->efct;
+
+ scsi_io_trace(io, "%s wire_len %llu\n",
+ (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
+ xwire_len);
+
+ io->hio_type = type;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ residual = io->exp_xfer_len - io->transferred;
+ io->wire_len = (xwire_len < residual) ? xwire_len : residual;
+ residual = (xwire_len - io->wire_len);
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = io->transferred;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* if this is the last data phase and there is no residual, enable
+ * auto-good-response
+ */
+ if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
+ ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
+ (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+ io->auto_resp = true;
+ } else {
+ io->auto_resp = false;
+ }
+
+ /* save this transfer length */
+ io->xfer_req = io->wire_len;
+
+ /* Adjust the transferred count to account for overrun
+ * when the residual is calculated in efct_scsi_send_resp
+ */
+ io->transferred += residual;
+
+ /* Adjust the SGL size if there is overrun */
+
+ if (residual) {
+ struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];
+
+ while (residual) {
+ size_t len = sgl_ptr->len;
+
+ if (len > residual) {
+ sgl_ptr->len = len - residual;
+ residual = 0;
+ } else {
+ sgl_ptr->len = 0;
+ residual -= len;
+ io->sgl_count--;
+ }
+ sgl_ptr--;
+ }
+ }
+
+ /* Set latency and WQ steering */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (efct->xport) {
+ struct efct_xport *xport = efct->xport;
+
+ if (type == EFCT_HW_IO_TARGET_READ) {
+ xport->fcp_stats.input_requests++;
+ xport->fcp_stats.input_bytes += xwire_len;
+ } else if (type == EFCT_HW_IO_TARGET_WRITE) {
+ xport->fcp_stats.output_requests++;
+ xport->fcp_stats.output_bytes += xwire_len;
+ }
+ }
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
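
When a residual remains, the code above walks the SGL backwards, shrinking or dropping tail entries until the overrun is consumed. A worked standalone sketch (hypothetical ex_* names): trimming {4096, 4096, 512} by 1024 bytes drops the 512-byte tail, shortens the middle entry to 3584, and returns a count of 2.

struct ex_sgl {
	size_t len;
};

static u32 ex_trim_sgl(struct ex_sgl *sgl, u32 count, size_t residual)
{
	struct ex_sgl *p = &sgl[count - 1];

	while (residual) {
		if (p->len > residual) {
			p->len -= residual;
			residual = 0;
		} else {
			residual -= p->len;
			p->len = 0;
			count--;
		}
		p--;
	}
	return count;
}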
+
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
+ len, EFCT_HW_IO_TARGET_READ,
+ enable_tsend_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
+ EFCT_HW_IO_TARGET_WRITE,
+ enable_treceive_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ int residual;
+ /* Always try auto resp */
+ bool auto_resp = true;
+ u8 scsi_status = 0;
+ u16 scsi_status_qualifier = 0;
+ u8 *sense_data = NULL;
+ u32 sense_data_length = 0;
+
+ efct = io->efct;
+
+ if (rsp) {
+ scsi_status = rsp->scsi_status;
+ scsi_status_qualifier = rsp->scsi_status_qualifier;
+ sense_data = rsp->sense_data;
+ sense_data_length = rsp->sense_data_length;
+ residual = rsp->residual;
+ } else {
+ residual = io->exp_xfer_len - io->transferred;
+ }
+
+ io->wire_len = 0;
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* Set low latency queueing request */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (scsi_status != 0 || residual || sense_data_length) {
+ struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
+ u8 *sns_data;
+
+ if (!fcprsp) {
+ efc_log_err(efct, "NULL response buffer\n");
+ return -EIO;
+ }
+
+ sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);
+
+ auto_resp = false;
+
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ io->wire_len += sizeof(*fcprsp);
+
+ fcprsp->resp.fr_status = scsi_status;
+ fcprsp->resp.fr_retry_delay =
+ cpu_to_be16(scsi_status_qualifier);
+
+ /* set residual status if necessary */
+ if (residual != 0) {
+ /* FCP: if less data was transferred than expected,
+ * this is an underflow; if more would have been
+ * transferred than expected, this is an overflow.
+ */
+ if (residual > 0) {
+ fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
+ fcprsp->ext.fr_resid = cpu_to_be32(residual);
+ } else {
+ fcprsp->resp.fr_flags |= FCP_RESID_OVER;
+ fcprsp->ext.fr_resid = cpu_to_be32(-residual);
+ }
+ }
+
+ if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
+ if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
+ efc_log_err(efct, "Sense exceeds max size.\n");
+ return -EIO;
+ }
+
+ fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
+ memcpy(sns_data, sense_data, sense_data_length);
+ fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
+ io->wire_len += sense_data_length;
+ }
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+ }
+
+ if (auto_resp)
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
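
The residual's sign selects the FCP flag: positive (fewer bytes moved than the initiator expected) means FCP_RESID_UNDER, negative means FCP_RESID_OVER, and fr_resid always carries the magnitude. Reduced to a sketch (ex_set_resid() is hypothetical; the struct and flags are from <scsi/fc/fc_fcp.h>):

static void ex_set_resid(struct fcp_resp_with_ext *rsp, int residual)
{
	if (residual > 0) {		/* short transfer */
		rsp->resp.fr_flags |= FCP_RESID_UNDER;
		rsp->ext.fr_resid = cpu_to_be32(residual);
	} else if (residual < 0) {	/* overrun */
		rsp->resp.fr_flags |= FCP_RESID_OVER;
		rsp->ext.fr_resid = cpu_to_be32(-residual);
	}
}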
+
+static int
+efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status bls_status;
+
+ efct = io->efct;
+
+ /* BLS isn't really a "SCSI" concept, but use SCSI status */
+ if (status) {
+ io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
+ bls_status = EFCT_SCSI_STATUS_ERROR;
+ } else {
+ bls_status = EFCT_SCSI_STATUS_GOOD;
+ }
+
+ if (io->bls_cb) {
+ efct_scsi_io_cb_t bls_cb = io->bls_cb;
+ void *bls_cb_arg = io->bls_cb_arg;
+
+ io->bls_cb = NULL;
+ io->bls_cb_arg = NULL;
+
+ /* invoke callback */
+ bls_cb(io, bls_status, 0, bls_cb_arg);
+ }
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+static int
+efct_target_send_bls_resp(struct efct_io *io,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_acc *acc;
+ int rc;
+
+ /* fill out IO structure with everything needed to send BA_ACC */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = io->init_task_tag;
+ bls->rx_id = io->abort_rx_id;
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->ba_ox_id = cpu_to_be16(bls->ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls->rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ /* generic io fields have already been populated */
+
+ /* set type and BLS-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "bls_rsp";
+ io->hio_type = EFCT_HW_BLS_ACC;
+ io->bls_cb = cb;
+ io->bls_cb_arg = arg;
+
+ /* dispatch IO */
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
+ efct_target_bls_resp_cb, io);
+ return rc;
+}
+
+static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+
+ efct_scsi_io_free(io);
+ return 0;
+}
+
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_rjt *acc;
+ int rc;
+
+ /* fill out BLS Response-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "ba_rjt";
+ io->hio_type = EFCT_HW_BLS_RJT;
+ io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);
+
+ /* fill out iparam fields */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->br_reason = ELS_RJT_UNAB;
+ acc->br_explan = ELS_EXPL_NONE;
+
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
+ io);
+ if (rc) {
+ efc_log_err(efct, "efct_scsi_io_dispatch() failed: %d\n", rc);
+ efct_scsi_io_free(io);
+ io = NULL;
+ }
+ return io;
+}
+
+int
+efct_scsi_send_tmf_resp(struct efct_io *io,
+ enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3],
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ int rc;
+ struct {
+ struct fcp_resp_with_ext rsp_ext;
+ struct fcp_resp_rsp_info info;
+ } *fcprsp;
+ u8 fcp_rspcode;
+
+ io->wire_len = 0;
+
+ switch (rspcode) {
+ case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
+ case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_REJECTED:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
+ fcp_rspcode = FCP_TMF_INVALID_LUN;
+ break;
+ case EFCT_SCSI_TMF_SERVICE_DELIVERY:
+ fcp_rspcode = FCP_TMF_FAILED;
+ break;
+ default:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ }
+
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
+ rc = efct_target_send_bls_resp(io, cb, arg);
+ return rc;
+ }
+
+ /* populate the FCP TMF response */
+ fcprsp = io->rspbuf.virt;
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ fcprsp->rsp_ext.resp.fr_flags |= FCP_RSP_LEN_VAL;
+
+ if (addl_rsp_info) {
+ memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
+ sizeof(fcprsp->info._fr_resvd));
+ }
+ fcprsp->info.rsp_code = fcp_rspcode;
+
+ io->wire_len = sizeof(*fcprsp);
+
+ fcprsp->rsp_ext.ext.fr_rsp_len =
+ cpu_to_be32(sizeof(struct fcp_resp_rsp_info));
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ rc = efct_scsi_io_dispatch(io, efct_target_io_cb);
+
+ return rc;
+}
+
+static int
+efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_status;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+
+ efct = io->efct;
+
+ if (!io->abort_cb)
+ goto done;
+
+ abort_cb = io->abort_cb;
+ abort_cb_arg = io->abort_cb_arg;
+
+ io->abort_cb = NULL;
+ io->abort_cb_arg = NULL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_status = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
+ scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
+ break;
+ default:
+ /* we have seen 0x15 (abort in progress) */
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
+ break;
+ default:
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ /* invoke callback */
+ abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
+
+done:
+ /* done with the IO being aborted; drop the reference taken in
+ * efct_scsi_tgt_abort_io()
+ */
+ kref_put(&io->io_to_abort->ref, io->io_to_abort->release);
+
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ int rc;
+ struct efct_io *abort_io = NULL;
+
+ efct = io->efct;
+ xport = efct->xport;
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io->ref) == 0) {
+ /* command no longer active */
+ scsi_io_printf(io, "command no longer active\n");
+ return -EIO;
+ }
+
+ /*
+ * Allocate a new IO to send the abort request. Use
+ * efct_io_pool_io_alloc() directly, as we need an IO object
+ * that will not fail allocation when allocations are disabled
+ * in efct_scsi_io_alloc().
+ */
+ abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!abort_io) {
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ kref_put(&io->ref, io->release);
+ return -EIO;
+ }
+
+ /* Save the target server callback and argument */
+ /* set generic fields */
+ abort_io->cmd_tgt = true;
+ abort_io->node = io->node;
+
+ /* set type and abort-specific fields */
+ abort_io->io_type = EFCT_IO_TYPE_ABORT;
+ abort_io->display_name = "tgt_abort";
+ abort_io->io_to_abort = io;
+ abort_io->send_abts = false;
+ abort_io->abort_cb = cb;
+ abort_io->abort_cb_arg = arg;
+
+ /* now dispatch IO */
+ rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
+ if (rc)
+ kref_put(&io->ref, io->release);
+ return rc;
+}
+
+void
+efct_scsi_io_complete(struct efct_io *io)
+{
+ if (io->io_free) {
+ efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
+ io->tag);
+ return;
+ }
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ kref_put(&io->ref, io->release);
+}
diff --git a/drivers/scsi/elx/efct/efct_scsi.h b/drivers/scsi/elx/efct/efct_scsi.h
new file mode 100644
index 000000000000..b04faffa3984
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_SCSI_H__)
+#define __EFCT_SCSI_H__
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+/* efct_scsi_rcv_cmd() efct_scsi_rcv_tmf() flags */
+#define EFCT_SCSI_CMD_DIR_IN (1 << 0)
+#define EFCT_SCSI_CMD_DIR_OUT (1 << 1)
+#define EFCT_SCSI_CMD_SIMPLE (1 << 2)
+#define EFCT_SCSI_CMD_HEAD_OF_QUEUE (1 << 3)
+#define EFCT_SCSI_CMD_ORDERED (1 << 4)
+#define EFCT_SCSI_CMD_UNTAGGED (1 << 5)
+#define EFCT_SCSI_CMD_ACA (1 << 6)
+#define EFCT_SCSI_FIRST_BURST_ERR (1 << 7)
+#define EFCT_SCSI_FIRST_BURST_ABORTED (1 << 8)
+
+/* efct_scsi_send_rd_data/recv_wr_data/send_resp flags */
+#define EFCT_SCSI_LAST_DATAPHASE (1 << 0)
+#define EFCT_SCSI_NO_AUTO_RESPONSE (1 << 1)
+#define EFCT_SCSI_LOW_LATENCY (1 << 2)
+
+#define EFCT_SCSI_SNS_BUF_VALID(sense) ((sense) && \
+ (0x70 == (((const u8 *)(sense))[0] & 0x70)))
+
+#define EFCT_SCSI_WQ_STEERING_SHIFT 16
+#define EFCT_SCSI_WQ_STEERING_MASK (0xf << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CLASS (0 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_REQUEST (1 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CPU (2 << EFCT_SCSI_WQ_STEERING_SHIFT)
+
+#define EFCT_SCSI_WQ_CLASS_SHIFT (20)
+#define EFCT_SCSI_WQ_CLASS_MASK (0xf << EFCT_SCSI_WQ_CLASS_SHIFT)
+#define EFCT_SCSI_WQ_CLASS(x) (((x) << EFCT_SCSI_WQ_CLASS_SHIFT) & \
+ EFCT_SCSI_WQ_CLASS_MASK)
+
+#define EFCT_SCSI_WQ_CLASS_LOW_LATENCY 1
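
Steering and class occupy adjacent 4-bit fields of the 32-bit flags word: callers OR a pre-shifted value in, and efct_scsi_xfer_data()/efct_scsi_send_resp() mask and shift it back out. For example:

u32 flags = EFCT_SCSI_LAST_DATAPHASE | EFCT_SCSI_WQ_STEERING_CPU;
u32 steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
	       EFCT_SCSI_WQ_STEERING_SHIFT;	/* == 2, CPU steering */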
+
+struct efct_scsi_cmd_resp {
+ u8 scsi_status;
+ u16 scsi_status_qualifier;
+ u8 *response_data;
+ u32 response_data_length;
+ u8 *sense_data;
+ u32 sense_data_length;
+ int residual;
+ u32 response_wire_length;
+};
+
+struct efct_vport {
+ struct efct *efct;
+ bool is_vport;
+ struct fc_host_statistics fc_host_stats;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+};
+
+/* Status values returned by IO callbacks */
+enum efct_scsi_io_status {
+ EFCT_SCSI_STATUS_GOOD = 0,
+ EFCT_SCSI_STATUS_ABORTED,
+ EFCT_SCSI_STATUS_ERROR,
+ EFCT_SCSI_STATUS_DIF_GUARD_ERR,
+ EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR,
+ EFCT_SCSI_STATUS_PROTOCOL_CRC_ERROR,
+ EFCT_SCSI_STATUS_NO_IO,
+ EFCT_SCSI_STATUS_ABORT_IN_PROGRESS,
+ EFCT_SCSI_STATUS_CHECK_RESPONSE,
+ EFCT_SCSI_STATUS_COMMAND_TIMEOUT,
+ EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED,
+ EFCT_SCSI_STATUS_SHUTDOWN,
+ EFCT_SCSI_STATUS_NEXUS_LOST,
+};
+
+struct efct_node;
+struct efct_io;
+struct efc_node;
+struct efc_nport;
+
+/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */
+typedef int (*efct_scsi_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ u32 flags, void *arg);
+
+/* Callback used by send_rd_io(), send_wr_io() */
+typedef int (*efct_scsi_rsp_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ struct efct_scsi_cmd_resp *rsp,
+ u32 flags, void *arg);
+
+/* efct_scsi_cb_t flags */
+#define EFCT_SCSI_IO_CMPL (1 << 0)
+/* IO completed, response sent */
+#define EFCT_SCSI_IO_CMPL_RSP_SENT (1 << 1)
+#define EFCT_SCSI_IO_ABORTED (1 << 2)
+
+/* efct_scsi_recv_tmf() request values */
+enum efct_scsi_tmf_cmd {
+ EFCT_SCSI_TMF_ABORT_TASK = 1,
+ EFCT_SCSI_TMF_QUERY_TASK_SET,
+ EFCT_SCSI_TMF_ABORT_TASK_SET,
+ EFCT_SCSI_TMF_CLEAR_TASK_SET,
+ EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT,
+ EFCT_SCSI_TMF_LOGICAL_UNIT_RESET,
+ EFCT_SCSI_TMF_CLEAR_ACA,
+ EFCT_SCSI_TMF_TARGET_RESET,
+};
+
+/* efct_scsi_send_tmf_resp() response values */
+enum efct_scsi_tmf_resp {
+ EFCT_SCSI_TMF_FUNCTION_COMPLETE = 1,
+ EFCT_SCSI_TMF_FUNCTION_SUCCEEDED,
+ EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND,
+ EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER,
+ EFCT_SCSI_TMF_SERVICE_DELIVERY,
+};
+
+struct efct_scsi_sgl {
+ uintptr_t addr;
+ uintptr_t dif_addr;
+ size_t len;
+};
+
+enum efct_scsi_io_role {
+ EFCT_SCSI_IO_ROLE_ORIGINATOR,
+ EFCT_SCSI_IO_ROLE_RESPONDER,
+};
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node);
+void efct_scsi_io_free(struct efct_io *io);
+struct efct_io *efct_io_get_instance(struct efct *efct, u32 index);
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+int efct_scsi_tgt_new_device(struct efct *efct);
+int efct_scsi_tgt_del_device(struct efct *efct);
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport);
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport);
+
+int
+efct_scsi_new_initiator(struct efc *efc, struct efc_node *node);
+
+enum efct_scsi_del_initiator_reason {
+ EFCT_SCSI_INITIATOR_DELETED,
+ EFCT_SCSI_INITIATOR_MISSING,
+};
+
+int
+efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason);
+void
+efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, u32 cdb_len,
+ u32 flags);
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *abortio, u32 flags);
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_tmf_resp(struct efct_io *io, enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg);
+
+void efct_scsi_io_complete(struct efct_io *io);
+
+int efct_scsi_reg_fc_transport(void);
+void efct_scsi_release_fc_transport(void);
+int efct_scsi_new_device(struct efct *efct);
+void efct_scsi_del_device(struct efct *efct);
+void _efct_scsi_io_free(struct kref *arg);
+
+int
+efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost);
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev);
+
+int efct_scsi_io_dispatch(struct efct_io *io, void *cb);
+int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb);
+void efct_scsi_check_pending(struct efct *efct);
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr);
+
+#endif /* __EFCT_SCSI_H__ */
diff --git a/drivers/scsi/elx/efct/efct_unsol.c b/drivers/scsi/elx/efct/efct_unsol.c
new file mode 100644
index 000000000000..e6addab66a60
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+#define frame_printf(efct, hdr, fmt, ...) \
+ do { \
+ char s_id_text[16]; \
+ efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
+ s_id_text, sizeof(s_id_text)); \
+ efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
+ ntoh24((hdr)->fh_d_id), s_id_text, \
+ (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
+ be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
+ } while (0)
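
A hedged usage sketch of frame_printf() (not from the patch; assumes efct and a received sequence seq are in scope, as in efct_dispatch_frame() below):

    struct fc_frame_header *hdr = seq->header->dma.virt;

    /* logs "[d_id.s_id] r_ctl/ox_id/rx_id: dropping ..." */
    frame_printf(efct, hdr, "dropping frame, unexpected type %02x\n",
    	     hdr->fh_type);
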
+
+static struct efct_node *
+efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
+{
+ struct efct_node *node;
+ u64 id = (u64)port_id << 32 | node_id;
+
+ /*
+ * During node shutdown, the lookup entry is removed first,
+ * before the backend is notified, so no new IOs will be allowed.
+ */
+ /* Find a target node, given s_id and d_id */
+ node = xa_load(&efct->lookup, id);
+ if (node)
+ kref_get(&node->ref);
+
+ return node;
+}
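
The xarray key packs the local port id into the upper 32 bits and the remote node id into the lower 32. A minimal sketch of the store side (hypothetical helper; the patch populates efct->lookup elsewhere, and using a u64 index assumes a 64-bit kernel, where xarray's unsigned long index is 64 bits wide):

    #include <linux/xarray.h>

    static int efct_node_register(struct efct *efct, struct efct_node *node,
    			      u32 port_id, u32 node_id)
    {
    	u64 id = (u64)port_id << 32 | node_id;

    	/* returns 0 on success, negative errno on store failure */
    	return xa_err(xa_store(&efct->lookup, id, node, GFP_KERNEL));
    }
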
+
+static int
+efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ struct efct_node *node;
+ struct fc_frame_header *hdr;
+ u32 s_id, d_id;
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
+ return -EIO;
+
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct,
+ "Node not found, drop cmd d_id:%x s_id:%x\n",
+ d_id, s_id);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+ }
+
+ efct_dispatch_fcp_cmd(node, seq);
+ } else {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
+ d_id, s_id);
+ return -EIO;
+ }
+
+ efc_log_err(efct, "Received ABTS for Node:%p\n", node);
+ efct_node_recv_abts_frame(node, seq);
+ }
+
+ kref_put(&node->ref, node->release);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+}
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = arg;
+
+ /* Process FCP command */
+ if (!efct_dispatch_frame(efct, seq))
+ return 0;
+
+ /* Forward frame to discovery lib */
+ efc_dispatch_frame(efct->efcport, seq);
+ return 0;
+}
+
+static int
+efct_fc_tmf_rejected_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_scsi_io_free(io);
+ return 0;
+}
+
+static void
+efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
+{
+ u32 i;
+ struct {
+ u32 mask;
+ enum efct_scsi_tmf_cmd cmd;
+ } tmflist[] = {
+ {FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
+ {FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
+ {FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
+ {FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
+ {FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };
+
+ io->exp_xfer_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
+ if (tmflist[i].mask & tm_flags) {
+ io->tmf_cmd = tmflist[i].cmd;
+ efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(tmflist)) {
+ /* Not handled */
+ efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
+ efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_fc_tmf_rejected_cb, NULL);
+ }
+}
+
+static int
+efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ /*
+ * If we received fewer than FCP_CMND IU bytes, assume the frame is
+ * corrupted in some way and drop it. This has been seen when jamming
+ * the FCTL fill-bytes field.
+ */
+ if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+
+ efc_log_debug(efct,
+ "drop ox_id %04x payload (%zd) less than (%zd)\n",
+ be16_to_cpu(fchdr->fh_ox_id),
+ seq->payload->dma.len, sizeof(struct fcp_cmnd));
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
+ struct fc_frame_header *fchdr, bool sit)
+{
+ io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
+ /* note, tgt_task_tag, hw_tag set when HW io is allocated */
+ io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
+ io->transferred = 0;
+
+ /* The upper 7 bits of CS_CTL carry the frame priority through the SAN.
+ * Our assertion is that the priority given to the frame containing
+ * the FCP cmd should be the priority given to ALL frames contained
+ * in that IO. Thus we need to save the incoming CS_CTL here.
+ */
+ if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
+ io->cs_ctl = fchdr->fh_cs_ctl;
+ else
+ io->cs_ctl = 0;
+
+ io->seq_init = sit;
+}
+
+static u32
+efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
+{
+ u32 flags = 0;
+
+ switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_SIMPLE:
+ flags |= EFCT_SCSI_CMD_SIMPLE;
+ break;
+ case FCP_PTA_HEADQ:
+ flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
+ break;
+ case FCP_PTA_ORDERED:
+ flags |= EFCT_SCSI_CMD_ORDERED;
+ break;
+ case FCP_PTA_ACA:
+ flags |= EFCT_SCSI_CMD_ACA;
+ break;
+ }
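+ /* Direction flags are from the target's perspective: initiator
+ * write data is received by the target (DIR_IN), and vice versa.
+ */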
+ if (cmnd->fc_flags & FCP_CFL_WRDATA)
+ flags |= EFCT_SCSI_CMD_DIR_IN;
+ if (cmnd->fc_flags & FCP_CFL_RDDATA)
+ flags |= EFCT_SCSI_CMD_DIR_OUT;
+
+ return flags;
+}
+
+static void
+efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_send_frame_context *ctx = arg;
+ struct efct_hw *hw = ctx->hw;
+
+ /* Free WQ completion callback */
+ efct_hw_reqtag_free(hw, ctx->wqcb);
+
+ /* Free sequence */
+ efct_hw_sequence_free(hw, ctx->seq);
+}
+
+static int
+efct_sframe_common_send(struct efct_node *node,
+ struct efc_hw_sequence *seq,
+ enum fc_rctl r_ctl, u32 f_ctl,
+ u8 type, void *payload, u32 payload_len)
+{
+ struct efct *efct = node->efct;
+ struct efct_hw *hw = &efct->hw;
+ int rc = 0;
+ struct fc_frame_header *req_hdr = seq->header->dma.virt;
+ struct fc_frame_header hdr;
+ struct efct_hw_send_frame_context *ctx;
+
+ u32 heap_size = seq->payload->dma.size;
+ uintptr_t heap_phys_base = seq->payload->dma.phys;
+ u8 *heap_virt_base = seq->payload->dma.virt;
+ u32 heap_offset = 0;
+
+ /* Build the FC header; response memory is carved from the RQ payload buffer */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.fh_r_ctl = r_ctl;
+ /* send it back to whomever sent it to us */
+ memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
+ memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
+ hdr.fh_type = type;
+ hton24(hdr.fh_f_ctl, f_ctl);
+ hdr.fh_ox_id = req_hdr->fh_ox_id;
+ hdr.fh_rx_id = req_hdr->fh_rx_id;
+ hdr.fh_cs_ctl = 0;
+ hdr.fh_df_ctl = 0;
+ hdr.fh_seq_cnt = 0;
+ hdr.fh_parm_offset = 0;
+
+ /*
+ * send_frame_seq_id is an atomic; let it increment freely and
+ * store only the low 8 bits in hdr->fh_seq_id
+ */
+ hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
+ hdr.fh_seq_id--;
+
+ /* Allocate and fill in the send frame request context */
+ ctx = (void *)(heap_virt_base + heap_offset);
+ heap_offset += sizeof(*ctx);
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Save sequence */
+ ctx->seq = seq;
+
+ /* Allocate a response payload DMA buffer from the heap */
+ ctx->payload.phys = heap_phys_base + heap_offset;
+ ctx->payload.virt = heap_virt_base + heap_offset;
+ ctx->payload.size = payload_len;
+ ctx->payload.len = payload_len;
+ heap_offset += payload_len;
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ /* Copy the payload in */
+ memcpy(ctx->payload.virt, payload, payload_len);
+
+ /* Send */
+ rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
+ FC_EOF_T, &ctx->payload, ctx,
+ efct_sframe_common_send_cb, ctx);
+ if (rc)
+ efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);
+
+ return rc;
+}
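
Two patterns above are worth calling out: the send-frame context and response payload are bump-allocated from the received sequence's payload DMA buffer, and SEQ_ID is the low 8 bits of a free-running atomic counter. A standalone sketch of the bump allocation under those assumptions (names hypothetical; the sketch checks bounds before advancing the offset, while the code above checks after, but both reject the request before touching memory):

    struct frame_heap {
    	u8 *base;
    	u32 size;
    	u32 offset;
    };

    /* Carve len bytes from the heap; NULL if it would overflow */
    static void *heap_carve(struct frame_heap *h, u32 len)
    {
    	void *p;

    	if (h->offset + len > h->size)
    		return NULL;

    	p = h->base + h->offset;
    	h->offset += len;
    	return p;
    }
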
+
+static int
+efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
+ void *rsp, u32 rsp_len)
+{
+ return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
+ FC_FC_EX_CTX |
+ FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT,
+ FC_TYPE_FCP,
+ rsp, rsp_len);
+}
+
+static int
+efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fcp_resp_with_ext fcprsp;
+ struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
+ int rc = 0;
+ unsigned long flags = 0;
+ struct efct *efct = node->efct;
+
+ /* construct task set full or busy response */
+ memset(&fcprsp, 0, sizeof(fcprsp));
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
+ SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ *((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl);
+
+ /* send it using send_frame */
+ rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
+ if (rc)
+ efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);
+
+ return rc;
+}
+
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+ struct fcp_cmnd *cmnd = NULL;
+ struct efct_io *io = NULL;
+ u32 lun;
+
+ if (!seq->payload) {
+ efc_log_err(efct, "Sequence payload is NULL.\n");
+ return -EIO;
+ }
+
+ cmnd = seq->payload->dma.virt;
+
+ /* perform FCP_CMND validation check(s) */
+ if (efct_validate_fcp_cmd(efct, seq))
+ return -EIO;
+
+ lun = scsilun_to_int(&cmnd->fc_lun);
+ if (lun == U32_MAX)
+ return -EIO;
+
+ io = efct_scsi_io_alloc(node);
+ if (!io) {
+ int rc;
+
+ /* Use SEND_FRAME to send task set full or busy */
+ rc = efct_sframe_send_task_set_full_or_busy(node, seq);
+ if (rc)
+ efc_log_err(efct, "Failed to send busy task: %d\n", rc);
+
+ return rc;
+ }
+
+ io->hw_priv = seq->hw_priv;
+
+ io->app_id = 0;
+
+ /* RQ pair, if we got here, SIT=1 */
+ efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);
+
+ if (cmnd->fc_tm_flags) {
+ efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
+ } else {
+ u32 flags = efct_get_flags_fcp_cmd(cmnd);
+
+ if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
+ efc_log_err(efct, "Additional CDB not supported\n");
+ return -EIO;
+ }
+ /*
+ * efct_scsi_recv_cmd() can fail for things like task set full
+ * and UAs; there is no need to treat that as a dropped frame
+ */
+ efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
+ sizeof(cmnd->fc_cdb), flags);
+ }
+
+ return 0;
+}
+
+static int
+efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct efct *efct = io->efct;
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+ u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
+ struct efct_io *abortio;
+
+ /* Find IO and attempt to take a reference on it */
+ abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);
+
+ if (abortio) {
+ /* Got a reference on the IO. Hold it until backend
+ * is notified below
+ */
+ efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
+ ox_id, rx_id);
+
+ /*
+ * Save the ox_id from the ABTS as the init_task_tag in our
+ * manufactured TMF IO object
+ */
+ io->display_name = "abts";
+ io->init_task_tag = ox_id;
+ /* don't set tgt_task_tag, don't want to confuse with XRI */
+
+ /*
+ * Save the rx_id from the ABTS, as it is needed for the
+ * BLS response regardless of the IO context's rx_id
+ */
+ io->abort_rx_id = rx_id;
+
+ /* Call target server command abort */
+ io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
+ efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
+ EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);
+
+ /*
+ * The backend will have taken an additional reference on
+ * the IO if needed; we are done with our reference.
+ */
+ kref_put(&abortio->ref, abortio->release);
+ } else {
+ /*
+ * Either the IO was not found, or it was freed between
+ * finding it and attempting to take the reference.
+ */
+ efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
+ ox_id);
+
+ /* Send a BA_RJT */
+ efct_bls_send_rjt(io, hdr);
+ }
+ return 0;
+}
+
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efct_io *io = NULL;
+
+ node->abort_cnt++;
+ io = efct_scsi_io_alloc(node);
+ if (io) {
+ io->hw_priv = seq->hw_priv;
+ /* If we got this far, SIT=1 */
+ io->seq_init = 1;
+
+ /* fill out generic fields */
+ io->efct = efct;
+ io->node = node;
+ io->cmd_tgt = true;
+
+ efct_process_abts(io, seq->header->dma.virt);
+ } else {
+ efc_log_err(efct,
+ "SCSI IO allocation failed for ABTS received ");
+ efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
+ be16_to_cpu(hdr->fh_ox_id),
+ be16_to_cpu(hdr->fh_rx_id));
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_unsol.h b/drivers/scsi/elx/efct/efct_unsol.h
new file mode 100644
index 000000000000..16d1e3ba1833
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__OSC_UNSOL_H__)
+#define __OSC_UNSOL_H__
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq);
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq);
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __OSC_UNSOL_H__ */
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
new file mode 100644
index 000000000000..9495cedcc0b9
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+static struct dentry *efct_debugfs_root;
+static atomic_t efct_debugfs_count;
+
+static struct scsi_host_template efct_template = {
+ .module = THIS_MODULE,
+ .name = EFCT_DRIVER_NAME,
+ .supported_mode = MODE_TARGET,
+};
+
+/* globals */
+static struct fc_function_template efct_xport_functions;
+static struct fc_function_template efct_vport_functions;
+
+static struct scsi_transport_template *efct_xport_fc_tt;
+static struct scsi_transport_template *efct_vport_fc_tt;
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct)
+{
+ struct efct_xport *xport;
+
+ xport = kzalloc(sizeof(*xport), GFP_KERNEL);
+ if (!xport)
+ return xport;
+
+ xport->efct = efct;
+ return xport;
+}
+
+static int
+efct_xport_init_debugfs(struct efct *efct)
+{
+ /* Setup efct debugfs root directory */
+ if (!efct_debugfs_root) {
+ efct_debugfs_root = debugfs_create_dir("efct", NULL);
+ atomic_set(&efct_debugfs_count, 0);
+ }
+
+ /* Create a directory for sessions in root */
+ if (!efct->sess_debugfs_dir) {
+ efct->sess_debugfs_dir = debugfs_create_dir("sessions",
+ efct_debugfs_root);
+ if (IS_ERR(efct->sess_debugfs_dir)) {
+ efc_log_err(efct,
+ "failed to create debugfs entry for sessions\n");
+ goto debugfs_fail;
+ }
+ atomic_inc(&efct_debugfs_count);
+ }
+
+ return 0;
+
+debugfs_fail:
+ return -EIO;
+}
+
+static void efct_xport_delete_debugfs(struct efct *efct)
+{
+ /* Remove session debugfs directory */
+ debugfs_remove(efct->sess_debugfs_dir);
+ efct->sess_debugfs_dir = NULL;
+ atomic_dec(&efct_debugfs_count);
+
+ if (atomic_read(&efct_debugfs_count) == 0) {
+ /* remove root debugfs directory */
+ debugfs_remove(efct_debugfs_root);
+ efct_debugfs_root = NULL;
+ }
+}
+
+int
+efct_xport_attach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc) {
+ efc_log_err(efct, "%s: Can't setup hardware\n", efct->desc);
+ return rc;
+ }
+
+ efct_hw_parse_filter(&efct->hw, (void *)efct->filter_def);
+
+ xport->io_pool = efct_io_pool_create(efct, efct->hw.config.n_sgl);
+ if (!xport->io_pool) {
+ efc_log_err(efct, "Can't allocate IO pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+efct_xport_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_async_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+}
+
+static void
+efct_xport_async_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct);
+
+static void
+efct_xport_stats_timer_cb(struct timer_list *t)
+{
+ struct efct_xport *xport = from_timer(xport, t, stats_timer);
+ struct efct *efct = xport->efct;
+
+ efct_xport_config_stats_timer(efct);
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct)
+{
+ u32 timeout = 3 * 1000;
+ struct efct_xport *xport = NULL;
+
+ if (!efct) {
+ pr_err("%s: failed to locate EFCT device\n", __func__);
+ return;
+ }
+
+ xport = efct->xport;
+ efct_hw_get_link_stats(&efct->hw, 0, 0, 0,
+ efct_xport_async_link_stats_cb,
+ &xport->fc_xport_stats);
+ efct_hw_get_host_stats(&efct->hw, 0, efct_xport_async_host_stats_cb,
+ &xport->fc_xport_stats);
+
+ timer_setup(&xport->stats_timer,
+ &efct_xport_stats_timer_cb, 0);
+ mod_timer(&xport->stats_timer,
+ jiffies + msecs_to_jiffies(timeout));
+}
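
The statistics poll is a self-rearming timer: the callback fires, re-issues the async stat mailbox commands, and re-arms itself for three seconds later. A reduced sketch of the idiom (names hypothetical); note the patch re-runs timer_setup() on every rearm, whereas a single setup followed by mod_timer() is the more common pattern:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct poller {
    	struct timer_list timer;
    };

    static void poll_cb(struct timer_list *t)
    {
    	struct poller *p = from_timer(p, t, timer);

    	/* ... kick off async work here ... */
    	mod_timer(&p->timer, jiffies + msecs_to_jiffies(3 * 1000));
    }

    static void poll_start(struct poller *p)
    {
    	timer_setup(&p->timer, poll_cb, 0);
    	mod_timer(&p->timer, jiffies + msecs_to_jiffies(3 * 1000));
    }
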
+
+int
+efct_xport_initialize(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc = 0;
+
+ /* Initialize io lists */
+ spin_lock_init(&xport->io_pending_lock);
+ INIT_LIST_HEAD(&xport->io_pending_list);
+ atomic_set(&xport->io_active_count, 0);
+ atomic_set(&xport->io_pending_count, 0);
+ atomic_set(&xport->io_total_free, 0);
+ atomic_set(&xport->io_total_pending, 0);
+ atomic_set(&xport->io_alloc_failed_count, 0);
+ atomic_set(&xport->io_pending_recursing, 0);
+
+ rc = efct_hw_init(&efct->hw);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_init failure\n");
+ goto out;
+ }
+
+ rc = efct_scsi_tgt_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize target\n");
+ goto hw_init_out;
+ }
+
+ rc = efct_scsi_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize initiator\n");
+ goto tgt_dev_out;
+ }
+
+ /* Get FC link and host statistics periodically */
+ efct_xport_config_stats_timer(efct);
+
+ efct_xport_init_debugfs(efct);
+
+ return rc;
+
+tgt_dev_out:
+ efct_scsi_tgt_del_device(efct);
+
+hw_init_out:
+ efct_hw_teardown(&efct->hw);
+out:
+ return rc;
+}
+
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result)
+{
+ int rc = 0;
+ struct efct *efct = NULL;
+ union efct_xport_stats_u value;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_CONFIG_PORT_STATUS:
+ if (xport->configured_link_state == 0) {
+ /*
+ * Initial state is offline. configured_link_state is
+ * set to online explicitly when port is brought online
+ */
+ xport->configured_link_state = EFCT_XPORT_PORT_OFFLINE;
+ }
+ result->value = xport->configured_link_state;
+ break;
+
+ case EFCT_XPORT_PORT_STATUS:
+ /* Determine port status based on link speed. */
+ value.value = efct_hw_get_link_speed(&efct->hw);
+ if (value.value == 0)
+ result->value = EFCT_XPORT_PORT_OFFLINE;
+ else
+ result->value = EFCT_XPORT_PORT_ONLINE;
+ break;
+
+ case EFCT_XPORT_LINK_SPEED:
+ result->value = efct_hw_get_link_speed(&efct->hw);
+ break;
+
+ case EFCT_XPORT_LINK_STATISTICS:
+ memcpy((void *)result, &efct->xport->fc_xport_stats,
+ sizeof(union efct_xport_stats_u));
+ break;
+ case EFCT_XPORT_LINK_STAT_RESET: {
+ /* Create a completion to synchronize the stat reset process */
+ init_completion(&result->stats.done);
+
+ /* First reset the link stats */
+ rc = efct_hw_get_link_stats(&efct->hw, 0, 1, 1,
+ efct_xport_link_stats_cb, result);
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Interrupted while waiting on the completion */
+ efc_log_debug(efct, "stats completion wait interrupted\n");
+ rc = -EIO;
+ break;
+ }
+
+ /* Next reset the host stats */
+ rc = efct_hw_get_host_stats(&efct->hw, 1,
+ efct_xport_host_stats_cb, result);
+
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Interrupted while waiting on the completion */
+ efc_log_debug(efct, "stats completion wait interrupted\n");
+ rc = -EIO;
+ break;
+ }
+ break;
+ }
+ default:
+ rc = -EIO;
+ break;
+ }
+
+ return rc;
+}
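
The LINK_STAT_RESET path turns the async mailbox interface synchronous with a struct completion: issue the command, then block until its callback signals done. A minimal sketch of that idiom (issue_async() and the callback wiring are hypothetical):

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct sync_ctx {
    	struct completion done;
    	int result;
    };

    /* completion callback invoked from the async path */
    static void op_done_cb(int status, void *arg)
    {
    	struct sync_ctx *ctx = arg;

    	ctx->result = status;
    	complete(&ctx->done);
    }

    static int do_sync_op(void)
    {
    	struct sync_ctx ctx = { .result = -EIO };

    	init_completion(&ctx.done);

    	if (issue_async(op_done_cb, &ctx))	/* hypothetical issuer */
    		return -EIO;

    	if (wait_for_completion_interruptible(&ctx.done))
    		return -ERESTARTSYS;

    	return ctx.result;
    }
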
+
+static int
+efct_get_link_supported_speeds(struct efct *efct)
+{
+ u32 supported_speeds = 0;
+ u32 link_module_type, i;
+ struct {
+ u32 lmt_speed;
+ u32 speed;
+ } supported_speed_list[] = {
+ {SLI4_LINK_MODULE_TYPE_1GB, FC_PORTSPEED_1GBIT},
+ {SLI4_LINK_MODULE_TYPE_2GB, FC_PORTSPEED_2GBIT},
+ {SLI4_LINK_MODULE_TYPE_4GB, FC_PORTSPEED_4GBIT},
+ {SLI4_LINK_MODULE_TYPE_8GB, FC_PORTSPEED_8GBIT},
+ {SLI4_LINK_MODULE_TYPE_16GB, FC_PORTSPEED_16GBIT},
+ {SLI4_LINK_MODULE_TYPE_32GB, FC_PORTSPEED_32GBIT},
+ {SLI4_LINK_MODULE_TYPE_64GB, FC_PORTSPEED_64GBIT},
+ {SLI4_LINK_MODULE_TYPE_128GB, FC_PORTSPEED_128GBIT},
+ };
+
+ link_module_type = sli_get_lmt(&efct->hw.sli);
+
+ /* populate link supported speeds */
+ for (i = 0; i < ARRAY_SIZE(supported_speed_list); i++) {
+ if (link_module_type & supported_speed_list[i].lmt_speed)
+ supported_speeds |= supported_speed_list[i].speed;
+ }
+
+ return supported_speeds;
+}
+
+int
+efct_scsi_new_device(struct efct *efct)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return -ENOMEM;
+ }
+
+ /* save shost to initiator-client context */
+ efct->shost = shost;
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+
+ /*
+ * Set the initial can_queue value to the max SCSI IOs. This is the
+ * maximum global queue depth (as opposed to the per-LUN queue depth,
+ * .cmd_per_lun). This may need to be adjusted for I+T mode.
+ */
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+ /*
+ * can only accept (from mid-layer) as many SGEs as we've
+ * pre-registered
+ */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_xport_fc_tt;
+ efc_log_debug(efct, "transport template=%p\n", efct_xport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, &efct->pci->dev,
+ &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return -EIO;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model,
+ efct->hw.sli.fw_name[0], EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+
+ fc_host_node_name(shost) = efct_get_wwnn(&efct->hw);
+ fc_host_port_name(shost) = efct_get_wwpn(&efct->hw);
+ fc_host_max_npiv_vports(shost) = 128;
+
+ return 0;
+}
+
+struct scsi_transport_template *
+efct_attach_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_xport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+struct scsi_transport_template *
+efct_attach_vport_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_vport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+int
+efct_scsi_reg_fc_transport(void)
+{
+ /* attach to the appropriate scsi_transport_* module */
+ efct_xport_fc_tt = efct_attach_fc_transport();
+ if (!efct_xport_fc_tt) {
+ pr_err("%s: failed to attach to scsi_transport_*", __func__);
+ return -EIO;
+ }
+
+ efct_vport_fc_tt = efct_attach_vport_fc_transport();
+ if (!efct_vport_fc_tt) {
+ pr_err("%s: failed to attach to scsi_transport_*", __func__);
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_scsi_release_fc_transport(void)
+{
+ /* detach from scsi_transport_* */
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ if (efct_vport_fc_tt)
+ efct_release_fc_transport(efct_vport_fc_tt);
+
+ efct_vport_fc_tt = NULL;
+}
+
+void
+efct_xport_detach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+
+ /* free resources associated with target-server and initiator-client */
+ efct_scsi_tgt_del_device(efct);
+
+ efct_scsi_del_device(efct);
+
+ /* Shutdown the FC statistics timer */
+ if (timer_pending(&xport->stats_timer))
+ del_timer(&xport->stats_timer);
+
+ efct_hw_teardown(&efct->hw);
+
+ efct_xport_delete_debugfs(efct);
+}
+
+static void
+efct_xport_domain_free_cb(struct efc *efc, void *arg)
+{
+ struct completion *done = arg;
+
+ complete(done);
+}
+
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...)
+{
+ u32 rc = 0;
+ struct efct *efct = NULL;
+ va_list argp;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_PORT_ONLINE: {
+ /* Bring the port on-line */
+ rc = efct_hw_port_control(&efct->hw, EFCT_HW_PORT_INIT, 0,
+ NULL, NULL);
+ if (rc)
+ efc_log_err(efct,
+ "%s: Can't init port\n", efct->desc);
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+ case EFCT_XPORT_PORT_OFFLINE: {
+ if (efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN, 0,
+ NULL, NULL))
+ efc_log_err(efct, "port shutdown failed\n");
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+
+ case EFCT_XPORT_SHUTDOWN: {
+ struct completion done;
+ unsigned long timeout;
+
+ /* If a PHYSDEV reset was performed (e.g. hw dump), it affects all
+ * PCI functions; an orderly shutdown won't work, so just force free
+ */
+ if (sli_reset_required(&efct->hw.sli)) {
+ struct efc_domain *domain = efct->efcport->domain;
+
+ if (domain)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST,
+ domain);
+ } else {
+ efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN,
+ 0, NULL, NULL);
+ }
+
+ init_completion(&done);
+
+ efc_register_domain_free_cb(efct->efcport,
+ efct_xport_domain_free_cb, &done);
+
+ efc_log_debug(efct, "Waiting %d seconds for domain shutdown\n",
+ (EFC_SHUTDOWN_TIMEOUT_USEC / 1000000));
+
+ timeout = usecs_to_jiffies(EFC_SHUTDOWN_TIMEOUT_USEC);
+ if (!wait_for_completion_timeout(&done, timeout)) {
+ efc_log_err(efct, "Domain shutdown timed out!!\n");
+ WARN_ON(1);
+ }
+
+ efc_register_domain_free_cb(efct->efcport, NULL, NULL);
+
+ /* Free up any saved virtual ports */
+ efc_vport_del_all(efct->efcport);
+ break;
+ }
+
+ /*
+ * Set wwnn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWNN_SET: {
+ u64 wwnn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwnn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWNN %016llx\n", wwnn);
+ xport->req_wwnn = wwnn;
+
+ break;
+ }
+ /*
+ * Set wwpn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWPN_SET: {
+ u64 wwpn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwpn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWPN %016llx\n", wwpn);
+ xport->req_wwpn = wwpn;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ return rc;
+}
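
Callers pass command-specific arguments through the varargs tail; only the WWNN/WWPN setters consume one. Hedged usage (values illustrative):

    efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
    efct_xport_control(efct->xport, EFCT_XPORT_WWPN_SET,
    		   (u64)0x10000090fa000001ULL);
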
+
+void
+efct_xport_free(struct efct_xport *xport)
+{
+ if (xport) {
+ efct_io_pool_free(xport->io_pool);
+
+ kfree(xport);
+ }
+}
+
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template)
+{
+ if (transport_template)
+ pr_err("releasing transport layer\n");
+
+ /* Releasing FC transport */
+ fc_release_transport(transport_template);
+}
+
+static void
+efct_xport_remove_host(struct Scsi_Host *shost)
+{
+ fc_remove_host(shost);
+}
+
+void
+efct_scsi_del_device(struct efct *efct)
+{
+ if (!efct->shost)
+ return;
+
+ efc_log_debug(efct, "Unregistering with Transport Layer\n");
+ efct_xport_remove_host(efct->shost);
+ efc_log_debug(efct, "Unregistering with SCSI Midlayer\n");
+ scsi_remove_host(efct->shost);
+ scsi_host_put(efct->shost);
+ efct->shost = NULL;
+}
+
+static void
+efct_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ struct efc_nport *nport;
+
+ if (efc->domain && efc->domain->nport) {
+ nport = efc->domain->nport;
+ fc_host_port_id(shost) = nport->fc_id;
+ }
+}
+
+static void
+efct_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ int type = FC_PORTTYPE_UNKNOWN;
+
+ if (efc->domain && efc->domain->nport) {
+ if (efc->domain->is_loop) {
+ type = FC_PORTTYPE_LPORT;
+ } else {
+ struct efc_nport *nport = efc->domain->nport;
+
+ if (nport->is_vport)
+ type = FC_PORTTYPE_NPIV;
+ else if (nport->topology == EFC_NPORT_TOPO_P2P)
+ type = FC_PORTTYPE_PTP;
+ else if (nport->topology == EFC_NPORT_TOPO_UNKNOWN)
+ type = FC_PORTTYPE_UNKNOWN;
+ else
+ type = FC_PORTTYPE_NPORT;
+ }
+ }
+ fc_host_port_type(shost) = type;
+}
+
+static void
+efct_get_host_vport_type(struct Scsi_Host *shost)
+{
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+}
+
+static void
+efct_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u status;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_PORT_STATUS, &status);
+ if ((!rc) && (status.value == EFCT_XPORT_PORT_ONLINE))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+}
+
+static void
+efct_get_host_speed(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ union efct_xport_stats_u speed;
+ u32 fc_speed = FC_PORTSPEED_UNKNOWN;
+ int rc;
+
+ if (!efc->domain || !efc->domain->nport) {
+ fc_host_speed(shost) = fc_speed;
+ return;
+ }
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_SPEED, &speed);
+ if (!rc) {
+ switch (speed.value) {
+ case 1000:
+ fc_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case 2000:
+ fc_speed = FC_PORTSPEED_2GBIT;
+ break;
+ case 4000:
+ fc_speed = FC_PORTSPEED_4GBIT;
+ break;
+ case 8000:
+ fc_speed = FC_PORTSPEED_8GBIT;
+ break;
+ case 10000:
+ fc_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 16000:
+ fc_speed = FC_PORTSPEED_16GBIT;
+ break;
+ case 32000:
+ fc_speed = FC_PORTSPEED_32GBIT;
+ break;
+ case 64000:
+ fc_speed = FC_PORTSPEED_64GBIT;
+ break;
+ case 128000:
+ fc_speed = FC_PORTSPEED_128GBIT;
+ break;
+ }
+ }
+
+ fc_host_speed(shost) = fc_speed;
+}
+
+static void
+efct_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+
+ if (efc->domain) {
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)
+ efc->domain->flogi_service_params;
+
+ fc_host_fabric_name(shost) = be64_to_cpu(sp->fl_wwnn);
+ }
+}
+
+static struct fc_host_statistics *
+efct_get_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u stats;
+ struct efct_xport *xport = efct->xport;
+ int rc = 0;
+
+ rc = efct_xport_status(xport, EFCT_XPORT_LINK_STATISTICS, &stats);
+ if (rc) {
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+ return NULL;
+ }
+
+ vport->fc_host_stats.loss_of_sync_count =
+ stats.stats.link_stats.loss_of_sync_error_count;
+ vport->fc_host_stats.link_failure_count =
+ stats.stats.link_stats.link_failure_error_count;
+ vport->fc_host_stats.prim_seq_protocol_err_count =
+ stats.stats.link_stats.primitive_sequence_error_count;
+ vport->fc_host_stats.invalid_tx_word_count =
+ stats.stats.link_stats.invalid_transmission_word_error_count;
+ vport->fc_host_stats.invalid_crc_count =
+ stats.stats.link_stats.crc_error_count;
+ /* mbox returns kbyte count so we need to convert to words */
+ vport->fc_host_stats.tx_words =
+ stats.stats.host_stats.transmit_kbyte_count * 256;
+ /* mbox returns kbyte count so we need to convert to words */
+ vport->fc_host_stats.rx_words =
+ stats.stats.host_stats.receive_kbyte_count * 256;
+ vport->fc_host_stats.tx_frames =
+ stats.stats.host_stats.transmit_frame_count;
+ vport->fc_host_stats.rx_frames =
+ stats.stats.host_stats.receive_frame_count;
+
+ vport->fc_host_stats.fcp_input_requests =
+ xport->fcp_stats.input_requests;
+ vport->fc_host_stats.fcp_output_requests =
+ xport->fcp_stats.output_requests;
+ vport->fc_host_stats.fcp_output_megabytes =
+ xport->fcp_stats.output_bytes >> 20;
+ vport->fc_host_stats.fcp_input_megabytes =
+ xport->fcp_stats.input_bytes >> 20;
+ vport->fc_host_stats.fcp_control_requests =
+ xport->fcp_stats.control_requests;
+
+ return &vport->fc_host_stats;
+}
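
The conversion factor in efct_get_stats() follows from the units: the mailbox reports KiB and an FC word is 4 bytes, so 1 KiB = 1024 / 4 = 256 words. Note the multiply is done in u32 arithmetic in the patch, so counters above ~16M KiB (about 16 GiB) would wrap before the u64 assignment; a widened helper (illustrative only) sidesteps that:

    /* 1 KiB = 1024 bytes, FC word = 4 bytes => 256 words per KiB */
    static inline u64 kbytes_to_fc_words(u64 kbytes)
    {
    	return kbytes * 256;
    }
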
+
+static void
+efct_reset_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ /* argument has no purpose for this action */
+ union efct_xport_stats_u dummy;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_STAT_RESET, &dummy);
+ if (rc)
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+}
+
+static int
+efct_issue_lip(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport =
+ shost ? (struct efct_vport *)shost->hostdata : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+
+ if (!shost || !vport || !efct) {
+ pr_err("%s: shost=%p vport=%p efct=%p\n", __func__,
+ shost, vport, efct);
+ return -EPERM;
+ }
+
+ /*
+ * Bring the link down gracefully then re-init the link.
+ * The firmware will re-initialize the Fibre Channel interface as
+ * required. It does not issue a LIP.
+ */
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_OFFLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_OFFLINE failed\n");
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_ONLINE failed\n");
+
+ return 0;
+}
+
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return NULL;
+ }
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+ vport->is_vport = true;
+
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+ /* can only accept (from mid-layer) as many SGEs as we've pre-registered */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_vport_fc_tt;
+ efc_log_debug(efct, "vport transport template=%p\n",
+ efct_vport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, dev, &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return NULL;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model, efct->hw.sli.fw_name[0],
+ EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+ vport->shost = shost;
+
+ return vport;
+}
+
+int efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost)
+{
+ if (shost) {
+ efc_log_debug(efct,
+ "Unregistering vport with Transport Layer\n");
+ efct_xport_remove_host(shost);
+ efc_log_debug(efct, "Unregistering vport with SCSI Midlayer\n");
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+ return 0;
+ }
+ return -EIO;
+}
+
+static int
+efct_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport ? fc_vport->shost : NULL;
+ struct efct_vport *pport = shost ?
+ (struct efct_vport *)shost->hostdata :
+ NULL;
+ struct efct *efct = pport ? pport->efct : NULL;
+ struct efct_vport *vport = NULL;
+
+ if (!fc_vport || !shost || !efct)
+ goto fail;
+
+ vport = efct_scsi_new_vport(efct, &fc_vport->dev);
+ if (!vport) {
+ efc_log_err(efct, "failed to create vport\n");
+ goto fail;
+ }
+
+ vport->fc_vport = fc_vport;
+ vport->npiv_wwpn = fc_vport->port_name;
+ vport->npiv_wwnn = fc_vport->node_name;
+ fc_host_node_name(vport->shost) = vport->npiv_wwnn;
+ fc_host_port_name(vport->shost) = vport->npiv_wwpn;
+ *(struct efct_vport **)fc_vport->dd_data = vport;
+
+ return 0;
+
+fail:
+ return -EIO;
+}
+
+static int
+efct_vport_delete(struct fc_vport *fc_vport)
+{
+ struct efct_vport *vport = *(struct efct_vport **)fc_vport->dd_data;
+ struct Scsi_Host *shost = vport ? vport->shost : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+ int rc;
+
+ rc = efct_scsi_del_vport(efct, shost);
+
+ if (rc)
+ pr_err("%s: vport delete failed\n", __func__);
+
+ return rc;
+}
+
+static int
+efct_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ return 0;
+}
+
+static struct fc_function_template efct_xport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_port_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ .vport_disable = efct_vport_disable,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+ .vport_create = efct_vport_create,
+ .vport_delete = efct_vport_delete,
+};
+
+static struct fc_function_template efct_vport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_vport_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+};
diff --git a/drivers/scsi/elx/efct/efct_xport.h b/drivers/scsi/elx/efct/efct_xport.h
new file mode 100644
index 000000000000..89f3c20ecb59
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_XPORT_H__)
+#define __EFCT_XPORT_H__
+
+enum efct_xport_ctrl {
+ EFCT_XPORT_PORT_ONLINE = 1,
+ EFCT_XPORT_PORT_OFFLINE,
+ EFCT_XPORT_SHUTDOWN,
+ EFCT_XPORT_POST_NODE_EVENT,
+ EFCT_XPORT_WWNN_SET,
+ EFCT_XPORT_WWPN_SET,
+};
+
+enum efct_xport_status {
+ EFCT_XPORT_PORT_STATUS,
+ EFCT_XPORT_CONFIG_PORT_STATUS,
+ EFCT_XPORT_LINK_SPEED,
+ EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
+ EFCT_XPORT_LINK_STATISTICS,
+ EFCT_XPORT_LINK_STAT_RESET,
+ EFCT_XPORT_IS_QUIESCED
+};
+
+struct efct_xport_link_stats {
+ bool rec;
+ bool gec;
+ bool w02of;
+ bool w03of;
+ bool w04of;
+ bool w05of;
+ bool w06of;
+ bool w07of;
+ bool w08of;
+ bool w09of;
+ bool w10of;
+ bool w11of;
+ bool w12of;
+ bool w13of;
+ bool w14of;
+ bool w15of;
+ bool w16of;
+ bool w17of;
+ bool w18of;
+ bool w19of;
+ bool w20of;
+ bool w21of;
+ bool clrc;
+ bool clof1;
+ u32 link_failure_error_count;
+ u32 loss_of_sync_error_count;
+ u32 loss_of_signal_error_count;
+ u32 primitive_sequence_error_count;
+ u32 invalid_transmission_word_error_count;
+ u32 crc_error_count;
+ u32 primitive_sequence_event_timeout_count;
+ u32 elastic_buffer_overrun_error_count;
+ u32 arbitration_fc_al_timeout_count;
+ u32 advertised_receive_bufftor_to_buffer_credit;
+ u32 current_receive_buffer_to_buffer_credit;
+ u32 advertised_transmit_buffer_to_buffer_credit;
+ u32 current_transmit_buffer_to_buffer_credit;
+ u32 received_eofa_count;
+ u32 received_eofdti_count;
+ u32 received_eofni_count;
+ u32 received_soff_count;
+ u32 received_dropped_no_aer_count;
+ u32 received_dropped_no_available_rpi_resources_count;
+ u32 received_dropped_no_available_xri_resources_count;
+};
+
+struct efct_xport_host_stats {
+ bool cc;
+ u32 transmit_kbyte_count;
+ u32 receive_kbyte_count;
+ u32 transmit_frame_count;
+ u32 receive_frame_count;
+ u32 transmit_sequence_count;
+ u32 receive_sequence_count;
+ u32 total_exchanges_originator;
+ u32 total_exchanges_responder;
+ u32 receive_p_bsy_count;
+ u32 receive_f_bsy_count;
+ u32 dropped_frames_due_to_no_rq_buffer_count;
+ u32 empty_rq_timeout_count;
+ u32 dropped_frames_due_to_no_xri_count;
+ u32 empty_xri_pool_count;
+};
+
+struct efct_xport_host_statistics {
+ struct completion done;
+ struct efct_xport_link_stats link_stats;
+ struct efct_xport_host_stats host_stats;
+};
+
+union efct_xport_stats_u {
+ u32 value;
+ struct efct_xport_host_statistics stats;
+};
+
+struct efct_xport_fcp_stats {
+ u64 input_bytes;
+ u64 output_bytes;
+ u64 input_requests;
+ u64 output_requests;
+ u64 control_requests;
+};
+
+struct efct_xport {
+ struct efct *efct;
+ /* wwpn requested by user for primary nport */
+ u64 req_wwpn;
+ /* wwnn requested by user for primary nport */
+ u64 req_wwnn;
+
+ /* Nodes */
+ /* number of allocated nodes */
+ u32 nodes_count;
+ /* used to track how often IO pool is empty */
+ atomic_t io_alloc_failed_count;
+ /* array of pointers to nodes */
+ struct efc_node **nodes;
+
+ /* Io pool and counts */
+ /* pointer to IO pool */
+ struct efct_io_pool *io_pool;
+ /* lock for io_pending_list */
+ spinlock_t io_pending_lock;
+ /* list of IOs waiting for HW resources
+ * lock: xport->io_pending_lock
+ * link: efct_io_s->io_pending_link
+ */
+ struct list_head io_pending_list;
+ /* total count of allocated IOs */
+ atomic_t io_total_alloc;
+ /* total count of freed IOs */
+ atomic_t io_total_free;
+ /* total count of IOs that were pended */
+ atomic_t io_total_pending;
+ /* count of active IOS */
+ atomic_t io_active_count;
+ /* count of pending IOS */
+ atomic_t io_pending_count;
+ /* non-zero if efct_scsi_check_pending is executing */
+ atomic_t io_pending_recursing;
+
+ /* Port */
+ /* requested link state */
+ u32 configured_link_state;
+
+ /* Timer for Statistics */
+ struct timer_list stats_timer;
+ union efct_xport_stats_u fc_xport_stats;
+ struct efct_xport_fcp_stats fcp_stats;
+};
+
+struct efct_rport_data {
+ struct efc_node *node;
+};
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct);
+int
+efct_xport_attach(struct efct_xport *xport);
+int
+efct_xport_initialize(struct efct_xport *xport);
+void
+efct_xport_detach(struct efct_xport *xport);
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result);
+void
+efct_xport_free(struct efct_xport *xport);
+
+struct scsi_transport_template *efct_attach_fc_transport(void);
+struct scsi_transport_template *efct_attach_vport_fc_transport(void);
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template);
+
+#endif /* __EFCT_XPORT_H__ */
diff --git a/drivers/scsi/elx/include/efc_common.h b/drivers/scsi/elx/include/efc_common.h
new file mode 100644
index 000000000000..8d57f69ace0a
--- /dev/null
+++ b/drivers/scsi/elx/include/efc_common.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_COMMON_H__
+#define __EFC_COMMON_H__
+
+#include <linux/pci.h>
+
+struct efc_dma {
+ void *virt;
+ void *alloc;
+ dma_addr_t phys;
+
+ size_t size;
+ size_t len;
+ struct pci_dev *pdev;
+};
+
+#define efc_log_crit(efc, fmt, args...) \
+ dev_crit(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_err(efc, fmt, args...) \
+ dev_err(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_warn(efc, fmt, args...) \
+ dev_warn(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_info(efc, fmt, args...) \
+ dev_info(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_debug(efc, fmt, args...) \
+ dev_dbg(&((efc)->pci)->dev, fmt, ##args)
+
+#endif /* __EFC_COMMON_H__ */
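
These wrappers key all driver logging off the adapter's PCI device, so messages carry the standard dev_*() prefix. Hedged usage, assuming efc and nport pointers are in scope:

    efc_log_err(efc, "REG_VPI failed, vpi=%#x\n", nport->indicator);
    efc_log_debug(efc, "nport %06x attached\n", nport->fc_id);
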
diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h
new file mode 100644
index 000000000000..468ff3cc9c00
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_H__
+#define __EFC_H__
+
+#include "../include/efc_common.h"
+#include "efclib.h"
+#include "efc_sm.h"
+#include "efc_cmds.h"
+#include "efc_domain.h"
+#include "efc_nport.h"
+#include "efc_node.h"
+#include "efc_fabric.h"
+#include "efc_device.h"
+#include "efc_els.h"
+
+#define EFC_MAX_REMOTE_NODES 2048
+#define NODE_SPARAMS_SIZE 256
+
+enum efc_scsi_del_initiator_reason {
+ EFC_SCSI_INITIATOR_DELETED,
+ EFC_SCSI_INITIATOR_MISSING,
+};
+
+enum efc_scsi_del_target_reason {
+ EFC_SCSI_TARGET_DELETED,
+ EFC_SCSI_TARGET_MISSING,
+};
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+#define domain_sm_trace(domain) \
+ efc_log_debug(domain->efc, "[domain:%s] %-20s %-20s\n", \
+ domain->display_name, __func__, efc_sm_event_name(evt))
+
+#define domain_trace(domain, fmt, ...) \
+ efc_log_debug(domain->efc, \
+ "[%s]" fmt, domain->display_name, ##__VA_ARGS__) \
+
+#define node_sm_trace() \
+ efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \
+ node->display_name, __func__, efc_sm_event_name(evt))
+
+#define nport_sm_trace(nport) \
+ efc_log_debug(nport->efc, \
+ "[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \
+
+#endif /* __EFC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
new file mode 100644
index 000000000000..da4ac8a4ce12
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efclib.h"
+#include "../libefc_sli/sli4.h"
+#include "efc_cmds.h"
+#include "efc_sm.h"
+
+static void
+efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Clear the nport attached flag */
+ nport->attached = false;
+
+ /* Free the service parameters buffer */
+ if (nport->dma.virt) {
+ dma_free_coherent(&efc->pci->dev, nport->dma.size,
+ nport->dma.virt, nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+
+ efc_nport_cb(efc, evt, nport);
+}
+
+static int
+efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
+{
+ struct efc *efc = nport->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
+ nport->indicator, status, le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ int evt = EFC_EVT_NPORT_FREE_OK;
+ int rc;
+
+ rc = efc_nport_get_mbox_status(nport, mqe, status);
+ if (rc)
+ evt = EFC_EVT_NPORT_FREE_FAIL;
+
+ efc_nport_free_resources(nport, evt, mqe);
+ return rc;
+}
+
+static void
+efc_nport_free_unreg_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
+ SLI4_UNREG_TYPE_PORT);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_free_unreg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ }
+}
+
+static void
+efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Now inform the registered callbacks */
+ efc_nport_cb(efc, evt, nport);
+
+ /* Set the nport attached flag */
+ if (evt == EFC_EVT_NPORT_ATTACH_OK)
+ nport->attached = true;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending)
+ efc_nport_free_unreg_vpi(nport);
+}
+
+static int
+efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_nport_alloc_init_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
+ return;
+ }
+
+ rc = sli_cmd_init_vpi(efc->sli, data,
+ nport->indicator, nport->domain->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_init_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ u8 *payload = NULL;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ payload = nport->dma.virt;
+
+ memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
+ sizeof(nport->sli_wwpn));
+ memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
+ sizeof(nport->sli_wwnn));
+
+ dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
+ nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ efc_nport_alloc_init_vpi(nport);
+ return 0;
+}
+
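+/*
+ * Read the port's default service parameters (READ_SPARM64) into a
+ * temporary DMA buffer; the completion callback extracts the WWPN and
+ * WWNN and then moves on to INIT_VPI.
+ */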
+static void
+efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
+{
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* Allocate memory for the service parameters */
+ nport->dma.size = EFC_SPARAM_DMA_SZ;
+ nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ nport->dma.size, &nport->dma.phys,
+ GFP_KERNEL);
+ if (!nport->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = sli_cmd_read_sparm64(efc->sli, data,
+ &nport->dma, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_read_sparm64_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn)
+{
+ u32 index;
+
+ nport->indicator = U32_MAX;
+ nport->free_req_pending = false;
+
+ if (wwpn)
+ memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));
+
+ /*
+ * Allocate a VPI object for the port and store it in the
+ * indicator field of the port object.
+ */
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
+ &nport->indicator, &index)) {
+ efc_log_err(efc, "VPI allocation failure\n");
+ return -EIO;
+ }
+
+ if (domain) {
+ /*
+ * If the WWPN is NULL, fetch the default
+ * WWPN and WWNN before initializing the VPI
+ */
+ if (!wwpn)
+ efc_nport_alloc_read_sparm64(efc, nport);
+ else
+ efc_nport_alloc_init_vpi(nport);
+ } else if (!wwpn) {
+ /* domain NULL and wwpn NULL */
+ efc_log_err(efc, "need WWN for physical port\n");
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ nport->attaching = false;
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!nport) {
+ efc_log_err(efc, "bad param(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ nport->fc_id = fc_id;
+
+ /* register previously-allocated VPI with the device */
+ rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
+ nport->sli_wwpn, nport->indicator,
+ nport->domain->indicator, false);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ return rc;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_nport_attach_reg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ } else {
+ nport->attaching = true;
+ }
+
+ return rc;
+}
+
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
+{
+ if (!nport) {
+ efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ /* Issue the UNREG_VPI command to free the assigned VPI context */
+ if (nport->attached)
+ efc_nport_free_unreg_vpi(nport);
+ else if (nport->attaching)
+ nport->free_req_pending = true;
+ else
+ efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);
+
+ return 0;
+}
+
+static int
+efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
+{
+ struct efc *efc = domain->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
+ domain->indicator, status,
+ le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
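+/*
+ * Release a domain's resources: free the service parameters buffer and
+ * the VFI, then deliver the given event to the registered callbacks.
+ */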
+static void
+efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Free the service parameters buffer */
+ if (domain->dma.virt) {
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);
+
+ efc_domain_cb(efc, evt, domain);
+}
+
+static void
+efc_domain_send_nport_evt(struct efc_domain *domain,
+ int port_evt, int domain_evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Send alloc/attach ok to the physical nport */
+ efc_nport_send_evt(domain->nport, port_evt, NULL);
+
+ /* Now inform the registered callbacks */
+ efc_domain_cb(efc, domain_evt, domain);
+}
+
+static int
+efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_domain_alloc_read_sparm64(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_read_sparm64_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_alloc_read_sparm64(domain);
+ return 0;
+}
+
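+/*
+ * Start domain allocation with an INIT_VFI mailbox command, using the
+ * FCFI already registered by the hardware; the completion callback
+ * continues with READ_SPARM64.
+ */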
+static void
+efc_domain_alloc_init_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_nport *nport = domain->nport;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /*
+ * For FC, the HW already registered an FCFI.
+ * Copy FCF information into the domain and jump to INIT_VFI.
+ */
+ domain->fcf_indicator = efc->fcfi;
+ rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
+ domain->fcf_indicator, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ efc_log_err(efc, "%s issue mbox\n", __func__);
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_init_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
+{
+ u32 index;
+
+ if (!domain || !domain->nport) {
+ efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
+ domain, domain ? domain->nport : NULL);
+ return -EIO;
+ }
+
+ /* allocate memory for the service parameters */
+ domain->dma.size = EFC_SPARAM_DMA_SZ;
+ domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ domain->dma.size,
+ &domain->dma.phys, GFP_KERNEL);
+ if (!domain->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ return -EIO;
+ }
+
+ domain->fcf = fcf;
+ domain->fcf_indicator = U32_MAX;
+ domain->indicator = U32_MAX;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
+ &index)) {
+ efc_log_err(efc, "VFI allocation failure\n");
+
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+
+ return -EIO;
+ }
+
+ efc_domain_alloc_init_vfi(domain);
+ return 0;
+}
+
+static int
+efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!domain) {
+ efc_log_err(efc, "bad param(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ domain->nport->fc_id = fc_id;
+
+ rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
+ domain->fcf_indicator, domain->dma,
+ domain->nport->indicator, domain->nport->sli_wwpn,
+ domain->nport->fc_id);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_domain_attach_reg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return rc;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);
+
+ return rc;
+}
+
+static int
+efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_domain *domain = arg;
+ int evt = EFC_HW_DOMAIN_FREE_OK;
+ int rc;
+
+ rc = efc_domain_get_mbox_status(domain, mqe, status);
+ if (rc) {
+ evt = EFC_HW_DOMAIN_FREE_FAIL;
+ rc = -EIO;
+ }
+
+ efc_domain_free_resources(domain, evt, mqe);
+ return rc;
+}
+
+static void
+efc_domain_free_unreg_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
+ SLI4_UNREG_TYPE_DOMAIN);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_free_unreg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
+}
+
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
+{
+ if (!domain) {
+ efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ efc_domain_free_unreg_vfi(domain);
+ return 0;
+}
+
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport)
+{
+ /* Check for invalid indicator */
+ if (rnode->indicator != U32_MAX) {
+ efc_log_err(efc,
+ "RPI allocation failure addr=%#x rpi=%#x\n",
+ fc_addr, rnode->indicator);
+ return -EIO;
+ }
+
+ /* NULL SLI port indicates an unallocated remote node */
+ rnode->nport = NULL;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
+ &rnode->indicator, &rnode->index)) {
+ efc_log_err(efc, "RPI allocation failure addr=%#x\n",
+ fc_addr);
+ return -EIO;
+ }
+
+ rnode->fc_id = fc_addr;
+ rnode->nport = nport;
+
+ return 0;
+}
+
+static int
+efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_ATTACH_FAIL;
+ } else {
+ rnode->attached = true;
+ evt = EFC_EVT_NODE_ATTACH_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return 0;
+}
+
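+/*
+ * Attach a remote node by registering its RPI (REG_RPI) with the
+ * remote port's service parameters; efc_cmd_node_attach_cb reports the
+ * result through efc_remote_node_cb.
+ */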
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms)
+{
+ int rc = -EIO;
+ u8 buf[SLI4_BMBX_SIZE];
+
+ if (!rnode || !sparms) {
+ efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
+ rnode, sparms);
+ return -EIO;
+ }
+
+ /*
+ * The RPI index must have been allocated (via efc_cmd_node_alloc)
+ * before the RPI can be registered
+ */
+ if (rnode->index == U32_MAX) {
+ efc_log_err(efc, "bad parameter rnode->index invalid\n");
+ return -EIO;
+ }
+
+ /* Update a remote node object with the remote port's service params */
+ if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
+ rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_attach_cb, rnode);
+
+ return rc;
+}
+
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
+{
+ int rc = 0;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (rnode->attached) {
+ efc_log_err(efc, "rnode is still attached\n");
+ return -EIO;
+ }
+ if (rnode->indicator != U32_MAX) {
+ if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
+ rnode->indicator)) {
+ efc_log_err(efc,
+ "RPI free fail RPI %d addr=%#x\n",
+ rnode->indicator, rnode->fc_id);
+ rc = -EIO;
+ } else {
+ rnode->indicator = U32_MAX;
+ rnode->index = U32_MAX;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int
+efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = EFC_EVT_NODE_FREE_FAIL;
+ int rc = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+
+ /*
+ * In certain cases, a non-zero MQE status is OK (all must be
+ * true):
+ * - node is attached
+ * - status is SLI4_MBX_STATUS_RPI_NOT_REG (0x1400)
+ */
+ if (!rnode->attached ||
+ (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
+ rc = -EIO;
+ }
+
+ if (!rc) {
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_FREE_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return rc;
+}
+
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = -EIO;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (!rnode->attached)
+ return -EIO;
+
+ rc = -EIO;
+
+ if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
+ SLI4_RSRC_RPI, U32_MAX))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_free_cb, rnode);
+
+ if (rc != 0) {
+ efc_log_err(efc, "UNREG_RPI failed\n");
+ rc = -EIO;
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/libefc/efc_cmds.h b/drivers/scsi/elx/libefc/efc_cmds.h
new file mode 100644
index 000000000000..4d353ab04dc3
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_CMDS_H__
+#define __EFC_CMDS_H__
+
+#define EFC_SPARAM_DMA_SZ 112
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn);
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id);
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport);
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf);
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id);
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain);
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms);
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport);
+
+#endif /* __EFC_CMDS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
new file mode 100644
index 000000000000..52be01333c6e
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.c
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * device_sm Node State Machine: Remote Device States
+ */
+
+#include "efc.h"
+#include "efc_device.h"
+#include "efc_fabric.h"
+
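+/*
+ * Prepare the response to a received PRLI: save the OX_ID, register
+ * the session with the backend if needed, and post a SESS_REG event so
+ * the state machine can send the LS_ACC (or a reject) afterwards.
+ */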
+void
+efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
+{
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ struct efc *efc = node->efc;
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;
+
+ /*
+ * Wait for backend session registration
+ * to complete before sending PRLI resp
+ */
+
+ if (node->init) {
+ efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_new_node(efc, node);
+ }
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
+}
+
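+/*
+ * Common event handler for the device node states: shutdown events set
+ * the shutdown reason and enter __efc_d_initiate_shutdown; everything
+ * else is delegated to the generic node handler.
+ */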
+static void
+__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_node(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ /*
+ * State is entered when a node sends a delete initiator/target call
+ * to the target-server/initiator-client and needs to wait for that
+ * work to complete.
+ */
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ /*
+ * node has either been detached or is in the process
+ * of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ efc_node_transition(node, __efc_d_wait_del_node, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+
+ /* assume no wait needed */
+ node->els_io_enabled = false;
+
+ /* make necessary delete upcall(s) */
+ if (node->init && !node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (initiator) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+
+ } else if (node->targ && !node->init) {
+ efc_log_info(node->efc,
+ "[%s] delete (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+
+ } else if (node->init && node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (I+T) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ efc_node_transition(node, __efc_d_wait_del_ini_tgt,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ /* assume no wait needed */
+ rc = EFC_SCSI_CALL_COMPLETE;
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ }
+
+ /* we've initiated the upcalls as needed, now kick off the node
+ * detach to precipitate the aborting of outstanding exchanges
+ * associated with said node
+ *
+ * Beware: if we've made upcall(s), we've already transitioned
+ * to a new state by the time we execute this.
+ * Consider doing this before the upcalls?
+ */
+ if (node->attached) {
+ /* issue hw node free; don't care if succeeds right
+ * away or sometime later, will check node->attached
+ * later in shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0)
+ node_printf(node,
+ "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+
+ /* if neither initiator nor target, proceed to cleanup */
+ if (!node->init && !node->targ) {
+ /*
+ * node has either been detached or is in
+ * the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ }
+ break;
+ }
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* Ignore, this can happen if an ELS is
+ * aborted while in a delay/retry state
+ */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ /* send PLOGI automatically if initiator */
+ efc_node_init_device(node, true);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls)
+{
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+
+ /* Save the OX_ID for sending LS_ACC sometime later */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = ls;
+ node->ls_acc_did = ntoh24(hdr->fh_d_id);
+}
+
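+/* Extract the initiator/target capability bits from a PRLI payload */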
+void
+efc_process_prli_payload(struct efc_node *node, void *prli)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = prli;
+ node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
+ node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
+}
+
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_d_port_logged_in, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* LOGO response received, send shutdown */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node_printf(node,
+ "LOGO sent (evt=%s), shutdown node\n",
+ efc_sm_event_name(evt));
+ /* sm: / post explicit logout */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
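+/*
+ * Pick a new device node's initial state: pause it if
+ * EFC_NODEDB_PAUSE_NEW_NODES is set and the node is not the domain
+ * manager, otherwise enter __efc_d_init directly.
+ */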
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi)
+{
+ node->send_plogi = send_plogi;
+ if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
+ (node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ node->nodedb_state = __efc_d_init;
+ efc_node_transition(node, __efc_node_paused, NULL);
+ } else {
+ efc_node_transition(node, __efc_d_init, NULL);
+ }
+}
+
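+/*
+ * A PLOGI arrived before the domain is attached: depending on the
+ * nport topology, either start the domain attach here (p2p), wait for
+ * the fabric state machine to do it (fabric), or wait for a topology
+ * notification (unknown).
+ */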
+static void
+efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
+{
+ switch (node->nport->topology) {
+ case EFC_NPORT_TOPO_P2P:
+ /* we're not attached and nport is p2p,
+ * need to attach
+ */
+ efc_domain_attach(node->nport->domain, d_id);
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_FABRIC:
+ /* we're not attached and nport is fabric, domain
+ * attach should have already been requested as part
+ * of the fabric state machine, wait for it
+ */
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_UNKNOWN:
+ /* Two possibilities:
+ * 1. received a PLOGI before our FLOGI has completed
+ * (possible since completion comes in on another
+ * CQ), thus we don't know what we're connected to
+ * yet; transition to a state to wait for the
+ * fabric node to tell us;
+ * 2. PLOGI received before link went down and we
+ * haven't performed domain attach yet.
+ * Note: we cannot distinguish between 1. and 2.
+ * so have to assume PLOGI
+ * was received after link back up.
+ */
+ node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
+ d_id);
+ efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
+ break;
+ default:
+ node_printf(node, "received PLOGI, unexpected topology %d\n",
+ node->nport->topology);
+ }
+}
+
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * This state is entered when a node is instantiated,
+ * either having been discovered from a name services query,
+ * or having received a PLOGI/FLOGI.
+ */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->send_plogi)
+ break;
+ /* only send if we have initiator capability,
+ * and domain is attached
+ */
+ if (node->nport->enable_ini &&
+ node->nport->domain->attached) {
+ efc_send_plogi(node);
+
+ efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
+ } else {
+ node_printf(node, "not sending plogi nport.ini=%d,",
+ node->nport->enable_ini);
+ node_printf(node, "domain attached=%d\n",
+ node->nport->domain->attached);
+ }
+ break;
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ int rc;
+
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* domain not attached; several possibilities: */
+ if (!node->nport->domain->attached) {
+ efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
+ break;
+ }
+
+ /* domain already attached */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_FDISC_RCVD: {
+ __efc_d_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ case EFC_EVT_FLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ u32 d_id = ntoh24(hdr->fh_d_id);
+
+ /* sm: / save sparams, send FLOGI acc */
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->payload->dma.virt,
+ sizeof(struct fc_els_flogi));
+
+ /* send FC LS_ACC response, override s_id */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+
+ efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);
+
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node, "p2p failed, shutting down node\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and shut node down w/ "explicit logout"
+ * so pending frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* note: problem, we're now expecting an ELS REQ completion
+ * from both the LOGO and PLOGI
+ */
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a
+ * link down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ /* Send LOGO */
+ node_printf(node, "FCP_CMND received, send LOGO\n");
+ if (efc_send_logo(node)) {
+ /*
+ * failed to send LOGO, go ahead and cleanup node
+ * anyways
+ */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ } else {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node,
+ __efc_d_wait_logo_rsp, NULL);
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ /* received PLOGI with svc parms, go ahead and attach node
+ * when PLOGI that was sent ultimately completes, it'll be a
+ * no-op
+ *
+ * If there is an outstanding PLOGI sent, can we set a flag
+ * to indicate that we don't want to retry it if it times out?
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+ /* sm: domain->attached / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* sent PLOGI and before completion was seen, received the
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues and order of processing cannot be assumed)
+ * Save OXID so PRLI can be sent after the attach and continue
+ * to wait for PLOGI response
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* Our PLOGI was rejected, this is ok in some cases */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* not logged in yet and outstanding PLOGI so don't send LOGO,
+ * just drop
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /*
+ * Since we've received a PRLI, we have a port login and will
+ * just need to wait for the PLOGI response to do the node
+ * attach and then we can send the LS_ACC for the PRLI. During
+ * this time we may receive FCP_CMNDs (possible, since we've
+ * already sent a PRLI and our peer may have accepted it). We are
+ * not waiting on any other unsolicited frames to continue with
+ * the login process, so it will not hurt to hold frames here.
+ */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
+ enum efc_nport_topology *topology = arg;
+
+ WARN_ON(node->nport->domain->attached);
+
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ node_printf(node, "topology notification, topology=%d\n",
+ *topology);
+
+ /* At the time the PLOGI was received, the topology was unknown,
+ * so we didn't know which node would perform the domain attach:
+ * 1. The node from which the PLOGI was sent (p2p) or
+ * 2. The node to which the FLOGI was sent (fabric).
+ */
+ if (*topology == EFC_NPORT_TOPO_P2P) {
+ /* if this is p2p, need to attach to the domain using
+ * the d_id from the PLOGI received
+ */
+ efc_domain_attach(node->nport->domain,
+ node->ls_acc_did);
+ }
+ /* else, if this is fabric, the domain attach
+ * should be performed by the fabric node (node sending FLOGI);
+ * just wait for attach to complete
+ */
+
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ node_printf(node, "domain attach ok\n");
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PLOGI: {
+ /* sm: send_plogi_acc is set / send PLOGI acc */
+ /* Normal case for T, or I+T */
+ efc_send_plogi_acc(node, node->ls_acc_oxid);
+ efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
+ NULL);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node, node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set */
+ efc_node_transition(node,
+ __efc_d_port_logged_in, NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node,
+ __efc_d_wait_attach_evt_shutdown, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* Normal case for I or I+T */
+ if (node->nport->enable_ini &&
+ node->rnode.fc_id == FC_FID_DOM_MGR) {
+ /* sm: if enable_ini / send PRLI */
+ efc_send_prli(node);
+ /* can now expect ELS_REQ_OK/FAIL/RJT */
+ }
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* Normal case for T or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+ /* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_NODE_SESS_REG_OK:
+ if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
+ efc_send_prli_acc(node, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_NODE_SESS_REG_FAIL:
+ efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
+ ELS_EXPL_UNSUPR, 0);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
+ /* Normal case for I or I+T */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process PRLI payload */
+ efc_process_prli_payload(node, cbdata->els_rsp.virt);
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
+ /* I, I+T, assume some link failure, shutdown node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT: {
+ /* PRLI rejected by remote
+ * Normal for I, I+T (connected to an I)
+ * Node doesn't want to be a target, stay here and wait for a
+ * PRLI from the remote node
+ * if it really wants to connect to us as target
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: {
+ /* Normal T, I+T, target-server rejected the process login */
+ /* This would be received only in the case where we sent
+ * LS_RJT for the PRLI, so
+ * do nothing. (note: as T only we could shutdown the node)
+ */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /*sm: / save sparams, set send_plogi_acc,
+ *post implicit logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt),
+ node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* sm: / post explicit logout */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ if (evt != EFC_EVT_FCP_CMD_RCVD)
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node->fcp_enabled = true;
+ if (node->targ) {
+ efc_log_info(efc,
+ "[%s] found (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ if (node->nport->enable_ini)
+ efc->tt.scsi_new_node(efc, node);
+ }
+ break;
+
+ case EFC_EVT_EXIT:
+ node->fcp_enabled = false;
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* T, I+T: remote initiator is slow to get started */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+ /* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_PRLO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send PRLO acc */
+ efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ /* need implicit logout? */
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_ADISC_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send ADISC acc */
+ efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: / process ABTS */
+ efc_log_err(efc, "Unexpected event:%s\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ break;
+
+ case EFC_EVT_NODE_MISSING:
+ if (node->nport->enable_rscn)
+ efc_node_transition(node, __efc_d_device_gone, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ /* T, or I+T, PRLI accept completed ok */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* T, or I+T, PRLI accept failed to complete */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node_printf(node, "Failed to send PRLI LS_ACC\n");
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
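+/*
+ * Node reported missing by RSCN processing: issue the SCSI layer
+ * delete upcalls appropriate to the node's role, then either shut the
+ * node down or re-authenticate with ADISC if it is found again.
+ */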
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ int rc_2 = EFC_SCSI_CALL_COMPLETE;
+ static const char * const labels[] = {
+ "none", "initiator", "target", "initiator+target"
+ };
+
+ efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n",
+ node->display_name,
+ labels[(node->targ << 1) | (node->init)],
+ node->wwpn, node->wwnn);
+
+ switch (efc_node_get_enable(node)) {
+ case EFC_NODE_ENABLE_T_TO_T:
+ case EFC_NODE_ENABLE_I_TO_T:
+ case EFC_NODE_ENABLE_IT_TO_T:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_I:
+ case EFC_NODE_ENABLE_I_TO_I:
+ case EFC_NODE_ENABLE_IT_TO_I:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_I_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_IT_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ rc_2 = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ default:
+ rc = EFC_SCSI_CALL_COMPLETE;
+ break;
+ }
+
+ if (rc == EFC_SCSI_CALL_COMPLETE &&
+ rc_2 == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+
+ break;
+ }
+ case EFC_EVT_NODE_REFOUND:
+ /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
+
+ /* reauthenticate with PLOGI/PRLI */
+ /* efc_node_transition(node, __efc_d_discovered, NULL); */
+
+ /* reauthenticate with ADISC */
+ /* sm: / send ADISC */
+ efc_send_adisc(node);
+ efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters, and send
+ * ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* most likely a stale frame (received prior to link down),
+ * if attempt to send LOGO, will probably timeout and eat
+ * up 20s; thus, drop FCP_CMND
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
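+/*
+ * Waiting for the ADISC response: an accept returns the node to the
+ * ready state, while a reject or a LOGO forces an explicit logout so
+ * discovery starts over with PLOGI.
+ */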
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* received an LS_RJT, in this case, send shutdown
+ * (explicit logo) event which will unregister the node,
+ * and start over with PLOGI
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / post explicit logout */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* In this case, we have the equivalent of an LS_RJT for
+ * the ADISC, so we need to abort the ADISC, and re-login
+ * with PLOGI
+ */
+ /* sm: / request abort, send LOGO acc */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_device.h b/drivers/scsi/elx/libefc/efc_device.h
new file mode 100644
index 000000000000..3cf1d8c6698f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Node state machine functions for remote device node sm
+ */
+
+#ifndef __EFCT_DEVICE_H__
+#define __EFCT_DEVICE_H__
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi);
+void
+efc_process_prli_payload(struct efc_node *node,
+ void *prli);
+void
+efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id);
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls);
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+#endif /* __EFCT_DEVICE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_domain.c b/drivers/scsi/elx/libefc/efc_domain.c
new file mode 100644
index 000000000000..ca9d7ff2c0d2
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * domain_sm Domain State Machine: States
+ */
+
+#include "efc.h"
+
+int
+efc_domain_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_domain *domain = NULL;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (event != EFC_HW_DOMAIN_FOUND)
+ domain = data;
+
+ /* Accept domain callback events from the user driver */
+ spin_lock_irqsave(&efc->lock, flags);
+ switch (event) {
+ case EFC_HW_DOMAIN_FOUND: {
+ u64 fcf_wwn = 0;
+ struct efc_domain_record *drec = data;
+
+ /* extract the fcf_wwn */
+ fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn));
+
+ efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn);
+
+ /* lookup domain, or allocate a new one */
+ domain = efc->domain;
+ if (!domain) {
+ domain = efc_domain_alloc(efc, fcf_wwn);
+ if (!domain) {
+ efc_log_err(efc, "efc_domain_alloc() failed\n");
+ rc = -1;
+ break;
+ }
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ }
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec);
+ break;
+ }
+
+ case EFC_HW_DOMAIN_LOST:
+ domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n");
+ efc->hold_frames = true;
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL,
+ NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n");
+ efc_domain_post_event(domain,
+ EFC_EVT_DOMAIN_ATTACH_FAIL, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL);
+ break;
+
+ default:
+ efc_log_warn(efc, "unsupported event %#x\n", event);
+ }
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ if (efc->domain && domain->req_accept_frames) {
+ domain->req_accept_frames = false;
+ efc->hold_frames = false;
+ }
+
+ return rc;
+}
+
+static void
+_efc_domain_free(struct kref *arg)
+{
+ struct efc_domain *domain = container_of(arg, struct efc_domain, ref);
+ struct efc *efc = domain->efc;
+
+ if (efc->domain_free_cb)
+ (*efc->domain_free_cb)(efc, efc->domain_free_cb_arg);
+
+ kfree(domain);
+}
+
+void
+efc_domain_free(struct efc_domain *domain)
+{
+ struct efc *efc;
+
+ efc = domain->efc;
+
+	/* Stop holding frames; the domain pointer is being cleared from
+	 * the xport lookup
+	 */
+ efc->hold_frames = false;
+
+ efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn);
+
+ xa_destroy(&domain->lookup);
+ efc->domain = NULL;
+ kref_put(&domain->ref, domain->release);
+}
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn)
+{
+ struct efc_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_ATOMIC);
+ if (!domain)
+ return NULL;
+
+ domain->efc = efc;
+ domain->drvsm.app = domain;
+
+ /* initialize refcount */
+ kref_init(&domain->ref);
+ domain->release = _efc_domain_free;
+
+ xa_init(&domain->lookup);
+
+ INIT_LIST_HEAD(&domain->nport_list);
+ efc->domain = domain;
+ domain->fcf_wwn = fcf_wwn;
+ efc_log_debug(efc, "Domain allocated: wwn %016llX\n", domain->fcf_wwn);
+
+ return domain;
+}
+
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg)
+{
+ /* Register a callback to be called when the domain is freed */
+ efc->domain_free_cb = callback;
+ efc->domain_free_cb_arg = arg;
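+	/* if no domain currently exists, notify the caller immediately */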
+ if (!efc->domain && callback)
+ (*callback)(efc, arg);
+}
+
+static void
+__efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /*
+	 * this can arise if an FLOGI fails on the NPORT
+	 * and the NPORT is then shut down
+ */
+ break;
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+static void
+__efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ break;
+ case EFC_EVT_DOMAIN_FOUND:
+ /* save drec, mark domain_found_pending */
+ memcpy(&domain->pending_drec, arg,
+ sizeof(domain->pending_drec));
+ domain->domain_found_pending = true;
+ break;
+ case EFC_EVT_DOMAIN_LOST:
+ /* unmark domain_found_pending */
+ domain->domain_found_pending = false;
+ break;
+
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+#define std_domain_state_decl(...)\
+ struct efc_domain *domain = NULL;\
+ struct efc *efc = NULL;\
+ \
+ WARN_ON(!ctx || !ctx->app);\
+ domain = ctx->app;\
+ WARN_ON(!domain->efc);\
+ efc = domain->efc
+
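+/*
+ * Each domain state handler below begins with std_domain_state_decl(),
+ * which expects 'ctx' to be in scope and derives the domain and efc
+ * back-pointers from it.
+ */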
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ domain->attached = false;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND: {
+ u32 i;
+ struct efc_domain_record *drec = arg;
+ struct efc_nport *nport;
+
+ u64 my_wwnn = efc->req_wwnn;
+ u64 my_wwpn = efc->req_wwpn;
+ __be64 bewwpn;
+
+ if (my_wwpn == 0 || my_wwnn == 0) {
+ efc_log_debug(efc, "using default hardware WWN config\n");
+ my_wwpn = efc->def_wwpn;
+ my_wwnn = efc->def_wwnn;
+ }
+
+ efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n",
+ my_wwpn, my_wwnn);
+
+ /* Allocate a nport and transition to __efc_nport_allocated */
+ nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX,
+ efc->enable_ini, efc->enable_tgt);
+
+ if (!nport) {
+ efc_log_err(efc, "efc_nport_alloc() failed\n");
+ break;
+ }
+ efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL);
+
+ bewwpn = cpu_to_be64(nport->wwpn);
+
+ /* allocate struct efc_nport object for local port
+ * Note: drec->fc_id is ALPA from read_topology only if loop
+ */
+ if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ efc_nport_free(nport);
+ break;
+ }
+
+ domain->is_loop = drec->is_loop;
+
+ /*
+ * If the loop position map includes ALPA == 0,
+ * then we are in a public loop (NL_PORT)
+ * Note that the first element of the loopmap[]
+ * contains the count of elements, and if
+ * ALPA == 0 is present, it will occupy the first
+ * location after the count.
+ */
+ domain->is_nlport = drec->map.loop[1] == 0x00;
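+		/*
+		 * For example (values illustrative only):
+		 *   loop[] = { 2, 0x00, 0xE8 } has ALPA 0 -> public loop
+		 *   (NL_PORT); loop[] = { 2, 0x01, 0xE8 } -> private loop.
+		 */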
+
+ if (!domain->is_loop) {
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+
+ efc_log_debug(efc, "%s fc_id=%#x speed=%d\n",
+ drec->is_loop ?
+ (domain->is_nlport ?
+ "public-loop" : "loop") : "other",
+ drec->fc_id, drec->speed);
+
+ nport->fc_id = drec->fc_id;
+ nport->topology = EFC_NPORT_TOPO_FC_AL;
+ snprintf(nport->display_name, sizeof(nport->display_name),
+ "s%06x", drec->fc_id);
+
+ if (efc->enable_ini) {
+ u32 count = drec->map.loop[0];
+
+ efc_log_debug(efc, "%d position map entries\n",
+ count);
+ for (i = 1; i <= count; i++) {
+ if (drec->map.loop[i] != drec->fc_id) {
+ struct efc_node *node;
+
+ efc_log_debug(efc, "%#x -> %#x\n",
+ drec->fc_id,
+ drec->map.loop[i]);
+ node = efc_node_alloc(nport,
+ drec->map.loop[i],
+ false, true);
+ if (!node) {
+ efc_log_err(efc,
+ "efc_node_alloc() failed\n");
+ break;
+ }
+ efc_node_transition(node,
+ __efc_d_wait_loop,
+ NULL);
+ }
+ }
+ }
+
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+ struct efc_nport *nport;
+
+ nport = domain->nport;
+ if (WARN_ON(!nport))
+ return;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ /* Save the domain service parameters */
+ memcpy(domain->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+ memcpy(nport->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
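+		/*
+		 * The copies start at offset 4 so that the first word of
+		 * struct fc_els_flogi (the ELS command code) is not
+		 * overwritten.
+		 */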
+
+ /*
+ * Update the nport's service parameters,
+ * user might have specified non-default names
+ */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * Take the loop topology path,
+ * unless we are an NL_PORT (public loop)
+ */
+ if (domain->is_loop && !domain->is_nlport) {
+ /*
+ * For loop, we already have our FC ID
+ * and don't need fabric login.
+ * Transition to the allocated state and
+ * post an event to attach to
+ * the domain. Note that this breaks the
+ * normal action/transition
+ * pattern here to avoid a race with the
+ * domain attach callback.
+ */
+ /* sm: is_loop / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ __efc_domain_attach_internal(domain, nport->fc_id);
+ break;
+ }
+ {
+ struct efc_node *node;
+
+ /* alloc fabric node, send FLOGI */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (node) {
+ efc_log_err(efc,
+ "Fabric Controller node already exists\n");
+ break;
+ }
+ node = efc_node_alloc(nport, FC_FID_FLOGI,
+ false, false);
+ if (!node) {
+ efc_log_err(efc,
+ "Error: efc_node_alloc() failed\n");
+ } else {
+ efc_node_transition(node,
+ __efc_fabric_init, NULL);
+ }
+ /* Accept frames */
+ domain->req_accept_frames = true;
+ }
+ /* sm: / start fabric logins */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+		efc_log_err(efc,
+			    "%s recv'd waiting for DOMAIN_ALLOC_OK; shutting down domain\n",
+			    efc_sm_event_name(evt));
+ domain->req_domain_free = true;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ efc_log_debug(efc,
+ "%s received while waiting for hw_domain_alloc()\n",
+ efc_sm_event_name(evt));
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ int rc = 0;
+ u32 fc_id;
+
+ if (WARN_ON(!arg))
+ return;
+
+ fc_id = *((u32 *)arg);
+ efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n",
+ fc_id);
+ /* Update nport lookup */
+ rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport,
+ GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return;
+ }
+
+ /* Update display name for the nport */
+ efc_node_fcid_display(fc_id, domain->nport->display_name,
+ sizeof(domain->nport->display_name));
+
+ /* Issue domain attach call */
+ rc = efc_cmd_domain_attach(efc, domain, fc_id);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_attach failed: %d\n",
+ rc);
+ return;
+ }
+ /* sm: / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_wait_attach, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST: {
+ efc_log_debug(efc,
+ "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n",
+ efc_sm_event_name(evt));
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each
+ * nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_node *node = NULL;
+ struct efc_nport *nport, *next_nport;
+ unsigned long index;
+
+ /*
+ * Set domain notify pending state to avoid
+ * duplicate domain event post
+ */
+ domain->domain_notify_pend = true;
+
+ /* Mark as attached */
+ domain->attached = true;
+
+ /* Transition to ready */
+ /* sm: / forward event to all nports and nodes */
+ efc_sm_transition(ctx, __efc_domain_ready, NULL);
+
+ /* We have an FCFI, so we can accept frames */
+ domain->req_accept_frames = true;
+
+ /*
+ * Notify all nodes that the domain attach request
+ * has completed
+ * Note: nport will have already received notification
+ * of nport attached as a result of the HW's port attach.
+ */
+ list_for_each_entry_safe(nport, next_nport,
+ &domain->nport_list, list_entry) {
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ }
+ domain->domain_notify_pend = false;
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_debug(efc,
+ "%s received while waiting for hw attach\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ /*
+ * Domain lost while waiting for an attach to complete,
+ * go to a state that waits for the domain attach to
+ * complete, then handle domain lost
+ */
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH:
+ /*
+ * In P2P we can get an attach request from
+ * the other FLOGI path, so drop this one
+ */
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ /* start any pending vports */
+ if (efc_vport_start(domain)) {
+ efc_log_debug(domain->efc,
+ "efc_vport_start didn't start vports\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_LOST: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to wait state
+ * and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ /* can happen during p2p */
+ u32 fc_id;
+
+ fc_id = *((u32 *)arg);
+
+ /* Assume that the domain is attached */
+ WARN_ON(!domain->attached);
+
+ /*
+ * Verify that the requested FC_ID
+ * is the same as the one we're working with
+ */
+ WARN_ON(domain->nport->fc_id != fc_id);
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /* Wait for nodes to free prior to the domain shutdown */
+ switch (evt) {
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ int rc;
+
+ /* sm: / efc_hw_domain_free */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL);
+
+ /* Request efc_hw_domain_free and wait for completion */
+ rc = efc_cmd_domain_free(efc, domain);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_free() failed: %d\n",
+ rc);
+ }
+ break;
+ }
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_FREE_OK:
+ /* sm: / domain_free */
+ if (domain->domain_found_pending) {
+ /*
+			 * Save fcf_wwn and drec from this domain, free the
+			 * current domain, and allocate a new one with the
+			 * same fcf_wwn. (A SLI-4 "re-register VPI"
+			 * operation could perhaps be used here instead.)
+ */
+ u64 fcf_wwn = domain->fcf_wwn;
+ struct efc_domain_record drec = domain->pending_drec;
+
+ efc_log_debug(efc, "Reallocating domain\n");
+ domain->req_domain_free = true;
+ domain = efc_domain_alloc(efc, fcf_wwn);
+
+ if (!domain) {
+ efc_log_err(efc,
+ "efc_domain_alloc() failed\n");
+ return;
+ }
+ /*
+			 * Got a new domain; at this point there are at
+			 * least two domains. Once the req_domain_free flag
+			 * is processed, the associated (old) domain will
+			 * be removed.
+ */
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ efc_sm_post_event(&domain->drvsm,
+ EFC_EVT_DOMAIN_FOUND, &drec);
+ } else {
+ domain->req_domain_free = true;
+ }
+ break;
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /*
+ * Wait for the domain alloc/attach completion
+ * after receiving a domain lost.
+ */
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK:
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free() failed\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_err(efc, "[domain] %-20s: failed\n",
+ efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
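+/*
+ * Copy the FLOGI service parameters (minus the 4-byte command word) into
+ * the domain DMA buffer, then post DOMAIN_REQ_ATTACH with the assigned
+ * S_ID to the domain state machine.
+ */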
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id)
+{
+ memcpy(domain->dma.virt,
+ ((uint8_t *)domain->flogi_service_params) + 4,
+ sizeof(struct fc_els_flogi) - 4);
+ (void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH,
+ &s_id);
+}
+
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id)
+{
+ __efc_domain_attach_internal(domain, s_id);
+}
+
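+/*
+ * Post an event to the domain state machine. Freeing the domain is
+ * deferred via req_domain_free so that it happens only after the event
+ * has been fully processed, never from within a state handler.
+ */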
+int
+efc_domain_post_event(struct efc_domain *domain,
+ enum efc_sm_event event, void *arg)
+{
+ int rc;
+ bool req_domain_free;
+
+ rc = efc_sm_post_event(&domain->drvsm, event, arg);
+
+ req_domain_free = domain->req_domain_free;
+ domain->req_domain_free = false;
+
+ if (req_domain_free)
+ efc_domain_free(domain);
+
+ return rc;
+}
+
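+/*
+ * Dispatch frames queued on efc->pend_frames, stopping as soon as a
+ * dispatched frame causes a transition into a state that holds frames.
+ */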
+static void
+efct_domain_process_pending(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame
+ * processed because any given frame could cause a transition
+ * to a state that holds frames
+ */
+ if (efc->hold_frames)
+ break;
+
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ if (!list_empty(&efc->pend_frames)) {
+ seq = list_first_entry(&efc->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+
+ if (!seq) {
+ processed = efc->pend_frames_processed;
+ efc->pend_frames_processed = 0;
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+ break;
+ }
+ efc->pend_frames_processed++;
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ /* now dispatch frame(s) to dispatch function */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+
+ seq = NULL;
+ }
+
+ if (processed != 0)
+ efc_log_debug(efc, "%u domain frames held and processed\n",
+ processed);
+}
+
+void
+efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = efc->domain;
+
+ /*
+ * If we are holding frames or the domain is not yet registered or
+ * there's already frames on the pending list,
+ * then add the new frame to pending list
+ */
+ if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &efc->pend_frames);
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ if (domain) {
+ /* immediately process pending frames */
+ efct_domain_process_pending(domain);
+ }
+ } else {
+ /*
+ * We are not holding frames and pending list is empty,
+ * just process frame. A non-zero return means the frame
+ * was not handled - so cleanup
+ */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+ }
+}
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = (struct efc_domain *)arg;
+ struct efc *efc = domain->efc;
+ struct fc_frame_header *hdr;
+ struct efc_node *node = NULL;
+ struct efc_nport *nport = NULL;
+ unsigned long flags = 0;
+ u32 s_id, d_id, rc = EFC_HW_SEQ_FREE;
+
+	if (!seq->header || !seq->header->dma.virt ||
+	    !seq->payload || !seq->payload->dma.virt) {
+ efc_log_err(efc, "Sequence header or payload is null\n");
+ return rc;
+ }
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ spin_lock_irqsave(&efc->lock, flags);
+
+ nport = efc_nport_find(domain, d_id);
+ if (!nport) {
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ /* Drop frame */
+ efc_log_warn(efc, "FCP frame with invalid d_id x%x\n",
+ d_id);
+ goto out;
+ }
+
+ /* p2p will use this case */
+ nport = domain->nport;
+ if (!nport || !kref_get_unless_zero(&nport->ref)) {
+ efc_log_err(efc, "Physical nport is NULL\n");
+ goto out;
+ }
+ }
+
+ /* Lookup the node given the remote s_id */
+ node = efc_node_find(nport, s_id);
+
+ /* If not found, then create a new node */
+ if (!node) {
+ /*
+ * If this is solicited data or control based on R_CTL and
+ * there is no node context, then we can drop the frame
+ */
+ if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) ||
+ (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) {
+ efc_log_debug(efc, "sol data/ctrl frame without node\n");
+ goto out_release;
+ }
+
+ node = efc_node_alloc(nport, s_id, false, false);
+ if (!node) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ goto out_release;
+ }
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+
+ if (node->hold_frames || !list_empty(&node->pend_frames)) {
+ /* add frame to node's pending list */
+ spin_lock(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &node->pend_frames);
+ spin_unlock(&node->pend_frames_lock);
+ rc = EFC_HW_SEQ_HOLD;
+ goto out_release;
+ }
+
+ /* now dispatch frame to the node frame handler */
+ efc_node_dispatch_frame(node, seq);
+
+out_release:
+ kref_put(&nport->ref, nport->release);
+out:
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return rc;
+}
+
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ u32 port_id;
+ struct efc_node *node = (struct efc_node *)arg;
+ struct efc *efc = node->efc;
+
+ port_id = ntoh24(hdr->fh_s_id);
+
+ if (WARN_ON(port_id != node->rnode.fc_id))
+ return;
+
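+	/*
+	 * Only complete, initiative-transferring sequences are handled:
+	 * drop anything without both END_SEQ and SEQ_INIT set in F_CTL.
+	 */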
+ if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) ||
+ !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) {
+ node_printf(node,
+ "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n",
+ cpu_to_be32(((u32 *)hdr)[0]),
+ cpu_to_be32(((u32 *)hdr)[1]),
+ cpu_to_be32(((u32 *)hdr)[2]),
+ cpu_to_be32(((u32 *)hdr)[3]),
+ cpu_to_be32(((u32 *)hdr)[4]),
+ cpu_to_be32(((u32 *)hdr)[5]));
+ return;
+ }
+
+ switch (hdr->fh_r_ctl) {
+ case FC_RCTL_ELS_REQ:
+ case FC_RCTL_ELS_REP:
+ efc_node_recv_els_frame(node, seq);
+ break;
+
+ case FC_RCTL_BA_ABTS:
+ case FC_RCTL_BA_ACC:
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_NOP:
+ efc_log_err(efc, "Received ABTS:\n");
+ break;
+
+ case FC_RCTL_DD_UNSOL_CMD:
+ case FC_RCTL_DD_UNSOL_CTL:
+ switch (hdr->fh_type) {
+ case FC_TYPE_FCP:
+ if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) {
+ if (!node->fcp_enabled) {
+ efc_node_recv_fcp_cmd(node, seq);
+ break;
+ }
+ efc_log_err(efc, "Recvd FCP CMD. Drop IO\n");
+ } else if ((hdr->fh_r_ctl & 0xf) ==
+ FC_RCTL_DD_SOL_DATA) {
+ node_printf(node,
+ "solicited data recvd. Drop IO\n");
+ }
+ break;
+
+ case FC_TYPE_CT:
+ efc_node_recv_ct_frame(node, seq);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_domain.h b/drivers/scsi/elx/libefc/efc_domain.h
new file mode 100644
index 000000000000..5468ea7ab19b
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declare driver's domain handler exported interface
+ */
+
+#ifndef __EFCT_DOMAIN_H__
+#define __EFCT_DOMAIN_H__
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn);
+void
+efc_domain_free(struct efc_domain *domain);
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id);
+int
+efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event,
+ void *arg);
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id);
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+
+#endif /* __EFCT_DOMAIN_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
new file mode 100644
index 000000000000..84bc81d7ce76
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -0,0 +1,1094 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Functions to build and send ELS/CT/BLS commands and responses.
+ */
+
+#include "efc.h"
+#include "efc_els.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_LOG_ENABLE_ELS_TRACE(efc) \
+ (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0)
+
+#define node_els_trace() \
+ do { \
+ if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \
+ efc_log_info(efc, "[%s] %-20s\n", \
+ node->display_name, __func__); \
+ } while (0)
+
+#define els_io_printf(els, fmt, ...) \
+ efc_log_err((struct efc *)els->node->efc,\
+ "[%s] %-8s " fmt, \
+ els->node->display_name,\
+ els->display_name, ##__VA_ARGS__)
+
+#define EFC_ELS_RSP_LEN 1024
+#define EFC_ELS_GID_PT_RSP_LEN 8096
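+/* GID_PT responses carry a list of port IDs and need a larger buffer */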
+
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen)
+{
+ return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
+}
+
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
+{
+ struct efc *efc;
+ struct efc_els_io_req *els;
+ unsigned long flags = 0;
+
+ efc = node->efc;
+
+ if (!node->els_io_enabled) {
+ efc_log_err(efc, "els io alloc disabled\n");
+ return NULL;
+ }
+
+ els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
+ if (!els) {
+ atomic_add_return(1, &efc->els_io_alloc_failed_count);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&els->ref);
+ els->release = _efc_els_io_free;
+
+ /* populate generic io fields */
+ els->node = node;
+
+ /* now allocate DMA for request and response */
+ els->io.req.size = reqlen;
+ els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
+ &els->io.req.phys, GFP_KERNEL);
+ if (!els->io.req.virt) {
+ mempool_free(els, efc->els_io_pool);
+ return NULL;
+ }
+
+ els->io.rsp.size = rsplen;
+ els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
+ &els->io.rsp.phys, GFP_KERNEL);
+ if (!els->io.rsp.virt) {
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+ mempool_free(els, efc->els_io_pool);
+ els = NULL;
+ }
+
+ if (els) {
+ /* initialize fields */
+ els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES;
+
+ /* add els structure to ELS IO list */
+ INIT_LIST_HEAD(&els->list_entry);
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+ list_add_tail(&els->list_entry, &node->els_ios_list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ }
+
+ return els;
+}
+
+void
+efc_els_io_free(struct efc_els_io_req *els)
+{
+ kref_put(&els->ref, els->release);
+}
+
+void
+_efc_els_io_free(struct kref *arg)
+{
+ struct efc_els_io_req *els =
+ container_of(arg, struct efc_els_io_req, ref);
+ struct efc *efc;
+ struct efc_node *node;
+ int send_empty_event = false;
+ unsigned long flags = 0;
+
+ node = els->node;
+ efc = node->efc;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+
+ list_del(&els->list_entry);
+	/* Send the list-empty event only if the IO allocator is disabled
+	 * and the list is empty. If node->els_io_enabled were not checked,
+	 * the event would be posted continually.
+ */
+ send_empty_event = (!node->els_io_enabled &&
+ list_empty(&node->els_ios_list));
+
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+
+ /* free ELS request and response buffers */
+ dma_free_coherent(&efc->pci->dev, els->io.rsp.size,
+ els->io.rsp.virt, els->io.rsp.phys);
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+
+ mempool_free(els, efc->els_io_pool);
+
+ if (send_empty_event)
+ efc_scsi_io_list_empty(node->efc, node);
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els);
+
+static void
+efc_els_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_els_io_req *els = from_timer(els, t, delay_timer);
+
+ /* Retry delay timer expired, retry the ELS request */
+ efc_els_retry(els);
+}
+
+static int
+efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 reason_code;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ if (status)
+ els_io_printf(els, "status x%x ext x%x\n", status, ext_status);
+
+ /* set the response len element of els->rsp */
+ els->io.rsp.len = length;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+	/* pass the response length through in the callback data as well */
+ cbdata.rsp_len = length;
+
+ /* FW returns the number of bytes received on the link in
+ * the WCQE, not the amount placed in the buffer; use this info to
+ * check if there was an overrun.
+ */
+ if (length > els->io.rsp.size) {
+ efc_log_warn(efc,
+ "ELS response returned len=%d > buflen=%zu\n",
+ length, els->io.rsp.size);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ return 0;
+ }
+
+ /* Post event to ELS IO object */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata);
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ reason_code = (ext_status >> 16) & 0xff;
+
+ /* delay and retry if reason code is Logical Busy */
+ switch (reason_code) {
+ case ELS_RJT_BUSY:
+ els->node->els_req_cnt--;
+ els_io_printf(els,
+ "LS_RJT Logical Busy, delay and retry\n");
+ timer_setup(&els->delay_timer,
+ efc_els_delay_timer_cb, 0);
+ mod_timer(&els->delay_timer,
+ jiffies + msecs_to_jiffies(5000));
+ break;
+ default:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT,
+ &cbdata);
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
+ efc_els_retry(els);
+ break;
+ default:
+ efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n",
+ ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL,
+ &cbdata);
+ break;
+ }
+ break;
+ default: /* Other error */
+ efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n",
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status)
+{
+ struct efc_els_io_req *els =
+ container_of(io, struct efc_els_io_req, io);
+
+ WARN_ON_ONCE(!els->cb);
+
+ ((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status);
+}
+
+static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
+ enum efc_disc_io_type io_type)
+{
+ int rc = 0;
+ struct efc *efc = node->efc;
+ struct efc_node_cb cbdata;
+
+ /* update ELS request counter */
+ els->node->els_req_cnt++;
+
+ /* Prepare the IO request details */
+ els->io.io_type = io_type;
+ els->io.xmit_len = els->io.req.size;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ els->io.s_id = node->nport->fc_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->rnode.attached)
+ els->io.rpi_registered = true;
+
+ els->cb = efc_els_req_cb;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_log_err(efc, "efc_els_send failed: %d\n", rc);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+
+ return rc;
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els)
+{
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 rc;
+
+ efc = els->node->efc;
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+
+ if (els->els_retries_remaining) {
+ els->els_retries_remaining--;
+ rc = efc->tt.send_els(efc, &els->io);
+ } else {
+ rc = -EIO;
+ }
+
+ if (rc) {
+ efc_log_err(efc, "ELS retries exhausted\n");
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ }
+}
+
+static int
+efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+ /* Post node event */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata);
+ break;
+
+ default: /* Other error */
+ efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n",
+ node->display_name, els->display_name,
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen)
+{
+ int rc = 0;
+ struct efc_node_cb cbdata;
+ struct efc_node *node = els->node;
+ struct efc *efc = node->efc;
+
+ /* increment ELS completion counter */
+ node->els_cmpl_cnt++;
+
+ els->io.io_type = EFC_DISC_IO_ELS_RESP;
+ els->cb = efc_els_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.xmit_len = rsplen;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ if (node->nport->fc_id != U32_MAX)
+ els->io.s_id = node->nport->fc_id;
+ else
+ els->io.s_id = els->io.iparam.els.s_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->attached)
+ els->io.rpi_registered = true;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+
+ return rc;
+}
+
+int
+efc_send_plogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_flogi *plogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+ els->display_name = "plogi";
+
+ /* Build PLOGI request */
+ plogi = els->io.req.virt;
+
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+
+ plogi->fl_cmd = ELS_PLOGI;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_flogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *flogi;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi";
+
+ /* Build FLOGI request */
+ flogi = els->io.req.virt;
+
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_FLOGI;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_fdisc(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *fdisc;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*fdisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "fdisc";
+
+ /* Build FDISC request */
+ fdisc = els->io.req.virt;
+
+ memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
+ fdisc->fl_cmd = ELS_FDISC;
+ memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_prli(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli";
+
+ /* Build PRLI request */
+ pp = els->io.req.virt;
+
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = 16;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_logo(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_logo *logo;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo";
+
+ /* Build LOGO request */
+
+ logo = els->io.req.virt;
+
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
+ logo->fl_n_port_wwn = sparams->fl_wwpn;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_adisc(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+ struct efc_nport *nport = node->nport;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc";
+
+ /* Build ADISC request */
+
+ adisc = els->io.req.virt;
+
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ hton24(adisc->adisc_hard_addr, nport->fc_id);
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_scr(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_scr *req;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*req));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "scr";
+
+ req = els->io.req.virt;
+
+ memset(req, 0, sizeof(*req));
+ req->scr_cmd = ELS_SCR;
+ req->scr_reg_func = ELS_SCRF_FULL;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_rjt *rjt;
+
+ els = efc_els_io_alloc(node, sizeof(*rjt));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ node_els_trace();
+
+ els->display_name = "ls_rjt";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ rjt = els->io.req.virt;
+ memset(rjt, 0, sizeof(*rjt));
+
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason_code;
+ rjt->er_explan = reason_code_expl;
+
+ return efc_els_send_rsp(els, sizeof(*rjt));
+}
+
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *plogi;
+ struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "plogi_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ plogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+ plogi->fl_cmd = ELS_LS_ACC;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+ /* Set Application header support bit if requested */
+ if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST))
+ plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST);
+
+ return efc_els_send_rsp(els, sizeof(*plogi));
+}
+
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *flogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi_p2p_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+ els->io.iparam.els.s_id = s_id;
+
+ flogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_LS_ACC;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp));
+
+ return efc_els_send_rsp(els, sizeof(*flogi));
+}
+
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = 0x10;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK;
+
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prlo prlo;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prlo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+ pp->prlo.prlo_cmd = ELS_LS_ACC;
+ pp->prlo.prlo_obs = 0x10;
+ pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp));
+
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_RESP_ACK;
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_acc *acc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*acc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "ls_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ acc = els->io.req.virt;
+ memset(acc, 0, sizeof(*acc));
+
+ acc->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*acc));
+}
+
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct fc_els_ls_acc *logo;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ logo = els->io.req.virt;
+ memset(logo, 0, sizeof(*logo));
+
+ logo->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*logo));
+}
+
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc_acc";
+
+ /* Go ahead and send the ELS_ACC */
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+ adisc = els->io.req.virt;
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_LS_ACC;
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_rsp(els, sizeof(*adisc));
+}
+
+static inline void
+fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size)
+{
+ hdr->ct_rev = FC_CT_REV;
+ hdr->ct_fs_type = FC_FST_DIR;
+ hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ hdr->ct_options = 0;
+ hdr->ct_cmd = cpu_to_be16(cmd);
+	/* maximum/residual size is expressed in 32-bit words */
+ hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32)));
+ hdr->ct_reason = 0;
+ hdr->ct_explan = 0;
+ hdr->ct_vendor = 0;
+}
+
+int
+efc_ns_send_rftid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rft_id rftid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rftid";
+
+ ct = els->io.req.virt;
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID,
+ sizeof(struct fc_ns_rft_id));
+
+ hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
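+	/* register FC-4 type FCP (0x08): set bit (type % FC_NS_BPW) in
+	 * word (type / FC_NS_BPW) of the FC-4 type bitmap
+	 */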
+ ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] =
+ cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_rffid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rff_id rffid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rffid";
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID,
+ sizeof(struct fc_ns_rff_id));
+
+ hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
+ if (node->nport->enable_ini)
+ ct->rffid.fr_feat |= FCP_FEAT_INIT;
+ if (node->nport->enable_tgt)
+ ct->rffid.fr_feat |= FCP_FEAT_TARG;
+ ct->rffid.fr_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_gidpt(struct efc_node *node)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_gid_pt gidpt;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "gidpt";
+
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_GID_PT,
+ sizeof(struct fc_ns_gid_pt));
+
+ ct->gidpt.fn_pt_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg)
+{
+	/* We don't want any further events (e.g. abort requests from the
+	 * node state machine), so disable the state machine for this IO
+ */
+ els->els_req_free = true;
+ efc_node_post_els_resp(els->node, evt, arg);
+
+ efc_els_io_free(els);
+}
+
+static int
+efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els = arg;
+
+ efc_els_io_free(els);
+
+ return 0;
+}
+
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code,
+ u32 reason_code, u32 reason_code_explanation)
+{
+ struct efc_els_io_req *els = NULL;
+ struct fc_ct_hdr *rsp = NULL;
+
+ els = efc_els_io_alloc(node, 256);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ rsp = els->io.rsp.virt;
+
+ *rsp = *ct_hdr;
+
+ fcct_build_req_header(rsp, cmd_rsp_code, 0);
+ rsp->ct_reason = reason_code;
+ rsp->ct_explan = reason_code_explanation;
+
+ els->display_name = "ct_rsp";
+ els->cb = efc_ct_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.io_type = EFC_DISC_IO_CT_RESP;
+ els->io.xmit_len = sizeof(*rsp);
+
+ els->io.rpi = node->rnode.indicator;
+ els->io.d_id = node->rnode.fc_id;
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+
+ els->io.iparam.ct.ox_id = ox_id;
+ els->io.iparam.ct.r_ctl = 3;
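+	/* R_CTL 0x3: device data, solicited control (CT response) */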
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = 5;
+
+ if (efc->tt.send_els(efc, &els->io)) {
+ efc_els_io_free(els);
+ return -EIO;
+ }
+ return 0;
+}
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
+{
+ struct sli_bls_params bls;
+ struct fc_ba_acc *acc;
+ struct efc *efc = node->efc;
+
+ memset(&bls, 0, sizeof(bls));
+ bls.ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls.rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls.s_id = ntoh24(hdr->fh_d_id);
+ bls.d_id = node->rnode.fc_id;
+ bls.rpi = node->rnode.indicator;
+ bls.vpi = node->nport->indicator;
+
+ acc = (void *)bls.payload;
+ acc->ba_ox_id = cpu_to_be16(bls.ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls.rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls);
+}
diff --git a/drivers/scsi/elx/libefc/efc_els.h b/drivers/scsi/elx/libefc/efc_els.h
new file mode 100644
index 000000000000..3c4f820f602e
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_ELS_H__
+#define __EFC_ELS_H__
+
+#define EFC_STATUS_INVALID INT_MAX
+#define EFC_ELS_IO_POOL_SZ 1024
+
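+/* Tracks a single outstanding ELS or CT exchange on a node. */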
+struct efc_els_io_req {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc_node *node;
+ void *cb;
+ u32 els_retries_remaining;
+ bool els_req_free;
+ struct timer_list delay_timer;
+
+ const char *display_name;
+
+ struct efc_disc_io io;
+};
+
+typedef int (*efc_hw_srrs_cb_t)(void *arg, u32 length, int status,
+ u32 ext_status);
+
+void _efc_els_io_free(struct kref *arg);
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen);
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen);
+void efc_els_io_free(struct efc_els_io_req *els);
+
+/* ELS command send */
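+/*
+ * Typical call flow (a sketch, mirroring the callers in efc_fabric.c):
+ * a node state machine entry action issues the request and transitions
+ * to a wait state, e.g.
+ *
+ *	efc_send_plogi(node);
+ *	efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
+ *
+ * The completion is then posted back to the node as one of the
+ * EFC_EVT_SRRS_ELS_REQ_* events, with the response payload available
+ * through struct efc_node_cb.
+ */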
+typedef void (*els_cb_t)(struct efc_node *node,
+ struct efc_node_cb *cbdata, void *arg);
+int
+efc_send_plogi(struct efc_node *node);
+int
+efc_send_flogi(struct efc_node *node);
+int
+efc_send_fdisc(struct efc_node *node);
+int
+efc_send_prli(struct efc_node *node);
+int
+efc_send_prlo(struct efc_node *node);
+int
+efc_send_logo(struct efc_node *node);
+int
+efc_send_adisc(struct efc_node *node);
+int
+efc_send_pdisc(struct efc_node *node);
+int
+efc_send_scr(struct efc_node *node);
+int
+efc_ns_send_rftid(struct efc_node *node);
+int
+efc_ns_send_rffid(struct efc_node *node);
+int
+efc_ns_send_gidpt(struct efc_node *node);
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg);
+
+/* ELS acc send */
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique);
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id);
+int
+efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport);
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id);
+
+int
+efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node,
+ struct fc_frame_header *hdr);
+int
+efc_bls_send_rjt_hdr(struct efc_els_io_req *io, struct fc_frame_header *hdr);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+/* CT */
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code,
+ u32 reason_code_explanation);
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr);
+
+#endif /* __EFC_ELS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
new file mode 100644
index 000000000000..9661eea93aa1
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -0,0 +1,1563 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * This file implements remote node state machines for:
+ * - Fabric logins.
+ * - Fabric controller events.
+ * - Name/directory services interaction.
+ * - Point-to-point logins.
+ */
+
+/*
+ * fabric_sm Node State Machine: Fabric States
+ * ns_sm Node State Machine: Name/Directory Services States
+ * p2p_sm Node State Machine: Point-to-Point Node States
+ */
+
+#include "efc.h"
+
+static void
+efc_fabric_initiate_shutdown(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+
+ node->els_io_enabled = false;
+
+ if (node->attached) {
+ int rc;
+
+ /* issue hw node free; don't care if succeeds right away
+ * or sometime later, will check node->attached later in
+ * shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0) {
+ node_printf(node, "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+ }
+ /*
+ * node has either been detached or is in the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+}
+
+static void
+__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+
+ node = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ case EFC_EVT_SHUTDOWN:
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_REENTER:
+ efc_log_debug(efc, ">>> reenter !!\n");
+ fallthrough;
+
+ case EFC_EVT_ENTER:
+ /* send FLOGI */
+ efc_send_flogi(node);
+ efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology)
+{
+ node->nport->topology = topology;
+}
+
+void
+efc_fabric_notify_topology(struct efc_node *node)
+{
+ struct efc_node *tmp_node;
+ unsigned long index;
+
+ /*
+ * now loop through the nodes in the nport
+ * and send topology notification
+ */
+ xa_for_each(&node->nport->lookup, index, tmp_node) {
+ if (tmp_node != node) {
+ efc_node_post_event(tmp_node,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ &node->nport->topology);
+ }
+ }
+}
+
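+/*
+ * A fabric F_Port sets FC_SP_FT_FPORT in the FLOGI LS_ACC common service
+ * parameters; if the bit is clear, the responder is an N_Port and the
+ * link is point-to-point.
+ */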
+static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
+{
+ return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
+}
+
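+/*
+ * Wait for the FLOGI response. Fabric topology: save the service
+ * parameters and attach the domain. Point-to-point topology: determine
+ * the p2p winner; the winner waits for the domain attach while the
+ * loser shuts this node down and expects a PLOGI from the peer.
+ */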
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->els_rsp.virt,
+ sizeof(struct fc_els_flogi));
+
+		/* Check to see if the fabric is an F_PORT or an N_PORT */
+ if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
+ /* sm: if not nport / efc_domain_attach */
+ /* ext_status has the fc_id, attach domain */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
+ efc_fabric_notify_topology(node);
+ WARN_ON(node->nport->domain->attached);
+ efc_domain_attach(node->nport->domain,
+ cbdata->ext_status);
+ efc_node_transition(node,
+ __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ /* sm: if nport and p2p_winner / efc_domain_attach */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node,
+ "p2p setup failed, shutting down node\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (node->nport->domain->attached &&
+ !node->nport->domain->domain_notify_pend) {
+ /*
+ * already attached,
+ * just send ATTACH_OK
+ */
+ node_printf(node,
+ "p2p winner, domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /*
+ * peer is p2p winner;
+ * PLOGI will be received on the
+ * remote SID=1 node;
+ * this node has served its purpose
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+
+ break;
+ }
+
+ case EFC_EVT_ELS_REQ_ABORTED:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ struct efc_nport *nport = node->nport;
+ /*
+ * with these errors, we have no recovery,
+ * so shutdown the nport, leave the link
+ * up and the domain ready
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node,
+ "FLOGI failed evt=%s, shutting down nport [%s]\n",
+ efc_sm_event_name(evt), nport->display_name);
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send FDISC */
+ efc_send_fdisc(node);
+ efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ /* fc_id is in ext_status */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / efc_nport_attach */
+ efc_nport_attach(node->nport, cbdata->ext_status);
+ efc_node_transition(node, __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
+ /* sm: / shutdown nport */
+ efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_start_ns_node(struct efc_nport *nport)
+{
+ struct efc_node *ns;
+
+ /* Instantiate a name services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (!ns) {
+ ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
+ if (!ns)
+ return -EIO;
+ }
+	/*
+	 * For an already existing name services node, transitioning from
+	 * here is questionable: transitions should only be made (1) from
+	 * within the state machine or (2) right after allocation.
+	 */
+ if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
+ efc_node_pause(ns, __efc_ns_init);
+ else
+ efc_node_transition(ns, __efc_ns_init, NULL);
+ return 0;
+}
+
+static int
+efc_start_fabctl_node(struct efc_nport *nport)
+{
+ struct efc_node *fabctl;
+
+ fabctl = efc_node_find(nport, FC_FID_FCTRL);
+ if (!fabctl) {
+ fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
+ false, false);
+ if (!fabctl)
+ return -EIO;
+ }
+	/*
+	 * For an already existing fabric controller node, transitioning
+	 * from here is questionable: transitions should only be made
+	 * (1) from within the state machine or (2) right after allocation.
+	 */
+ efc_node_transition(fabctl, __efc_fabctl_init, NULL);
+ return 0;
+}
+
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ int rc;
+
+ rc = efc_start_ns_node(node->nport);
+ if (rc)
+ return;
+
+ /* sm: if enable_ini / start fabctl node */
+ /* Instantiate the fabric controller (sends SCR) */
+ if (node->nport->enable_rscn) {
+ rc = efc_start_fabctl_node(node->nport);
+ if (rc)
+ return;
+ }
+ efc_node_transition(node, __efc_fabric_idle, NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ /* Save service parameters */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ /* sm: / send RFTID */
+ efc_ns_send_rftid(node);
+ efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ /* ignore shutdown event as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / send RFFID */
+ efc_ns_send_rffid(node);
+ efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
+ break;
+
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Waits for an RFFID response event;
+ * if rscn enabled, a GIDPT name services request is issued.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ if (node->nport->enable_rscn) {
+ /* sm: if enable_rscn / send GIDPT */
+ efc_ns_send_gidpt(node);
+
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ } else {
+ /* if 'T' only, we're done, go to idle */
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ }
+ break;
+ }
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
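+/*
+ * Walk the GID_PT accept payload: ports still reported by the name
+ * server are crossed off the active-node list, nodes no longer reported
+ * are posted EFC_EVT_NODE_MISSING, newly reported ports get a node
+ * allocated (initiator only), and re-found target nodes are posted
+ * EFC_EVT_NODE_REFOUND.
+ */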
+static int
+efc_process_gidpt_payload(struct efc_node *node,
+ void *data, u32 gidpt_len)
+{
+ u32 i, j;
+ struct efc_node *newnode;
+ struct efc_nport *nport = node->nport;
+ struct efc *efc = node->efc;
+ u32 port_id = 0, port_count, plist_count;
+ struct efc_node *n;
+ struct efc_node **active_nodes;
+ int residual;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_gid_pn_resp pn_rsp;
+ } *rsp;
+ struct fc_gid_pn_resp *gidpt;
+ unsigned long index;
+
+ rsp = data;
+ gidpt = &rsp->pn_rsp;
+ residual = be16_to_cpu(rsp->hdr.ct_mr_size);
+
+ if (residual != 0)
+ efc_log_debug(node->efc, "residual is %u words\n", residual);
+
+ if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
+ node_printf(node,
+ "GIDPT request failed: rsn x%x rsn_expl x%x\n",
+ rsp->hdr.ct_reason, rsp->hdr.ct_explan);
+ return -EIO;
+ }
+
+ plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);
+
+ /* Count the number of nodes */
+ port_count = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_count++;
+ }
+
+ /* Allocate a buffer for all nodes */
+ active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
+ if (!active_nodes) {
+		node_printf(node, "kcalloc failed\n");
+ return -EIO;
+ }
+
+ /* Fill buffer with fc_id of active nodes */
+ i = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_id = n->rnode.fc_id;
+ switch (port_id) {
+ case FC_FID_FLOGI:
+ case FC_FID_FCTRL:
+ case FC_FID_DIR_SERV:
+ break;
+ default:
+ if (port_id != FC_FID_DOM_MGR)
+ active_nodes[i++] = n;
+ break;
+ }
+ }
+
+ /* update the active nodes buffer */
+ for (i = 0; i < plist_count; i++) {
+		port_id = ntoh24(gidpt[i].fp_fid);
+
+ for (j = 0; j < port_count; j++) {
+ if (active_nodes[j] &&
+ port_id == active_nodes[j]->rnode.fc_id) {
+ active_nodes[j] = NULL;
+ }
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+
+ /* Those remaining in the active_nodes[] are now gone ! */
+ for (i = 0; i < port_count; i++) {
+ /*
+ * if we're an initiator and the remote node
+ * is a target, then post the node missing event.
+ * if we're target and we have enabled
+ * target RSCN, then post the node missing event.
+ */
+ if (!active_nodes[i])
+ continue;
+
+ if ((node->nport->enable_ini && active_nodes[i]->targ) ||
+ (node->nport->enable_tgt && enable_target_rscn(efc))) {
+ efc_node_post_event(active_nodes[i],
+ EFC_EVT_NODE_MISSING, NULL);
+ } else {
+ node_printf(node,
+ "GID_PT: skipping non-tgt port_id x%06x\n",
+ active_nodes[i]->rnode.fc_id);
+ }
+ }
+ kfree(active_nodes);
+
+ for (i = 0; i < plist_count; i++) {
+		port_id = ntoh24(gidpt[i].fp_fid);
+
+ /* Don't create node for ourselves */
+ if (port_id == node->rnode.nport->fc_id) {
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ continue;
+ }
+
+ newnode = efc_node_find(nport, port_id);
+ if (!newnode) {
+ if (!node->nport->enable_ini)
+ continue;
+
+ newnode = efc_node_alloc(nport, port_id, false, false);
+ if (!newnode) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return -EIO;
+ }
+ /*
+ * send PLOGI automatically
+ * if initiator
+ */
+ efc_node_init_device(newnode, true);
+ }
+
+ if (node->nport->enable_ini && newnode->targ) {
+ efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
+ NULL);
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+ return 0;
+}
+
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /*
+ * Wait for a GIDPT response from the name server. Process the FC_IDs
+ * that are reported by creating new remote ports, as needed.
+ */
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process GIDPT payload */
+ efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
+ cbdata->els_rsp.len);
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ /* not much we can do; will retry with the next RSCN */
+ node_printf(node, "GID_PT failed to complete\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ /* if receive RSCN here, queue up another discovery processing */
+ case EFC_EVT_RSCN_RCVD: {
+ node_printf(node, "RSCN received during GID_PT processing\n");
+ node->rscn_pending = true;
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Wait for RSCN received events (posted from the fabric controller)
+ * and restart the GIDPT name services query and processing.
+ */
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->rscn_pending)
+ break;
+
+ node_printf(node, "RSCN pending, restart discovery\n");
+ node->rscn_pending = false;
+ fallthrough;
+
+ case EFC_EVT_RSCN_RCVD: {
+ /* sm: / send GIDPT */
+ /*
+ * If target RSCN processing is enabled,
+ * and this is target only (not initiator),
+ * and tgt_rscn_delay is non-zero,
+ * then we delay issuing the GID_PT
+ */
+ if (efc->tgt_rscn_delay_msec != 0 &&
+ !node->nport->enable_ini && node->nport->enable_tgt &&
+ enable_target_rscn(efc)) {
+ efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
+ } else {
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+gidpt_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
+
+ del_timer(&node->gidpt_delay_timer);
+
+ efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
+}
+
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ u64 delay_msec, tmp;
+
+		/*
+		 * Compute the delay time: default to tgt_rscn_delay, but if
+		 * the time since the last GIDPT is less than tgt_rscn_period,
+		 * use tgt_rscn_period instead.
+		 */
+ delay_msec = efc->tgt_rscn_delay_msec;
+ tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
+ if (tmp < efc->tgt_rscn_period_msec)
+ delay_msec = efc->tgt_rscn_period_msec;
+
+ timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
+ 0);
+ mod_timer(&node->gidpt_delay_timer,
+ jiffies + msecs_to_jiffies(delay_msec));
+
+ break;
+ }
+
+ case EFC_EVT_GIDPT_DELAY_EXPIRED:
+ node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);
+
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_RSCN_RCVD: {
+ efc_log_debug(efc,
+ "RSCN received while in GIDPT delay - no action\n");
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+	struct efc_node *node = ctx->app;
+
+	efc_node_evt_set(ctx, evt, __func__);
+
+	node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* no need to login to fabric controller, just send SCR */
+ efc_send_scr(node);
+ efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine:
+ * Wait for an SCR response from the fabric controller.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
+{
+ struct efc *efc = node->efc;
+ struct efc_nport *nport = node->nport;
+ struct efc_node *ns;
+
+ /* Forward this event to the name-services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (ns)
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
+ else
+ efc_log_warn(efc, "can't find name server node\n");
+}
+
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine: Ready.
+ * In this state, the fabric controller sends a RSCN, which is received
+ * by this node and is forwarded to the name services node object; and
+ * the RSCN LS_ACC is sent.
+ */
+ switch (evt) {
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * sm: / process RSCN (forward to name services node),
+ * send LS_ACC
+ */
+ efc_process_rscn(node, cbdata);
+ efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
+ NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static u64
+efc_get_wwpn(struct fc_els_flogi *sp)
+{
+	return be64_to_cpu(sp->fl_wwpn);
+}
+
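+/*
+ * Compare WWPNs to pick the point-to-point winner: returns 1 if the
+ * remote port wins (higher WWPN), 0 if the local port wins, and -1 if
+ * the WWPNs are identical (no winner).
+ */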
+static int
+efc_rnode_is_winner(struct efc_nport *nport)
+{
+ struct fc_els_flogi *remote_sp;
+ u64 remote_wwpn;
+ u64 local_wwpn = nport->wwpn;
+ u64 wwn_bump = 0;
+
+ remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
+ remote_wwpn = efc_get_wwpn(remote_sp);
+
+ local_wwpn ^= wwn_bump;
+
+ efc_log_debug(nport->efc, "r: %llx\n",
+ be64_to_cpu(remote_sp->fl_wwpn));
+ efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);
+
+ if (remote_wwpn == local_wwpn) {
+ efc_log_warn(nport->efc,
+ "WWPN of remote node [%08x %08x] matches local WWPN\n",
+ (u32)(local_wwpn >> 32ll),
+ (u32)local_wwpn);
+ return -1;
+ }
+
+ return (remote_wwpn > local_wwpn);
+}
+
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_nport *nport = node->nport;
+ struct efc_node *rnode;
+
+ /*
+ * this transient node (SID=0 (recv'd FLOGI)
+ * or DID=fabric (sent FLOGI))
+ * is the p2p winner, will use a separate node
+ * to send PLOGI to peer
+ */
+ WARN_ON(!node->nport->p2p_winner);
+
+ rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
+ if (rnode) {
+ /*
+ * the "other" transient p2p node has
+ * already kicked off the
+ * new node from which PLOGI is sent
+ */
+ node_printf(node,
+ "Node with fc_id x%x already exists\n",
+ rnode->rnode.fc_id);
+ } else {
+ /*
+ * create new node (SID=1, DID=2)
+ * from which to send PLOGI
+ */
+ rnode = efc_node_alloc(nport,
+ nport->p2p_remote_port_id,
+ false, false);
+ if (!rnode) {
+ efc_log_err(efc, "node alloc failed\n");
+ return;
+ }
+
+ efc_fabric_notify_topology(node);
+ /* sm: / allocate p2p remote node */
+ efc_node_transition(rnode, __efc_p2p_rnode_init,
+ NULL);
+ }
+
+ /*
+ * the transient node (SID=0 or DID=fabric)
+ * has served its purpose
+ */
+ if (node->rnode.fc_id == 0) {
+ /*
+ * if this is the SID=0 node,
+ * move to the init state in case peer
+ * has restarted FLOGI discovery and FLOGI is pending
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ } else {
+ /*
+ * if this is the DID=fabric node
+ * (we initiated FLOGI), shut it down
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
+ break;
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+
+ /* sm: if p2p_winner / domain_attach */
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (!node->nport->domain->attached) {
+ node_printf(node, "Domain not attached\n");
+ efc_domain_attach(node->nport->domain,
+ node->nport->p2p_port_id);
+ } else {
+ node_printf(node, "Domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /* this node has served its purpose;
+ * we'll expect a PLOGI on a separate
+ * node (remote SID=0x1); return this node
+ * to init state in case peer
+ * restarts discovery -- it may already
+ * have (pending frames may exist).
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /*
+ * LS_ACC failed, possibly due to link down;
+ * shutdown node and wait
+ * for FLOGI discovery to restart
+ */
+ node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_ABTS_RCVD: {
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node, "PLOGI failed, shutting down\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* if we're in external loopback mode, just send LS_ACC */
+ if (node->efc->external_loopback) {
+ efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ } else {
+ /*
+ * if this isn't external loopback,
+ * pass to default handler
+ */
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+ break;
+ }
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* sent PLOGI and before completion was seen, received the
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues and order of processing cannot be assumed)
+ * Save OXID so PRLI can be sent after the attach and continue
+ * to wait for PLOGI response
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+		/*
+		 * Since we've received a PRLI, we have a port login and just
+		 * need to wait for the PLOGI response to do the node attach;
+		 * then we can send the LS_ACC for the PRLI. During this time
+		 * we may receive FCP_CMNDs (possible since we've already sent
+		 * a PRLI and our peer may have accepted it). We are not
+		 * waiting on any other unsolicited frames to continue the
+		 * login process, so it will not hurt to hold frames here.
+		 */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
+ int rc;
+
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node->ls_acc_io,
+ node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ efc_node_transition(node, __efc_d_port_logged_in,
+ NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_PRLI_RCVD:
+ node_printf(node, "%s: PRLI received before node is attached\n",
+ efc_sm_event_name(evt));
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
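+/*
+ * Record the p2p winner determination on the nport: the winner takes
+ * N_Port ID 1 and assigns ID 2 to the peer; the loser clears both IDs
+ * and will be addressed by the winner's PLOGI.
+ */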
+int
+efc_p2p_setup(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rnode_winner;
+
+ rnode_winner = efc_rnode_is_winner(nport);
+
+ /* set nport flags to indicate p2p "winner" */
+ if (rnode_winner == 1) {
+ nport->p2p_remote_port_id = 0;
+ nport->p2p_port_id = 0;
+ nport->p2p_winner = false;
+ } else if (rnode_winner == 0) {
+ nport->p2p_remote_port_id = 2;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ /* no winner; only okay if external loopback enabled */
+ if (nport->efc->external_loopback) {
+ /*
+ * External loopback mode enabled;
+ * local nport and remote node
+ * will be registered with an NPortID = 1;
+ */
+ efc_log_debug(efc,
+ "External loopback mode enabled\n");
+ nport->p2p_remote_port_id = 1;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ efc_log_warn(efc,
+ "failed to determine p2p winner\n");
+ return rnode_winner;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/scsi/elx/libefc/efc_fabric.h b/drivers/scsi/elx/libefc/efc_fabric.h
new file mode 100644
index 000000000000..b0947ae6fdca
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declarations for the interface exported by efc_fabric
+ */
+
+#ifndef __EFCT_FABRIC_H__
+#define __EFCT_FABRIC_H__
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_nport_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_logo_wait_rsp(struct efc_sm_ctx *ctx,
+		       enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_p2p_setup(struct efc_nport *nport);
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology);
+void efc_fabric_notify_topology(struct efc_node *node);
+
+#endif /* __EFCT_FABRIC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c
new file mode 100644
index 000000000000..a1b4ce6a27b4
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efc.h"
+
+int
+efc_remote_node_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_remote_node *rnode = data;
+ struct efc_node *node = rnode->node;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return 0;
+}
+
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 port_id)
+{
+ /* Find an FC node structure given the FC port ID */
+ return xa_load(&nport->lookup, port_id);
+}
+
+static void
+_efc_node_free(struct kref *arg)
+{
+ struct efc_node *node = container_of(arg, struct efc_node, ref);
+ struct efc *efc = node->efc;
+ struct efc_dma *dma;
+
+ dma = &node->sparm_dma_buf;
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+ mempool_free(node, efc->node_pool);
+}
+
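+/*
+ * Allocate and initialize a node for the given port ID, register it in
+ * the nport lookup, and take a reference on the owning nport.
+ */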
+struct efc_node *efc_node_alloc(struct efc_nport *nport,
+ u32 port_id, bool init, bool targ)
+{
+ int rc;
+ struct efc_node *node = NULL;
+ struct efc *efc = nport->efc;
+ struct efc_dma *dma;
+
+ if (nport->shutting_down) {
+		efc_log_debug(efc, "node allocation when shutting down %06x\n",
+ port_id);
+ return NULL;
+ }
+
+ node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
+ if (!node) {
+		efc_log_err(efc, "node allocation failed %06x\n", port_id);
+ return NULL;
+ }
+ memset(node, 0, sizeof(*node));
+
+ dma = &node->sparm_dma_buf;
+ dma->size = NODE_SPARAMS_SIZE;
+ dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
+ if (!dma->virt) {
+ efc_log_err(efc, "node dma alloc failed\n");
+ goto dma_fail;
+ }
+ node->rnode.indicator = U32_MAX;
+ node->nport = nport;
+
+ node->efc = efc;
+ node->init = init;
+ node->targ = targ;
+
+ spin_lock_init(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&node->pend_frames);
+ spin_lock_init(&node->els_ios_lock);
+ INIT_LIST_HEAD(&node->els_ios_list);
+ node->els_io_enabled = true;
+
+ rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
+ goto hw_alloc_fail;
+ }
+
+ node->rnode.node = node;
+ node->sm.app = node;
+ node->evtdepth = 0;
+
+ efc_node_update_display_name(node);
+
+ rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Node lookup store failed: %d\n", rc);
+ goto xa_fail;
+ }
+
+ /* initialize refcount */
+ kref_init(&node->ref);
+ node->release = _efc_node_free;
+ kref_get(&nport->ref);
+
+ return node;
+
+xa_fail:
+ efc_node_free_resources(efc, &node->rnode);
+hw_alloc_fail:
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+dma_fail:
+ mempool_free(node, efc->node_pool);
+ return NULL;
+}
+
+void
+efc_node_free(struct efc_node *node)
+{
+ struct efc_nport *nport;
+ struct efc *efc;
+ int rc = 0;
+ struct efc_node *ns = NULL;
+
+ nport = node->nport;
+ efc = node->efc;
+
+	node_printf(node, "Free'd\n");
+
+	if (!node->nport) {
+		efc_log_err(efc, "Node already freed\n");
+		return;
+	}
+
+	if (node->refound) {
+		/*
+		 * Save the name server node; at the end we will post it a
+		 * fake RSCN event to make up for any RSCN events ignored
+		 * while this node was being deleted.
+		 */
+		ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
+	}
+
+ /* Free HW resources */
+ rc = efc_node_free_resources(efc, &node->rnode);
+ if (rc < 0)
+ efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);
+
+ /* if the gidpt_delay_timer is still running, then delete it */
+ if (timer_pending(&node->gidpt_delay_timer))
+ del_timer(&node->gidpt_delay_timer);
+
+ xa_erase(&nport->lookup, node->rnode.fc_id);
+
+	/*
+	 * If the node lookup is now empty, post an ALL_CHILD_NODES_FREE
+	 * event to the nport after the lock is released; the nport may be
+	 * freed as a result of the event.
+	 */
+ if (xa_empty(&nport->lookup))
+ efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ node->nport = NULL;
+ node->sm.current_state = NULL;
+
+ kref_put(&nport->ref, nport->release);
+ kref_put(&node->ref, node->release);
+
+ if (ns) {
+ /* sending fake RSCN event to name server node */
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
+ }
+}
+
+static void
+efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
+{
+ if (!dma || !buffer || !buffer_length)
+ return;
+
+ if (buffer_length > dma->size)
+ buffer_length = dma->size;
+
+ memcpy(dma->virt, buffer, buffer_length);
+ dma->len = buffer_length;
+}
+
+int
+efc_node_attach(struct efc_node *node)
+{
+ int rc = 0;
+ struct efc_nport *nport = node->nport;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = node->efc;
+
+ if (!domain->attached) {
+		efc_log_err(efc, "node attach failed: domain not attached\n");
+ return -EIO;
+ }
+ /* Update node->wwpn/wwnn */
+
+ efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
+ efc_node_get_wwpn(node));
+ efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
+ efc_node_get_wwnn(node));
+
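+	/*
+	 * Copy the service parameters into the DMA buffer, skipping the
+	 * first 4 bytes (the ELS command code) so the buffer starts at
+	 * the common service parameters.
+	 */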
+ efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
+ sizeof(node->service_params) - 4);
+
+ /* take lock to protect node->rnode.attached */
+ rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
+ if (rc < 0)
+ efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);
+
+ return rc;
+}
+
+void
+efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
+{
+ switch (fc_id) {
+ case FC_FID_FLOGI:
+ snprintf(buffer, buffer_length, "fabric");
+ break;
+ case FC_FID_FCTRL:
+ snprintf(buffer, buffer_length, "fabctl");
+ break;
+ case FC_FID_DIR_SERV:
+ snprintf(buffer, buffer_length, "nserve");
+ break;
+ default:
+ if (fc_id == FC_FID_DOM_MGR) {
+ snprintf(buffer, buffer_length, "dctl%02x",
+ (fc_id & 0x0000ff));
+ } else {
+ snprintf(buffer, buffer_length, "%06x", fc_id);
+ }
+ break;
+ }
+}
+
+void
+efc_node_update_display_name(struct efc_node *node)
+{
+ u32 port_id = node->rnode.fc_id;
+ struct efc_nport *nport = node->nport;
+ char portid_display[16];
+
+ efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));
+
+ snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
+ nport->display_name, portid_display);
+}
+
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node)
+{
+ if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
+ efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
+ node->display_name, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ }
+}
+
+static void efc_node_handle_implicit_logo(struct efc_node *node)
+{
+ int rc;
+
+	/*
+	 * Currently the only cause of an implicit logout is a received
+	 * PLOGI, so the node's pending ELS IO list won't be empty (the
+	 * PLOGI will be on it).
+	 */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+ node_printf(node, "Reason: implicit logout, re-authenticate\n");
+
+ /* Re-attach node with the same HW node resources */
+ node->req_free = false;
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ node->els_io_enabled = true;
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+}
+
+static void efc_node_handle_explicit_logo(struct efc_node *node)
+{
+ s8 pend_frames_empty;
+ unsigned long flags = 0;
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+ pend_frames_empty = list_empty(&node->pend_frames);
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ /*
+ * there are two scenarios where we want to keep
+ * this node alive:
+ * 1. there are pending frames that need to be
+ * processed or
+ * 2. we're an initiator and the remote node is
+ * a target and we need to re-authenticate
+ */
+	node_printf(node, "Shutdown: explicit logo pend=%d nport.ini=%d node.tgt=%d\n",
+		    !pend_frames_empty, node->nport->enable_ini, node->targ);
+ if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
+ u8 send_plogi = false;
+
+ if (node->nport->enable_ini && node->targ) {
+ /*
+ * we're an initiator and
+ * node shutting down is a target;
+ * we'll need to re-authenticate in
+ * initial state
+ */
+ send_plogi = true;
+ }
+
+ /*
+ * transition to __efc_d_init
+ * (will retain HW node resources)
+ */
+ node->els_io_enabled = true;
+ node->req_free = false;
+
+ /*
+ * either pending frames exist or we are re-authenticating
+ * with PLOGI (or both); in either case, return to initial
+ * state
+ */
+ efc_node_init_device(node, send_plogi);
+ }
+ /* else: let node shutdown occur */
+}
+
+static void
+efc_node_purge_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+}
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ efc_node_hold_frames(node);
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ /* by default, we will be freeing node after we unwind */
+ node->req_free = true;
+
+ switch (node->shutdown_reason) {
+ case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
+ /* Node shutdown b/c of PLOGI received when node
+ * already logged in. We have PLOGI service
+ * parameters, so submit node attach; we won't be
+ * freeing this node
+ */
+
+ efc_node_handle_implicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
+ efc_node_handle_explicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_DEFAULT:
+ default: {
+ /*
+ * shutdown due to link down,
+ * node going away (xport event) or
+ * nport shutdown, purge pending and
+ * proceed to cleanup node
+ */
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ node_printf(node,
+ "Shutdown reason: default, purge pending\n");
+ efc_node_purge_pending(node);
+ break;
+ }
+ }
+
+ break;
+ }
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+static bool
+efc_node_check_els_quiesced(struct efc_node *node)
+{
+ /* check to see if ELS requests, completions are quiesced */
+ if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
+ efc_els_io_list_empty(node, &node->els_ios_list)) {
+ if (!node->attached) {
+ /* hw node detach already completed, proceed */
+ node_printf(node, "HW node not attached\n");
+ efc_node_transition(node,
+ __efc_node_wait_ios_shutdown,
+ NULL);
+ } else {
+ /*
+ * hw node detach hasn't completed,
+ * transition and wait
+ */
+ node_printf(node, "HW node still attached\n");
+ efc_node_transition(node, __efc_node_wait_node_free,
+ NULL);
+ }
+ return true;
+ }
+ return false;
+}
+
+void
+efc_node_initiate_cleanup(struct efc_node *node)
+{
+ /*
+ * if ELS's have already been quiesced, will move to next state
+ * if ELS's have not been quiesced, abort them
+ */
+ if (!efc_node_check_els_quiesced(node)) {
+ efc_node_hold_frames(node);
+ efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
+ }
+}
+
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ bool check_quiesce = false;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /* Node state machine: Wait for all ELSs to complete */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ if (efc_els_io_list_empty(node, &node->els_ios_list)) {
+ node_printf(node, "All ELS IOs complete\n");
+ check_quiesce = true;
+ }
+ break;
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* all ELS IO's complete */
+ node_printf(node, "All ELS IOs complete\n");
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+
+ if (check_quiesce)
+ efc_node_check_els_quiesced(node);
+}
+
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ /* node is officially no longer attached */
+ node->attached = false;
+ efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+	/* As IOs and ELS IOs complete we expect to get these events */
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+
+ /* first check to see if no ELS IOs are outstanding */
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+			/* no ELS IOs outstanding; proceed to node shutdown */
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+	/* Can happen as ELS IOs complete */
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
+ efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+ struct efc_node_cb *cbdata = arg;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
+ case EFC_EVT_NODE_MISSING:
+ case EFC_EVT_FCP_CMD_RCVD:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ node->refound = true;
+ break;
+
+ /*
+ * node->attached must be set appropriately
+ * for all node attach/detach events
+ */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ break;
+
+ /*
+ * handle any ELS completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ break;
+
+ /*
+ * handle any ELS request completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_ELS_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * Unsupported ELS was received,
+ * send LS_RJT, command not supported
+ */
+ efc_log_debug(efc,
+ "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
+ node->display_name, funcname,
+ ((u8 *)cbdata->payload->dma.virt)[0]);
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD:
+ case EFC_EVT_FLOGI_RCVD:
+ case EFC_EVT_LOGO_RCVD:
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /* sm: / send ELS_RJT */
+ efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ /* if we didn't catch this in a state, send generic LS_RJT */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
+ break;
+ }
+ case EFC_EVT_ABTS_RCVD: {
+ efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+efc_node_save_sparms(struct efc_node *node, void *payload)
+{
+ memcpy(node->service_params, payload, sizeof(node->service_params));
+}
+
+void
+efc_node_post_event(struct efc_node *node,
+ enum efc_sm_event evt, void *arg)
+{
+ bool free_node = false;
+
+ node->evtdepth++;
+
+ efc_sm_post_event(&node->sm, evt, arg);
+
+	/*
+	 * If our event call depth is one and we're not holding frames,
+	 * then we can dispatch any pending frames. We don't want to allow
+	 * the efc_process_node_pending() call to recurse.
+	 */
+ if (!node->hold_frames && node->evtdepth == 1)
+ efc_process_node_pending(node);
+
+ node->evtdepth--;
+
+ /*
+ * Free the node object if so requested,
+ * and we're at an event call depth of zero
+ */
+ if (node->evtdepth == 0 && node->req_free)
+ free_node = true;
+
+ if (free_node)
+ efc_node_free(node);
+}
+
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+{
+ struct efc_sm_ctx *ctx = &node->sm;
+
+ if (ctx->current_state == state) {
+ efc_node_post_event(node, EFC_EVT_REENTER, data);
+ } else {
+ efc_node_post_event(node, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_node_post_event(node, EFC_EVT_ENTER, data);
+ }
+}
+
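+/*
+ * Illustrative only: every node state handler follows the ENTER/EXIT
+ * contract that efc_node_transition() drives. A minimal handler (the
+ * name below is hypothetical) has this shape:
+ *
+ *	static void
+ *	__example_node_state(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ *			     void *arg)
+ *	{
+ *		struct efc_node *node = ctx->app;
+ *
+ *		efc_node_evt_set(ctx, evt, __func__);
+ *		switch (evt) {
+ *		case EFC_EVT_ENTER:
+ *			efc_node_hold_frames(node);
+ *			break;
+ *		case EFC_EVT_EXIT:
+ *			efc_node_accept_frames(node);
+ *			break;
+ *		default:
+ *			__efc_node_common(__func__, ctx, evt, arg);
+ *		}
+ *	}
+ */
+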
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
+{
+ memset(buf, 0, buf_len);
+
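+	/* e.g. 0x10000000c9a5b7d1 -> "eui.10000000C9A5B7D1" (example value) */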
+ snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
+}
+
+u64
+efc_node_get_wwpn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwpn);
+}
+
+u64
+efc_node_get_wwnn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwnn);
+}
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
+{
+ int empty;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+ empty = list_empty(list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return empty;
+}
+
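+/*
+ * Park the node in __efc_node_paused; the saved state
+ * (node->nodedb_state) is re-entered when EFC_EVT_RESUME is posted
+ * (see __efc_node_paused() below).
+ */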
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *))
+
+{
+ node->nodedb_state = state;
+ efc_node_transition(node, __efc_node_paused, NULL);
+}
+
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+	/*
+	 * This state is entered when a state is "paused". When resumed, the
+	 * node is transitioned to the previously saved state
+	 * (node->nodedb_state).
+	 */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node_printf(node, "Paused\n");
+ break;
+
+ case EFC_EVT_RESUME: {
+ void (*pf)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+ pf = node->nodedb_state;
+
+ node->nodedb_state = NULL;
+ efc_node_transition(node, pf, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node->req_free = true;
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_node_recv_els_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
+ struct {
+ u32 cmd;
+ enum efc_sm_event evt;
+ u32 payload_size;
+ } els_cmd_list[] = {
+ {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
+ {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
+ {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
+ {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
+ {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ };
+ struct efc_node_cb cbdata;
+ u8 *buf = seq->payload->dma.virt;
+ enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
+ u32 i;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ /* find a matching event for the ELS command */
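+	/*
+	 * Commands without a table entry keep EFC_EVT_ELS_RCVD, which
+	 * __efc_node_common() answers with an LS_RJT (command not
+	 * supported).
+	 */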
+ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
+ if (els_cmd_list[i].cmd == buf[0]) {
+ evt = els_cmd_list[i].evt;
+ break;
+ }
+ }
+
+ efc_node_post_event(node, evt, &cbdata);
+}
+
+void
+efc_node_recv_ct_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fc_ct_hdr *iu = seq->payload->dma.virt;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efc *efc = node->efc;
+ u16 gscmd = be16_to_cpu(iu->ct_cmd);
+
+	efc_log_err(efc, "[%s] Received cmd: %x, sending CT_REJECT\n",
+ node->display_name, gscmd);
+ efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
+ FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
+}
+
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
+{
+ struct efc_node_cb cbdata;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
+}
+
+void
+efc_process_node_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 pend_frames_processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame
+ * processed because any given frame could cause a transition
+ * to a state that holds frames
+ */
+ if (node->hold_frames)
+ break;
+
+ seq = NULL;
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ if (!list_empty(&node->pend_frames)) {
+ seq = list_first_entry(&node->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ if (!seq) {
+ pend_frames_processed = node->pend_frames_processed;
+ node->pend_frames_processed = 0;
+ break;
+ }
+ node->pend_frames_processed++;
+
+ /* now dispatch frame(s) to dispatch function */
+ efc_node_dispatch_frame(node, seq);
+ efc->tt.hw_seq_free(efc, seq);
+ }
+
+ if (pend_frames_processed != 0)
+ efc_log_debug(efc, "%u node frames held and processed\n",
+ pend_frames_processed);
+}
+
+void
+efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
+{
+ unsigned long flags = 0;
+ enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
+ struct efc *efc = node->efc;
+
+ if (status)
+ evt = EFC_EVT_NODE_SESS_REG_FAIL;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, evt, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
+{
+ struct efc *efc = node->efc;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, evt, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_shutdown(struct efc_node *node, void *arg)
+{
+ unsigned long flags = 0;
+ struct efc *efc = node->efc;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
diff --git a/drivers/scsi/elx/libefc/efc_node.h b/drivers/scsi/elx/libefc/efc_node.h
new file mode 100644
index 000000000000..e9c600ac45d5
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFC_NODE_H__)
+#define __EFC_NODE_H__
+#include "scsi/fc/fc_ns.h"
+
+#define EFC_NODEDB_PAUSE_FABRIC_LOGIN (1 << 0)
+#define EFC_NODEDB_PAUSE_NAMESERVER (1 << 1)
+#define EFC_NODEDB_PAUSE_NEW_NODES (1 << 2)
+
+#define MAX_ACC_REJECT_PAYLOAD sizeof(struct fc_els_ls_rjt)
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efc, "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \
+ io->node->display_name, io->instance_index, io->init_task_tag, \
+ io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+static inline void
+efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ const char *handler)
+{
+ struct efc_node *node = ctx->app;
+
+ if (evt == EFC_EVT_ENTER) {
+		/* strscpy(), unlike strncpy(), guarantees NUL termination */
+		strscpy(node->current_state_name, handler,
+			sizeof(node->current_state_name));
+	} else if (evt == EFC_EVT_EXIT) {
+		strscpy(node->prev_state_name, node->current_state_name,
+			sizeof(node->prev_state_name));
+		strscpy(node->current_state_name, "invalid",
+			sizeof(node->current_state_name));
+ }
+ node->prev_evt = node->current_evt;
+ node->current_evt = evt;
+}
+
+/**
+ * hold frames in pending frame list
+ *
+ * Unsolicited receive frames are held on the node pending frame list,
+ * rather than being processed.
+ */
+
+static inline void
+efc_node_hold_frames(struct efc_node *node)
+{
+ node->hold_frames = true;
+}
+
+/**
+ * accept frames
+ *
+ * Unsolicited receive frames are processed rather than being held on the
+ * node pending frame list.
+ */
+
+static inline void
+efc_node_accept_frames(struct efc_node *node)
+{
+ node->hold_frames = false;
+}
+
+/*
+ * Node initiator/target enable defines
+ * All combinations of the SLI port (nport) initiator/target enable,
+ * and remote node initiator/target enable are enumerated.
+ * ex: EFC_NODE_ENABLE_T_TO_IT decodes to: target mode enabled on the SLI
+ * port, and I+T enabled on the remote node.
+ */
+enum efc_node_enable {
+ EFC_NODE_ENABLE_x_TO_x,
+ EFC_NODE_ENABLE_x_TO_T,
+ EFC_NODE_ENABLE_x_TO_I,
+ EFC_NODE_ENABLE_x_TO_IT,
+ EFC_NODE_ENABLE_T_TO_x,
+ EFC_NODE_ENABLE_T_TO_T,
+ EFC_NODE_ENABLE_T_TO_I,
+ EFC_NODE_ENABLE_T_TO_IT,
+ EFC_NODE_ENABLE_I_TO_x,
+ EFC_NODE_ENABLE_I_TO_T,
+ EFC_NODE_ENABLE_I_TO_I,
+ EFC_NODE_ENABLE_I_TO_IT,
+ EFC_NODE_ENABLE_IT_TO_x,
+ EFC_NODE_ENABLE_IT_TO_T,
+ EFC_NODE_ENABLE_IT_TO_I,
+ EFC_NODE_ENABLE_IT_TO_IT,
+};
+
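+/*
+ * Build the enum value above from the current flags: bit 3 = nport
+ * initiator, bit 2 = nport target, bit 1 = node initiator, bit 0 =
+ * node target. For example, a target-only nport with an I+T remote
+ * node gives 0x7 == EFC_NODE_ENABLE_T_TO_IT.
+ */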
+static inline enum efc_node_enable
+efc_node_get_enable(struct efc_node *node)
+{
+ u32 retval = 0;
+
+ if (node->nport->enable_ini)
+ retval |= (1U << 3);
+ if (node->nport->enable_tgt)
+ retval |= (1U << 2);
+ if (node->init)
+ retval |= (1U << 1);
+ if (node->targ)
+ retval |= (1U << 0);
+ return (enum efc_node_enable)retval;
+}
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_attach(struct efc_node *node);
+struct efc_node *
+efc_node_alloc(struct efc_nport *nport, u32 port_id,
+ bool init, bool targ);
+void
+efc_node_free(struct efc_node *node);
+void
+efc_node_update_display_name(struct efc_node *node);
+void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt,
+ void *arg);
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+efc_node_save_sparms(struct efc_node *node, void *payload);
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *, enum efc_sm_event,
+ void *), void *data);
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+efc_node_initiate_cleanup(struct efc_node *node);
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name);
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg));
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+int
+efc_node_active_ios_empty(struct efc_node *node);
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+void
+efc_process_node_pending(struct efc_node *node);
+
+u64 efc_node_get_wwnn(struct efc_node *node);
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 id);
+void
+efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg);
+void
+efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s);
+void
+efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq);
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __EFC_NODE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
new file mode 100644
index 000000000000..2e83a667901f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * NPORT
+ *
+ * Port object for physical port and NPIV ports.
+ */
+
+/*
+ * NPORT REFERENCE COUNTING
+ *
+ * A nport reference should be taken when:
+ * - an nport is allocated
+ * - a vport populates associated nport
+ * - a remote node is allocated
+ * - an unsolicited frame is processed
+ * The reference should be dropped when:
+ * - the unsolicited frame processing is done
+ * - the remote node is removed
+ * - the vport is removed
+ * - the nport is removed
+ */
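+/*
+ * A sketch of that pattern as used below (illustrative only):
+ *
+ *	kref_init(&nport->ref);			first ref, taken at alloc
+ *	kref_get(&nport->ref);			e.g. a vport takes a ref
+ *	kref_put(&nport->ref, nport->release);	drop; the last put frees
+ */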
+
+#include "efc.h"
+
+void
+efc_nport_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_nport *nport = data;
+ unsigned long flags = 0;
+
+ efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_sm_post_event(&nport->sm, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+static struct efc_nport *
+efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
+{
+ struct efc_nport *nport = NULL;
+
+ /* Find a nport, given the WWNN and WWPN */
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwnn == wwnn && nport->wwpn == wwpn)
+ return nport;
+ }
+ return NULL;
+}
+
+static void
+_efc_nport_free(struct kref *arg)
+{
+ struct efc_nport *nport = container_of(arg, struct efc_nport, ref);
+
+ kfree(nport);
+}
+
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt)
+{
+ struct efc_nport *nport;
+
+ if (domain->efc->enable_ini)
+ enable_ini = 0;
+
+ /* Return a failure if this nport has already been allocated */
+ if ((wwpn != 0) || (wwnn != 0)) {
+ nport = efc_nport_find_wwn(domain, wwnn, wwpn);
+ if (nport) {
+ efc_log_err(domain->efc,
+ "NPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ return NULL;
+ }
+ }
+
+ nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
+ if (!nport)
+ return nport;
+
+ /* initialize refcount */
+ kref_init(&nport->ref);
+ nport->release = _efc_nport_free;
+
+ nport->efc = domain->efc;
+ snprintf(nport->display_name, sizeof(nport->display_name), "------");
+ nport->domain = domain;
+ xa_init(&nport->lookup);
+ nport->instance_index = domain->nport_count++;
+ nport->sm.app = nport;
+ nport->enable_ini = enable_ini;
+ nport->enable_tgt = enable_tgt;
+ nport->enable_rscn = (nport->enable_ini ||
+ (nport->enable_tgt && enable_target_rscn(nport->efc)));
+
+ /* Copy service parameters from domain */
+ memcpy(nport->service_params, domain->service_params,
+ sizeof(struct fc_els_flogi));
+
+ /* Update requested fc_id */
+ nport->fc_id = fc_id;
+
+	/* Update the nport's service parameters for the new WWNs */
+ nport->wwpn = wwpn;
+ nport->wwnn = wwnn;
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
+ (unsigned long long)wwnn);
+
+ /*
+ * if this is the "first" nport of the domain,
+ * then make it the "phys" nport
+ */
+ if (list_empty(&domain->nport_list))
+ domain->nport = nport;
+
+ INIT_LIST_HEAD(&nport->list_entry);
+ list_add_tail(&nport->list_entry, &domain->nport_list);
+
+ kref_get(&domain->ref);
+
+ efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);
+
+ return nport;
+}
+
+void
+efc_nport_free(struct efc_nport *nport)
+{
+ struct efc_domain *domain;
+
+ if (!nport)
+ return;
+
+ domain = nport->domain;
+ efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
+ list_del(&nport->list_entry);
+ /*
+ * if this is the physical nport,
+ * then clear it out of the domain
+ */
+ if (nport == domain->nport)
+ domain->nport = NULL;
+
+ xa_destroy(&nport->lookup);
+ xa_erase(&domain->lookup, nport->fc_id);
+
+ if (list_empty(&domain->nport_list))
+ efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ kref_put(&domain->ref, domain->release);
+ kref_put(&nport->ref, nport->release);
+}
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id)
+{
+ struct efc_nport *nport;
+
+ /* Find a nport object, given an FC_ID */
+ nport = xa_load(&domain->lookup, d_id);
+ if (!nport || !kref_get_unless_zero(&nport->ref))
+ return NULL;
+
+ return nport;
+}
+
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id)
+{
+ int rc;
+ struct efc_node *node;
+ struct efc *efc = nport->efc;
+ unsigned long index;
+
+ /* Set our lookup */
+ rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
+ if (rc) {
+		efc_log_err(efc, "nport lookup store failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update our display_name */
+ efc_node_fcid_display(fc_id, nport->display_name,
+ sizeof(nport->display_name));
+
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_update_display_name(node);
+ }
+
+ efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
+ nport->display_name, fc_id);
+
+ /* Register a nport, given an FC_ID */
+ rc = efc_cmd_nport_attach(efc, nport, fc_id);
+ if (rc < 0) {
+ efc_log_err(nport->efc,
+ "efc_hw_port_attach failed: %d\n", rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efc_nport_shutdown(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_node *node;
+ unsigned long index;
+
+ xa_for_each(&nport->lookup, index, node) {
+ if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ /*
+ * If this is a vport, logout of the fabric
+ * controller so that it deletes the vport
+ * on the switch.
+ */
+ /* if link is down, don't send logo */
+ if (efc->link_status == EFC_LINK_STATUS_DOWN) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
+ node->display_name);
+
+ if (!efc_send_logo(node)) {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
+ continue;
+ }
+
+		/*
+		 * failed to send LOGO,
+		 * go ahead and clean up the node anyway
+		 */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ }
+}
+
+static void
+efc_vport_link_down(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+
+ /* Clear the nport reference in the vport specification */
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ kref_put(&nport->ref, nport->release);
+ vport->nport = NULL;
+ break;
+ }
+ }
+}
+
+static void
+__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ break;
+ case EFC_EVT_NPORT_ATTACH_OK:
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN:
+ /* Flag this nport as shutting down */
+ nport->shutting_down = true;
+
+ if (nport->is_vport)
+ efc_vport_link_down(nport);
+
+ if (xa_empty(&nport->lookup)) {
+ /* Remove the nport from the domain's lookup table */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free,
+ NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_debug(nport->efc,
+ "efc_hw_port_free failed\n");
+				/* Not much we can do, free the nport anyway */
+ efc_nport_free(nport);
+ }
+ } else {
+ /* sm: node list is not empty / shutdown nodes */
+ efc_sm_transition(ctx,
+ __efc_nport_wait_shutdown, NULL);
+ efc_nport_shutdown(nport);
+ }
+ break;
+ default:
+ efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
+ nport->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ /* the physical nport is attached */
+ case EFC_EVT_NPORT_ATTACH_OK:
+ WARN_ON(nport != domain->nport);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+
+ case EFC_EVT_NPORT_ALLOC_OK:
+ /* ignore */
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ __be64 be_wwpn = cpu_to_be64(nport->wwpn);
+
+ if (nport->wwpn == 0)
+ efc_log_debug(efc, "vport: letting f/w select WWN\n");
+
+ if (nport->fc_id != U32_MAX) {
+ efc_log_debug(efc, "vport: hard coding port id: %x\n",
+ nport->fc_id);
+ }
+
+ efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
+	/* If wwpn is zero, then we'll let the f/w assign the wwpn */
+ if (efc_cmd_nport_alloc(efc, nport, nport->domain,
+ nport->wwpn == 0 ? NULL :
+ (uint8_t *)&be_wwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ break;
+ }
+
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ if (nport->wwnn == 0) {
+ nport->wwnn = be64_to_cpu(nport->sli_wwnn);
+ nport->wwpn = be64_to_cpu(nport->sli_wwpn);
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ }
+
+ /* Update the nport's service parameters */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * if nport->fc_id is uninitialized,
+ * then request that the fabric node use FDISC
+ * to find an fc_id.
+ * Otherwise we're restoring vports, or we're in
+ * fabric emulation mode, so attach the fc_id
+ */
+ if (nport->fc_id == U32_MAX) {
+ struct efc_node *fabric;
+
+ fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
+ false);
+ if (!fabric) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return;
+ }
+ efc_node_transition(fabric, __efc_vport_fabric_init,
+ NULL);
+ } else {
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ efc_nport_attach(nport, nport->fc_id);
+ }
+ efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ /*
+ * This state is entered after the nport is allocated;
+ * it then waits for a fabric node
+ * FDISC to complete, which requests a nport attach.
+ * The nport attach complete is handled in this state.
+ */
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ struct efc_node *node;
+
+ /* Find our fabric node, and forward this event */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (!node) {
+ efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
+ break;
+ }
+ /* sm: / forward nport attach to fabric node */
+ efc_node_post_event(node, evt, NULL);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_vport_update_spec(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ vport->wwnn = nport->wwnn;
+ vport->wwpn = nport->wwpn;
+ vport->tgt_data = nport->tgt_data;
+ vport->ini_data = nport->ini_data;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ struct efc_node *node;
+ unsigned long index;
+
+ efc_log_debug(efc,
+ "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ xa_for_each(&nport->lookup, index, node)
+ efc_node_update_display_name(node);
+
+ efc->tt.new_nport(efc, nport);
+
+		/*
+		 * Update the vport parameters
+		 * (if it's not the physical nport)
+		 */
+ if (nport->is_vport)
+ efc_vport_update_spec(nport);
+ break;
+ }
+
+ case EFC_EVT_EXIT:
+ efc_log_debug(efc,
+			    "[%s] NPORT detached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ efc->tt.del_nport(efc, nport);
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK:
+ case EFC_EVT_NPORT_ALLOC_FAIL:
+ case EFC_EVT_NPORT_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_FAIL:
+		/*
+		 * ignore these events; just wait for the
+		 * ALL_CHILD_NODES_FREE event
+		 */
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ /*
+ * Remove the nport from the domain's
+ * sparse vector lookup table
+ */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_err(nport->efc, "efc_hw_port_free failed\n");
+			/* Not much we can do, free the nport anyway */
+ efc_nport_free(nport);
+ }
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK:
+ /* Ignore as we are waiting for the free CB */
+ break;
+ case EFC_EVT_NPORT_FREE_OK: {
+ /* All done, free myself */
+ efc_nport_free(nport);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
+{
+ struct efc_nport *nport;
+
+ lockdep_assert_held(&domain->efc->lock);
+
+ nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
+ vport->enable_ini, vport->enable_tgt);
+ vport->nport = nport;
+ if (!nport)
+ return -EIO;
+
+ kref_get(&nport->ref);
+ nport->is_vport = true;
+ nport->tgt_data = vport->tgt_data;
+ nport->ini_data = vport->ini_data;
+
+ efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);
+
+ return 0;
+}
+
+int
+efc_vport_start(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ /* Use the vport spec to find the associated vports and start them */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (!vport->nport) {
+ if (efc_vport_nport_alloc(domain, vport))
+ rc = -EIO;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool ini, bool tgt, void *tgt_data,
+ void *ini_data)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (ini && domain->efc->enable_ini == 0) {
+ efc_log_debug(efc, "driver initiator mode not enabled\n");
+ return -EIO;
+ }
+
+ if (tgt && domain->efc->enable_tgt == 0) {
+ efc_log_debug(efc, "driver target mode not enabled\n");
+ return -EIO;
+ }
+
+ /*
+ * Create a vport spec if we need to recreate
+ * this vport after a link up event
+ */
+ vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
+ tgt_data, ini_data);
+ if (!vport) {
+ efc_log_err(efc, "failed to create vport object entry\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ rc = efc_vport_nport_alloc(domain, vport);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, uint64_t wwnn)
+{
+ struct efc_nport *nport;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ /* walk the efc_vport_list and remove from there */
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ if (!domain) {
+ /* No domain means no nport to look for */
+ return 0;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
+ kref_put(&nport->ref, nport->release);
+ /* Shutdown this NPORT */
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return 0;
+}
+
+void
+efc_vport_del_all(struct efc *efc)
+{
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
+ u32 fc_id, bool enable_ini,
+ bool enable_tgt, void *tgt_data, void *ini_data)
+{
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+	/*
+	 * walk the efc_vport_list and return failure if a valid vport
+	 * entry (one with a matching non-zero WWPN and WWNN) has
+	 * already been created
+	 */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if ((wwpn && vport->wwpn == wwpn) &&
+ (wwnn && vport->wwnn == wwnn)) {
+ efc_log_err(efc,
+ "VPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+ }
+
+ vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
+ if (!vport) {
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+
+ vport->wwnn = wwnn;
+ vport->wwpn = wwpn;
+ vport->fc_id = fc_id;
+ vport->enable_tgt = enable_tgt;
+ vport->enable_ini = enable_ini;
+ vport->tgt_data = tgt_data;
+ vport->ini_data = ini_data;
+
+ INIT_LIST_HEAD(&vport->list_entry);
+ list_add_tail(&vport->list_entry, &efc->vport_list);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return vport;
+}
diff --git a/drivers/scsi/elx/libefc/efc_nport.h b/drivers/scsi/elx/libefc/efc_nport.h
new file mode 100644
index 000000000000..b575ea205bbf
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/**
+ * EFC FC port (NPORT) exported declarations
+ *
+ */
+
+#ifndef __EFC_NPORT_H__
+#define __EFC_NPORT_H__
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id);
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt);
+void
+efc_nport_free(struct efc_nport *nport);
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id);
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_vport_start(struct efc_domain *domain);
+
+#endif /* __EFC_NPORT_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_sm.c b/drivers/scsi/elx/libefc/efc_sm.c
new file mode 100644
index 000000000000..afd963782c1c
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Generic state machine framework.
+ */
+#include "efc.h"
+#include "efc_sm.h"
+
+/**
+ * efc_sm_post_event() - Post an event to a context.
+ *
+ * @ctx: State machine context
+ * @evt: Event to post
+ * @data: Event-specific data (if any)
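+ *
+ * Return: 0 on success, -EIO if the context has no current state.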
+ */
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data)
+{
+ if (!ctx->current_state)
+ return -EIO;
+
+ ctx->current_state(ctx, evt, data);
+ return 0;
+}
+
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+
+{
+ if (ctx->current_state == state) {
+ efc_sm_post_event(ctx, EFC_EVT_REENTER, data);
+ } else {
+ efc_sm_post_event(ctx, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_sm_post_event(ctx, EFC_EVT_ENTER, data);
+ }
+}
+
+static const char *event_name[] = EFC_SM_EVENT_NAME;
+
+const char *efc_sm_event_name(enum efc_sm_event evt)
+{
+ if (evt > EFC_EVT_LAST)
+ return "unknown";
+
+ return event_name[evt];
+}
diff --git a/drivers/scsi/elx/libefc/efc_sm.h b/drivers/scsi/elx/libefc/efc_sm.h
new file mode 100644
index 000000000000..e26867b4db24
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/**
+ * Generic state machine framework declarations.
+ */
+
+#ifndef _EFC_SM_H
+#define _EFC_SM_H
+
+struct efc_sm_ctx;
+
+/* State Machine events */
+enum efc_sm_event {
+ /* Common Events */
+ EFC_EVT_ENTER,
+ EFC_EVT_REENTER,
+ EFC_EVT_EXIT,
+ EFC_EVT_SHUTDOWN,
+ EFC_EVT_ALL_CHILD_NODES_FREE,
+ EFC_EVT_RESUME,
+ EFC_EVT_TIMER_EXPIRED,
+
+ /* Domain Events */
+ EFC_EVT_RESPONSE,
+ EFC_EVT_ERROR,
+
+ EFC_EVT_DOMAIN_FOUND,
+ EFC_EVT_DOMAIN_ALLOC_OK,
+ EFC_EVT_DOMAIN_ALLOC_FAIL,
+ EFC_EVT_DOMAIN_REQ_ATTACH,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ EFC_EVT_DOMAIN_ATTACH_FAIL,
+ EFC_EVT_DOMAIN_LOST,
+ EFC_EVT_DOMAIN_FREE_OK,
+ EFC_EVT_DOMAIN_FREE_FAIL,
+ EFC_EVT_HW_DOMAIN_REQ_ATTACH,
+ EFC_EVT_HW_DOMAIN_REQ_FREE,
+
+ /* Sport Events */
+ EFC_EVT_NPORT_ALLOC_OK,
+ EFC_EVT_NPORT_ALLOC_FAIL,
+ EFC_EVT_NPORT_ATTACH_OK,
+ EFC_EVT_NPORT_ATTACH_FAIL,
+ EFC_EVT_NPORT_FREE_OK,
+ EFC_EVT_NPORT_FREE_FAIL,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ EFC_EVT_HW_PORT_ALLOC_OK,
+ EFC_EVT_HW_PORT_ALLOC_FAIL,
+ EFC_EVT_HW_PORT_ATTACH_OK,
+ EFC_EVT_HW_PORT_REQ_ATTACH,
+ EFC_EVT_HW_PORT_REQ_FREE,
+ EFC_EVT_HW_PORT_FREE_OK,
+
+ /* Login Events */
+ EFC_EVT_SRRS_ELS_REQ_OK,
+ EFC_EVT_SRRS_ELS_CMPL_OK,
+ EFC_EVT_SRRS_ELS_REQ_FAIL,
+ EFC_EVT_SRRS_ELS_CMPL_FAIL,
+ EFC_EVT_SRRS_ELS_REQ_RJT,
+ EFC_EVT_NODE_ATTACH_OK,
+ EFC_EVT_NODE_ATTACH_FAIL,
+ EFC_EVT_NODE_FREE_OK,
+ EFC_EVT_NODE_FREE_FAIL,
+ EFC_EVT_ELS_FRAME,
+ EFC_EVT_ELS_REQ_TIMEOUT,
+ EFC_EVT_ELS_REQ_ABORTED,
+ /* request an ELS IO be aborted */
+ EFC_EVT_ABORT_ELS,
+ /* ELS abort process complete */
+ EFC_EVT_ELS_ABORT_CMPL,
+
+ EFC_EVT_ABTS_RCVD,
+
+ /* node is not in the GID_PT payload */
+ EFC_EVT_NODE_MISSING,
+ /* node is allocated and in the GID_PT payload */
+ EFC_EVT_NODE_REFOUND,
+ /* node shutting down due to PLOGI recvd (implicit logo) */
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ /* node shutting down due to LOGO recvd/sent (explicit logo) */
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+
+ EFC_EVT_PLOGI_RCVD,
+ EFC_EVT_FLOGI_RCVD,
+ EFC_EVT_LOGO_RCVD,
+ EFC_EVT_PRLI_RCVD,
+ EFC_EVT_PRLO_RCVD,
+ EFC_EVT_PDISC_RCVD,
+ EFC_EVT_FDISC_RCVD,
+ EFC_EVT_ADISC_RCVD,
+ EFC_EVT_RSCN_RCVD,
+ EFC_EVT_SCR_RCVD,
+ EFC_EVT_ELS_RCVD,
+
+ EFC_EVT_FCP_CMD_RCVD,
+
+ EFC_EVT_GIDPT_DELAY_EXPIRED,
+
+ /* SCSI Target Server events */
+ EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY,
+ EFC_EVT_NODE_DEL_INI_COMPLETE,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE,
+ EFC_EVT_NODE_SESS_REG_OK,
+ EFC_EVT_NODE_SESS_REG_FAIL,
+
+ /* Must be last */
+ EFC_EVT_LAST
+};
+
+/* State Machine event name lookup array */
+#define EFC_SM_EVENT_NAME { \
+ [EFC_EVT_ENTER] = "EFC_EVT_ENTER", \
+ [EFC_EVT_REENTER] = "EFC_EVT_REENTER", \
+ [EFC_EVT_EXIT] = "EFC_EVT_EXIT", \
+ [EFC_EVT_SHUTDOWN] = "EFC_EVT_SHUTDOWN", \
+ [EFC_EVT_ALL_CHILD_NODES_FREE] = "EFC_EVT_ALL_CHILD_NODES_FREE",\
+ [EFC_EVT_RESUME] = "EFC_EVT_RESUME", \
+ [EFC_EVT_TIMER_EXPIRED] = "EFC_EVT_TIMER_EXPIRED", \
+ [EFC_EVT_RESPONSE] = "EFC_EVT_RESPONSE", \
+ [EFC_EVT_ERROR] = "EFC_EVT_ERROR", \
+ [EFC_EVT_DOMAIN_FOUND] = "EFC_EVT_DOMAIN_FOUND", \
+ [EFC_EVT_DOMAIN_ALLOC_OK] = "EFC_EVT_DOMAIN_ALLOC_OK", \
+ [EFC_EVT_DOMAIN_ALLOC_FAIL] = "EFC_EVT_DOMAIN_ALLOC_FAIL", \
+ [EFC_EVT_DOMAIN_REQ_ATTACH] = "EFC_EVT_DOMAIN_REQ_ATTACH", \
+ [EFC_EVT_DOMAIN_ATTACH_OK] = "EFC_EVT_DOMAIN_ATTACH_OK", \
+ [EFC_EVT_DOMAIN_ATTACH_FAIL] = "EFC_EVT_DOMAIN_ATTACH_FAIL", \
+ [EFC_EVT_DOMAIN_LOST] = "EFC_EVT_DOMAIN_LOST", \
+ [EFC_EVT_DOMAIN_FREE_OK] = "EFC_EVT_DOMAIN_FREE_OK", \
+ [EFC_EVT_DOMAIN_FREE_FAIL] = "EFC_EVT_DOMAIN_FREE_FAIL", \
+ [EFC_EVT_HW_DOMAIN_REQ_ATTACH] = "EFC_EVT_HW_DOMAIN_REQ_ATTACH",\
+ [EFC_EVT_HW_DOMAIN_REQ_FREE] = "EFC_EVT_HW_DOMAIN_REQ_FREE", \
+ [EFC_EVT_NPORT_ALLOC_OK] = "EFC_EVT_NPORT_ALLOC_OK", \
+ [EFC_EVT_NPORT_ALLOC_FAIL] = "EFC_EVT_NPORT_ALLOC_FAIL", \
+ [EFC_EVT_NPORT_ATTACH_OK] = "EFC_EVT_NPORT_ATTACH_OK", \
+ [EFC_EVT_NPORT_ATTACH_FAIL] = "EFC_EVT_NPORT_ATTACH_FAIL", \
+ [EFC_EVT_NPORT_FREE_OK] = "EFC_EVT_NPORT_FREE_OK", \
+ [EFC_EVT_NPORT_FREE_FAIL] = "EFC_EVT_NPORT_FREE_FAIL", \
+ [EFC_EVT_NPORT_TOPOLOGY_NOTIFY] = "EFC_EVT_NPORT_TOPOLOGY_NOTIFY",\
+ [EFC_EVT_HW_PORT_ALLOC_OK] = "EFC_EVT_HW_PORT_ALLOC_OK", \
+ [EFC_EVT_HW_PORT_ALLOC_FAIL] = "EFC_EVT_HW_PORT_ALLOC_FAIL", \
+ [EFC_EVT_HW_PORT_ATTACH_OK] = "EFC_EVT_HW_PORT_ATTACH_OK", \
+ [EFC_EVT_HW_PORT_REQ_ATTACH] = "EFC_EVT_HW_PORT_REQ_ATTACH", \
+ [EFC_EVT_HW_PORT_REQ_FREE] = "EFC_EVT_HW_PORT_REQ_FREE", \
+ [EFC_EVT_HW_PORT_FREE_OK] = "EFC_EVT_HW_PORT_FREE_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_OK] = "EFC_EVT_SRRS_ELS_REQ_OK", \
+ [EFC_EVT_SRRS_ELS_CMPL_OK] = "EFC_EVT_SRRS_ELS_CMPL_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_FAIL] = "EFC_EVT_SRRS_ELS_REQ_FAIL", \
+ [EFC_EVT_SRRS_ELS_CMPL_FAIL] = "EFC_EVT_SRRS_ELS_CMPL_FAIL", \
+ [EFC_EVT_SRRS_ELS_REQ_RJT] = "EFC_EVT_SRRS_ELS_REQ_RJT", \
+ [EFC_EVT_NODE_ATTACH_OK] = "EFC_EVT_NODE_ATTACH_OK", \
+ [EFC_EVT_NODE_ATTACH_FAIL] = "EFC_EVT_NODE_ATTACH_FAIL", \
+ [EFC_EVT_NODE_FREE_OK] = "EFC_EVT_NODE_FREE_OK", \
+ [EFC_EVT_NODE_FREE_FAIL] = "EFC_EVT_NODE_FREE_FAIL", \
+ [EFC_EVT_ELS_FRAME] = "EFC_EVT_ELS_FRAME", \
+ [EFC_EVT_ELS_REQ_TIMEOUT] = "EFC_EVT_ELS_REQ_TIMEOUT", \
+ [EFC_EVT_ELS_REQ_ABORTED] = "EFC_EVT_ELS_REQ_ABORTED", \
+ [EFC_EVT_ABORT_ELS] = "EFC_EVT_ABORT_ELS", \
+ [EFC_EVT_ELS_ABORT_CMPL] = "EFC_EVT_ELS_ABORT_CMPL", \
+ [EFC_EVT_ABTS_RCVD] = "EFC_EVT_ABTS_RCVD", \
+ [EFC_EVT_NODE_MISSING] = "EFC_EVT_NODE_MISSING", \
+ [EFC_EVT_NODE_REFOUND] = "EFC_EVT_NODE_REFOUND", \
+ [EFC_EVT_SHUTDOWN_IMPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_IMPLICIT_LOGO",\
+ [EFC_EVT_SHUTDOWN_EXPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_EXPLICIT_LOGO",\
+ [EFC_EVT_PLOGI_RCVD] = "EFC_EVT_PLOGI_RCVD", \
+ [EFC_EVT_FLOGI_RCVD] = "EFC_EVT_FLOGI_RCVD", \
+ [EFC_EVT_LOGO_RCVD] = "EFC_EVT_LOGO_RCVD", \
+ [EFC_EVT_PRLI_RCVD] = "EFC_EVT_PRLI_RCVD", \
+ [EFC_EVT_PRLO_RCVD] = "EFC_EVT_PRLO_RCVD", \
+ [EFC_EVT_PDISC_RCVD] = "EFC_EVT_PDISC_RCVD", \
+ [EFC_EVT_FDISC_RCVD] = "EFC_EVT_FDISC_RCVD", \
+ [EFC_EVT_ADISC_RCVD] = "EFC_EVT_ADISC_RCVD", \
+ [EFC_EVT_RSCN_RCVD] = "EFC_EVT_RSCN_RCVD", \
+ [EFC_EVT_SCR_RCVD] = "EFC_EVT_SCR_RCVD", \
+ [EFC_EVT_ELS_RCVD] = "EFC_EVT_ELS_RCVD", \
+ [EFC_EVT_FCP_CMD_RCVD] = "EFC_EVT_FCP_CMD_RCVD", \
+	[EFC_EVT_GIDPT_DELAY_EXPIRED] = "EFC_EVT_GIDPT_DELAY_EXPIRED",\
+	[EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY] =\
+				"EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY",\
+	[EFC_EVT_NODE_DEL_INI_COMPLETE] = "EFC_EVT_NODE_DEL_INI_COMPLETE",\
+	[EFC_EVT_NODE_DEL_TGT_COMPLETE] = "EFC_EVT_NODE_DEL_TGT_COMPLETE",\
+	[EFC_EVT_NODE_SESS_REG_OK] = "EFC_EVT_NODE_SESS_REG_OK", \
+	[EFC_EVT_NODE_SESS_REG_FAIL] = "EFC_EVT_NODE_SESS_REG_FAIL",\
+ [EFC_EVT_LAST] = "EFC_EVT_LAST", \
+}
+
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data);
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg),
+ void *data);
+void efc_sm_disable(struct efc_sm_ctx *ctx);
+const char *efc_sm_event_name(enum efc_sm_event evt);
+
+#endif /* ! _EFC_SM_H */
diff --git a/drivers/scsi/elx/libefc/efclib.c b/drivers/scsi/elx/libefc/efclib.c
new file mode 100644
index 000000000000..dd3e3d0a4761
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * LIBEFC LOCKING
+ *
+ * The critical sections protected by the efc's spinlock are quite broad and
+ * may be improved upon in the future. The libefc code and its locking don't
+ * influence the I/O path, so the broad locking doesn't impact I/O performance.
+ *
+ * The strategy is to lock whenever processing a request from the base driver.
+ * This means that the entry points into the libefc library are protected by
+ * the efc lock, so all the state machine transitions are protected.
+ */
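+/*
+ * The resulting pattern at each entry point (see, for example,
+ * efc_node_post_shutdown() in efc_node.c):
+ *
+ *	spin_lock_irqsave(&efc->lock, flags);
+ *	efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
+ *	spin_unlock_irqrestore(&efc->lock, flags);
+ */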
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "efc.h"
+
+int efcport_init(struct efc *efc)
+{
+ u32 rc = 0;
+
+ spin_lock_init(&efc->lock);
+ INIT_LIST_HEAD(&efc->vport_list);
+ efc->hold_frames = false;
+ spin_lock_init(&efc->pend_frames_lock);
+ INIT_LIST_HEAD(&efc->pend_frames);
+
+ /* Create Node pool */
+ efc->node_pool = mempool_create_kmalloc_pool(EFC_MAX_REMOTE_NODES,
+ sizeof(struct efc_node));
+ if (!efc->node_pool) {
+ efc_log_err(efc, "Can't allocate node pool\n");
+ return -ENOMEM;
+ }
+
+ efc->node_dma_pool = dma_pool_create("node_dma_pool", &efc->pci->dev,
+ NODE_SPARAMS_SIZE, 0, 0);
+ if (!efc->node_dma_pool) {
+ efc_log_err(efc, "Can't allocate node dma pool\n");
+ mempool_destroy(efc->node_pool);
+ return -ENOMEM;
+ }
+
+ efc->els_io_pool = mempool_create_kmalloc_pool(EFC_ELS_IO_POOL_SZ,
+ sizeof(struct efc_els_io_req));
+	if (!efc->els_io_pool) {
+		efc_log_err(efc, "Can't allocate els io pool\n");
+		/* unwind the earlier allocations instead of leaking them */
+		dma_pool_destroy(efc->node_dma_pool);
+		mempool_destroy(efc->node_pool);
+		return -ENOMEM;
+	}
+
+ return rc;
+}
+
+static void
+efc_purge_pending(struct efc *efc)
+{
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &efc->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+}
+
+void efcport_destroy(struct efc *efc)
+{
+ efc_purge_pending(efc);
+ mempool_destroy(efc->els_io_pool);
+ mempool_destroy(efc->node_pool);
+ dma_pool_destroy(efc->node_dma_pool);
+}
diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
new file mode 100644
index 000000000000..dde20891c2dd
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.h
@@ -0,0 +1,621 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCLIB_H__
+#define __EFCLIB_H__
+
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+#include "scsi/fc/fc_gs.h"
+#include "scsi/fc_frame.h"
+#include "../include/efc_common.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_SERVICE_PARMS_LENGTH 120
+#define EFC_NAME_LENGTH 32
+#define EFC_SM_NAME_LENGTH 64
+#define EFC_DISPLAY_BUS_INFO_LENGTH 16
+
+#define EFC_WWN_LENGTH 32
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+/* Timeouts */
+#define EFC_FC_ELS_SEND_DEFAULT_TIMEOUT 0
+#define EFC_FC_FLOGI_TIMEOUT_SEC 5
+#define EFC_SHUTDOWN_TIMEOUT_USEC 30000000
+
+/* Return values for calls from base driver to libefc */
+#define EFC_SCSI_CALL_COMPLETE 0
+#define EFC_SCSI_CALL_ASYNC 1
+
+/* Local port topology */
+enum efc_nport_topology {
+ EFC_NPORT_TOPO_UNKNOWN = 0,
+ EFC_NPORT_TOPO_FABRIC,
+ EFC_NPORT_TOPO_P2P,
+ EFC_NPORT_TOPO_FC_AL,
+};
+
+#define enable_target_rscn(efc) 1
+
+enum efc_node_shutd_rsn {
+ EFC_NODE_SHUTDOWN_DEFAULT = 0,
+ EFC_NODE_SHUTDOWN_EXPLICIT_LOGO,
+ EFC_NODE_SHUTDOWN_IMPLICIT_LOGO,
+};
+
+enum efc_node_send_ls_acc {
+ EFC_NODE_SEND_LS_ACC_NONE = 0,
+ EFC_NODE_SEND_LS_ACC_PLOGI,
+ EFC_NODE_SEND_LS_ACC_PRLI,
+};
+
+#define EFC_LINK_STATUS_UP 0
+#define EFC_LINK_STATUS_DOWN 1
+
+/* State machine context header */
+struct efc_sm_ctx {
+ void (*current_state)(struct efc_sm_ctx *ctx,
+ u32 evt, void *arg);
+
+ const char *description;
+ void *app;
+};
+
+/* Description of discovered Fabric Domain */
+struct efc_domain_record {
+ u32 index;
+ u32 priority;
+ u8 address[6];
+ u8 wwn[8];
+ union {
+ u8 vlan[512];
+ u8 loop[128];
+ } map;
+ u32 speed;
+ u32 fc_id;
+ bool is_loop;
+ bool is_nport;
+};
+
+/* Domain events */
+enum efc_hw_domain_event {
+ EFC_HW_DOMAIN_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_FAIL,
+ EFC_HW_DOMAIN_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_FAIL,
+ EFC_HW_DOMAIN_FREE_OK,
+ EFC_HW_DOMAIN_FREE_FAIL,
+ EFC_HW_DOMAIN_LOST,
+ EFC_HW_DOMAIN_FOUND,
+ EFC_HW_DOMAIN_CHANGED,
+};
+
+/**
+ * Fibre Channel port object
+ *
+ * @list_entry: nport list entry
+ * @ref: reference count, each node takes a reference
+ * @release: function to free nport object
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: port display name
+ * @is_vport: Is NPIV port
+ * @free_req_pending: pending request to free resources
+ * @attached: mark attached if reg VPI succeeds
+ * @p2p_winner: TRUE if we're the point-to-point winner
+ * @domain: pointer back to domain
+ * @wwpn: port wwpn
+ * @wwnn: port wwnn
+ * @tgt_data: target backend private port data
+ * @ini_data: initiator backend private port data
+ * @indicator: VPI
+ * @fc_id: port FC address
+ * @dma: memory for Service Parameters
+ * @wwnn_str: wwpn string
+ * @sli_wwpn: SLI provided wwpn
+ * @sli_wwnn: SLI provided wwnn
+ * @sm: nport state machine context
+ * @lookup: fc_id to node lookup object
+ * @enable_ini: SCSI initiator enabled for this port
+ * @enable_tgt: SCSI target enabled for this port
+ * @enable_rscn: port will be expecting RSCN
+ * @shutting_down: nport in process of shutting down
+ * @p2p_port_id: our port id for point-to-point
+ * @topology: fabric/p2p/unknown
+ * @service_params: login parameters
+ * @p2p_remote_port_id: remote node's port id for point-to-point
+ */
+
+struct efc_nport {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc *efc;
+ u32 instance_index;
+ char display_name[EFC_NAME_LENGTH];
+ bool is_vport;
+ bool free_req_pending;
+ bool attached;
+ bool attaching;
+ bool p2p_winner;
+ struct efc_domain *domain;
+ u64 wwpn;
+ u64 wwnn;
+ void *tgt_data;
+ void *ini_data;
+
+ u32 indicator;
+ u32 fc_id;
+ struct efc_dma dma;
+
+ u8 wwnn_str[EFC_WWN_LENGTH];
+ __be64 sli_wwpn;
+ __be64 sli_wwnn;
+
+ struct efc_sm_ctx sm;
+ struct xarray lookup;
+ bool enable_ini;
+ bool enable_tgt;
+ bool enable_rscn;
+ bool shutting_down;
+ u32 p2p_port_id;
+ enum efc_nport_topology topology;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u32 p2p_remote_port_id;
+};
+
+/**
+ * Fibre Channel domain object
+ *
+ * This object is a container for the various SLI components needed
+ * to connect to the domain of a FC or FCoE switch
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: Node display name
+ * @nport_list: linked list of nports associated with this domain
+ * @ref: Reference count, each nport takes a reference
+ * @release: Function to free domain object
+ * @ini_domain: initiator backend private domain data
+ * @tgt_domain: target backend private domain data
+ * @sm: state machine context
+ * @fcf: FC Forwarder table index
+ * @fcf_indicator: FCFI
+ * @indicator: VFI
+ * @nport_count: Number of nports allocated
+ * @dma: memory for Service Parameters
+ * @fcf_wwn: WWN for FCF/switch
+ * @drvsm: driver domain sm context
+ * @attached: set true after attach completes
+ * @is_fc: is FC
+ * @is_loop: is loop topology
+ * @is_nlport: is public loop
+ * @domain_found_pending: a domain found event is pending; drec is updated
+ * @req_domain_free: True if domain object should be free'd
+ * @req_accept_frames: set in domain state machine to enable frames
+ * @domain_notify_pend: Set in domain SM to avoid duplicate node event post
+ * @pending_drec: Pending drec if a domain found is pending
+ * @service_params: any nports service parameters
+ * @flogi_service_params: Fabric/P2P service parameters from FLOGI
+ * @lookup: d_id to node lookup object
+ * @nport: Pointer to first (physical) SLI port
+ */
+struct efc_domain {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct list_head nport_list;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ void *ini_domain;
+ void *tgt_domain;
+
+ /* Declarations private to HW/SLI */
+ u32 fcf;
+ u32 fcf_indicator;
+ u32 indicator;
+ u32 nport_count;
+ struct efc_dma dma;
+
+ /* Declarations private to FC transport */
+ u64 fcf_wwn;
+ struct efc_sm_ctx drvsm;
+ bool attached;
+ bool is_fc;
+ bool is_loop;
+ bool is_nlport;
+ bool domain_found_pending;
+ bool req_domain_free;
+ bool req_accept_frames;
+ bool domain_notify_pend;
+
+ struct efc_domain_record pending_drec;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u8 flogi_service_params[EFC_SERVICE_PARMS_LENGTH];
+
+ struct xarray lookup;
+
+ struct efc_nport *nport;
+};
+
+/**
+ * Remote Node object
+ *
+ * This object represents a connection between the SLI port and another
+ * Nx_Port on the fabric. Note this can be either a well-known port such
+ * as an F_Port (i.e. ff:ff:fe) or another N_Port.
+ * @indicator: RPI
+ * @index: RPI allocation index
+ * @fc_id: FC address
+ * @attached: true if attached
+ * @nport: associated SLI port
+ * @node: associated node
+ */
+struct efc_remote_node {
+ u32 indicator;
+ u32 index;
+ u32 fc_id;
+
+ bool attached;
+
+ struct efc_nport *nport;
+ void *node;
+};
+
+/**
+ * FC Node object
+ * @efc: pointer back to efc structure
+ * @display_name: Node display name
+ * @nport: associated nport pointer
+ * @ref: reference count
+ * @release: function to free node object
+ * @hold_frames: hold incoming frames if true
+ * @els_io_enabled: Enable allocating els ios for this node
+ * @els_ios_lock: lock to protect the els ios list
+ * @els_ios_list: ELS I/O's for this node
+ * @ini_node: backend initiator private node data
+ * @tgt_node: backend target private node data
+ * @rnode: Remote node
+ * @sm: state machine context
+ * @evtdepth: current event posting nesting depth
+ * @req_free: this node is to be free'd
+ * @attached: node is attached (REGLOGIN complete)
+ * @fcp_enabled: node is enabled to handle FCP
+ * @rscn_pending: for name server node RSCN is pending
+ * @send_plogi: TRUE if a PLOGI request should be sent
+ * @send_plogi_acc: send PLOGI accept, upon completion of node attach
+ * @send_ls_acc: type of LS acc to send
+ * @ls_acc_io: SCSI IO for LS acc
+ * @ls_acc_oxid: OX_ID for pending accept
+ * @ls_acc_did: D_ID for pending accept
+ * @shutdown_reason: reason for node shutdown
+ * @sparm_dma_buf: service parameters buffer
+ * @service_params: plogi/acc frame from remote device
+ * @pend_frames_lock: lock for inbound pending frames list
+ * @pend_frames: inbound pending frames list
+ * @pend_frames_processed: count of frames processed in hold frames interval
+ * @ox_id_in_use: used to verify one-at-a-time use of ox_id
+ * @els_retries_remaining: for ELS, number of retries remaining
+ * @els_req_cnt: number of outstanding ELS requests
+ * @els_cmpl_cnt: number of outstanding ELS completions
+ * @abort_cnt: abort counter for debugging purposes
+ * @current_state_name: current node state
+ * @prev_state_name: previous node state
+ * @current_evt: current event
+ * @prev_evt: previous event
+ * @targ: node is target capable
+ * @init: node is init capable
+ * @refound: Handle node refound case when node is being deleted
+ * @nodedb_state: Node debugging, saved state
+ * @gidpt_delay_timer: GIDPT delay timer
+ * @time_last_gidpt_msec: start time of last target RSCN GIDPT
+ * @wwnn: remote port WWNN
+ * @wwpn: remote port WWPN
+ */
+struct efc_node {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct efc_nport *nport;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ bool hold_frames;
+ bool els_io_enabled;
+ bool send_plogi_acc;
+ bool send_plogi;
+ bool rscn_pending;
+ bool fcp_enabled;
+ bool attached;
+ bool req_free;
+
+ spinlock_t els_ios_lock;
+ struct list_head els_ios_list;
+ void *ini_node;
+ void *tgt_node;
+
+ struct efc_remote_node rnode;
+ /* Declarations private to FC transport */
+ struct efc_sm_ctx sm;
+ u32 evtdepth;
+
+ enum efc_node_send_ls_acc send_ls_acc;
+ void *ls_acc_io;
+ u32 ls_acc_oxid;
+ u32 ls_acc_did;
+ enum efc_node_shutd_rsn shutdown_reason;
+ bool targ;
+ bool init;
+ bool refound;
+ struct efc_dma sparm_dma_buf;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ u32 pend_frames_processed;
+ u32 ox_id_in_use;
+ u32 els_retries_remaining;
+ u32 els_req_cnt;
+ u32 els_cmpl_cnt;
+ u32 abort_cnt;
+
+ char current_state_name[EFC_SM_NAME_LENGTH];
+ char prev_state_name[EFC_SM_NAME_LENGTH];
+ int current_evt;
+ int prev_evt;
+
+ void (*nodedb_state)(struct efc_sm_ctx *ctx,
+ u32 evt, void *arg);
+ struct timer_list gidpt_delay_timer;
+ u64 time_last_gidpt_msec;
+
+ char wwnn[EFC_WWN_LENGTH];
+ char wwpn[EFC_WWN_LENGTH];
+};
+
+/**
+ * NPIV port
+ *
+ * Collection of the information required to restore a virtual port across
+ * link events.
+ * @list_entry: vport list entry
+ * @wwnn: node name
+ * @wwpn: port name
+ * @fc_id: port id
+ * @enable_tgt: SCSI target mode enabled
+ * @enable_ini: SCSI initiator mode enabled
+ * @tgt_data: target backend pointer
+ * @ini_data: initiator backend pointer
+ * @nport: Used to match record after attaching for update
+ */
+struct efc_vport {
+ struct list_head list_entry;
+ u64 wwnn;
+ u64 wwpn;
+ u32 fc_id;
+ bool enable_tgt;
+ bool enable_ini;
+ void *tgt_data;
+ void *ini_data;
+ struct efc_nport *nport;
+};
+
+#define node_printf(node, fmt, args...) \
+ efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args)
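+
+/*
+ * Example (sketch): node_printf(node, "PLOGI failed, rc %d\n", rc);
+ * expands to an efc_log_info() call prefixed with the node's display name.
+ */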
+
+/* Node SM IO Context Callback structure */
+struct efc_node_cb {
+ int status;
+ int ext_status;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ struct efc_dma els_rsp;
+
+ /* Actual length of data received */
+ int rsp_len;
+};
+
+struct efc_hw_rq_buffer {
+ u16 rqindex;
+ struct efc_dma dma;
+};
+
+/**
+ * FC sequence object
+ *
+ * Defines a general FC sequence object
+ * @hw: HW that owns this sequence
+ * @fcfi: FCFI associated with sequence
+ * @header: Received frame header
+ * @payload: Received frame payload
+ * @hw_priv: HW private context
+ */
+struct efc_hw_sequence {
+ struct list_head list_entry;
+ void *hw;
+ u8 fcfi;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ void *hw_priv;
+};
+
+enum efc_disc_io_type {
+ EFC_DISC_IO_ELS_REQ,
+ EFC_DISC_IO_ELS_RESP,
+ EFC_DISC_IO_CT_REQ,
+ EFC_DISC_IO_CT_RESP
+};
+
+struct efc_io_els_params {
+ u32 s_id;
+ u16 ox_id;
+ u8 timeout;
+};
+
+struct efc_io_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+};
+
+union efc_disc_io_param {
+ struct efc_io_els_params els;
+ struct efc_io_ct_params ct;
+};
+
+struct efc_disc_io {
+ struct efc_dma req; /* send buffer */
+ struct efc_dma rsp; /* receive buffer */
+ enum efc_disc_io_type io_type; /* enum efc_disc_io_type */
+ u16 xmit_len; /* length of ELS request */
+ u16 rsp_len; /* max length of responses to be received */
+ u32 rpi; /* Registered RPI */
+ u32 vpi; /* VPI for this nport */
+ u32 s_id;
+ u32 d_id;
+ bool rpi_registered; /* if false, use tmp RPI */
+ union efc_disc_io_param iparam;
+};
+
+/* Return value indicating the sequence cannot be freed */
+#define EFC_HW_SEQ_HOLD 0
+/* Return value indicating the sequence can be freed */
+#define EFC_HW_SEQ_FREE 1
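+
+/*
+ * Sketch of the intended contract (an assumption from the names above): a
+ * received-frame handler returns EFC_HW_SEQ_HOLD to keep ownership of the
+ * sequence buffers for deferred processing, and EFC_HW_SEQ_FREE to let the
+ * caller recycle them immediately.
+ */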
+
+struct libefc_function_template {
+ /* nport */
+ int (*new_nport)(struct efc *efc, struct efc_nport *sp);
+ void (*del_nport)(struct efc *efc, struct efc_nport *sp);
+
+ /* SCSI node */
+ int (*scsi_new_node)(struct efc *efc, struct efc_node *n);
+ int (*scsi_del_node)(struct efc *efc, struct efc_node *n, int reason);
+
+ int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg);
+ /* send ELS IO */
+ int (*send_els)(struct efc *efc, struct efc_disc_io *io);
+ /* send BLS IO */
+ int (*send_bls)(struct efc *efc, u32 type, struct sli_bls_params *bls);
+ /* free HW frame */
+ int (*hw_seq_free)(struct efc *efc, struct efc_hw_sequence *seq);
+};
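+
+/*
+ * Illustrative sketch (not part of this patch): a backend fills in the
+ * function template before registering the port; the my_* callback names
+ * are hypothetical.
+ *
+ * efc->tt.new_nport = my_new_nport;
+ * efc->tt.del_nport = my_del_nport;
+ * efc->tt.scsi_new_node = my_scsi_new_node;
+ * efc->tt.scsi_del_node = my_scsi_del_node;
+ * efc->tt.send_els = my_send_els;
+ * rc = efcport_init(efc);
+ */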
+
+#define EFC_LOG_LIB 0x01
+#define EFC_LOG_NODE 0x02
+#define EFC_LOG_PORT 0x04
+#define EFC_LOG_DOMAIN 0x08
+#define EFC_LOG_ELS 0x10
+#define EFC_LOG_DOMAIN_SM 0x20
+#define EFC_LOG_SM 0x40
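+
+/*
+ * Example (sketch, assuming these bits feed efc->logmask): restrict
+ * logging to node and ELS events:
+ *
+ * efc->logmask = EFC_LOG_NODE | EFC_LOG_ELS;
+ */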
+
+/* efc library port structure */
+struct efc {
+ void *base;
+ struct pci_dev *pci;
+ struct sli4 *sli;
+ u32 fcfi;
+ u64 req_wwpn;
+ u64 req_wwnn;
+
+ u64 def_wwpn;
+ u64 def_wwnn;
+ u64 max_xfer_size;
+ mempool_t *node_pool;
+ struct dma_pool *node_dma_pool;
+ u32 nodes_count;
+
+ u32 link_status;
+
+ struct list_head vport_list;
+ /* lock to protect the vport list */
+ spinlock_t vport_lock;
+
+ struct libefc_function_template tt;
+ /* lock to protect the discovery library.
+ * Refer to efclib.c for more details.
+ */
+ spinlock_t lock;
+
+ bool enable_ini;
+ bool enable_tgt;
+
+ u32 log_level;
+
+ struct efc_domain *domain;
+ void (*domain_free_cb)(struct efc *efc, void *arg);
+ void *domain_free_cb_arg;
+
+ u64 tgt_rscn_delay_msec;
+ u64 tgt_rscn_period_msec;
+
+ bool external_loopback;
+ u32 nodedb_mask;
+ u32 logmask;
+ mempool_t *els_io_pool;
+ atomic_t els_io_alloc_failed_count;
+
+ /* hold pending frames */
+ bool hold_frames;
+ /* lock to protect pending frames list access */
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ /* count of pending frames that were processed */
+ u32 pend_frames_processed;
+};
+
+/*
+ * EFC library registration
+ * **********************************/
+int efcport_init(struct efc *efc);
+void efcport_destroy(struct efc *efc);
+/*
+ * EFC Domain
+ * **********************************/
+int efc_domain_cb(void *arg, int event, void *data);
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg);
+
+/*
+ * EFC nport
+ * **********************************/
+void efc_nport_cb(void *arg, int event, void *data);
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, u64 wwnn, u64 wwpn, u32 fc_id,
+ bool enable_ini, bool enable_tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_new(struct efc_domain *domain, u64 wwpn,
+ u64 wwnn, u32 fc_id, bool ini, bool tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, u64 wwnn);
+
+void efc_vport_del_all(struct efc *efc);
+
+/*
+ * EFC Node
+ * **********************************/
+int efc_remote_node_cb(void *arg, int event, void *data);
+void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buf_len);
+void efc_node_post_shutdown(struct efc_node *node, void *arg);
+u64 efc_node_get_wwpn(struct efc_node *node);
+
+/*
+ * EFC FCP/ELS/CT interface
+ * **********************************/
+void efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq);
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status);
+
+/*
+ * EFC SCSI INTERACTION LAYER
+ * **********************************/
+void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status);
+void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node);
+
+#endif /* __EFCLIB_H__ */
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
new file mode 100644
index 000000000000..b8c048cdb17f
--- /dev/null
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -0,0 +1,5159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * All common (i.e. transport-independent) SLI-4 functions are implemented
+ * in this file.
+ */
+#include "sli4.h"
+
+static struct sli4_asic_entry_t sli4_asic_table[] = {
+ { SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5},
+ { SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5},
+ { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
+ { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
+};
+
+/* Convert queue type enum (SLI_QTYPE_*) into a string */
+static const char *SLI4_QNAME[] = {
+ "Event Queue",
+ "Completion Queue",
+ "Mailbox Queue",
+ "Work Queue",
+ "Receive Queue",
+ "Undefined"
+};
+
+/**
+ * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @length: Length in bytes of attached command.
+ * @dma: DMA buffer for non-embedded commands.
+ * Return: Command payload buffer.
+ */
+static void *
+sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
+ struct efc_dma *dma)
+{
+ struct sli4_cmd_sli_config *config;
+ u32 flags;
+
+ if (length > sizeof(config->payload.embed) && !dma) {
+ efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
+ length);
+ return NULL;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ config = buf;
+
+ config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
+ if (!dma) {
+ flags = SLI4_SLICONF_EMB;
+ config->dw1_flags = cpu_to_le32(flags);
+ config->payload_len = cpu_to_le32(length);
+ return config->payload.embed;
+ }
+
+ flags = SLI4_SLICONF_PMDCMD_VAL_1;
+ flags &= ~SLI4_SLICONF_EMB;
+ config->dw1_flags = cpu_to_le32(flags);
+
+ config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
+ config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
+ config->payload.mem.length =
+ cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
+ config->payload_len = cpu_to_le32(dma->size);
+ /* save pointer to DMA for BMBX dumping purposes */
+ sli4->bmbx_non_emb_pmd = dma;
+ return dma->virt;
+}
+
+/**
+ * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @qmem: DMA memory for queue.
+ * @eq_id: EQ id associated with this cq.
+ * Return: 0 on success, -EIO on failure.
+ */
+static int
+sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 eq_id)
+{
+ struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages = 0;
+ size_t cmd_size = 0;
+ u32 page_size = 0;
+ u32 n_cqe = 0;
+ u32 dw5_flags = 0;
+ u16 dw6w1_arm = 0;
+ __le32 len;
+
+ /* First calculate number of pages and the mailbox cmd length */
+ n_cqe = qmem->size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = SZ_4K;
+ break;
+ case 4096:
+ page_size = SZ_8K;
+ break;
+ default:
+ return -EIO;
+ }
+ num_pages = sli_page_count(qmem->size, page_size);
+
+ cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2)
+ + SZ_DMAADDR * num_pages;
+
+ cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL);
+ if (!cqv2)
+ return -EIO;
+
+ len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages);
+ sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON,
+ CMD_V2, len);
+ cqv2->page_size = page_size / SLI_PAGE_SIZE;
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
+ cqv2->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES)
+ return -EIO;
+
+ switch (num_pages) {
+ case 1:
+ dw5_flags |= SLI4_CQ_CNT_VAL(256);
+ break;
+ case 2:
+ dw5_flags |= SLI4_CQ_CNT_VAL(512);
+ break;
+ case 4:
+ dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+ break;
+ case 8:
+ dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+ cqv2->cqe_count = cpu_to_le16(n_cqe);
+ break;
+ default:
+ efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID;
+
+ dw5_flags |= SLI4_CREATE_CQV2_EVT;
+ dw5_flags |= SLI4_CREATE_CQV2_VALID;
+
+ cqv2->dw5_flags = cpu_to_le32(dw5_flags);
+ cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm);
+ cqv2->eq_id = cpu_to_le16(eq_id);
+
+ for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+ cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
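+
+/*
+ * Worked example (sketch): a CQ of 4096 16-byte CQEs occupies 64 KiB, so
+ * the code above selects an 8 KiB page size; num_pages is then 8, which
+ * selects the SLI4_CQ_CNT_VAL(LARGE) encoding with an explicit cqe_count.
+ */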
+
+static int
+sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem)
+{
+ struct sli4_rqst_cmn_create_eq *eq;
+ u32 p;
+ uintptr_t addr;
+ u16 num_pages;
+ u32 dw5_flags = 0;
+ u32 dw6_flags = 0, ver;
+
+ eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq),
+ NULL);
+ if (!eq)
+ return -EIO;
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ ver = CMD_V2;
+ else
+ ver = CMD_V0;
+
+ sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON,
+ ver, SLI4_RQST_PYLD_LEN(cmn_create_eq));
+
+ /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
+ num_pages = qmem->size / SLI_PAGE_SIZE;
+ eq->num_pages = cpu_to_le16(num_pages);
+
+ switch (num_pages) {
+ case 1:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(1024);
+ break;
+ case 2:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(2048);
+ break;
+ case 4:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(4096);
+ break;
+ default:
+ efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_EQ_AUTOVALID;
+
+ dw5_flags |= SLI4_CREATE_EQ_VALID;
+ dw6_flags &= (~SLI4_CREATE_EQ_ARM);
+ eq->dw5_flags = cpu_to_le32(dw5_flags);
+ eq->dw6_flags = cpu_to_le32(dw6_flags);
+ eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI);
+
+ for (p = 0, addr = qmem->phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr));
+ eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 cq_id)
+{
+ struct sli4_rqst_cmn_create_mq_ext *mq;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages;
+ u16 dw6w1_flags = 0;
+
+ mq = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL);
+ if (!mq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_create_mq_ext));
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
+ num_pages = qmem->size / SLI_PAGE_SIZE;
+ mq->num_pages = cpu_to_le16(num_pages);
+ switch (num_pages) {
+ case 1:
+ dw6w1_flags |= SLI4_MQE_SIZE_16;
+ break;
+ case 2:
+ dw6w1_flags |= SLI4_MQE_SIZE_32;
+ break;
+ case 4:
+ dw6w1_flags |= SLI4_MQE_SIZE_64;
+ break;
+ case 8:
+ dw6w1_flags |= SLI4_MQE_SIZE_128;
+ break;
+ default:
+ efc_log_info(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL);
+
+ if (sli4->params.mq_create_version) {
+ mq->cq_id_v1 = cpu_to_le16(cq_id);
+ mq->hdr.dw3_version = cpu_to_le32(CMD_V1);
+ } else {
+ dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT);
+ }
+ mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL);
+
+ mq->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+ for (p = 0, addr = qmem->phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id)
+{
+ struct sli4_rqst_wq_create *wq;
+ u32 p;
+ uintptr_t addr;
+ u32 page_size = 0;
+ u32 n_wqe = 0;
+ u16 num_pages;
+
+ wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create),
+ NULL);
+ if (!wq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V1, SLI4_RQST_PYLD_LEN(wq_create));
+ n_wqe = qmem->size / sli4->wqe_size;
+
+ switch (qmem->size) {
+ case 4096:
+ case 8192:
+ case 16384:
+ case 32768:
+ page_size = SZ_4K;
+ break;
+ case 65536:
+ page_size = SZ_8K;
+ break;
+ case 131072:
+ page_size = SZ_16K;
+ break;
+ case 262144:
+ page_size = SZ_32K;
+ break;
+ case 524288:
+ page_size = SZ_64K;
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* valid values for number of pages(num_pages): 1-8 */
+ num_pages = sli_page_count(qmem->size, page_size);
+ wq->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES)
+ return -EIO;
+
+ wq->cq_id = cpu_to_le16(cq_id);
+
+ wq->page_size = page_size / SLI_PAGE_SIZE;
+
+ if (sli4->wqe_size == SLI4_WQE_EXT_BYTES)
+ wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE;
+ else
+ wq->wqe_size_byte |= SLI4_WQE_SIZE;
+
+ wq->wqe_count = cpu_to_le16(n_wqe);
+
+ for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+ wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 cq_id, u16 buffer_size)
+{
+ struct sli4_rqst_rq_create_v1 *rq;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages;
+
+ rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
+ NULL);
+ if (!rq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1));
+ /* Disable "no buffer warnings" to avoid Lancer bug */
+ rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
+
+ /* valid values for number of pages: 1-8 (sec 4.5.6) */
+ num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+ rq->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages ||
+ num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) {
+ efc_log_info(sli4, "num_pages %d not valid, max %d\n",
+ num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES);
+ return -EIO;
+ }
+
+ /* RQE count is the total number of entries (not log2 of the count) */
+ rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
+
+ rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
+
+ rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
+
+ if (buffer_size < sli4->rq_min_buf_size ||
+ buffer_size > sli4->rq_max_buf_size) {
+ efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n",
+ buffer_size, sli4->rq_min_buf_size,
+ sli4->rq_max_buf_size);
+ return -EIO;
+ }
+ rq->buffer_size = cpu_to_le32(buffer_size);
+
+ rq->cq_id = cpu_to_le16(cq_id);
+
+ for (p = 0, addr = qmem->phys;
+ p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
+ struct sli4_queue *qs[], u32 base_cq_id,
+ u32 header_buffer_size,
+ u32 payload_buffer_size, struct efc_dma *dma)
+{
+ struct sli4_rqst_rq_create_v2 *req = NULL;
+ u32 i, p, offset = 0;
+ u32 payload_size, page_count;
+ uintptr_t addr;
+ u32 num_pages;
+ __le32 len;
+
+ page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) +
+ SZ_DMAADDR * page_count,
+ sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+ dma->size = payload_size;
+ dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+ &dma->phys, GFP_KERNEL);
+ if (!dma->virt)
+ return -EIO;
+
+ memset(dma->virt, 0, payload_size);
+
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+ if (!req)
+ return -EIO;
+
+ len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count);
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V2, len);
+ /* Fill Payload fields */
+ req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB;
+ num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
+ req->num_pages = cpu_to_le16(num_pages);
+ req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
+ req->rqe_size_byte |= SLI4_RQE_SIZE_8;
+ req->page_size = SLI4_RQ_PAGE_SIZE_4096;
+ req->rq_count = num_rqs;
+ req->base_cq_id = cpu_to_le16(base_cq_id);
+ req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
+ req->payload_buffer_size = cpu_to_le16(payload_buffer_size);
+
+ for (i = 0; i < num_rqs; i++) {
+ for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ req->page_phys_addr[offset].low =
+ cpu_to_le32(lower_32_bits(addr));
+ req->page_phys_addr[offset].high =
+ cpu_to_le32(upper_32_bits(addr));
+ offset++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
+{
+ if (!q->dma.size)
+ return;
+
+ dma_free_coherent(&sli4->pci->dev, q->dma.size,
+ q->dma.virt, q->dma.phys);
+ memset(&q->dma, 0, sizeof(struct efc_dma));
+}
+
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+ size_t size, u32 n_entries, u32 align)
+{
+ if (q->dma.virt) {
+ efc_log_err(sli4, "%s failed\n", __func__);
+ return -EIO;
+ }
+
+ memset(q, 0, sizeof(struct sli4_queue));
+
+ q->dma.size = size * n_entries;
+ q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
+ &q->dma.phys, GFP_KERNEL);
+ if (!q->dma.virt) {
+ memset(&q->dma, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
+ return -EIO;
+ }
+
+ memset(q->dma.virt, 0, size * n_entries);
+
+ spin_lock_init(&q->lock);
+
+ q->type = qtype;
+ q->size = size;
+ q->length = n_entries;
+
+ if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
+ /* For Prism, the phase bit is flipped after each
+ * complete sweep through the EQ or CQ
+ */
+ q->phase = 1;
+ }
+
+ /* Limit to half the queue size per interrupt */
+ q->proc_limit = n_entries / 2;
+
+ if (q->type == SLI4_QTYPE_EQ)
+ q->posted_limit = q->length / 2;
+ else
+ q->posted_limit = 64;
+
+ return 0;
+}
+
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
+ u32 n_entries, u32 buffer_size,
+ struct sli4_queue *cq, bool is_hdr)
+{
+ if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
+ n_entries, SLI_PAGE_SIZE))
+ return -EIO;
+
+ if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
+ buffer_size))
+ goto error;
+
+ if (__sli_create_queue(sli4, q))
+ goto error;
+
+ if (is_hdr && q->id & 1) {
+ efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
+ goto error;
+ } else if (!is_hdr && (q->id & 1) == 0) {
+ efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
+ goto error;
+ }
+
+ if (is_hdr)
+ q->u.flag |= SLI4_QUEUE_FLAG_HDR;
+ else
+ q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+ return 0;
+
+error:
+ __sli_queue_destroy(sli4, q);
+ return -EIO;
+}
+
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
+ struct sli4_queue *qs[], u32 base_cq_id,
+ u32 n_entries, u32 header_buffer_size,
+ u32 payload_buffer_size)
+{
+ u32 i;
+ struct efc_dma dma = {0};
+ struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
+ void __iomem *db_regaddr = NULL;
+ u32 num_rqs = num_rq_pairs * 2;
+
+ for (i = 0; i < num_rqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
+ SLI4_RQE_SIZE, n_entries,
+ SLI_PAGE_SIZE)) {
+ goto error;
+ }
+ }
+
+ if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
+ header_buffer_size, payload_buffer_size,
+ &dma)) {
+ goto error;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
+ goto error;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+ else
+ db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+
+ rsp = dma.virt;
+ if (rsp->hdr.status) {
+ efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n",
+ rsp->hdr.status, rsp->hdr.additional_status);
+ goto error;
+ }
+
+ for (i = 0; i < num_rqs; i++) {
+ qs[i]->id = i + le16_to_cpu(rsp->q_id);
+ if ((qs[i]->id & 1) == 0)
+ qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR;
+ else
+ qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+ qs[i]->db_regaddr = db_regaddr;
+ }
+
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rqs; i++)
+ __sli_queue_destroy(sli4, qs[i]);
+
+ if (dma.virt)
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+ dma.phys);
+
+ return -EIO;
+}
+
+static int
+sli_res_sli_config(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_sli_config *sli_config = buf;
+
+ /* sanity check */
+ if (!buf || sli_config->hdr.command !=
+ SLI4_MBX_CMD_SLI_CONFIG) {
+ efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf,
+ buf ? sli_config->hdr.command : -1);
+ return -EIO;
+ }
+
+ if (le16_to_cpu(sli_config->hdr.status))
+ return le16_to_cpu(sli_config->hdr.status);
+
+ if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
+ return sli_config->payload.embed[4];
+
+ efc_log_info(sli4, "external buffers not supported\n");
+ return -EIO;
+}
+
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
+{
+ struct sli4_rsp_cmn_create_queue *res_q = NULL;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail %s\n",
+ SLI4_QNAME[q->type]);
+ return -EIO;
+ }
+ if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad status create %s\n",
+ SLI4_QNAME[q->type]);
+ return -EIO;
+ }
+ res_q = (void *)((u8 *)sli4->bmbx.virt +
+ offsetof(struct sli4_cmd_sli_config, payload));
+
+ if (res_q->hdr.status) {
+ efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n",
+ SLI4_QNAME[q->type], res_q->hdr.status,
+ res_q->hdr.additional_status);
+ return -EIO;
+ }
+ q->id = le16_to_cpu(res_q->q_id);
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+ break;
+ case SLI4_QTYPE_CQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+ break;
+ case SLI4_QTYPE_MQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG;
+ break;
+ case SLI4_QTYPE_RQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+ break;
+ case SLI4_QTYPE_WQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype)
+{
+ u32 size = 0;
+
+ switch (qtype) {
+ case SLI4_QTYPE_EQ:
+ size = sizeof(u32);
+ break;
+ case SLI4_QTYPE_CQ:
+ size = 16;
+ break;
+ case SLI4_QTYPE_MQ:
+ size = 256;
+ break;
+ case SLI4_QTYPE_WQ:
+ size = sli4->wqe_size;
+ break;
+ case SLI4_QTYPE_RQ:
+ size = SLI4_RQE_SIZE;
+ break;
+ default:
+ efc_log_info(sli4, "unknown queue type %d\n", qtype);
+ return -1;
+ }
+ return size;
+}
+
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype,
+ struct sli4_queue *q, u32 n_entries,
+ struct sli4_queue *assoc)
+{
+ int size;
+ u32 align = 0;
+
+ /* get queue size */
+ size = sli_get_queue_entry_size(sli4, qtype);
+ if (size < 0)
+ return -EIO;
+ align = SLI_PAGE_SIZE;
+
+ if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
+ return -EIO;
+
+ switch (qtype) {
+ case SLI4_QTYPE_EQ:
+ if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_CQ:
+ if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
+ assoc ? assoc->id : 0) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_MQ:
+ assoc->u.flag |= SLI4_QUEUE_FLAG_MQ;
+ if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt,
+ &q->dma, assoc->id) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_WQ:
+ if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
+ assoc ? assoc->id : 0) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ default:
+ efc_log_info(sli4, "unknown queue type %d\n", qtype);
+ }
+
+ __sli_queue_destroy(sli4, q);
+ return -EIO;
+}
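+
+/*
+ * Illustrative call sequence (sketch, error handling omitted): queues are
+ * created in dependency order, each CQ bound to an EQ and each MQ/WQ bound
+ * to a CQ through the assoc argument.
+ *
+ * sli_queue_alloc(sli4, SLI4_QTYPE_EQ, &eq, 1024, NULL);
+ * sli_queue_alloc(sli4, SLI4_QTYPE_CQ, &cq, 1024, &eq);
+ * sli_queue_alloc(sli4, SLI4_QTYPE_WQ, &wq, 1024, &cq);
+ */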
+
+static int sli_cmd_cq_set_create(struct sli4 *sli4,
+ struct sli4_queue *qs[], u32 num_cqs,
+ struct sli4_queue *eqs[],
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL;
+ uintptr_t addr;
+ u32 i, offset = 0, page_bytes = 0, payload_size;
+ u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
+ u32 dw5_flags = 0;
+ u16 dw6w1_flags = 0;
+ __le32 req_len;
+
+ n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = 1;
+ break;
+ case 4096:
+ page_size = 2;
+ break;
+ default:
+ return -EIO;
+ }
+
+ page_bytes = page_size * SLI_PAGE_SIZE;
+ num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
+ payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) +
+ (SZ_DMAADDR * num_pages_cq * num_cqs),
+ sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+ dma->size = payload_size;
+ dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+ &dma->phys, GFP_KERNEL);
+ if (!dma->virt)
+ return -EIO;
+
+ memset(dma->virt, 0, payload_size);
+
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+ if (!req)
+ return -EIO;
+
+ req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0,
+ SZ_DMAADDR * num_pages_cq * num_cqs);
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC,
+ CMD_V0, req_len);
+ req->page_size = page_size;
+
+ req->num_pages = cpu_to_le16(num_pages_cq);
+ switch (num_pages_cq) {
+ case 1:
+ dw5_flags |= SLI4_CQ_CNT_VAL(256);
+ break;
+ case 2:
+ dw5_flags |= SLI4_CQ_CNT_VAL(512);
+ break;
+ case 4:
+ dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+ break;
+ case 8:
+ dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+ dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT);
+ break;
+ default:
+ efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq);
+ return -EIO;
+ }
+
+ dw5_flags |= SLI4_CREATE_CQSETV0_EVT;
+ dw5_flags |= SLI4_CREATE_CQSETV0_VALID;
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID;
+
+ dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM;
+
+ req->dw5_flags = cpu_to_le32(dw5_flags);
+ req->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+
+ req->num_cq_req = cpu_to_le16(num_cqs);
+
+ /* Fill page addresses of all the CQs. */
+ for (i = 0; i < num_cqs; i++) {
+ req->eq_id[i] = cpu_to_le16(eqs[i]->id);
+ for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
+ p++, addr += page_bytes) {
+ req->page_phys_addr[offset].low =
+ cpu_to_le32(lower_32_bits(addr));
+ req->page_phys_addr[offset].high =
+ cpu_to_le32(upper_32_bits(addr));
+ offset++;
+ }
+ }
+
+ return 0;
+}
+
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[],
+ u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])
+{
+ u32 i;
+ struct efc_dma dma = {0};
+ struct sli4_rsp_cmn_create_queue_set *res;
+ void __iomem *db_regaddr;
+
+ /* Align the queue DMA memory */
+ for (i = 0; i < num_cqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES,
+ n_entries, SLI_PAGE_SIZE))
+ goto error;
+ }
+
+ if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
+ goto error;
+
+ if (sli_bmbx_command(sli4))
+ goto error;
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+ else
+ db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+
+ res = dma.virt;
+ if (res->hdr.status) {
+ efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n",
+ res->hdr.status, res->hdr.additional_status);
+ goto error;
+ }
+
+ /* Check if we got all requested CQs. */
+ if (le16_to_cpu(res->num_q_allocated) != num_cqs) {
+ efc_log_crit(sli4, "Requested count CQs doesn't match.\n");
+ goto error;
+ }
+ /* Fill the resp cq ids. */
+ for (i = 0; i < num_cqs; i++) {
+ qs[i]->id = le16_to_cpu(res->q_id) + i;
+ qs[i]->db_regaddr = db_regaddr;
+ }
+
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++)
+ __sli_queue_destroy(sli4, qs[i]);
+
+ if (dma.virt)
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+ dma.phys);
+
+ return -EIO;
+}
+
+static int
+sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id)
+{
+ struct sli4_rqst_cmn_destroy_q *req;
+
+ /* Payload length must accommodate both request and response */
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
+ SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, opc, subsystem,
+ CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q));
+ req->q_id = cpu_to_le16(q_id);
+
+ return 0;
+}
+
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
+ u32 destroy_queues, u32 free_memory)
+{
+ int rc = 0;
+ u8 opcode, subsystem;
+ struct sli4_rsp_hdr *res;
+
+ if (!q) {
+ efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
+ return -EIO;
+ }
+
+ if (!destroy_queues)
+ goto free_mem;
+
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ opcode = SLI4_CMN_DESTROY_EQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_CQ:
+ opcode = SLI4_CMN_DESTROY_CQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_MQ:
+ opcode = SLI4_CMN_DESTROY_MQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_WQ:
+ opcode = SLI4_OPC_WQ_DESTROY;
+ subsystem = SLI4_SUBSYSTEM_FC;
+ break;
+ case SLI4_QTYPE_RQ:
+ opcode = SLI4_OPC_RQ_DESTROY;
+ subsystem = SLI4_SUBSYSTEM_FC;
+ break;
+ default:
+ efc_log_info(sli4, "bad queue type %d\n", q->type);
+ rc = -EIO;
+ goto free_mem;
+ }
+
+ rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
+ if (rc)
+ goto free_mem;
+
+ rc = sli_bmbx_command(sli4);
+ if (rc)
+ goto free_mem;
+
+ rc = sli_res_sli_config(sli4, sli4->bmbx.virt);
+ if (rc)
+ goto free_mem;
+
+ res = (void *)((u8 *)sli4->bmbx.virt +
+ offsetof(struct sli4_cmd_sli_config, payload));
+ if (res->status) {
+ efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n",
+ SLI4_QNAME[q->type], res->status,
+ res->additional_status);
+ rc = -EIO;
+ goto free_mem;
+ }
+
+free_mem:
+ if (free_memory)
+ __sli_queue_destroy(sli4, q);
+
+ return rc;
+}
+
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+ u32 val;
+ unsigned long flags = 0;
+ u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+ u32 val = 0;
+ unsigned long flags = 0;
+ u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ break;
+ case SLI4_QTYPE_CQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_cq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ break;
+ default:
+ efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n",
+ SLI4_QNAME[q->type]);
+ }
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ if (sli4->params.perf_wq_id_association)
+ sli_set_wq_id_association(entry, q->id);
+
+ memcpy(qe, entry, q->size);
+ val = sli_format_wq_db_data(q->id);
+
+ writel(val, q->db_regaddr);
+ q->index = (q->index + 1) & (q->length - 1);
+
+ return qindex;
+}
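+
+/*
+ * Note (sketch): the index advance above, (index + 1) & (length - 1),
+ * assumes the entry count is a power of two; with length 1024, for
+ * example, the index wraps from 1023 back to 0.
+ */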
+
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ memcpy(qe, entry, q->size);
+ val = sli_format_mq_db_data(q->id);
+ writel(val, q->db_regaddr);
+ q->index = (q->index + 1) & (q->length - 1);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return qindex;
+}
+
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ memcpy(qe, entry, q->size);
+
+ /*
+ * In RQ-pair, an RQ either contains the FC header
+ * (i.e. is_hdr == TRUE) or the payload.
+ *
+ * Don't ring doorbell for payload RQ
+ */
+ if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
+ goto skip;
+
+ val = sli_format_rq_db_data(q->id);
+ writel(val, q->db_regaddr);
+skip:
+ q->index = (q->index + 1) & (q->length - 1);
+
+ return qindex;
+}
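+
+/*
+ * Illustrative RQ-pair post (sketch): write the payload RQE first and the
+ * header RQE second, since only the header RQ write above rings the
+ * doorbell for the pair. The buffer names are hypothetical.
+ *
+ * sli_rq_write(sli4, payload_rq, (u8 *)payload_rqe);
+ * sli_rq_write(sli4, hdr_rq, (u8 *)hdr_rqe);
+ */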
+
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+ u16 wflags = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->index * q->size;
+
+ /* Check if eqe is valid */
+ wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags);
+
+ if ((wflags & SLI4_EQE_VALID) != q->phase) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+ wflags &= ~SLI4_EQE_VALID;
+ ((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags);
+ }
+
+ memcpy(entry, qe, q->size);
+ q->index = (q->index + 1) & (q->length - 1);
+ q->n_posted++;
+ /*
+ * For prism, the phase value will be used
+ * to check the validity of eq/cq entries.
+ * The value toggles after a complete sweep
+ * through the queue.
+ */
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+ q->phase ^= (u16)0x1;
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+ u32 dwflags = 0;
+ bool valid_bit_set;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->index * q->size;
+
+ /* Check if cqe is valid */
+ dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags);
+ valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0;
+
+ if (valid_bit_set != q->phase) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+ dwflags &= ~SLI4_MCQE_VALID;
+ ((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags);
+ }
+
+ memcpy(entry, qe, q->size);
+ q->index = (q->index + 1) & (q->length - 1);
+ q->n_posted++;
+ /*
+ * For prism, the phase value will be used
+ * to check the validity of eq/cq entries.
+ * The value toggles after a complete sweep
+ * through the queue.
+ */
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+ q->phase ^= (u16)0x1;
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->u.r_idx * q->size;
+
+ /* Check if mqe is valid */
+ if (q->index == q->u.r_idx) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ memcpy(entry, qe, q->size);
+ q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id)
+{
+ struct sli4_eqe *eqe = (void *)buf;
+ int rc = 0;
+ u16 flags = 0;
+ u16 majorcode;
+ u16 minorcode;
+
+ if (!buf || !cq_id) {
+ efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n",
+ sli4, buf, cq_id);
+ return -EIO;
+ }
+
+ flags = le16_to_cpu(eqe->dw0w0_flags);
+ majorcode = (flags & SLI4_EQE_MJCODE) >> 1;
+ minorcode = (flags & SLI4_EQE_MNCODE) >> 4;
+ switch (majorcode) {
+ case SLI4_MAJOR_CODE_STANDARD:
+ *cq_id = le16_to_cpu(eqe->resource_id);
+ break;
+ case SLI4_MAJOR_CODE_SENTINEL:
+ efc_log_info(sli4, "sentinel EQE\n");
+ rc = SLI4_EQE_STATUS_EQ_FULL;
+ break;
+ default:
+ efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n",
+ majorcode, minorcode);
+ rc = -EIO;
+ }
+
+ return rc;
+}
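+
+/*
+ * Illustrative EQ service loop (sketch, not part of this patch): drain
+ * valid entries, map each to its CQ, then re-arm the EQ. EQ entries are
+ * 4 bytes (see sli_get_queue_entry_size()); service_cq() is a
+ * hypothetical helper.
+ *
+ * u8 eqe[4];
+ * u16 cq_id;
+ *
+ * while (!sli_eq_read(sli4, eq, eqe)) {
+ * 	if (!sli_eq_parse(sli4, eqe, &cq_id))
+ * 		service_cq(sli4, cq_id);
+ * }
+ * sli_queue_eq_arm(sli4, eq, true);
+ */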
+
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+ enum sli4_qentry *etype, u16 *q_id)
+{
+ int rc = 0;
+
+ if (!cq || !cqe || !etype) {
+ efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
+ sli4, cq, cqe, etype, q_id);
+ return -EINVAL;
+ }
+
+ /* Parse a CQ entry to retrieve the event type and the queue id */
+ if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) {
+ struct sli4_mcqe *mcqe = (void *)cqe;
+
+ if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) {
+ *etype = SLI4_QENTRY_ASYNC;
+ } else {
+ *etype = SLI4_QENTRY_MQ;
+ rc = sli_cqe_mq(sli4, mcqe);
+ }
+ *q_id = -1;
+ } else {
+ rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
+ }
+
+ return rc;
+}
+
+int
+sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type,
+ bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id)
+{
+ struct sli4_abort_wqe *abort = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ switch (type) {
+ case SLI4_ABORT_XRI:
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ if (mask) {
+ efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask",
+ mask, ids);
+ mask = 0;
+ }
+ break;
+ case SLI4_ABORT_ABORT_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
+ break;
+ case SLI4_ABORT_REQUEST_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
+ break;
+ default:
+ efc_log_info(sli, "unsupported type %#x\n", type);
+ return -EIO;
+ }
+
+ abort->ia_ir_byte |= send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_mask = cpu_to_le32(mask);
+ abort->t_tag = cpu_to_le32(ids);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(tag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(cq_id);
+ abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE;
+
+ return 0;
+}
+
+int
+sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ struct sli_els_params *params)
+{
+ struct sli4_els_request64_wqe *els = buf;
+ struct sli4_sge *sge = sgl->virt;
+ bool is_fabric = false;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli->wqe_size);
+
+ bptr = &els->els_request_payload;
+ if (sli->params.sgl_pre_registered) {
+ els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL;
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->xmit_len & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ ((2 * sizeof(struct sli4_sge)) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ els->els_request_payload_length = cpu_to_le32(params->xmit_len);
+ els->max_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ els->xri_tag = cpu_to_le16(params->xri);
+ els->timer = params->timeout;
+ els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_REQUEST64;
+
+ els->request_tag = cpu_to_le16(params->tag);
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD;
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD;
+
+ /* figure out the ELS_ID value from the request buffer */
+
+ switch (params->cmd) {
+ case ELS_LOGO:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT;
+ if (params->rpi_registered) {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ }
+ if (params->d_id == FC_FID_FLOGI)
+ is_fabric = true;
+ break;
+ case ELS_FDISC:
+ if (params->d_id == FC_FID_FLOGI)
+ is_fabric = true;
+ if (params->s_id == 0) {
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT;
+ is_fabric = true;
+ } else {
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ }
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
+ break;
+ case ELS_FLOGI:
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ /*
+ * Set SP here because we have not yet done a REG_VPI;
+ * this may need to be skipped once VFI/VPI registration
+ * has completed.
+ *
+ * Use the FC_ID of the SPORT if it has been allocated,
+ * otherwise use an S_ID of zero.
+ */
+ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
+ if (params->s_id != U32_MAX)
+ els->sid_sp_dword |= cpu_to_le32(params->s_id);
+ break;
+ case ELS_PLOGI:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT;
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ break;
+ case ELS_SCR:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ break;
+ default:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ if (params->rpi_registered) {
+ els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI <<
+ SLI4_REQ_WQE_CT_SHFT);
+ els->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ }
+ break;
+ }
+
+ if (is_fabric)
+ els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC;
+ else
+ els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
+
+ els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) !=
+ SLI4_GENERIC_CONTEXT_RPI)
+ els->remote_id_dword = cpu_to_le32(params->d_id);
+
+ if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) ==
+ SLI4_GENERIC_CONTEXT_VPI)
+ els->temporary_rpi = cpu_to_le16(params->rpi);
+
+ return 0;
+}
+
+int
+sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout)
+{
+ struct sli4_fcp_icmnd64_wqe *icmnd = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &icmnd->bde;
+ if (sli->params.sgl_pre_registered) {
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL;
+
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ icmnd->payload_offset_length = cpu_to_le16(len);
+ icmnd->xri_tag = cpu_to_le16(xri);
+ icmnd->context_tag = cpu_to_le16(rpi);
+ icmnd->timer = timeout;
+
+ /* WQE word 4 contains read transfer length */
+ icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT;
+ icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ icmnd->command = SLI4_WQE_FCP_ICMND64;
+ icmnd->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT;
+
+ icmnd->abort_tag = cpu_to_le32(xri);
+
+ icmnd->request_tag = cpu_to_le16(tag);
+ icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1;
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2;
+ icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE;
+ icmnd->cq_id = cpu_to_le16(cq_id);
+
+ return 0;
+}
+
+int
+sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag,
+ u16 cq_id, u32 rpi, u32 rnode_fcid,
+ u8 dif, u8 bs, u8 timeout)
+{
+ struct sli4_fcp_iread64_wqe *iread = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 sge_flags, len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+
+ sge = sgl->virt;
+ bptr = &iread->bde;
+ if (sli->params.sgl_pre_registered) {
+ iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL;
+
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+
+ /*
+ * fill out fcp_cmnd buffer len and change resp buffer to be of
+ * type "skip" (note: response will still be written to sge[1]
+ * if necessary)
+ */
+ len = le32_to_cpu(sge[0].buffer_length);
+ iread->fcp_cmd_buffer_length = cpu_to_le16(len);
+
+ sge_flags = le32_to_cpu(sge[1].dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ sge[1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ iread->payload_offset_length = cpu_to_le16(len);
+ iread->total_transfer_length = cpu_to_le32(xfer_len);
+
+ iread->xri_tag = cpu_to_le16(xri);
+ iread->context_tag = cpu_to_le16(rpi);
+
+ iread->timer = timeout;
+
+ /* WQE word 4 contains read transfer length */
+ iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT;
+ iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ iread->command = SLI4_WQE_FCP_IREAD64;
+ iread->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT;
+ iread->dif_ct_bs_byte |= dif;
+ iread->dif_ct_bs_byte |= bs << SLI4_IR_WQE_BS_SHFT;
+
+ iread->abort_tag = cpu_to_le32(xri);
+
+ iread->request_tag = cpu_to_le16(tag);
+ iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1;
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2;
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD;
+ iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE;
+ iread->cq_id = cpu_to_le16(cq_id);
+
+ if (sli->params.perf_hint) {
+ bptr = &iread->first_data_bde;
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low =
+ sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high =
+ sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
+int
+sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len,
+ u32 first_burst, u16 xri, u16 tag,
+ u16 cq_id, u32 rpi,
+ u32 rnode_fcid,
+ u8 dif, u8 bs, u8 timeout)
+{
+ struct sli4_fcp_iwrite64_wqe *iwrite = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 sge_flags, min, len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &iwrite->bde;
+ if (sli->params.sgl_pre_registered) {
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL;
+
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE;
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL;
+
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+
+ /*
+ * fill out fcp_cmnd buffer len and change resp buffer to be of
+ * type "skip" (note: response will still be written to sge[1]
+ * if necessary)
+ */
+ len = le32_to_cpu(sge[0].buffer_length);
+ iwrite->fcp_cmd_buffer_length = cpu_to_le16(len);
+ sge_flags = le32_to_cpu(sge[1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ sge[1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ iwrite->payload_offset_length = cpu_to_le16(len);
+ iwrite->total_transfer_length = cpu_to_le16(xfer_len);
+ min = (xfer_len < first_burst) ? xfer_len : first_burst;
+ iwrite->initial_transfer_length = cpu_to_le16(min);
+
+ iwrite->xri_tag = cpu_to_le16(xri);
+ iwrite->context_tag = cpu_to_le16(rpi);
+
+ iwrite->timer = timeout;
+	/* WQE word 4 contains write transfer length */
+ iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT;
+ iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ iwrite->command = SLI4_WQE_FCP_IWRITE64;
+ iwrite->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT;
+ iwrite->dif_ct_bs_byte |= dif;
+ iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT;
+
+ iwrite->abort_tag = cpu_to_le32(xri);
+
+ iwrite->request_tag = cpu_to_le16(tag);
+ iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1;
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2;
+ iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE;
+ iwrite->cq_id = cpu_to_le16(cq_id);
+
+ if (sli->params.perf_hint) {
+ bptr = &iwrite->first_data_bde;
+
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high = sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
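+
+/*
+ * Illustrative usage (editorial sketch, not from the original source):
+ * both initiator WQE builders above assume the pre-formatted SGL layout
+ * implied by the code, i.e. sge[0] = FCP_CMND, sge[1] = FCP_RSP and the
+ * data SGEs from sge[2] onward. A hypothetical write setup could look
+ * like this, where io, node, wqe_buf and the literal timeout are
+ * made-up caller names/values:
+ *
+ *	rc = sli_fcp_iwrite64_wqe(sli, wqe_buf, &io->sgl, 2, xfer_len,
+ *				  first_burst, io->xri, io->tag, cq_id,
+ *				  node->rpi, node->fc_id, 0, 0, 30);
+ */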
+
+int
+sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_treceive64_wqe *trecv = buf;
+ struct sli4_fcp_128byte_wqe *trecv_128 = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &trecv->bde;
+ if (sli->params.sgl_pre_registered) {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL;
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length)
+ & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+
+ trecv->payload_offset_length = sge[0].buffer_length;
+ } else {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif &&
+ params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length)
+ & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[2].buffer_address_low;
+ bptr->u.data.high = sge[2].buffer_address_high;
+ } else {
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+ }
+
+ trecv->relative_offset = cpu_to_le32(params->offset);
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC;
+
+ trecv->xri_tag = cpu_to_le16(params->xri);
+
+ trecv->context_tag = cpu_to_le16(params->rpi);
+
+ /* WQE uses relative offset */
+ trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT;
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
+ trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR;
+
+ trecv->command = SLI4_WQE_FCP_TRECEIVE64;
+ trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ trecv->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT;
+ trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT;
+
+ trecv->remote_xid = cpu_to_le16(params->ox_id);
+
+ trecv->request_tag = cpu_to_le16(params->tag);
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD;
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2;
+
+ trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE;
+
+ trecv->cq_id = cpu_to_le16(cq_id);
+
+ trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len);
+
+ if (sli->params.perf_hint) {
+ bptr = &trecv->first_data_bde;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high = sge[first_data_sge].buffer_address_high;
+ }
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE;
+ trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
+ trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID;
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES;
+ trecv_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
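+
+/*
+ * Editorial note: as with the TSEND64 builder below, the target SGL's
+ * first two SGEs carry the FCP_CMND and FCP_RSP buffers, which is why
+ * the single-BDE fast path above points at sge[2] as the first (and
+ * only) data buffer.
+ */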
+
+int
+sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf,
+ struct efc_dma *sgl, u32 first_data_sge,
+ u16 sec_xri, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ int rc;
+
+ rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge,
+ cq_id, dif, bs, params);
+ if (!rc) {
+ struct sli4_fcp_treceive64_wqe *trecv = buf;
+
+ trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
+ trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri);
+ }
+ return rc;
+}
+
+int
+sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_trsp64_wqe *trsp = buf;
+ struct sli4_fcp_128byte_wqe *trsp_128 = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
+ trsp->class_ag_byte |= SLI4_TRSP_WQE_AG;
+ } else {
+ struct sli4_sge *sge = sgl->virt;
+ struct sli4_bde *bptr;
+
+ if (sli4->params.sgl_pre_registered || port_owned)
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE;
+ else
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL;
+ bptr = &trsp->bde;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+
+ trsp->fcp_response_length = cpu_to_le32(params->xmit_len);
+ }
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC;
+
+ trsp->xri_tag = cpu_to_le16(params->xri);
+ trsp->rpi = cpu_to_le16(params->rpi);
+
+ trsp->command = SLI4_WQE_FCP_TRSP64;
+ trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ trsp->remote_xid = cpu_to_le16(params->ox_id);
+ trsp->request_tag = cpu_to_le16(params->tag);
+ if (params->flags & SLI4_IO_DNRX)
+ trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX;
+ else
+ trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX;
+
+ trsp->lloc1_appid |= 0x1;
+ trsp->cq_id = cpu_to_le16(cq_id);
+ trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE;
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE;
+ trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
+ trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID;
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES;
+ trsp_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
+int
+sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_tsend64_wqe *tsend = buf;
+ struct sli4_fcp_128byte_wqe *tsend_128 = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+
+ bptr = &tsend->bde;
+ if (sli4->params.sgl_pre_registered) {
+ tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL;
+
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ /* TSEND64_WQE specifies first two SGE are skipped (3rd is
+ * valid)
+ */
+ bptr->u.data.low = sge[2].buffer_address_low;
+ bptr->u.data.high = sge[2].buffer_address_high;
+ } else {
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif &&
+ params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ /*
+ * TSEND64_WQE specifies first two SGE are skipped
+ * (i.e. 3rd is valid)
+ */
+ bptr->u.data.low =
+ sge[2].buffer_address_low;
+ bptr->u.data.high =
+ sge[2].buffer_address_high;
+ } else {
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+ }
+
+ tsend->relative_offset = cpu_to_le32(params->offset);
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ tsend->dw10byte2 |= SLI4_TSEND_XC;
+
+ tsend->xri_tag = cpu_to_le16(params->xri);
+
+ tsend->rpi = cpu_to_le16(params->rpi);
+ /* WQE uses relative offset */
+ tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT;
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
+ tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR;
+
+ tsend->command = SLI4_WQE_FCP_TSEND64;
+ tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT;
+ tsend->ct_byte |= dif;
+ tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT;
+
+ tsend->remote_xid = cpu_to_le16(params->ox_id);
+
+ tsend->request_tag = cpu_to_le16(params->tag);
+
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2;
+
+ tsend->cq_id = cpu_to_le16(cq_id);
+
+ tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE;
+
+ tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len);
+
+ if (sli4->params.perf_hint) {
+ bptr = &tsend->first_data_bde;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low =
+ sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high =
+ sge[first_data_sge].buffer_address_high;
+ }
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ tsend->dw10byte2 |= SLI4_TSEND_CCPE;
+ tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(tsend->dw10byte2 & SLI4_TSEND_EAT)) {
+ tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID;
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES;
+ tsend_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
+int
+sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_ct_params *params)
+{
+ struct sli4_gen_request64_wqe *gen = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &gen->bde;
+
+ if (sli4->params.sgl_pre_registered) {
+ gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL;
+
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->xmit_len & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ ((2 * sizeof(struct sli4_sge)) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ gen->request_payload_length = cpu_to_le32(params->xmit_len);
+ gen->max_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ gen->df_ctl = params->df_ctl;
+ gen->type = params->type;
+ gen->r_ctl = params->r_ctl;
+
+ gen->xri_tag = cpu_to_le16(params->xri);
+
+ gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT;
+ gen->context_tag = cpu_to_le16(params->rpi);
+
+ gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3;
+
+ gen->command = SLI4_WQE_GEN_REQUEST64;
+
+ gen->timer = params->timeout;
+
+ gen->request_tag = cpu_to_le16(params->tag);
+
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD;
+
+ gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD;
+
+ gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE;
+
+ gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ return 0;
+}
+
+int
+sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr,
+ struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri,
+ u16 req_tag)
+{
+ struct sli4_send_frame_wqe *sf = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ sf->dw10flags1 |= SLI4_SF_WQE_DBDE;
+ sf->bde.bde_type_buflen = cpu_to_le32(req_len &
+ SLI4_BDE_LEN_MASK);
+ sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys));
+ sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys));
+
+ /* Copy FC header */
+ sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]);
+ sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]);
+ sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]);
+ sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]);
+ sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]);
+ sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]);
+
+ sf->frame_length = cpu_to_le32(req_len);
+
+ sf->xri_tag = cpu_to_le16(xri);
+ sf->dw7flags0 &= ~SLI4_SF_PU;
+ sf->context_tag = 0;
+
+ sf->ct_byte &= ~SLI4_SF_CT;
+ sf->command = SLI4_WQE_SEND_FRAME;
+ sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3;
+ sf->timer = timeout;
+
+ sf->request_tag = cpu_to_le16(req_tag);
+ sf->eof = eof;
+ sf->sof = sof;
+
+ sf->dw10flags1 &= ~SLI4_SF_QOSD;
+ sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1;
+ sf->dw10flags2 &= ~SLI4_SF_XC;
+
+ sf->dw10flags1 |= SLI4_SF_XBL;
+
+ sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE;
+ sf->cq_id = cpu_to_le16(0xffff);
+
+ return 0;
+}
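+
+/*
+ * Editorial note: the hdr argument is consumed as six 32-bit words
+ * covering the 24-byte FC frame header; since each word is converted
+ * with cpu_to_le32() above, callers are expected to supply it in CPU
+ * byte order.
+ */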
+
+int
+sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
+ struct sli_bls_payload *payload,
+ struct sli_bls_params *params)
+{
+ struct sli4_xmit_bls_rsp_wqe *bls = buf;
+ u32 dw_ridflags = 0;
+
+ /*
+ * Callers can either specify RPI or S_ID, but not both
+ */
+ if (params->rpi_registered && params->s_id != U32_MAX) {
+ efc_log_info(sli, "S_ID specified for attached remote node %d\n",
+ params->rpi);
+ return -EIO;
+ }
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (payload->type == SLI4_SLI_BLS_ACC) {
+ bls->payload_word0 =
+ cpu_to_le32((payload->u.acc.seq_id_last << 16) |
+ (payload->u.acc.seq_id_validity << 24));
+ bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
+ bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
+ } else if (payload->type == SLI4_SLI_BLS_RJT) {
+ bls->payload_word0 =
+ cpu_to_le32(*((u32 *)&payload->u.rjt));
+ dw_ridflags |= SLI4_BLS_RSP_WQE_AR;
+ } else {
+ efc_log_info(sli, "bad BLS type %#x\n", payload->type);
+ return -EIO;
+ }
+
+ bls->ox_id = payload->ox_id;
+ bls->rx_id = payload->rx_id;
+
+ if (params->rpi_registered) {
+ bls->dw8flags0 |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT;
+ bls->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ bls->dw8flags0 |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
+ bls->context_tag = cpu_to_le16(params->vpi);
+
+		bls->local_n_port_id_dword |=
+			cpu_to_le32(params->s_id & 0x00ffffff);
+
+ dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
+ (params->d_id & SLI4_BLS_RSP_RID);
+
+ bls->temporary_rpi = cpu_to_le16(params->rpi);
+ }
+
+ bls->xri_tag = cpu_to_le16(params->xri);
+
+ bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ bls->command = SLI4_WQE_XMIT_BLS_RSP;
+
+ bls->request_tag = cpu_to_le16(params->tag);
+
+ bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD;
+
+ bls->remote_id_dword = cpu_to_le32(dw_ridflags);
+ bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE;
+
+ return 0;
+}
+
+int
+sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
+ struct sli_els_params *params)
+{
+ struct sli4_xmit_els_rsp64_wqe *els = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (sli->params.sgl_pre_registered)
+ els->flags2 |= SLI4_ELS_DBDE;
+ else
+ els->flags2 |= SLI4_ELS_XBL;
+
+ els->els_response_payload.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->rsp_len & SLI4_BDE_LEN_MASK));
+ els->els_response_payload.u.data.low =
+ cpu_to_le32(lower_32_bits(rsp->phys));
+ els->els_response_payload.u.data.high =
+ cpu_to_le32(upper_32_bits(rsp->phys));
+
+ els->els_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ els->xri_tag = cpu_to_le16(params->xri);
+
+ els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_RSP64;
+
+ els->request_tag = cpu_to_le16(params->tag);
+
+ els->ox_id = cpu_to_le16(params->ox_id);
+
+ els->flags2 |= SLI4_ELS_QOSD;
+
+ els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;
+
+ els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ if (params->rpi_registered) {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET;
+ els->context_tag = cpu_to_le16(params->rpi);
+ return 0;
+ }
+
+ els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET;
+ els->context_tag = cpu_to_le16(params->vpi);
+ els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID);
+ els->temporary_rpi = cpu_to_le16(params->rpi);
+ if (params->s_id != U32_MAX) {
+ els->sid_dw |=
+ cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID));
+ }
+
+ return 0;
+}
+
+int
+sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
+ struct sli_ct_params *params)
+{
+ struct sli4_xmit_sequence64_wqe *xmit = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!payload || !payload->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ payload, payload ? payload->virt : NULL);
+ return -EIO;
+ }
+
+ if (sli4->params.sgl_pre_registered)
+ xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE);
+ else
+ xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL);
+
+ xmit->bde.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->rsp_len & SLI4_BDE_LEN_MASK));
+ xmit->bde.u.data.low =
+ cpu_to_le32(lower_32_bits(payload->phys));
+ xmit->bde.u.data.high =
+ cpu_to_le32(upper_32_bits(payload->phys));
+ xmit->sequence_payload_len = cpu_to_le32(params->rsp_len);
+
+ xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff);
+
+ xmit->relative_offset = 0;
+
+ /* sequence initiative - this matches what is seen from
+ * FC switches in response to FCGS commands
+ */
+	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI);
+	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT);	/* force transmit */
+	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO);	/* exchange responder */
+	xmit->dw5flags0 |= SLI4_SEQ_WQE_LS;	/* last in sequence */
+ xmit->df_ctl = params->df_ctl;
+ xmit->type = params->type;
+ xmit->r_ctl = params->r_ctl;
+
+ xmit->xri_tag = cpu_to_le16(params->xri);
+ xmit->context_tag = cpu_to_le16(params->rpi);
+
+ xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF;
+ xmit->dw7flags0 |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT;
+ xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS;
+
+ xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
+ xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
+ xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU;
+ xmit->timer = params->timeout;
+
+ xmit->abort_tag = 0;
+ xmit->request_tag = cpu_to_le16(params->tag);
+ xmit->remote_xid = cpu_to_le16(params->ox_id);
+
+ xmit->dw10w0 |=
+ cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT);
+
+ xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE;
+
+ xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT);
+
+ xmit->cq_id = cpu_to_le16(0xFFFF);
+
+ return 0;
+}
+
+int
+sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id)
+{
+ struct sli4_requeue_xri_wqe *requeue = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ requeue->command = SLI4_WQE_REQUEUE_XRI;
+ requeue->xri_tag = cpu_to_le16(xri);
+ requeue->request_tag = cpu_to_le16(tag);
+ requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC);
+ requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD);
+ requeue->cq_id = cpu_to_le16(cq_id);
+ requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE;
+ return 0;
+}
+
+int
+sli_fc_process_link_attention(struct sli4 *sli4, void *acqe)
+{
+ struct sli4_link_attention *link_attn = acqe;
+ struct sli4_link_event event = { 0 };
+
+ efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n",
+ link_attn->link_number, link_attn->attn_type,
+ link_attn->topology, link_attn->port_speed,
+ link_attn->port_fault);
+ efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n",
+ link_attn->shared_link_status,
+ le16_to_cpu(link_attn->logical_link_speed),
+ le32_to_cpu(link_attn->event_tag));
+
+ if (!sli4->link)
+ return -EIO;
+
+ event.medium = SLI4_LINK_MEDIUM_FC;
+
+ switch (link_attn->attn_type) {
+ case SLI4_LNK_ATTN_TYPE_LINK_UP:
+ event.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_LNK_ATTN_TYPE_LINK_DOWN:
+ event.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA:
+ efc_log_info(sli4, "attn_type: no hard alpa\n");
+ event.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ efc_log_info(sli4, "attn_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->event_type) {
+ case SLI4_EVENT_LINK_ATTENTION:
+ break;
+ case SLI4_EVENT_SHARED_LINK_ATTENTION:
+ efc_log_info(sli4, "event_type: FC shared link event\n");
+ break;
+ default:
+ efc_log_info(sli4, "event_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->topology) {
+ case SLI4_LNK_ATTN_P2P:
+ event.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_LNK_ATTN_FC_AL:
+ event.topology = SLI4_LINK_TOPO_FC_AL;
+ break;
+ case SLI4_LNK_ATTN_INTERNAL_LOOPBACK:
+ efc_log_info(sli4, "topology Internal loopback\n");
+ event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL;
+ break;
+ case SLI4_LNK_ATTN_SERDES_LOOPBACK:
+ efc_log_info(sli4, "topology serdes loopback\n");
+ event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL;
+ break;
+ default:
+ efc_log_info(sli4, "topology: unknown\n");
+ break;
+ }
+
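+	/* port_speed is assumed to be reported in Gb/s; expose Mb/s */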
+ event.speed = link_attn->port_speed * 1000;
+
+ sli4->link(sli4->link_arg, (void *)&event);
+
+ return 0;
+}
+
+int
+sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
+ u8 *cqe, enum sli4_qentry *etype, u16 *r_id)
+{
+ u8 code = cqe[SLI4_CQE_CODE_OFFSET];
+ int rc;
+
+ switch (code) {
+ case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
+ {
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_WQ;
+ *r_id = le16_to_cpu(wcqe->request_tag);
+ rc = wcqe->status;
+
+ /* Flag errors except for FCP_RSP_FAILURE */
+ if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) {
+ efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n",
+ wcqe->status, wcqe->hw_status,
+ le16_to_cpu(wcqe->request_tag));
+ efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n",
+ le32_to_cpu(wcqe->wqe_specific_1),
+ le32_to_cpu(wcqe->wqe_specific_2),
+ (wcqe->flags & SLI4_WCQE_XB));
+ efc_log_info(sli4, " %08X %08X %08X %08X\n",
+ ((u32 *)cqe)[0], ((u32 *)cqe)[1],
+ ((u32 *)cqe)[2], ((u32 *)cqe)[3]);
+ }
+
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC:
+ {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1:
+ {
+ struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->rq_id);
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
+ {
+ struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_OPT_WRITE_CMD;
+ *r_id = le16_to_cpu(optcqe->rq_id);
+ rc = optcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
+ {
+ struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_OPT_WRITE_DATA;
+ *r_id = le16_to_cpu(dcqe->xri);
+ rc = dcqe->status;
+
+ /* Flag errors */
+ if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
+ efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n",
+ dcqe->status);
+ efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
+ dcqe->hw_status, le16_to_cpu(dcqe->xri),
+ le32_to_cpu(dcqe->total_data_placed),
+ ((u32 *)cqe)[3],
+ (dcqe->flags & SLI4_OCQE_XB));
+ }
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_COALESCING:
+ {
+ struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->rq_id);
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_XRI_ABORTED:
+ {
+ struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;
+
+ *etype = SLI4_QENTRY_XABT;
+ *r_id = le16_to_cpu(xa->xri);
+ rc = 0;
+ break;
+ }
+ case SLI4_CQE_CODE_RELEASE_WQE:
+ {
+ struct sli4_fc_wqec *wqec = (void *)cqe;
+
+ *etype = SLI4_QENTRY_WQ_RELEASE;
+ *r_id = le16_to_cpu(wqec->wq_id);
+ rc = 0;
+ break;
+ }
+ default:
+ efc_log_info(sli4, "CQE completion code %d not handled\n",
+ code);
+ *etype = SLI4_QENTRY_MAX;
+ *r_id = U16_MAX;
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
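+
+/*
+ * Illustrative dispatch (hypothetical caller, not part of the original
+ * source): a CQ service loop would typically act on the parsed etype,
+ * for example completing the I/O tagged r_id for SLI4_QENTRY_WQ,
+ * handing the RQ indexed by r_id to the receive path for
+ * SLI4_QENTRY_RQ, finishing exchange cleanup for SLI4_QENTRY_XABT, and
+ * returning WQ credits for SLI4_QENTRY_WQ_RELEASE.
+ */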
+
+u32
+sli_fc_response_length(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ return le32_to_cpu(wcqe->wqe_specific_1);
+}
+
+u32
+sli_fc_io_length(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ return le32_to_cpu(wcqe->wqe_specific_1);
+}
+
+int
+sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ *d_id = 0;
+
+ if (wcqe->status)
+ return -EIO;
+ *d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff;
+ return 0;
+}
+
+u32
+sli_fc_ext_status(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 mask;
+
+ switch (wcqe->status) {
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ mask = U32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ case SLI4_FC_WCQE_STATUS_CMD_REJECT:
+ mask = 0xff;
+ break;
+ case SLI4_FC_WCQE_STATUS_NPORT_RJT:
+ case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
+ case SLI4_FC_WCQE_STATUS_NPORT_BSY:
+ case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ mask = U32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ mask = U32_MAX;
+ break;
+ default:
+ mask = 0;
+ }
+
+ return le32_to_cpu(wcqe->wqe_specific_2) & mask;
+}
+
+int
+sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index)
+{
+ int rc = -EIO;
+ u8 code = 0;
+ u16 rq_element_index;
+
+ *rq_id = 0;
+ *index = U32_MAX;
+
+ code = cqe[SLI4_CQE_CODE_OFFSET];
+
+ /* Retrieve the RQ index from the completion */
+ if (code == SLI4_CQE_CODE_RQ_ASYNC) {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
+ rq_element_index =
+ le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
+ *index = rq_element_index;
+ if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = rcqe->status;
+ efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n",
+ rcqe->status,
+ sli_fc_get_status_string(rcqe->status),
+ le16_to_cpu(rcqe->fcfi_rq_id_word) &
+ SLI4_RACQE_RQ_ID);
+
+ efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ le16_to_cpu(rcqe->data_placement_length),
+ rcqe->sof_byte, rcqe->eof_byte,
+ rcqe->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
+ struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe;
+
+ *rq_id = le16_to_cpu(rcqe_v1->rq_id);
+ rq_element_index =
+ (le16_to_cpu(rcqe_v1->rq_elmt_indx_word) &
+ SLI4_RACQE_RQ_EL_INDX);
+ *index = rq_element_index;
+ if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = rcqe_v1->status;
+ efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n",
+ rcqe_v1->status,
+ sli_fc_get_status_string(rcqe_v1->status),
+ le16_to_cpu(rcqe_v1->rq_id), rq_element_index);
+
+ efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ le16_to_cpu(rcqe_v1->data_placement_length),
+ rcqe_v1->sof_byte, rcqe_v1->eof_byte,
+ rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+ } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
+ struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
+
+ *rq_id = le16_to_cpu(optcqe->rq_id);
+ *index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX;
+ if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = optcqe->status;
+ efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n",
+ optcqe->status,
+ sli_fc_get_status_string(optcqe->status),
+ le16_to_cpu(optcqe->rq_id), *index,
+ le16_to_cpu(optcqe->data_placement_length));
+
+ efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n",
+ (optcqe->hdpl_vld & SLI4_OCQE_HDPL),
+ (optcqe->flags1 & SLI4_OCQE_OOX),
+ (optcqe->flags1 & SLI4_OCQE_AGXR),
+ optcqe->xri, le16_to_cpu(optcqe->rpi));
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
+ struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
+
+ rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) &
+ SLI4_RCQE_RQ_EL_INDX);
+
+ *rq_id = le16_to_cpu(rcqe->rq_id);
+ if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) {
+ *index = rq_element_index;
+ rc = 0;
+ } else {
+ *index = U32_MAX;
+ rc = rcqe->status;
+
+ efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n",
+ rcqe->status,
+ sli_fc_get_status_string(rcqe->status),
+ le16_to_cpu(rcqe->rq_id), rq_element_index);
+ efc_log_info(sli4, "rq_id=%#x sdpl=%x\n",
+ le16_to_cpu(rcqe->rq_id),
+ le16_to_cpu(rcqe->seq_placement_length));
+ }
+ } else {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *index = U32_MAX;
+ rc = rcqe->status;
+
+ efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n",
+ rcqe->status,
+ le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID,
+ (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX),
+ le16_to_cpu(rcqe->data_placement_length));
+ efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n",
+ rcqe->sof_byte, rcqe->eof_byte,
+ rcqe->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+
+ return rc;
+}
+
+static int
+sli_bmbx_wait(struct sli4 *sli4, u32 msec)
+{
+ u32 val;
+ unsigned long end;
+
+ /* Wait for the bootstrap mailbox to report "ready" */
+ end = jiffies + msecs_to_jiffies(msec);
+ do {
+ val = readl(sli4->reg[0] + SLI4_BMBX_REG);
+ if (val & SLI4_BMBX_RDY)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ return -EIO;
+}
+
+static int
+sli_bmbx_write(struct sli4 *sli4)
+{
+ u32 val;
+
+ /* write buffer location to bootstrap mailbox register */
+ val = sli_bmbx_write_hi(sli4->bmbx.phys);
+ writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
+
+ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
+ efc_log_crit(sli4, "BMBX WRITE_HI failed\n");
+ return -EIO;
+ }
+ val = sli_bmbx_write_lo(sli4->bmbx.phys);
+ writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
+
+ /* wait for SLI Port to set ready bit */
+ return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC);
+}
+
+int
+sli_bmbx_command(struct sli4 *sli4)
+{
+ void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
+
+ if (sli_fw_error_status(sli4) > 0) {
+ efc_log_crit(sli4, "Chip is in an error state -Mailbox command rejected");
+ efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(sli4),
+ sli_reg_read_err1(sli4),
+ sli_reg_read_err2(sli4));
+ return -EIO;
+ }
+
+ /* Submit a command to the bootstrap mailbox and check the status */
+ if (sli_bmbx_write(sli4)) {
+ efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n",
+ &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG));
+ return -EIO;
+ }
+
+ /* check completion queue entry status */
+ if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) &
+ SLI4_MCQE_VALID) {
+ return sli_cqe_mq(sli4, cqe);
+ }
+ efc_log_crit(sli4, "invalid or wrong type\n");
+ return -EIO;
+}
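+
+/*
+ * Illustrative usage (editorial sketch): a synchronous mailbox command
+ * is built directly into the bootstrap buffer and then submitted, e.g.
+ *
+ *	sli_cmd_read_nvparms(sli4, sli4->bmbx.virt);
+ *	if (sli_bmbx_command(sli4))
+ *		return -EIO;
+ *
+ * after which the completed payload is parsed from sli4->bmbx.virt.
+ */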
+
+int
+sli_cmd_config_link(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_config_link *config_link = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return 0;
+}
+
+int
+sli_cmd_down_link(struct sli4 *sli4, void *buf)
+{
+ struct sli4_mbox_command_header *hdr = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ hdr->command = SLI4_MBX_CMD_DOWN_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return 0;
+}
+
+int
+sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki)
+{
+ struct sli4_cmd_dump4 *cmd = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_DUMP;
+ cmd->type_dword = cpu_to_le32(0x4);
+ cmd->wki_selection = cpu_to_le16(wki);
+ return 0;
+}
+
+int
+sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_read_transceiver_data *req = NULL;
+ u32 psize;
+
+ if (!dma)
+ psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data);
+ else
+ psize = dma->size;
+
+ req = sli_config_cmd_init(sli4, buf, psize, dma);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data));
+
+ req->page_number = cpu_to_le32(page_num);
+ req->port = cpu_to_le32(sli4->port_number);
+
+ return 0;
+}
+
+int
+sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters)
+{
+ struct sli4_cmd_read_link_stats *cmd = buf;
+ u32 flags;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT;
+
+ flags = 0;
+ if (req_ext_counters)
+ flags |= SLI4_READ_LNKSTAT_REC;
+ if (clear_all_counters)
+ flags |= SLI4_READ_LNKSTAT_CLRC;
+ if (clear_overflow_flags)
+ flags |= SLI4_READ_LNKSTAT_CLOF;
+
+ cmd->dw1_flags = cpu_to_le32(flags);
+ return 0;
+}
+
+int
+sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters)
+{
+ struct sli4_cmd_read_status *cmd = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS;
+ if (clear_counters)
+ flags |= SLI4_READSTATUS_CLEAR_COUNTERS;
+ else
+ flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS;
+
+ cmd->dw1_flags = cpu_to_le32(flags);
+ return 0;
+}
+
+int
+sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa)
+{
+ struct sli4_cmd_init_link *init_link = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK;
+
+ init_link->sel_reset_al_pa_dword =
+ cpu_to_le32(reset_alpa);
+ flags &= ~SLI4_INIT_LINK_F_LOOPBACK;
+
+ init_link->link_speed_sel_code = cpu_to_le32(speed);
+ switch (speed) {
+ case SLI4_LINK_SPEED_1G:
+ case SLI4_LINK_SPEED_2G:
+ case SLI4_LINK_SPEED_4G:
+ case SLI4_LINK_SPEED_8G:
+ case SLI4_LINK_SPEED_16G:
+ case SLI4_LINK_SPEED_32G:
+ case SLI4_LINK_SPEED_64G:
+ flags |= SLI4_INIT_LINK_F_FIXED_SPEED;
+ break;
+ case SLI4_LINK_SPEED_10G:
+ efc_log_info(sli4, "unsupported FC speed %d\n", speed);
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ /* Attempt P2P but failover to FC-AL */
+ flags |= SLI4_INIT_LINK_F_FAIL_OVER;
+ flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER;
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ flags |= SLI4_INIT_LINK_F_FCAL_ONLY;
+ if (speed == SLI4_LINK_SPEED_16G ||
+ speed == SLI4_LINK_SPEED_32G) {
+ efc_log_info(sli4, "unsupported FC-AL speed %d\n",
+ speed);
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+ break;
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ flags |= SLI4_INIT_LINK_F_P2P_ONLY;
+ break;
+	default:
+		efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology);
+
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+
+ flags &= ~SLI4_INIT_LINK_F_UNFAIR;
+ flags &= ~SLI4_INIT_LINK_F_NO_LIRP;
+ flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK;
+ flags &= ~SLI4_INIT_LINK_F_NO_LISA;
+ flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA;
+ init_link->flags0 = cpu_to_le32(flags);
+
+ return 0;
+}
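+
+/*
+ * Illustrative usage (editorial sketch): forcing an 8G fixed-speed link
+ * bring-up through the bootstrap mailbox might look like
+ *
+ *	sli_cmd_init_link(sli4, sli4->bmbx.virt, SLI4_LINK_SPEED_8G, 0);
+ *	rc = sli_bmbx_command(sli4);
+ */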
+
+int
+sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi)
+{
+ struct sli4_cmd_init_vfi *init_vfi = buf;
+ u16 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI;
+ init_vfi->vfi = cpu_to_le16(vfi);
+ init_vfi->fcfi = cpu_to_le16(fcfi);
+
+ /*
+ * If the VPI is valid, initialize it at the same time as
+ * the VFI
+ */
+ if (vpi != U16_MAX) {
+ flags |= SLI4_INIT_VFI_FLAG_VP;
+ init_vfi->flags0_word = cpu_to_le16(flags);
+ init_vfi->vpi = cpu_to_le16(vpi);
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi)
+{
+ struct sli4_cmd_init_vpi *init_vpi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI;
+ init_vpi->vpi = cpu_to_le16(vpi);
+ init_vpi->vfi = cpu_to_le16(vfi);
+
+ return 0;
+}
+
+int
+sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count)
+{
+ struct sli4_cmd_post_xri *post_xri = buf;
+ u16 xri_count_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI;
+ post_xri->xri_base = cpu_to_le16(xri_base);
+ xri_count_flags = xri_count & SLI4_POST_XRI_COUNT;
+ xri_count_flags |= SLI4_POST_XRI_FLAG_ENX;
+ xri_count_flags |= SLI4_POST_XRI_FLAG_VAL;
+ post_xri->xri_count_flags = cpu_to_le16(xri_count_flags);
+
+ return 0;
+}
+
+int
+sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri)
+{
+ struct sli4_cmd_release_xri *release_xri = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI;
+ release_xri->xri_count_word = cpu_to_le16(num_xri &
+ SLI4_RELEASE_XRI_COUNT);
+
+ return 0;
+}
+
+static int
+sli_cmd_read_config(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_read_config *read_config = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG;
+
+ return 0;
+}
+
+int
+sli_cmd_read_nvparms(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_read_nvparms *read_nvparms = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS;
+
+ return 0;
+}
+
+int
+sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn,
+ u8 hard_alpa, u32 preferred_d_id)
+{
+ struct sli4_cmd_write_nvparms *write_nvparms = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS;
+ memcpy(write_nvparms->wwpn, wwpn, 8);
+ memcpy(write_nvparms->wwnn, wwnn, 8);
+
+ write_nvparms->hard_alpa_d_id =
+ cpu_to_le32((preferred_d_id << 8) | hard_alpa);
+ return 0;
+}
+
+static int
+sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd)
+{
+ struct sli4_cmd_read_rev *read_rev = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_rev->hdr.command = SLI4_MBX_CMD_READ_REV;
+
+ if (vpd && vpd->size) {
+ read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD);
+
+ read_rev->available_length_dword =
+ cpu_to_le32(vpd->size &
+ SLI4_READ_REV_AVAILABLE_LENGTH);
+
+ read_rev->hostbuf.low =
+ cpu_to_le32(lower_32_bits(vpd->phys));
+ read_rev->hostbuf.high =
+ cpu_to_le32(upper_32_bits(vpd->phys));
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi)
+{
+ struct sli4_cmd_read_sparm64 *read_sparm64 = buf;
+
+ if (vpi == U16_MAX) {
+ efc_log_err(sli4, "special VPI not supported!!!\n");
+ return -EIO;
+ }
+
+ if (!dma || !dma->phys) {
+ efc_log_err(sli4, "bad DMA buffer\n");
+ return -EIO;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64;
+
+ read_sparm64->bde_64.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (dma->size & SLI4_BDE_LEN_MASK));
+ read_sparm64->bde_64.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ read_sparm64->bde_64.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ read_sparm64->vpi = cpu_to_le16(vpi);
+
+ return 0;
+}
+
+int
+sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma)
+{
+ struct sli4_cmd_read_topology *read_topo = buf;
+
+ if (!dma || !dma->size)
+ return -EIO;
+
+ if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
+ efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size);
+ return -EIO;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY;
+
+ memset(dma->virt, 0, dma->size);
+
+ read_topo->bde_loop_map.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (dma->size & SLI4_BDE_LEN_MASK));
+ read_topo->bde_loop_map.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ read_topo->bde_loop_map.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ return 0;
+}
+
+int
+sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
+ struct sli4_cmd_rq_cfg *rq_cfg)
+{
+ struct sli4_cmd_reg_fcfi *reg_fcfi = buf;
+ u32 i;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI;
+
+ reg_fcfi->fcf_index = cpu_to_le16(index);
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ switch (i) {
+ case 0:
+ reg_fcfi->rqid0 = rq_cfg[0].rq_id;
+ break;
+ case 1:
+ reg_fcfi->rqid1 = rq_cfg[1].rq_id;
+ break;
+ case 2:
+ reg_fcfi->rqid2 = rq_cfg[2].rq_id;
+ break;
+ case 3:
+ reg_fcfi->rqid3 = rq_cfg[3].rq_id;
+ break;
+ }
+ reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
+ }
+
+ return 0;
+}
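+
+/*
+ * Illustrative setup (hypothetical caller values): a single catch-all
+ * RQ can be registered by wildcarding R_CTL/TYPE; unused entries are
+ * conventionally given an rq_id of 0xffff (not shown):
+ *
+ *	struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG] = { };
+ *
+ *	rq_cfg[0].rq_id = cpu_to_le16(rq->id);
+ *	rq_cfg[0].r_ctl_mask = 0xff;
+ *	rq_cfg[0].type_mask = 0xff;
+ *	sli_cmd_reg_fcfi(sli4, sli4->bmbx.virt, fcf_index, rq_cfg);
+ *	rc = sli_bmbx_command(sli4);
+ */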
+
+int
+sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index,
+ u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
+ struct sli4_cmd_rq_cfg *rq_cfg)
+{
+ struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf;
+ u32 i;
+ u32 mrq_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ;
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
+ reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index);
+		return 0;
+ }
+
+ reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE);
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
+
+ switch (i) {
+ case 3:
+ reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id;
+ break;
+ case 2:
+ reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id;
+ break;
+ case 1:
+ reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id;
+ break;
+ case 0:
+ reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id;
+ break;
+ }
+ }
+
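+	/*
+	 * dw9 layout, inferred from the shifts below: bits 0-7 hold the
+	 * number of MRQ pairs, bits 8-11 the MRQ bit mask, and bits 12+
+	 * the RQ selection policy.
+	 */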
+ mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS;
+ mrq_flags |= (mrq_bit_mask << 8);
+ mrq_flags |= (rq_selection_policy << 12);
+ reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags);
+	return 0;
+}
+
+int
+sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
+ struct efc_dma *dma, u8 update, u8 enable_t10_pi)
+{
+ struct sli4_cmd_reg_rpi *reg_rpi = buf;
+ u32 rportid_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI;
+
+ reg_rpi->rpi = cpu_to_le16(rpi);
+
+ rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID;
+
+ if (update)
+ rportid_flags |= SLI4_REGRPI_UPD;
+ else
+ rportid_flags &= ~SLI4_REGRPI_UPD;
+
+ if (enable_t10_pi)
+ rportid_flags |= SLI4_REGRPI_ETOW;
+ else
+ rportid_flags &= ~SLI4_REGRPI_ETOW;
+
+ reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags);
+
+ reg_rpi->bde_64.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
+ reg_rpi->bde_64.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ reg_rpi->bde_64.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ reg_rpi->vpi = cpu_to_le16(vpi);
+
+ return 0;
+}
+
+int
+sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
+ u16 vfi, u16 fcfi, struct efc_dma dma,
+ u16 vpi, __be64 sli_wwpn, u32 fc_id)
+{
+ struct sli4_cmd_reg_vfi *reg_vfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI;
+
+ reg_vfi->vfi = cpu_to_le16(vfi);
+
+ reg_vfi->fcfi = cpu_to_le16(fcfi);
+
+ reg_vfi->sparm.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
+ reg_vfi->sparm.u.data.low =
+ cpu_to_le32(lower_32_bits(dma.phys));
+ reg_vfi->sparm.u.data.high =
+ cpu_to_le32(upper_32_bits(dma.phys));
+
+ reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov);
+ reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov);
+
+ reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP);
+ reg_vfi->vpi = cpu_to_le16(vpi);
+ memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn));
+ reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id);
+
+ return 0;
+}
+
+int
+sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn,
+ u16 vpi, u16 vfi, bool update)
+{
+ struct sli4_cmd_reg_vpi *reg_vpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI;
+
+ flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID);
+ if (update)
+ flags |= SLI4_REGVPI_UPD;
+ else
+ flags &= ~SLI4_REGVPI_UPD;
+
+ reg_vpi->dw2_lportid_flags = cpu_to_le32(flags);
+ memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn));
+ reg_vpi->vpi = cpu_to_le16(vpi);
+ reg_vpi->vfi = cpu_to_le16(vfi);
+
+ return 0;
+}
+
+static int
+sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask,
+ bool query)
+{
+ struct sli4_cmd_request_features *req_features = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES;
+
+ if (query)
+ req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY);
+
+ req_features->cmd = cpu_to_le32(features_mask);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator)
+{
+ struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI;
+ unreg_fcfi->fcfi = cpu_to_le16(indicator);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
+ enum sli4_resource which, u32 fc_id)
+{
+ struct sli4_cmd_unreg_rpi *unreg_rpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI;
+ switch (which) {
+ case SLI4_RSRC_RPI:
+ flags |= SLI4_UNREG_RPI_II_RPI;
+ if (fc_id == U32_MAX)
+ break;
+
+ flags |= SLI4_UNREG_RPI_DP;
+ unreg_rpi->dw2_dest_n_portid =
+ cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK);
+ break;
+ case SLI4_RSRC_VPI:
+ flags |= SLI4_UNREG_RPI_II_VPI;
+ break;
+ case SLI4_RSRC_VFI:
+ flags |= SLI4_UNREG_RPI_II_VFI;
+ break;
+ case SLI4_RSRC_FCFI:
+ flags |= SLI4_UNREG_RPI_II_FCFI;
+ break;
+ default:
+ efc_log_info(sli4, "unknown type %#x\n", which);
+ return -EIO;
+ }
+
+ unreg_rpi->dw1w1_flags = cpu_to_le16(flags);
+ unreg_rpi->index = cpu_to_le16(indicator);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which)
+{
+ struct sli4_cmd_unreg_vfi *unreg_vfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI;
+	switch (which) {
+	case SLI4_UNREG_TYPE_DOMAIN:
+	case SLI4_UNREG_TYPE_FCF:
+		unreg_vfi->index = cpu_to_le16(index);
+		break;
+	case SLI4_UNREG_TYPE_ALL:
+		unreg_vfi->index = cpu_to_le16(U16_MAX);
+		break;
+ default:
+ return -EIO;
+ }
+
+ if (which != SLI4_UNREG_TYPE_DOMAIN)
+ unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which)
+{
+ struct sli4_cmd_unreg_vpi *unreg_vpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI;
+ unreg_vpi->index = cpu_to_le16(indicator);
+ switch (which) {
+ case SLI4_UNREG_TYPE_PORT:
+ flags |= SLI4_UNREG_VPI_II_VPI;
+ break;
+ case SLI4_UNREG_TYPE_DOMAIN:
+ flags |= SLI4_UNREG_VPI_II_VFI;
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ flags |= SLI4_UNREG_VPI_II_FCFI;
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ /* override indicator */
+		unreg_vpi->index = cpu_to_le16(U16_MAX);
+ flags |= SLI4_UNREG_VPI_II_FCFI;
+ break;
+ default:
+ return -EIO;
+ }
+
+ unreg_vpi->dw2w0_flags = cpu_to_le16(flags);
+ return 0;
+}
+
+static int
+sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf,
+ struct sli4_queue *q, int num_q, u32 shift,
+ u32 delay_mult)
+{
+ struct sli4_rqst_cmn_modify_eq_delay *req = NULL;
+ int i;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay));
+ req->num_eq = cpu_to_le32(num_q);
+
+ for (i = 0; i < num_q; i++) {
+ req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id);
+ req->eq_delay_record[i].phase = cpu_to_le32(shift);
+ req->eq_delay_record[i].delay_multiplier =
+ cpu_to_le32(delay_mult);
+ }
+
+ return 0;
+}
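+
+/*
+ * Illustrative usage (hypothetical values): interrupt coalescing for a
+ * set of EQs can be retuned in a single command, e.g.
+ *
+ *	sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eqs,
+ *				       num_eqs, 0, 8);
+ *	rc = sli_bmbx_command(sli4);
+ */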
+
+void
+sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf,
+ size_t size, u16 timeout)
+{
+ struct sli4_rqst_lowlevel_set_watchdog *req = NULL;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL);
+ if (!req)
+ return;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG,
+ SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0,
+ SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog));
+ req->watchdog_timeout = cpu_to_le16(timeout);
+}
+
+static int
+sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = cpu_to_le32(dma->size);
+
+ return 0;
+}
+
+static int
+sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = cpu_to_le32(dma->size);
+
+ return 0;
+}
+
+int
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context)
+{
+ struct sli4_rqst_cmn_nop *nop = NULL;
+
+ nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop),
+ NULL);
+ if (!nop)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON,
+ CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop));
+
+ memcpy(&nop->context, &context, sizeof(context));
+
+ return 0;
+}
+
+int
+sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype)
+{
+ struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL;
+
+ ext = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL);
+ if (!ext)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info));
+
+ ext->resource_type = cpu_to_le16(rtype);
+
+ return 0;
+}
+
+int
+sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params);
+
+ return 0;
+}
+
+static int
+sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf)
+{
+ struct sli4_rqst_cmn_get_port_name *pname;
+
+ pname = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL);
+ if (!pname)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME,
+ SLI4_SUBSYSTEM_COMMON, CMD_V1,
+ SLI4_RQST_PYLD_LEN(cmn_get_port_name));
+
+ /* Set the port type value (ethernet=0, FC=1) for V1 commands */
+ pname->port_type = SLI4_PORT_TYPE_FC;
+
+ return 0;
+}
+
+int
+sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
+ u16 eof, u32 desired_write_length,
+ u32 offset, char *obj_name,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_write_object *wr_obj = NULL;
+ struct sli4_bde *bde;
+ u32 dwflags = 0;
+
+ wr_obj = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL);
+ if (!wr_obj)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde)));
+
+ if (noc)
+ dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC;
+ if (eof)
+ dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF;
+ dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN);
+
+ wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
+
+ wr_obj->write_offset = cpu_to_le32(offset);
+ strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
+ wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
+
+ bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
+
+ /* Setup to transfer xfer_size bytes to device */
+ bde->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (desired_write_length & SLI4_BDE_LEN_MASK));
+ bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
+ bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
+
+ return 0;
+}
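+
+/*
+ * Illustrative usage (editorial sketch; the object name and chunk
+ * variables are hypothetical): images larger than one DMA buffer are
+ * written as a series of WRITE_OBJECT commands with eof set only on
+ * the last chunk:
+ *
+ *	rc = sli_cmd_common_write_object(sli4, sli4->bmbx.virt, 0,
+ *					 last_chunk, chunk_len, offset,
+ *					 "/prg/", &chunk_dma);
+ *	if (!rc)
+ *		rc = sli_bmbx_command(sli4);
+ *	offset += chunk_len;
+ */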
+
+int
+sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
+{
+ struct sli4_rqst_cmn_delete_object *req = NULL;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_delete_object), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_delete_object));
+
+ strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
+ return 0;
+}
+
+int
+sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
+ u32 offset, char *obj_name, struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_read_object *rd_obj = NULL;
+ struct sli4_bde *bde;
+
+ rd_obj = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL);
+ if (!rd_obj)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde)));
+ rd_obj->desired_read_length_dword =
+ cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
+
+ rd_obj->read_offset = cpu_to_le32(offset);
+ strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
+ rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
+
+ bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
+
+ /* Setup to transfer xfer_size bytes to device */
+ bde->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (desired_read_len & SLI4_BDE_LEN_MASK));
+ if (dma) {
+ bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
+ bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
+ } else {
+ bde->u.data.low = 0;
+ bde->u.data.high = 0;
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd,
+ struct efc_dma *resp)
+{
+ struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL;
+
+ clp_cmd = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL);
+ if (!clp_cmd)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF,
+ CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd));
+
+ clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size);
+ clp_cmd->cmd_buf_addr_low = cpu_to_le32(lower_32_bits(cmd->phys));
+ clp_cmd->cmd_buf_addr_high = cpu_to_le32(upper_32_bits(cmd->phys));
+ clp_cmd->resp_buf_length = cpu_to_le32(resp->size);
+ clp_cmd->resp_buf_addr_low = cpu_to_le32(lower_32_bits(resp->phys));
+ clp_cmd->resp_buf_addr_high = cpu_to_le32(upper_32_bits(resp->phys));
+ return 0;
+}
+
+int
+sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query,
+ bool is_buffer_list,
+ struct efc_dma *buffer, u8 fdb)
+{
+ struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL;
+ u32 buffer_length_flag = 0;
+
+ set_dump_loc = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL);
+ if (!set_dump_loc)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_set_dump_location));
+
+ if (is_buffer_list)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP;
+
+ if (query)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY;
+
+ if (fdb)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB;
+
+ if (buffer) {
+ set_dump_loc->buf_addr_low =
+ cpu_to_le32(lower_32_bits(buffer->phys));
+ set_dump_loc->buf_addr_high =
+ cpu_to_le32(upper_32_bits(buffer->phys));
+
+ buffer_length_flag |=
+ buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+ } else {
+ set_dump_loc->buf_addr_low = 0;
+ set_dump_loc->buf_addr_high = 0;
+ set_dump_loc->buffer_length_dword = 0;
+ }
+ set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag);
+ return 0;
+}
+
+int
+sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature,
+ u32 param_len, void *parameter)
+{
+ struct sli4_rqst_cmn_set_features *cmd = NULL;
+
+ cmd = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_set_features), NULL);
+ if (!cmd)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_set_features));
+
+ cmd->feature = cpu_to_le32(feature);
+ cmd->param_len = cpu_to_le32(param_len);
+ memcpy(cmd->params, parameter, param_len);
+
+ return 0;
+}
+
+int
+sli_cqe_mq(struct sli4 *sli4, void *buf)
+{
+ struct sli4_mcqe *mcqe = buf;
+ u32 dwflags = le32_to_cpu(mcqe->dw3_flags);
+ /*
+ * Firmware can split mbx completions into two MCQEs: first with only
+ * the "consumed" bit set and a second with the "complete" bit set.
+ * Thus, ignore MCQE unless "complete" is set.
+ */
+ if (!(dwflags & SLI4_MCQE_COMPLETED))
+ return SLI4_MCQE_STATUS_NOT_COMPLETED;
+
+ if (le16_to_cpu(mcqe->completion_status)) {
+ efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
+ le16_to_cpu(mcqe->completion_status),
+ le16_to_cpu(mcqe->extended_status),
+			     !!(dwflags & SLI4_MCQE_CONSUMED),
+			     !!(dwflags & SLI4_MCQE_COMPLETED),
+			     !!(dwflags & SLI4_MCQE_AE),
+			     !!(dwflags & SLI4_MCQE_VALID));
+ }
+
+ return le16_to_cpu(mcqe->completion_status);
+}
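+
+/*
+ * Illustrative sketch (not part of the driver): because firmware may emit
+ * a consumed-only MCQE before the completed one, a CQ polling loop should
+ * treat SLI4_MCQE_STATUS_NOT_COMPLETED as "keep waiting", not as an error:
+ *
+ *	int st = sli_cqe_mq(sli4, cqe);		// cqe: current CQ entry
+ *
+ *	if (st == SLI4_MCQE_STATUS_NOT_COMPLETED)
+ *		return;				// wait for the completed MCQE
+ *	if (st)
+ *		handle_mbox_error(st);		// hypothetical error handler
+ */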
+
+int
+sli_cqe_async(struct sli4 *sli4, void *buf)
+{
+ struct sli4_acqe *acqe = buf;
+ int rc = -EIO;
+
+ if (!buf) {
+ efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf);
+ return -EIO;
+ }
+
+ switch (acqe->event_code) {
+ case SLI4_ACQE_EVENT_CODE_LINK_STATE:
+ efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n",
+ acqe->event_code);
+ break;
+ case SLI4_ACQE_EVENT_CODE_GRP_5:
+ efc_log_info(sli4, "ACQE GRP5\n");
+ break;
+ case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
+ efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
+ acqe->event_type,
+ le32_to_cpu(acqe->event_data[0]),
+ le32_to_cpu(acqe->event_data[1]));
+ break;
+ case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
+ rc = sli_fc_process_link_attention(sli4, buf);
+ break;
+ default:
+ efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code);
+ }
+
+ return rc;
+}
+
+bool
+sli_fw_ready(struct sli4 *sli4)
+{
+ u32 val;
+
+ /* Determine if the chip FW is in a ready state */
+ val = sli_reg_read_status(sli4);
+	return val & SLI4_PORT_STATUS_RDY;
+}
+
+static bool
+sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(timeout_ms);
+
+ do {
+ if (sli_fw_ready(sli4))
+ return true;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ return false;
+}
+
+static bool
+sli_sliport_reset(struct sli4 *sli4)
+{
+ bool rc;
+ u32 val;
+
+ val = SLI4_PORT_CTRL_IP;
+ /* Initialize port, endian */
+ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
+
+ rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
+ if (!rc)
+ efc_log_crit(sli4, "port failed to become ready after initialization\n");
+
+ return rc;
+}
+
+static bool
+sli_fw_init(struct sli4 *sli4)
+{
+ /*
+ * Is firmware ready for operation?
+ */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "FW status is NOT ready\n");
+ return false;
+ }
+
+ /*
+ * Reset port to a known state
+ */
+ return sli_sliport_reset(sli4);
+}
+
+static int
+sli_request_features(struct sli4 *sli4, u32 *features, bool query)
+{
+ struct sli4_cmd_request_features *req_features = sli4->bmbx.virt;
+
+ if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) {
+ efc_log_err(sli4, "bad REQUEST_FEATURES write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(req_features->hdr.status)) {
+ efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n",
+ le16_to_cpu(req_features->hdr.status));
+ return -EIO;
+ }
+
+ *features = le32_to_cpu(req_features->resp);
+ return 0;
+}
+
+void
+sli_calc_max_qentries(struct sli4 *sli4)
+{
+ enum sli4_qtype q;
+ u32 qentries;
+
+ for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
+ sli4->qinfo.max_qentries[q] =
+ sli_convert_mask_to_count(sli4->qinfo.count_method[q],
+ sli4->qinfo.count_mask[q]);
+ }
+
+	/* A single, contiguous DMA allocation will be made for each queue,
+	 * of size (max_qentries * queue entry size); since these can be
+	 * large, this is the place to clamp max_qentries against the OS
+	 * max DMA allocation size. No clamp is applied today, so the loop
+	 * below only reports the values in use.
+	 */
+	for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
+		qentries = sli4->qinfo.max_qentries[q];
+
+		efc_log_info(sli4, "[%s]: max_qentries %d\n",
+			     SLI4_QNAME[q], qentries);
+		sli4->qinfo.max_qentries[q] = qentries;
+	}
+}
+
+static int
+sli_get_read_config(struct sli4 *sli4)
+{
+ struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
+ u32 i, total;
+ u32 *base;
+
+ if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad READ_CONFIG write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(conf->hdr.status)) {
+ efc_log_err(sli4, "READ_CONFIG bad status %#x\n",
+ le16_to_cpu(conf->hdr.status));
+ return -EIO;
+ }
+
+ sli4->params.has_extents =
+ le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT;
+ if (sli4->params.has_extents) {
+ efc_log_err(sli4, "extents not supported\n");
+ return -EIO;
+ }
+
+ base = sli4->ext[0].base;
+ if (!base) {
+ int size = SLI4_RSRC_MAX * sizeof(u32);
+
+ base = kzalloc(size, GFP_KERNEL);
+ if (!base)
+ return -EIO;
+ }
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ sli4->ext[i].number = 1;
+ sli4->ext[i].n_alloc = 0;
+ sli4->ext[i].base = &base[i];
+ }
+
+ sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base);
+ sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count);
+
+ sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base);
+ sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count);
+
+ sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base);
+ sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count);
+
+ sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base);
+ sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count);
+
+ sli4->ext[SLI4_RSRC_FCFI].base[0] = 0;
+ sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count);
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ total = sli4->ext[i].number * sli4->ext[i].size;
+ sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL);
+ if (!sli4->ext[i].use_map) {
+ efc_log_err(sli4, "bitmap memory allocation failed %d\n",
+ i);
+ return -EIO;
+ }
+ sli4->ext[i].map_size = total;
+ }
+
+ sli4->topology = (le32_to_cpu(conf->topology_dword) &
+ SLI4_READ_CFG_RESP_TOPOLOGY) >> 24;
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ efc_log_info(sli4, "FC (unknown)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ efc_log_info(sli4, "FC (direct attach)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ efc_log_info(sli4, "FC (arbitrated loop)\n");
+ break;
+ default:
+ efc_log_info(sli4, "bad topology %#x\n", sli4->topology);
+ }
+
+ sli4->e_d_tov = le16_to_cpu(conf->e_d_tov);
+ sli4->r_a_tov = le16_to_cpu(conf->r_a_tov);
+
+ sli4->link_module_type = le16_to_cpu(conf->lmt);
+
+ sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] = le16_to_cpu(conf->eq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] = le16_to_cpu(conf->cq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] = le16_to_cpu(conf->wq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] = le16_to_cpu(conf->rq_count);
+
+ /*
+ * READ_CONFIG doesn't give the max number of MQ. Applications
+ * will typically want 1, but we may need another at some future
+ * date. Dummy up a "max" MQ count here.
+ */
+ sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT;
+ return 0;
+}
+
+static int
+sli_get_sli4_parameters(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_sli4_params *parms;
+ u32 dw_loopback;
+ u32 dw_eq_pg_cnt;
+ u32 dw_cq_pg_cnt;
+ u32 dw_mq_pg_cnt;
+ u32 dw_wq_pg_cnt;
+ u32 dw_rq_pg_cnt;
+ u32 dw_sgl_pg_cnt;
+
+ if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt))
+ return -EIO;
+
+ parms = (struct sli4_rsp_cmn_get_sli4_params *)
+ (((u8 *)sli4->bmbx.virt) +
+ offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (parms->hdr.status) {
+		efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x\n",
+ parms->hdr.status);
+ efc_log_err(sli4, "additional status %#x\n",
+ parms->hdr.additional_status);
+ return -EIO;
+ }
+
+ dw_loopback = le32_to_cpu(parms->dw16_loopback_scope);
+ dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt);
+ dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt);
+ dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt);
+ dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt);
+ dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt);
+
+ sli4->params.auto_reg = (dw_loopback & SLI4_PARAM_AREG);
+ sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF);
+ sli4->params.hdr_template_req = (dw_loopback & SLI4_PARAM_HDRR);
+ sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM);
+ sli4->params.t10_dif_separate_capable = (dw_loopback & SLI4_PARAM_TSMM);
+
+ sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt);
+ sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt);
+
+ sli4->rq_min_buf_size = le16_to_cpu(parms->min_rq_buffer_size);
+ sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size);
+
+ sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] =
+ (dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] =
+ (dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] =
+ (dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] =
+ (dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] =
+ (dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK);
+
+ /* save count methods and masks for each queue type */
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_EQ] =
+ le16_to_cpu(parms->eqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_EQ] =
+ GET_Q_CNT_METHOD(dw_eq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_CQ] =
+ le16_to_cpu(parms->cqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_CQ] =
+ GET_Q_CNT_METHOD(dw_cq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_MQ] =
+ le16_to_cpu(parms->mqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_MQ] =
+ GET_Q_CNT_METHOD(dw_mq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_WQ] =
+ le16_to_cpu(parms->wqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_WQ] =
+ GET_Q_CNT_METHOD(dw_wq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_RQ] =
+ le16_to_cpu(parms->rqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_RQ] =
+ GET_Q_CNT_METHOD(dw_rq_pg_cnt);
+
+ /* now calculate max queue entries */
+ sli_calc_max_qentries(sli4);
+
+ dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt);
+
+ /* max # of pages */
+ sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK);
+
+ /* bit map of available sizes */
+ sli4->sgl_page_sizes = (dw_sgl_pg_cnt &
+ SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8;
+ /* ignore HLM here. Use value from REQUEST_FEATURES */
+ sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length);
+ sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR);
+ /* default to using pre-registered SGL's */
+ sli4->params.sgl_pre_registered = true;
+
+ sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON;
+ sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ);
+
+ sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) &
+ SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12;
+
+ /* Use the highest available WQE size. */
+ if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) &
+ SLI4_128BYTE_WQE_SUPPORT)
+ sli4->wqe_size = SLI4_WQE_EXT_BYTES;
+ else
+ sli4->wqe_size = SLI4_WQE_BYTES;
+
+ return 0;
+}
+
+static int
+sli_get_ctrl_attributes(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_cntl_attributes *attr;
+ struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr;
+ struct efc_dma data;
+ u32 psize;
+
+	/*
+	 * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. The response
+	 * will not fit in the embedded buffer, so temporarily borrow the
+	 * VPD DMA buffer.
+	 */
+ memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size);
+ if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt,
+ &sli4->vpd_data)) {
+ efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
+ return -EIO;
+ }
+
+ attr = sli4->vpd_data.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (attr->hdr.status) {
+		efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x\n",
+ attr->hdr.status);
+ efc_log_err(sli4, "additional status %#x\n",
+ attr->hdr.additional_status);
+ return -EIO;
+ }
+
+ sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM;
+
+ memcpy(sli4->bios_version_string, attr->bios_version_str,
+ sizeof(sli4->bios_version_string));
+
+ /* get additional attributes */
+ psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
+ data.size = psize;
+ data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
+ &data.phys, GFP_KERNEL);
+ if (!data.virt) {
+ memset(&data, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
+ return -EIO;
+ }
+
+ if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt,
+ &data)) {
+ efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n");
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n");
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ add_attr = data.virt;
+ if (add_attr->hdr.status) {
+ efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n",
+ add_attr->hdr.status);
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name));
+
+ efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name);
+
+ dma_free_coherent(&sli4->pci->dev, data.size, data.virt,
+ data.phys);
+ memset(&data, 0, sizeof(struct efc_dma));
+ return 0;
+}
+
+static int
+sli_get_fw_rev(struct sli4 *sli4)
+{
+ struct sli4_cmd_read_rev *read_rev = sli4->bmbx.virt;
+
+ if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data))
+ return -EIO;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(read_rev->hdr.status)) {
+ efc_log_err(sli4, "READ_REV bad status %#x\n",
+ le16_to_cpu(read_rev->hdr.status));
+ return -EIO;
+ }
+
+ sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id);
+ memcpy(sli4->fw_name[0], read_rev->first_fw_name,
+ sizeof(sli4->fw_name[0]));
+
+ sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id);
+ memcpy(sli4->fw_name[1], read_rev->second_fw_name,
+ sizeof(sli4->fw_name[1]));
+
+ sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev);
+ sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev);
+ sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev);
+
+ efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n",
+ read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id),
+ read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id));
+
+ efc_log_info(sli4, "HW1: %08x / HW2: %08x\n",
+ le32_to_cpu(read_rev->first_hw_rev),
+ le32_to_cpu(read_rev->second_hw_rev));
+
+ /* Check that all VPD data was returned */
+ if (le32_to_cpu(read_rev->returned_vpd_length) !=
+ le32_to_cpu(read_rev->actual_vpd_length)) {
+ efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n",
+ le32_to_cpu(read_rev->available_length_dword) &
+ SLI4_READ_REV_AVAILABLE_LENGTH,
+ le32_to_cpu(read_rev->returned_vpd_length),
+ le32_to_cpu(read_rev->actual_vpd_length));
+ }
+ sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length);
+ return 0;
+}
+
+static int
+sli_get_config(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_port_name *port_name;
+ struct sli4_cmd_read_nvparms *read_nvparms;
+
+ /*
+ * Read the device configuration
+ */
+ if (sli_get_read_config(sli4))
+ return -EIO;
+
+ if (sli_get_sli4_parameters(sli4))
+ return -EIO;
+
+ if (sli_get_ctrl_attributes(sli4))
+ return -EIO;
+
+ if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt))
+ return -EIO;
+
+ port_name = (struct sli4_rsp_cmn_get_port_name *)
+ (((u8 *)sli4->bmbx.virt) +
+ offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n");
+ return -EIO;
+ }
+
+ sli4->port_name[0] = port_name->port_name[sli4->port_number];
+ sli4->port_name[1] = '\0';
+
+ if (sli_get_fw_rev(sli4))
+ return -EIO;
+
+ if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad READ_NVPARMS write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n");
+ return -EIO;
+ }
+
+ read_nvparms = sli4->bmbx.virt;
+ if (le16_to_cpu(read_nvparms->hdr.status)) {
+ efc_log_err(sli4, "READ_NVPARMS bad status %#x\n",
+ le16_to_cpu(read_nvparms->hdr.status));
+ return -EIO;
+ }
+
+ memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn));
+ memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn));
+
+ efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3],
+ sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]);
+ efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3],
+ sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]);
+
+ return 0;
+}
+
+int
+sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
+ void __iomem *reg[])
+{
+ u32 intf = U32_MAX;
+ u32 pci_class_rev = 0;
+ u32 rev_id = 0;
+ u32 family = 0;
+ u32 asic_id = 0;
+ u32 i;
+ struct sli4_asic_entry_t *asic;
+
+ memset(sli4, 0, sizeof(struct sli4));
+
+ sli4->os = os;
+ sli4->pci = pdev;
+
+ for (i = 0; i < 6; i++)
+ sli4->reg[i] = reg[i];
+ /*
+ * Read the SLI_INTF register to discover the register layout
+ * and other capability information
+ */
+ if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf))
+ return -EIO;
+
+ if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) {
+ efc_log_err(sli4, "SLI_INTF is not valid\n");
+ return -EIO;
+ }
+
+	/* the driver only supports SLI-4 */
+ if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) {
+ efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf);
+ return -EIO;
+ }
+
+ sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK;
+
+ sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK;
+ efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(sli4),
+ sli_reg_read_err1(sli4),
+ sli_reg_read_err2(sli4));
+
+ /*
+ * set the ASIC type and revision
+ */
+ if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev))
+ return -EIO;
+
+ rev_id = pci_class_rev & 0xff;
+ family = sli4->sli_family;
+ if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
+ if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id))
+ family = asic_id & SLI4_ASIC_GEN_MASK;
+ }
+
+ for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table);
+ i++, asic++) {
+ if (rev_id == asic->rev_id && family == asic->family) {
+ sli4->asic_type = family;
+ sli4->asic_rev = rev_id;
+ break;
+ }
+ }
+ /* Fail if no matching asic type/rev was found */
+ if (!sli4->asic_type) {
+ efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n",
+ family, rev_id);
+ return -EIO;
+ }
+
+ /*
+ * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
+ * entry, a CQ with a single 16 byte entry, and no event queue.
+ * Alignment must be 16 bytes as the low order address bits in the
+ * address register are also control / status.
+ */
+ sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
+ sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
+ &sli4->bmbx.phys, GFP_KERNEL);
+ if (!sli4->bmbx.virt) {
+ memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
+ return -EIO;
+ }
+
+ if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
+ efc_log_err(sli4, "bad alignment for bootstrap mailbox\n");
+ return -EIO;
+ }
+
+ efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
+ upper_32_bits(sli4->bmbx.phys),
+ lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size);
+
+ /* 4096 is arbitrary. What should this value actually be? */
+ sli4->vpd_data.size = 4096;
+ sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
+ sli4->vpd_data.size,
+ &sli4->vpd_data.phys,
+ GFP_KERNEL);
+ if (!sli4->vpd_data.virt) {
+ memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
+ /* Note that failure isn't fatal in this specific case */
+ efc_log_info(sli4, "VPD buffer allocation failed\n");
+ }
+
+ if (!sli_fw_init(sli4)) {
+ efc_log_err(sli4, "FW initialization failed\n");
+ return -EIO;
+ }
+
+ /*
+ * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true
+ * in addition to any other desired features
+ */
+ sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV |
+ SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF |
+ SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR |
+ SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH |
+ SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI |
+ SLI4_REQFEAT_MRQP);
+
+ /* use performance hints if available */
+ if (sli4->params.perf_hint)
+ sli4->features |= SLI4_REQFEAT_PERFH;
+
+ if (sli_request_features(sli4, &sli4->features, true))
+ return -EIO;
+
+ if (sli_get_config(sli4))
+ return -EIO;
+
+ return 0;
+}
+
+int
+sli_init(struct sli4 *sli4)
+{
+ if (sli4->params.has_extents) {
+		efc_log_info(sli4, "extent allocation not supported\n");
+ return -EIO;
+ }
+
+ sli4->features &= (~SLI4_REQFEAT_HLM);
+ sli4->features &= (~SLI4_REQFEAT_RXSEQ);
+ sli4->features &= (~SLI4_REQFEAT_RXRI);
+
+ if (sli_request_features(sli4, &sli4->features, false))
+ return -EIO;
+
+ return 0;
+}
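+
+/*
+ * Illustrative call sequence (not part of the driver): a PCI probe path
+ * would bring the port up roughly as below, with sli_teardown() undoing
+ * sli_setup() on the error/remove path. All names other than the sli_*
+ * functions are hypothetical.
+ *
+ *	if (sli_setup(&hw->sli, hw, pdev, regs))   // reset HW, query config
+ *		goto fail;
+ *	if (sli_init(&hw->sli))                    // commit final features
+ *		goto fail;
+ *	sli_callback(&hw->sli, SLI4_CB_LINK, link_cb, hw);
+ *	// ... create EQs/CQs/MQ/WQs/RQs ...
+ */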
+
+int
+sli_reset(struct sli4 *sli4)
+{
+ u32 i;
+
+ if (!sli_fw_init(sli4)) {
+ efc_log_crit(sli4, "FW initialization failed\n");
+ return -EIO;
+ }
+
+ kfree(sli4->ext[0].base);
+ sli4->ext[0].base = NULL;
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ bitmap_free(sli4->ext[i].use_map);
+ sli4->ext[i].use_map = NULL;
+ sli4->ext[i].base = NULL;
+ }
+
+ return sli_get_config(sli4);
+}
+
+int
+sli_fw_reset(struct sli4 *sli4)
+{
+ /*
+ * Firmware must be ready before issuing the reset.
+ */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "FW status is NOT ready\n");
+ return -EIO;
+ }
+
+ /* Lancer uses PHYDEV_CONTROL */
+ writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
+
+ /* wait for the FW to become ready after the reset */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "Failed to be ready after firmware reset\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+sli_teardown(struct sli4 *sli4)
+{
+ u32 i;
+
+ kfree(sli4->ext[0].base);
+ sli4->ext[0].base = NULL;
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ sli4->ext[i].base = NULL;
+
+ bitmap_free(sli4->ext[i].use_map);
+ sli4->ext[i].use_map = NULL;
+ }
+
+ if (!sli_sliport_reset(sli4))
+ efc_log_err(sli4, "FW deinitialization failed\n");
+
+ dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size,
+ sli4->vpd_data.virt, sli4->vpd_data.phys);
+ memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
+
+ dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size,
+ sli4->bmbx.virt, sli4->bmbx.phys);
+ memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
+}
+
+int
+sli_callback(struct sli4 *sli4, enum sli4_callback which,
+ void *func, void *arg)
+{
+ if (!func) {
+ efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n",
+ sli4, which, func);
+ return -EIO;
+ }
+
+ switch (which) {
+ case SLI4_CB_LINK:
+ sli4->link = func;
+ sli4->link_arg = arg;
+ break;
+ default:
+ efc_log_info(sli4, "unknown callback %#x\n", which);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq,
+ u32 num_eq, u32 shift, u32 delay_mult)
+{
+	if (sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq,
+					   shift, delay_mult)) {
+		efc_log_err(sli4, "bad MODIFY EQ DELAY write\n");
+		return -EIO;
+	}
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
+ return -EIO;
+ }
+ if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad status MODIFY EQ DELAY\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype,
+ u32 *rid, u32 *index)
+{
+ int rc = 0;
+ u32 size;
+ u32 ext_idx;
+ u32 item_idx;
+ u32 position;
+
+ *rid = U32_MAX;
+ *index = U32_MAX;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ position =
+ find_first_zero_bit(sli4->ext[rtype].use_map,
+ sli4->ext[rtype].map_size);
+ if (position >= sli4->ext[rtype].map_size) {
+ efc_log_err(sli4, "out of resource %d (alloc=%d)\n",
+ rtype, sli4->ext[rtype].n_alloc);
+ rc = -EIO;
+ break;
+ }
+ set_bit(position, sli4->ext[rtype].use_map);
+ *index = position;
+
+ size = sli4->ext[rtype].size;
+
+ ext_idx = *index / size;
+ item_idx = *index % size;
+
+ *rid = sli4->ext[rtype].base[ext_idx] + item_idx;
+
+ sli4->ext[rtype].n_alloc++;
+ break;
+ default:
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int
+sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid)
+{
+ int rc = -EIO;
+ u32 x;
+ u32 size, *base;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ /*
+ * Figure out which extent contains the resource ID. I.e. find
+ * the extent such that
+ * extent->base <= resource ID < extent->base + extent->size
+ */
+ base = sli4->ext[rtype].base;
+ size = sli4->ext[rtype].size;
+
+ /*
+ * In the case of FW reset, this may be cleared
+ * but the force_free path will still attempt to
+ * free the resource. Prevent a NULL pointer access.
+ */
+ if (!base)
+ break;
+
+ for (x = 0; x < sli4->ext[rtype].number; x++) {
+ if ((rid < base[x] || (rid >= (base[x] + size))))
+ continue;
+
+ rid -= base[x];
+ clear_bit((x * size) + rid, sli4->ext[rtype].use_map);
+ rc = 0;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
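+
+/*
+ * Illustrative example (not part of the driver): with a single extent of
+ * base 0x100 and size 64, sli_resource_alloc() maps bitmap position N to
+ * resource ID 0x100 + N, and sli_resource_free() reverses the mapping by
+ * finding the extent that contains the ID:
+ *
+ *	u32 rid, index;
+ *
+ *	if (!sli_resource_alloc(sli4, SLI4_RSRC_RPI, &rid, &index))
+ *		sli_resource_free(sli4, SLI4_RSRC_RPI, rid);
+ */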
+
+int
+sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype)
+{
+ int rc = -EIO;
+ u32 i;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ for (i = 0; i < sli4->ext[rtype].map_size; i++)
+ clear_bit(i, sli4->ext[rtype].use_map);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int sli_raise_ue(struct sli4 *sli4, u8 dump)
+{
+ u32 val = 0;
+
+ if (dump == SLI4_FUNC_DESC_DUMP) {
+ val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP;
+ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
+ } else {
+ val = SLI4_PHYDEV_CTRL_FRST;
+
+ if (dump == SLI4_CHIP_LEVEL_DUMP)
+ val |= SLI4_PHYDEV_CTRL_DD;
+ writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
+ }
+
+ return 0;
+}
+
+int sli_dump_is_ready(struct sli4 *sli4)
+{
+ int rc = SLI4_DUMP_READY_STATUS_NOT_READY;
+ u32 port_val;
+ u32 bmbx_val;
+
+ /*
+ * Ensure that the port is ready AND the mailbox is
+ * ready before signaling that the dump is ready to go.
+ */
+ port_val = sli_reg_read_status(sli4);
+ bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG);
+
+ if ((bmbx_val & SLI4_BMBX_RDY) &&
+ (port_val & SLI4_PORT_STATUS_RDY)) {
+ if (port_val & SLI4_PORT_STATUS_DIP)
+ rc = SLI4_DUMP_READY_STATUS_DD_PRESENT;
+ else if (port_val & SLI4_PORT_STATUS_FDP)
+ rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT;
+ }
+
+ return rc;
+}
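+
+/*
+ * Illustrative sketch (not part of the driver): a diagnostic path would
+ * typically trigger a dump with sli_raise_ue() and poll until firmware
+ * reports an image is present:
+ *
+ *	sli_raise_ue(sli4, SLI4_CHIP_LEVEL_DUMP);
+ *	while (sli_dump_is_ready(sli4) == SLI4_DUMP_READY_STATUS_NOT_READY)
+ *		msleep(10);	// hypothetical poll interval
+ */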
+
+bool sli_reset_required(struct sli4 *sli4)
+{
+ u32 val;
+
+ val = sli_reg_read_status(sli4);
+ return (val & SLI4_PORT_STATUS_RN);
+}
+
+int
+sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
+ u32 xri_count, struct efc_dma *page0[],
+ struct efc_dma *page1[], struct efc_dma *dma)
+{
+ struct sli4_rqst_post_sgl_pages *post = NULL;
+ u32 i;
+ __le32 req_len;
+
+ post = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma);
+ if (!post)
+ return -EIO;
+
+ /* payload size calculation */
+ /* 4 = xri_start + xri_count */
+ /* xri_count = # of XRI's registered */
+ /* sizeof(uint64_t) = physical address size */
+ /* 2 = # of physical addresses per page set */
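+	/* e.g. 2 XRIs: 4 + (2 * 8 * 2) = 36 payload bytes */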
+ req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2)));
+ sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC,
+ CMD_V0, req_len);
+ post->xri_start = cpu_to_le16(xri);
+ post->xri_count = cpu_to_le16(xri_count);
+
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page0_low =
+ cpu_to_le32(lower_32_bits(page0[i]->phys));
+ post->page_set[i].page0_high =
+ cpu_to_le32(upper_32_bits(page0[i]->phys));
+ }
+
+ if (page1) {
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page1_low =
+ cpu_to_le32(lower_32_bits(page1[i]->phys));
+ post->page_set[i].page1_high =
+ cpu_to_le32(upper_32_bits(page1[i]->phys));
+ }
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
+ u16 rpi, struct efc_dma *payload_dma)
+{
+ struct sli4_rqst_post_hdr_templates *req = NULL;
+ uintptr_t phys = 0;
+ u32 i = 0;
+ u32 page_count, payload_size;
+
+ page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
+
+ payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) +
+ (page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr));
+
+ if (page_count > 16) {
+		/*
+		 * We can't fit more than 16 descriptors into an embedded
+		 * mailbox command, so it has to be non-embedded.
+		 */
+ payload_dma->size = payload_size;
+ payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
+ payload_dma->size,
+ &payload_dma->phys, GFP_KERNEL);
+ if (!payload_dma->virt) {
+ memset(payload_dma, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "mbox payload memory allocation fail\n");
+ return -EIO;
+ }
+ req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma);
+ } else {
+ req = sli_config_cmd_init(sli4, buf, payload_size, NULL);
+ }
+
+ if (!req)
+ return -EIO;
+
+ if (rpi == U16_MAX)
+ rpi = sli4->ext[SLI4_RSRC_RPI].base[0];
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES,
+ SLI4_SUBSYSTEM_FC, CMD_V0,
+ SLI4_RQST_PYLD_LEN(post_hdr_templates));
+
+ req->rpi_offset = cpu_to_le16(rpi);
+ req->page_count = cpu_to_le16(page_count);
+ phys = dma->phys;
+ for (i = 0; i < page_count; i++) {
+ req->page_descriptor[i].low = cpu_to_le32(lower_32_bits(phys));
+ req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys));
+
+ phys += SLI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+u32
+sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi)
+{
+ u32 bytes = 0;
+
+ /* Check if header templates needed */
+ if (sli4->params.hdr_template_req)
+ /* round up to a page */
+ bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE);
+
+ return bytes;
+}
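+
+/*
+ * Illustrative arithmetic (not part of the driver): assuming a 64-byte
+ * header template and 4 KiB pages, 100 RPIs need
+ * round_up(100 * 64, 4096) = 8192 bytes of template memory.
+ */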
+
+const char *
+sli_fc_get_status_string(u32 status)
+{
+ static struct {
+ u32 code;
+ const char *label;
+ } lookup[] = {
+ {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"},
+ {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"},
+ {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"},
+ {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"},
+ {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"},
+ {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
+ "RQ_INSUFF_BUF_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
+ {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"},
+ {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"},
+ {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
+ "RQ_INSUFF_XRI_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
+ {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"},
+ {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"},
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(lookup); i++) {
+ if (status == lookup[i].code)
+ return lookup[i].label;
+ }
+ return "unknown";
+}
diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h
new file mode 100644
index 000000000000..38af166cc786
--- /dev/null
+++ b/drivers/scsi/elx/libefc_sli/sli4.h
@@ -0,0 +1,4132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/*
+ * All common SLI-4 structures and function prototypes.
+ */
+
+#ifndef _SLI4_H
+#define _SLI4_H
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "../include/efc_common.h"
+
+/*************************************************************************
+ * Common SLI-4 register offsets and field definitions
+ */
+
+/* SLI_INTF - SLI Interface Definition Register */
+#define SLI4_INTF_REG 0x0058
+enum sli4_intf {
+ SLI4_INTF_REV_SHIFT = 4,
+ SLI4_INTF_REV_MASK = 0xf0,
+
+ SLI4_INTF_REV_S3 = 0x30,
+ SLI4_INTF_REV_S4 = 0x40,
+
+ SLI4_INTF_FAMILY_SHIFT = 8,
+ SLI4_INTF_FAMILY_MASK = 0x0f00,
+
+ SLI4_FAMILY_CHECK_ASIC_TYPE = 0x0f00,
+
+ SLI4_INTF_IF_TYPE_SHIFT = 12,
+ SLI4_INTF_IF_TYPE_MASK = 0xf000,
+
+ SLI4_INTF_IF_TYPE_2 = 0x2000,
+ SLI4_INTF_IF_TYPE_6 = 0x6000,
+
+ SLI4_INTF_VALID_SHIFT = 29,
+ SLI4_INTF_VALID_MASK = 0xe0000000,
+
+ SLI4_INTF_VALID_VALUE = 0xc0000000,
+};
+
+/* ASIC_ID - SLI ASIC Type and Revision Register */
+#define SLI4_ASIC_ID_REG 0x009c
+enum sli4_asic {
+ SLI4_ASIC_GEN_SHIFT = 8,
+ SLI4_ASIC_GEN_MASK = 0xff00,
+ SLI4_ASIC_GEN_5 = 0x0b00,
+ SLI4_ASIC_GEN_6 = 0x0c00,
+ SLI4_ASIC_GEN_7 = 0x0d00,
+};
+
+enum sli4_acic_revisions {
+ SLI4_ASIC_REV_A0 = 0x00,
+ SLI4_ASIC_REV_A1 = 0x01,
+ SLI4_ASIC_REV_A2 = 0x02,
+ SLI4_ASIC_REV_A3 = 0x03,
+ SLI4_ASIC_REV_B0 = 0x10,
+ SLI4_ASIC_REV_B1 = 0x11,
+ SLI4_ASIC_REV_B2 = 0x12,
+ SLI4_ASIC_REV_C0 = 0x20,
+ SLI4_ASIC_REV_C1 = 0x21,
+ SLI4_ASIC_REV_C2 = 0x22,
+ SLI4_ASIC_REV_D0 = 0x30,
+};
+
+struct sli4_asic_entry_t {
+ u32 rev_id;
+ u32 family;
+};
+
+/* BMBX - Bootstrap Mailbox Register */
+#define SLI4_BMBX_REG 0x0160
+enum sli4_bmbx {
+ SLI4_BMBX_MASK_HI = 0x3,
+ SLI4_BMBX_MASK_LO = 0xf,
+ SLI4_BMBX_RDY = 1 << 0,
+ SLI4_BMBX_HI = 1 << 1,
+ SLI4_BMBX_SIZE = 256,
+};
+
+static inline u32
+sli_bmbx_write_hi(u64 addr) {
+ u32 val;
+
+ val = upper_32_bits(addr) & ~SLI4_BMBX_MASK_HI;
+ val |= SLI4_BMBX_HI;
+
+ return val;
+}
+
+static inline u32
+sli_bmbx_write_lo(u64 addr) {
+ u32 val;
+
+ val = (upper_32_bits(addr) & SLI4_BMBX_MASK_HI) << 30;
+ val |= ((addr) & ~SLI4_BMBX_MASK_LO) >> 2;
+
+ return val;
+}
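+
+/*
+ * Illustrative sketch (not part of the driver): programming the bootstrap
+ * mailbox is a two-step register write, high half first (with the HI bit
+ * set), then the low half, each followed by polling SLI4_BMBX_RDY:
+ *
+ *	writel(sli_bmbx_write_hi(phys), sli4->reg[0] + SLI4_BMBX_REG);
+ *	// ... poll SLI4_BMBX_RDY ...
+ *	writel(sli_bmbx_write_lo(phys), sli4->reg[0] + SLI4_BMBX_REG);
+ *	// ... poll SLI4_BMBX_RDY, then parse the MCQE following the MQE
+ */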
+
+/* SLIPORT_CONTROL - SLI Port Control Register */
+#define SLI4_PORT_CTRL_REG 0x0408
+enum sli4_port_ctrl {
+ SLI4_PORT_CTRL_IP = 1u << 27,
+ SLI4_PORT_CTRL_IDIS = 1u << 22,
+ SLI4_PORT_CTRL_FDD = 1u << 31,
+};
+
+/* SLI4_SLIPORT_ERROR - SLI Port Error Register */
+#define SLI4_PORT_ERROR1 0x040c
+#define SLI4_PORT_ERROR2 0x0410
+
+/* EQCQ_DOORBELL - EQ and CQ Doorbell Register */
+#define SLI4_EQCQ_DB_REG 0x120
+enum sli4_eqcq_e {
+ SLI4_EQ_ID_LO_MASK = 0x01ff,
+
+ SLI4_CQ_ID_LO_MASK = 0x03ff,
+
+ SLI4_EQCQ_CI_EQ = 0x0200,
+
+ SLI4_EQCQ_QT_EQ = 0x00000400,
+ SLI4_EQCQ_QT_CQ = 0x00000000,
+
+ SLI4_EQCQ_ID_HI_SHIFT = 11,
+ SLI4_EQCQ_ID_HI_MASK = 0xf800,
+
+ SLI4_EQCQ_NUM_SHIFT = 16,
+ SLI4_EQCQ_NUM_MASK = 0x1fff0000,
+
+ SLI4_EQCQ_ARM = 0x20000000,
+ SLI4_EQCQ_UNARM = 0x00000000,
+};
+
+static inline u32
+sli_format_eq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = (id & SLI4_EQ_ID_LO_MASK) | SLI4_EQCQ_QT_EQ;
+ reg |= (((id) >> 9) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK;
+ reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK;
+ reg |= arm | SLI4_EQCQ_CI_EQ;
+
+ return reg;
+}
+
+static inline u32
+sli_format_cq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = ((id) & SLI4_CQ_ID_LO_MASK) | SLI4_EQCQ_QT_CQ;
+ reg |= (((id) >> 10) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK;
+ reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
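+
+/*
+ * Illustrative sketch (not part of the driver): after servicing "popped"
+ * entries, an EQ is re-armed by writing the formatted value to its
+ * doorbell; the register address is hypothetical, as the BAR and offset
+ * depend on the IF_TYPE:
+ *
+ *	u32 db = sli_format_eq_db_data(popped, eq_id, SLI4_EQCQ_ARM);
+ *
+ *	writel(db, db_regaddr);	// hypothetical ioremapped doorbell address
+ */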
+
+/* EQ_DOORBELL - EQ Doorbell Register for IF_TYPE = 6 */
+#define SLI4_IF6_EQ_DB_REG 0x120
+enum sli4_eq_e {
+ SLI4_IF6_EQ_ID_MASK = 0x0fff,
+
+ SLI4_IF6_EQ_NUM_SHIFT = 16,
+ SLI4_IF6_EQ_NUM_MASK = 0x1fff0000,
+};
+
+static inline u32
+sli_format_if6_eq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = id & SLI4_IF6_EQ_ID_MASK;
+ reg |= (num_popped << SLI4_IF6_EQ_NUM_SHIFT) & SLI4_IF6_EQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* CQ_DOORBELL - CQ Doorbell Register for IF_TYPE = 6 */
+#define SLI4_IF6_CQ_DB_REG 0xc0
+enum sli4_cq_e {
+ SLI4_IF6_CQ_ID_MASK = 0xffff,
+
+ SLI4_IF6_CQ_NUM_SHIFT = 16,
+ SLI4_IF6_CQ_NUM_MASK = 0x1fff0000,
+};
+
+static inline u32
+sli_format_if6_cq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = id & SLI4_IF6_CQ_ID_MASK;
+ reg |= ((num_popped) << SLI4_IF6_CQ_NUM_SHIFT) & SLI4_IF6_CQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* MQ_DOORBELL - MQ Doorbell Register */
+#define SLI4_MQ_DB_REG 0x0140
+#define SLI4_IF6_MQ_DB_REG 0x0160
+enum sli4_mq_e {
+ SLI4_MQ_ID_MASK = 0xffff,
+
+ SLI4_MQ_NUM_SHIFT = 16,
+ SLI4_MQ_NUM_MASK = 0x3fff0000,
+};
+
+static inline u32
+sli_format_mq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_MQ_ID_MASK;
+ reg |= (1 << SLI4_MQ_NUM_SHIFT) & SLI4_MQ_NUM_MASK;
+
+ return reg;
+}
+
+/* RQ_DOORBELL - RQ Doorbell Register */
+#define SLI4_RQ_DB_REG 0x0a0
+#define SLI4_IF6_RQ_DB_REG 0x0080
+enum sli4_rq_e {
+ SLI4_RQ_DB_ID_MASK = 0xffff,
+
+ SLI4_RQ_DB_NUM_SHIFT = 16,
+ SLI4_RQ_DB_NUM_MASK = 0x3fff0000,
+};
+
+static inline u32
+sli_format_rq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_RQ_DB_ID_MASK;
+ reg |= (1 << SLI4_RQ_DB_NUM_SHIFT) & SLI4_RQ_DB_NUM_MASK;
+
+ return reg;
+}
+
+/* WQ_DOORBELL - WQ Doorbell Register */
+#define SLI4_IO_WQ_DB_REG 0x040
+#define SLI4_IF6_WQ_DB_REG 0x040
+enum sli4_wq_e {
+ SLI4_WQ_ID_MASK = 0xffff,
+
+ SLI4_WQ_IDX_SHIFT = 16,
+ SLI4_WQ_IDX_MASK = 0xff0000,
+
+ SLI4_WQ_NUM_SHIFT = 24,
+ SLI4_WQ_NUM_MASK = 0x0ff00000,
+};
+
+static inline u32
+sli_format_wq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_WQ_ID_MASK;
+ reg |= (1 << SLI4_WQ_NUM_SHIFT) & SLI4_WQ_NUM_MASK;
+
+ return reg;
+}
+
+/* SLIPORT_STATUS - SLI Port Status Register */
+#define SLI4_PORT_STATUS_REGOFF 0x0404
+enum sli4_port_status {
+ SLI4_PORT_STATUS_FDP = 1u << 21,
+ SLI4_PORT_STATUS_RDY = 1u << 23,
+ SLI4_PORT_STATUS_RN = 1u << 24,
+ SLI4_PORT_STATUS_DIP = 1u << 25,
+ SLI4_PORT_STATUS_OTI = 1u << 29,
+ SLI4_PORT_STATUS_ERR = 1u << 31,
+};
+
+#define SLI4_PHYDEV_CTRL_REG 0x0414
+#define SLI4_PHYDEV_CTRL_FRST (1 << 1)
+#define SLI4_PHYDEV_CTRL_DD (1 << 2)
+
+/* Register name enums */
+enum sli4_regname_en {
+ SLI4_REG_BMBX,
+ SLI4_REG_EQ_DOORBELL,
+ SLI4_REG_CQ_DOORBELL,
+ SLI4_REG_RQ_DOORBELL,
+ SLI4_REG_IO_WQ_DOORBELL,
+ SLI4_REG_MQ_DOORBELL,
+ SLI4_REG_PHYSDEV_CONTROL,
+ SLI4_REG_PORT_CONTROL,
+ SLI4_REG_PORT_ERROR1,
+ SLI4_REG_PORT_ERROR2,
+ SLI4_REG_PORT_SEMAPHORE,
+ SLI4_REG_PORT_STATUS,
+ SLI4_REG_UNKWOWN /* must be last */
+};
+
+struct sli4_reg {
+ u32 rset;
+ u32 off;
+};
+
+struct sli4_dmaaddr {
+ __le32 low;
+ __le32 high;
+};
+
+/*
+ * A 3-word buffer descriptor entry: the address occupies the
+ * first two words and the length the last word.
+ */
+struct sli4_bufptr {
+ struct sli4_dmaaddr addr;
+ __le32 length;
+};
+
+/* Buffer Descriptor Entry (BDE) */
+enum sli4_bde_e {
+ SLI4_BDE_LEN_MASK = 0x00ffffff,
+ SLI4_BDE_TYPE_MASK = 0xff000000,
+};
+
+struct sli4_bde {
+ __le32 bde_type_buflen;
+ union {
+ struct sli4_dmaaddr data;
+ struct {
+ __le32 offset;
+ __le32 rsvd2;
+ } imm;
+ struct sli4_dmaaddr blp;
+ } u;
+};
+
+/* Buffer Descriptors */
+enum sli4_bde_type {
+ SLI4_BDE_TYPE_SHIFT = 24,
+ SLI4_BDE_TYPE_64 = 0x00, /* Generic 64-bit data */
+ SLI4_BDE_TYPE_IMM = 0x01, /* Immediate data */
+ SLI4_BDE_TYPE_BLP = 0x40, /* Buffer List Pointer */
+};
+
+#define SLI4_BDE_TYPE_VAL(type) \
+ (SLI4_BDE_TYPE_##type << SLI4_BDE_TYPE_SHIFT)
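+
+/*
+ * For example, SLI4_BDE_TYPE_VAL(BLP) expands to 0x40 << 24; the result is
+ * or'd into bde_type_buflen together with the 24-bit length kept under
+ * SLI4_BDE_LEN_MASK.
+ */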
+
+/* Scatter-Gather Entry (SGE) */
+#define SLI4_SGE_MAX_RESERVED 3
+
+enum sli4_sge_type {
+ /* DW2 */
+ SLI4_SGE_DATA_OFFSET_MASK = 0x07ffffff,
+	/* DW2W1 */
+	SLI4_SGE_TYPE_SHIFT = 27,
+	SLI4_SGE_TYPE_MASK = 0x78000000,
+	/* SGE Types */
+ SLI4_SGE_TYPE_DATA = 0x00,
+ SLI4_SGE_TYPE_DIF = 0x04, /* Data Integrity Field */
+ SLI4_SGE_TYPE_LSP = 0x05, /* List Segment Pointer */
+ SLI4_SGE_TYPE_PEDIF = 0x06, /* Post Encryption Engine DIF */
+ SLI4_SGE_TYPE_PESEED = 0x07, /* Post Encryption DIF Seed */
+ SLI4_SGE_TYPE_DISEED = 0x08, /* DIF Seed */
+ SLI4_SGE_TYPE_ENC = 0x09, /* Encryption */
+ SLI4_SGE_TYPE_ATM = 0x0a, /* DIF Application Tag Mask */
+ SLI4_SGE_TYPE_SKIP = 0x0c, /* SKIP */
+
+ SLI4_SGE_LAST = 1u << 31,
+};
+
+struct sli4_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 buffer_length;
+};
+
+/* T10 DIF Scatter-Gather Entry (SGE) */
+struct sli4_dif_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 rsvd12;
+};
+
+/* Data Integrity Seed (DISEED) SGE */
+enum sli4_diseed_sge_flags {
+ /* DW2W1 */
+ SLI4_DISEED_SGE_HS = 1 << 2,
+ SLI4_DISEED_SGE_WS = 1 << 3,
+ SLI4_DISEED_SGE_IC = 1 << 4,
+ SLI4_DISEED_SGE_ICS = 1 << 5,
+ SLI4_DISEED_SGE_ATRT = 1 << 6,
+ SLI4_DISEED_SGE_AT = 1 << 7,
+ SLI4_DISEED_SGE_FAT = 1 << 8,
+ SLI4_DISEED_SGE_NA = 1 << 9,
+ SLI4_DISEED_SGE_HI = 1 << 10,
+
+ /* DW3W1 */
+ SLI4_DISEED_SGE_BS_MASK = 0x0007,
+ SLI4_DISEED_SGE_AI = 1 << 3,
+ SLI4_DISEED_SGE_ME = 1 << 4,
+ SLI4_DISEED_SGE_RE = 1 << 5,
+ SLI4_DISEED_SGE_CE = 1 << 6,
+ SLI4_DISEED_SGE_NR = 1 << 7,
+
+ SLI4_DISEED_SGE_OP_RX_SHIFT = 8,
+ SLI4_DISEED_SGE_OP_RX_MASK = 0x0f00,
+ SLI4_DISEED_SGE_OP_TX_SHIFT = 12,
+ SLI4_DISEED_SGE_OP_TX_MASK = 0xf000,
+};
+
+/* Opcode values */
+enum sli4_diseed_sge_opcodes {
+ SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_NODIF,
+ SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_NODIF,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_RAW_OUT_RAW,
+};
+
+#define SLI4_DISEED_SGE_OP_RX_VALUE(stype) \
+ (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_RX_SHIFT)
+#define SLI4_DISEED_SGE_OP_TX_VALUE(stype) \
+ (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_TX_SHIFT)
+
+struct sli4_diseed_sge {
+ __le32 ref_tag_cmp;
+ __le32 ref_tag_repl;
+ __le16 app_tag_repl;
+ __le16 dw2w1_flags;
+ __le16 app_tag_cmp;
+ __le16 dw3w1_flags;
+};
+
+/* List Segment Pointer Scatter-Gather Entry (SGE) */
+#define SLI4_LSP_SGE_SEGLEN 0x00ffffff
+
+struct sli4_lsp_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 dw3_seglen;
+};
+
+enum sli4_eqe_e {
+ SLI4_EQE_VALID = 1,
+ SLI4_EQE_MJCODE = 0xe,
+ SLI4_EQE_MNCODE = 0xfff0,
+};
+
+struct sli4_eqe {
+ __le16 dw0w0_flags;
+ __le16 resource_id;
+};
+
+#define SLI4_MAJOR_CODE_STANDARD 0
+#define SLI4_MAJOR_CODE_SENTINEL 1
+
+/* Sentinel EQE indicating the EQ is full */
+#define SLI4_EQE_STATUS_EQ_FULL 2
+
+enum sli4_mcqe_e {
+ SLI4_MCQE_CONSUMED = 1u << 27,
+ SLI4_MCQE_COMPLETED = 1u << 28,
+ SLI4_MCQE_AE = 1u << 30,
+ SLI4_MCQE_VALID = 1u << 31,
+};
+
+/* Entry was consumed but not completed */
+#define SLI4_MCQE_STATUS_NOT_COMPLETED -2
+
+struct sli4_mcqe {
+ __le16 completion_status;
+ __le16 extended_status;
+ __le32 mqe_tag_low;
+ __le32 mqe_tag_high;
+ __le32 dw3_flags;
+};
+
+enum sli4_acqe_e {
+ SLI4_ACQE_AE = 1 << 6, /* async event - this is an ACQE */
+ SLI4_ACQE_VAL = 1 << 7, /* valid - contents of CQE are valid */
+};
+
+struct sli4_acqe {
+ __le32 event_data[3];
+ u8 rsvd12;
+ u8 event_code;
+ u8 event_type;
+ u8 ae_val;
+};
+
+enum sli4_acqe_event_code {
+ SLI4_ACQE_EVENT_CODE_LINK_STATE = 0x01,
+ SLI4_ACQE_EVENT_CODE_FIP = 0x02,
+ SLI4_ACQE_EVENT_CODE_DCBX = 0x03,
+ SLI4_ACQE_EVENT_CODE_ISCSI = 0x04,
+ SLI4_ACQE_EVENT_CODE_GRP_5 = 0x05,
+ SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT = 0x10,
+ SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT = 0x11,
+ SLI4_ACQE_EVENT_CODE_VF_EVENT = 0x12,
+ SLI4_ACQE_EVENT_CODE_MR_EVENT = 0x13,
+};
+
+enum sli4_qtype {
+ SLI4_QTYPE_EQ,
+ SLI4_QTYPE_CQ,
+ SLI4_QTYPE_MQ,
+ SLI4_QTYPE_WQ,
+ SLI4_QTYPE_RQ,
+ SLI4_QTYPE_MAX, /* must be last */
+};
+
+#define SLI4_USER_MQ_COUNT 1
+#define SLI4_MAX_CQ_SET_COUNT 16
+#define SLI4_MAX_RQ_SET_COUNT 16
+
+enum sli4_qentry {
+ SLI4_QENTRY_ASYNC,
+ SLI4_QENTRY_MQ,
+ SLI4_QENTRY_RQ,
+ SLI4_QENTRY_WQ,
+ SLI4_QENTRY_WQ_RELEASE,
+ SLI4_QENTRY_OPT_WRITE_CMD,
+ SLI4_QENTRY_OPT_WRITE_DATA,
+ SLI4_QENTRY_XABT,
+ SLI4_QENTRY_MAX /* must be last */
+};
+
+enum sli4_queue_flags {
+ SLI4_QUEUE_FLAG_MQ = 1 << 0, /* CQ has MQ/Async completion */
+ SLI4_QUEUE_FLAG_HDR = 1 << 1, /* RQ for packet headers */
+ SLI4_QUEUE_FLAG_RQBATCH = 1 << 2, /* RQ index increment by 8 */
+};
+
+/* Generic Command Request header */
+enum sli4_cmd_version {
+ CMD_V0,
+ CMD_V1,
+ CMD_V2,
+};
+
+struct sli4_rqst_hdr {
+ u8 opcode;
+ u8 subsystem;
+ __le16 rsvd2;
+ __le32 timeout;
+ __le32 request_length;
+ __le32 dw3_version;
+};
+
+/* Generic Command Response header */
+struct sli4_rsp_hdr {
+ u8 opcode;
+ u8 subsystem;
+ __le16 rsvd2;
+ u8 status;
+ u8 additional_status;
+ __le16 rsvd6;
+ __le32 response_length;
+ __le32 actual_response_length;
+};
+
+#define SLI4_QUEUE_RQ_BATCH 8
+
+#define SZ_DMAADDR sizeof(struct sli4_dmaaddr)
+#define SLI4_RQST_CMDSZ(stype) sizeof(struct sli4_rqst_##stype)
+
+#define SLI4_RQST_PYLD_LEN(stype) \
+ cpu_to_le32(sizeof(struct sli4_rqst_##stype) - \
+ sizeof(struct sli4_rqst_hdr))
+
+#define SLI4_RQST_PYLD_LEN_VAR(stype, varpyld) \
+ cpu_to_le32((sizeof(struct sli4_rqst_##stype) + \
+ varpyld) - sizeof(struct sli4_rqst_hdr))
+
+#define SLI4_CFG_PYLD_LENGTH(stype) \
+ max(sizeof(struct sli4_rqst_##stype), \
+ sizeof(struct sli4_rsp_##stype))
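+
+/*
+ * Worked example (derived from the definitions above): for the two-word
+ * cmn_destroy_q request defined later in this file,
+ *
+ *	SLI4_RQST_CMDSZ(cmn_destroy_q)    == sizeof(struct sli4_rqst_hdr) + 4
+ *	SLI4_RQST_PYLD_LEN(cmn_destroy_q) == cpu_to_le32(4)
+ *
+ * i.e. the payload length reported to the hardware excludes the common
+ * request header.
+ */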
+
+enum sli4_create_cqv2_e {
+	/* DW5_flags values */
+ SLI4_CREATE_CQV2_CLSWM_MASK = 0x00003000,
+ SLI4_CREATE_CQV2_NODELAY = 0x00004000,
+ SLI4_CREATE_CQV2_AUTOVALID = 0x00008000,
+ SLI4_CREATE_CQV2_CQECNT_MASK = 0x18000000,
+ SLI4_CREATE_CQV2_VALID = 0x20000000,
+ SLI4_CREATE_CQV2_EVT = 0x80000000,
+	/* DW6W1_flags values */
+ SLI4_CREATE_CQV2_ARM = 0x8000,
+};
+
+struct sli4_rqst_cmn_create_cq_v2 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 page_size;
+ u8 rsvd19;
+ __le32 dw5_flags;
+ __le16 eq_id;
+ __le16 dw6w1_arm;
+ __le16 cqe_count;
+ __le16 rsvd30;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+enum sli4_create_cqset_e {
+	/* DW5_flags values */
+ SLI4_CREATE_CQSETV0_CLSWM_MASK = 0x00003000,
+ SLI4_CREATE_CQSETV0_NODELAY = 0x00004000,
+ SLI4_CREATE_CQSETV0_AUTOVALID = 0x00008000,
+ SLI4_CREATE_CQSETV0_CQECNT_MASK = 0x18000000,
+ SLI4_CREATE_CQSETV0_VALID = 0x20000000,
+ SLI4_CREATE_CQSETV0_EVT = 0x80000000,
+ /* DW5W1_flags values */
+ SLI4_CREATE_CQSETV0_CQE_COUNT = 0x7fff,
+ SLI4_CREATE_CQSETV0_ARM = 0x8000,
+};
+
+struct sli4_rqst_cmn_create_cq_set_v0 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 page_size;
+ u8 rsvd19;
+ __le32 dw5_flags;
+ __le16 num_cq_req;
+ __le16 dw6w1_flags;
+ __le16 eq_id[16];
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+/* CQE count */
+enum sli4_cq_cnt {
+ SLI4_CQ_CNT_256,
+ SLI4_CQ_CNT_512,
+ SLI4_CQ_CNT_1024,
+ SLI4_CQ_CNT_LARGE,
+};
+
+#define SLI4_CQ_CNT_SHIFT 27
+#define SLI4_CQ_CNT_VAL(type) (SLI4_CQ_CNT_##type << SLI4_CQ_CNT_SHIFT)
+
+#define SLI4_CQE_BYTES (4 * sizeof(u32))
+
+#define SLI4_CREATE_CQV2_MAX_PAGES 8
+
+/* Generic Common Create EQ/CQ/MQ/WQ/RQ Queue completion */
+struct sli4_rsp_cmn_create_queue {
+ struct sli4_rsp_hdr hdr;
+ __le16 q_id;
+ u8 rsvd18;
+ u8 ulp;
+ __le32 db_offset;
+ __le16 db_rs;
+ __le16 db_fmt;
+};
+
+struct sli4_rsp_cmn_create_queue_set {
+ struct sli4_rsp_hdr hdr;
+ __le16 q_id;
+ __le16 num_q_allocated;
+};
+
+/* Common Destroy Queue */
+struct sli4_rqst_cmn_destroy_q {
+ struct sli4_rqst_hdr hdr;
+ __le16 q_id;
+ __le16 rsvd;
+};
+
+struct sli4_rsp_cmn_destroy_q {
+ struct sli4_rsp_hdr hdr;
+};
+
+/* Modify the delay multiplier for EQs */
+struct sli4_eqdelay_rec {
+ __le32 eq_id;
+ __le32 phase;
+ __le32 delay_multiplier;
+};
+
+struct sli4_rqst_cmn_modify_eq_delay {
+ struct sli4_rqst_hdr hdr;
+ __le32 num_eq;
+ struct sli4_eqdelay_rec eq_delay_record[8];
+};
+
+struct sli4_rsp_cmn_modify_eq_delay {
+ struct sli4_rsp_hdr hdr;
+};
+
+enum sli4_create_cq_e {
+ /* DW5 */
+ SLI4_CREATE_EQ_AUTOVALID = 1u << 28,
+ SLI4_CREATE_EQ_VALID = 1u << 29,
+ SLI4_CREATE_EQ_EQESZ = 1u << 31,
+ /* DW6 */
+ SLI4_CREATE_EQ_COUNT = 7 << 26,
+ SLI4_CREATE_EQ_ARM = 1u << 31,
+ /* DW7 */
+ SLI4_CREATE_EQ_DELAYMULTI_SHIFT = 13,
+ SLI4_CREATE_EQ_DELAYMULTI_MASK = 0x007fe000,
+ SLI4_CREATE_EQ_DELAYMULTI = 0x00040000,
+};
+
+struct sli4_rqst_cmn_create_eq {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 rsvd18;
+ __le32 dw5_flags;
+ __le32 dw6_flags;
+ __le32 dw7_delaymulti;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_address[8];
+};
+
+struct sli4_rsp_cmn_create_eq {
+ struct sli4_rsp_cmn_create_queue q_rsp;
+};
+
+/* EQ count */
+enum sli4_eq_cnt {
+ SLI4_EQ_CNT_256,
+ SLI4_EQ_CNT_512,
+ SLI4_EQ_CNT_1024,
+ SLI4_EQ_CNT_2048,
+ SLI4_EQ_CNT_4096 = 3,
+};
+
+#define SLI4_EQ_CNT_SHIFT 26
+#define SLI4_EQ_CNT_VAL(type) (SLI4_EQ_CNT_##type << SLI4_EQ_CNT_SHIFT)
+
+#define SLI4_EQE_SIZE_4 0
+#define SLI4_EQE_SIZE_16 1
+
+/* Create a Mailbox Queue; accommodate v0 and v1 forms. */
+enum sli4_create_mq_flags {
+ /* DW6W1 */
+ SLI4_CREATE_MQEXT_RINGSIZE = 0xf,
+ SLI4_CREATE_MQEXT_CQID_SHIFT = 6,
+ SLI4_CREATE_MQEXT_CQIDV0_MASK = 0xffc0,
+ /* DW7 */
+ SLI4_CREATE_MQEXT_VAL = 1u << 31,
+ /* DW8 */
+ SLI4_CREATE_MQEXT_ACQV = 1u << 0,
+ SLI4_CREATE_MQEXT_ASYNC_CQIDV0 = 0x7fe,
+};
+
+struct sli4_rqst_cmn_create_mq_ext {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 cq_id_v1;
+ __le32 async_event_bitmap;
+ __le16 async_cq_id_v1;
+ __le16 dw6w1_flags;
+ __le32 dw7_val;
+ __le32 dw8_flags;
+ __le32 rsvd36;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+struct sli4_rsp_cmn_create_mq_ext {
+ struct sli4_rsp_cmn_create_queue q_rsp;
+};
+
+enum sli4_mqe_size {
+ SLI4_MQE_SIZE_16 = 0x05,
+ SLI4_MQE_SIZE_32,
+ SLI4_MQE_SIZE_64,
+ SLI4_MQE_SIZE_128,
+};
+
+enum sli4_async_evt {
+ SLI4_ASYNC_EVT_LINK_STATE = 1 << 1,
+ SLI4_ASYNC_EVT_FIP = 1 << 2,
+ SLI4_ASYNC_EVT_GRP5 = 1 << 5,
+ SLI4_ASYNC_EVT_FC = 1 << 16,
+ SLI4_ASYNC_EVT_SLI_PORT = 1 << 17,
+};
+
+#define SLI4_ASYNC_EVT_FC_ALL \
+ (SLI4_ASYNC_EVT_LINK_STATE | \
+ SLI4_ASYNC_EVT_FIP | \
+ SLI4_ASYNC_EVT_GRP5 | \
+ SLI4_ASYNC_EVT_FC | \
+ SLI4_ASYNC_EVT_SLI_PORT)
+
+/* Create a Completion Queue. */
+struct sli4_rqst_cmn_create_cq_v0 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 rsvd18;
+ __le32 dw5_flags;
+ __le32 dw6_flags;
+ __le32 rsvd28;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+enum sli4_create_rq_e {
+ SLI4_RQ_CREATE_DUA = 0x1,
+ SLI4_RQ_CREATE_BQU = 0x2,
+
+ SLI4_RQE_SIZE = 8,
+ SLI4_RQE_SIZE_8 = 0x2,
+ SLI4_RQE_SIZE_16 = 0x3,
+ SLI4_RQE_SIZE_32 = 0x4,
+ SLI4_RQE_SIZE_64 = 0x5,
+ SLI4_RQE_SIZE_128 = 0x6,
+
+ SLI4_RQ_PAGE_SIZE_4096 = 0x1,
+ SLI4_RQ_PAGE_SIZE_8192 = 0x2,
+ SLI4_RQ_PAGE_SIZE_16384 = 0x4,
+ SLI4_RQ_PAGE_SIZE_32768 = 0x8,
+ SLI4_RQ_PAGE_SIZE_64536 = 0x10,
+
+ SLI4_RQ_CREATE_V0_MAX_PAGES = 8,
+ SLI4_RQ_CREATE_V0_MIN_BUF_SIZE = 128,
+ SLI4_RQ_CREATE_V0_MAX_BUF_SIZE = 2048,
+};
+
+struct sli4_rqst_rq_create {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 dua_bqu_byte;
+ u8 ulp;
+ __le16 rsvd16;
+ u8 rqe_count_byte;
+ u8 rsvd19;
+ __le32 rsvd20;
+ __le16 buffer_size;
+ __le16 cq_id;
+ __le32 rsvd28;
+ struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V0_MAX_PAGES];
+};
+
+struct sli4_rsp_rq_create {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+enum sli4_create_rqv1_e {
+ SLI4_RQ_CREATE_V1_DNB = 0x80,
+ SLI4_RQ_CREATE_V1_MAX_PAGES = 8,
+ SLI4_RQ_CREATE_V1_MIN_BUF_SIZE = 64,
+ SLI4_RQ_CREATE_V1_MAX_BUF_SIZE = 2048,
+};
+
+struct sli4_rqst_rq_create_v1 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 rsvd14;
+ u8 dim_dfd_dnb;
+ u8 page_size;
+ u8 rqe_size_byte;
+ __le16 rqe_count;
+ __le32 rsvd20;
+ __le16 rsvd24;
+ __le16 cq_id;
+ __le32 buffer_size;
+ struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V1_MAX_PAGES];
+};
+
+struct sli4_rsp_rq_create_v1 {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+#define SLI4_RQCREATEV2_DNB 0x80
+
+struct sli4_rqst_rq_create_v2 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 rq_count;
+ u8 dim_dfd_dnb;
+ u8 page_size;
+ u8 rqe_size_byte;
+ __le16 rqe_count;
+ __le16 hdr_buffer_size;
+ __le16 payload_buffer_size;
+ __le16 base_cq_id;
+ __le16 rsvd26;
+ __le32 rsvd42;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+struct sli4_rsp_rq_create_v2 {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+#define SLI4_CQE_CODE_OFFSET 14
+
+enum sli4_cqe_code {
+ SLI4_CQE_CODE_WORK_REQUEST_COMPLETION = 0x01,
+ SLI4_CQE_CODE_RELEASE_WQE,
+ SLI4_CQE_CODE_RSVD,
+ SLI4_CQE_CODE_RQ_ASYNC,
+ SLI4_CQE_CODE_XRI_ABORTED,
+ SLI4_CQE_CODE_RQ_COALESCING,
+ SLI4_CQE_CODE_RQ_CONSUMPTION,
+ SLI4_CQE_CODE_MEASUREMENT_REPORTING,
+ SLI4_CQE_CODE_RQ_ASYNC_V1,
+ SLI4_CQE_CODE_RQ_COALESCING_V1,
+ SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD,
+ SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA,
+};
+
+#define SLI4_WQ_CREATE_MAX_PAGES 8
+
+struct sli4_rqst_wq_create {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 cq_id;
+ u8 page_size;
+ u8 wqe_size_byte;
+ __le16 wqe_count;
+ __le32 rsvd;
+ struct sli4_dmaaddr page_phys_addr[SLI4_WQ_CREATE_MAX_PAGES];
+};
+
+struct sli4_rsp_wq_create {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+enum sli4_link_attention_flags {
+ SLI4_LNK_ATTN_TYPE_LINK_UP = 0x01,
+ SLI4_LNK_ATTN_TYPE_LINK_DOWN = 0x02,
+ SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA = 0x03,
+
+ SLI4_LNK_ATTN_P2P = 0x01,
+ SLI4_LNK_ATTN_FC_AL = 0x02,
+ SLI4_LNK_ATTN_INTERNAL_LOOPBACK = 0x03,
+ SLI4_LNK_ATTN_SERDES_LOOPBACK = 0x04,
+};
+
+struct sli4_link_attention {
+ u8 link_number;
+ u8 attn_type;
+ u8 topology;
+ u8 port_speed;
+ u8 port_fault;
+ u8 shared_link_status;
+ __le16 logical_link_speed;
+ __le32 event_tag;
+ u8 rsvd12;
+ u8 event_code;
+ u8 event_type;
+ u8 flags;
+};
+
+enum sli4_link_event_type {
+ SLI4_EVENT_LINK_ATTENTION = 0x01,
+ SLI4_EVENT_SHARED_LINK_ATTENTION = 0x02,
+};
+
+enum sli4_wcqe_flags {
+ SLI4_WCQE_XB = 0x10,
+ SLI4_WCQE_QX = 0x80,
+};
+
+struct sli4_fc_wcqe {
+ u8 hw_status;
+ u8 status;
+ __le16 request_tag;
+ __le32 wqe_specific_1;
+ __le32 wqe_specific_2;
+ u8 rsvd12;
+ u8 qx_byte;
+ u8 code;
+ u8 flags;
+};
+
+/* FC WQ consumed CQ queue entry */
+struct sli4_fc_wqec {
+ __le32 rsvd0;
+ __le32 rsvd1;
+ __le16 wqe_index;
+ __le16 wq_id;
+ __le16 rsvd12;
+ u8 code;
+ u8 vld_byte;
+};
+
+/* FC Completion Status Codes. */
+enum sli4_wcqe_status {
+ SLI4_FC_WCQE_STATUS_SUCCESS,
+ SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE,
+ SLI4_FC_WCQE_STATUS_REMOTE_STOP,
+ SLI4_FC_WCQE_STATUS_LOCAL_REJECT,
+ SLI4_FC_WCQE_STATUS_NPORT_RJT,
+ SLI4_FC_WCQE_STATUS_FABRIC_RJT,
+ SLI4_FC_WCQE_STATUS_NPORT_BSY,
+ SLI4_FC_WCQE_STATUS_FABRIC_BSY,
+ SLI4_FC_WCQE_STATUS_RSVD,
+ SLI4_FC_WCQE_STATUS_LS_RJT,
+ SLI4_FC_WCQE_STATUS_RX_BUF_OVERRUN,
+ SLI4_FC_WCQE_STATUS_CMD_REJECT,
+ SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK,
+ SLI4_FC_WCQE_STATUS_RSVD1,
+ SLI4_FC_WCQE_STATUS_ELS_CMPLT_NO_AUTOREG,
+ SLI4_FC_WCQE_STATUS_RSVD2,
+ SLI4_FC_WCQE_STATUS_RQ_SUCCESS,
+ SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC,
+ SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE,
+ SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE,
+ SLI4_FC_WCQE_STATUS_DI_ERROR,
+ SLI4_FC_WCQE_STATUS_BA_RJT,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC,
+ SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT,
+ SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST,
+
+ /* driver generated status codes */
+ SLI4_FC_WCQE_STATUS_DISPATCH_ERROR = 0xfd,
+ SLI4_FC_WCQE_STATUS_SHUTDOWN = 0xfe,
+ SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT = 0xff,
+};
+
+/* DI_ERROR Extended Status */
+enum sli4_fc_di_error_status {
+ SLI4_FC_DI_ERROR_GE = 1 << 0,
+ SLI4_FC_DI_ERROR_AE = 1 << 1,
+ SLI4_FC_DI_ERROR_RE = 1 << 2,
+ SLI4_FC_DI_ERROR_TDPV = 1 << 3,
+ SLI4_FC_DI_ERROR_UDB = 1 << 4,
+ SLI4_FC_DI_ERROR_EDIR = 1 << 5,
+};
+
+/* WQE DIF field contents */
+enum sli4_dif_fields {
+ SLI4_DIF_DISABLED,
+ SLI4_DIF_PASS_THROUGH,
+ SLI4_DIF_STRIP,
+ SLI4_DIF_INSERT,
+};
+
+/* Work Queue Entry (WQE) types */
+enum sli4_wqe_types {
+ SLI4_WQE_ABORT = 0x0f,
+ SLI4_WQE_ELS_REQUEST64 = 0x8a,
+ SLI4_WQE_FCP_IBIDIR64 = 0xac,
+ SLI4_WQE_FCP_IREAD64 = 0x9a,
+ SLI4_WQE_FCP_IWRITE64 = 0x98,
+ SLI4_WQE_FCP_ICMND64 = 0x9c,
+ SLI4_WQE_FCP_TRECEIVE64 = 0xa1,
+ SLI4_WQE_FCP_CONT_TRECEIVE64 = 0xe5,
+ SLI4_WQE_FCP_TRSP64 = 0xa3,
+ SLI4_WQE_FCP_TSEND64 = 0x9f,
+ SLI4_WQE_GEN_REQUEST64 = 0xc2,
+ SLI4_WQE_SEND_FRAME = 0xe1,
+ SLI4_WQE_XMIT_BCAST64 = 0x84,
+ SLI4_WQE_XMIT_BLS_RSP = 0x97,
+ SLI4_WQE_ELS_RSP64 = 0x95,
+ SLI4_WQE_XMIT_SEQUENCE64 = 0x82,
+ SLI4_WQE_REQUEUE_XRI = 0x93,
+};
+
+/* WQE command types */
+enum sli4_wqe_cmds {
+ SLI4_CMD_FCP_IREAD64_WQE = 0x00,
+ SLI4_CMD_FCP_ICMND64_WQE = 0x00,
+ SLI4_CMD_FCP_IWRITE64_WQE = 0x01,
+ SLI4_CMD_FCP_TRECEIVE64_WQE = 0x02,
+ SLI4_CMD_FCP_TRSP64_WQE = 0x03,
+ SLI4_CMD_FCP_TSEND64_WQE = 0x07,
+ SLI4_CMD_GEN_REQUEST64_WQE = 0x08,
+ SLI4_CMD_XMIT_BCAST64_WQE = 0x08,
+ SLI4_CMD_XMIT_BLS_RSP64_WQE = 0x08,
+ SLI4_CMD_ABORT_WQE = 0x08,
+ SLI4_CMD_XMIT_SEQUENCE64_WQE = 0x08,
+ SLI4_CMD_REQUEUE_XRI_WQE = 0x0a,
+ SLI4_CMD_SEND_FRAME_WQE = 0x0a,
+};
+
+#define SLI4_WQE_SIZE 0x05
+#define SLI4_WQE_EXT_SIZE 0x06
+
+#define SLI4_WQE_BYTES (16 * sizeof(u32))
+#define SLI4_WQE_EXT_BYTES (32 * sizeof(u32))
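+
+/*
+ * Note (an assumption, based on the usual SLI-4 queue-entry size
+ * encoding): size code 0x05 selects a 64-byte (16-dword) WQE and
+ * 0x06 a 128-byte (32-dword) WQE, matching SLI4_WQE_BYTES and
+ * SLI4_WQE_EXT_BYTES above.
+ */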
+
+/* Mask for ccp (CS_CTL) */
+#define SLI4_MASK_CCP 0xfe
+
+/* Generic WQE */
+enum sli4_gen_wqe_flags {
+ SLI4_GEN_WQE_EBDECNT = 0xf,
+ SLI4_GEN_WQE_LEN_LOC = 0x3 << 7,
+ SLI4_GEN_WQE_QOSD = 1 << 9,
+ SLI4_GEN_WQE_XBL = 1 << 11,
+ SLI4_GEN_WQE_HLM = 1 << 12,
+ SLI4_GEN_WQE_IOD = 1 << 13,
+ SLI4_GEN_WQE_DBDE = 1 << 14,
+ SLI4_GEN_WQE_WQES = 1 << 15,
+
+ SLI4_GEN_WQE_PRI = 0x7,
+ SLI4_GEN_WQE_PV = 1 << 3,
+ SLI4_GEN_WQE_EAT = 1 << 4,
+ SLI4_GEN_WQE_XC = 1 << 5,
+ SLI4_GEN_WQE_CCPE = 1 << 7,
+
+ SLI4_GEN_WQE_CMDTYPE = 0xf,
+ SLI4_GEN_WQE_WQEC = 1 << 7,
+};
+
+struct sli4_generic_wqe {
+ __le32 cmd_spec0_5[6];
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 dw10w0_flags;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_wqec_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+};
+
+/* WQE used to abort exchanges. */
+enum sli4_abort_wqe_flags {
+ SLI4_ABRT_WQE_IR = 0x02,
+
+ SLI4_ABRT_WQE_EBDECNT = 0xf,
+ SLI4_ABRT_WQE_LEN_LOC = 0x3 << 7,
+ SLI4_ABRT_WQE_QOSD = 1 << 9,
+ SLI4_ABRT_WQE_XBL = 1 << 11,
+ SLI4_ABRT_WQE_IOD = 1 << 13,
+ SLI4_ABRT_WQE_DBDE = 1 << 14,
+ SLI4_ABRT_WQE_WQES = 1 << 15,
+
+ SLI4_ABRT_WQE_PRI = 0x7,
+ SLI4_ABRT_WQE_PV = 1 << 3,
+ SLI4_ABRT_WQE_EAT = 1 << 4,
+ SLI4_ABRT_WQE_XC = 1 << 5,
+ SLI4_ABRT_WQE_CCPE = 1 << 7,
+
+ SLI4_ABRT_WQE_CMDTYPE = 0xf,
+ SLI4_ABRT_WQE_WQEC = 1 << 7,
+};
+
+struct sli4_abort_wqe {
+ __le32 rsvd0;
+ __le32 rsvd4;
+ __le32 ext_t_tag;
+ u8 ia_ir_byte;
+ u8 criteria;
+ __le16 rsvd10;
+ __le32 ext_t_mask;
+ __le32 t_mask;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 t_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 dw10w0_flags;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_wqec_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+};
+
+enum sli4_abort_criteria {
+ SLI4_ABORT_CRITERIA_XRI_TAG = 0x01,
+ SLI4_ABORT_CRITERIA_ABORT_TAG,
+ SLI4_ABORT_CRITERIA_REQUEST_TAG,
+ SLI4_ABORT_CRITERIA_EXT_ABORT_TAG,
+};
+
+enum sli4_abort_type {
+ SLI4_ABORT_XRI,
+ SLI4_ABORT_ABORT_ID,
+ SLI4_ABORT_REQUEST_ID,
+ SLI4_ABORT_MAX, /* must be last */
+};
+
+/* WQE used to create an ELS request. */
+enum sli4_els_req_wqe_flags {
+ SLI4_REQ_WQE_QOSD = 0x2,
+ SLI4_REQ_WQE_DBDE = 0x40,
+ SLI4_REQ_WQE_XBL = 0x8,
+ SLI4_REQ_WQE_XC = 0x20,
+ SLI4_REQ_WQE_IOD = 0x20,
+ SLI4_REQ_WQE_HLM = 0x10,
+ SLI4_REQ_WQE_CCPE = 0x80,
+ SLI4_REQ_WQE_EAT = 0x10,
+ SLI4_REQ_WQE_WQES = 0x80,
+ SLI4_REQ_WQE_PU_SHFT = 4,
+ SLI4_REQ_WQE_CT_SHFT = 2,
+ SLI4_REQ_WQE_CT = 0xc,
+ SLI4_REQ_WQE_ELSID_SHFT = 4,
+ SLI4_REQ_WQE_SP_SHFT = 24,
+ SLI4_REQ_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_REQ_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_els_request64_wqe {
+ struct sli4_bde els_request_payload;
+ __le32 els_request_payload_length;
+ __le32 sid_sp_dword;
+ __le32 remote_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 temporary_rpi;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_elsid_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ struct sli4_bde els_response_payload_bde;
+ __le32 max_response_payload_length;
+};
+
+/* WQE used to create an FCP initiator no data command. */
+enum sli4_icmd_wqe_flags {
+ SLI4_ICMD_WQE_DBDE = 0x40,
+ SLI4_ICMD_WQE_XBL = 0x8,
+ SLI4_ICMD_WQE_XC = 0x20,
+ SLI4_ICMD_WQE_IOD = 0x20,
+ SLI4_ICMD_WQE_HLM = 0x10,
+ SLI4_ICMD_WQE_CCPE = 0x80,
+ SLI4_ICMD_WQE_EAT = 0x10,
+ SLI4_ICMD_WQE_APPID = 0x10,
+ SLI4_ICMD_WQE_WQES = 0x80,
+ SLI4_ICMD_WQE_PU_SHFT = 4,
+ SLI4_ICMD_WQE_CT_SHFT = 2,
+ SLI4_ICMD_WQE_BS_SHFT = 4,
+ SLI4_ICMD_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_ICMD_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_icmnd64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+ __le32 rsvd12;
+ __le32 remote_n_port_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create an FCP initiator read. */
+enum sli4_ir_wqe_flags {
+ SLI4_IR_WQE_DBDE = 0x40,
+ SLI4_IR_WQE_XBL = 0x8,
+ SLI4_IR_WQE_XC = 0x20,
+ SLI4_IR_WQE_IOD = 0x20,
+ SLI4_IR_WQE_HLM = 0x10,
+ SLI4_IR_WQE_CCPE = 0x80,
+ SLI4_IR_WQE_EAT = 0x10,
+ SLI4_IR_WQE_APPID = 0x10,
+ SLI4_IR_WQE_WQES = 0x80,
+ SLI4_IR_WQE_PU_SHFT = 4,
+ SLI4_IR_WQE_CT_SHFT = 2,
+ SLI4_IR_WQE_BS_SHFT = 4,
+ SLI4_IR_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_IR_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_iread64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+
+ __le32 total_transfer_length;
+
+ __le32 remote_n_port_id_dword;
+
+ __le16 xri_tag;
+ __le16 context_tag;
+
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+
+ __le32 abort_tag;
+
+ __le16 request_tag;
+ __le16 rsvd34;
+
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+
+ __le32 rsvd44;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create an FCP initiator write. */
+enum sli4_iwr_wqe_flags {
+ SLI4_IWR_WQE_DBDE = 0x40,
+ SLI4_IWR_WQE_XBL = 0x8,
+ SLI4_IWR_WQE_XC = 0x20,
+ SLI4_IWR_WQE_IOD = 0x20,
+ SLI4_IWR_WQE_HLM = 0x10,
+ SLI4_IWR_WQE_DNRX = 0x10,
+ SLI4_IWR_WQE_CCPE = 0x80,
+ SLI4_IWR_WQE_EAT = 0x10,
+ SLI4_IWR_WQE_APPID = 0x10,
+ SLI4_IWR_WQE_WQES = 0x80,
+ SLI4_IWR_WQE_PU_SHFT = 4,
+ SLI4_IWR_WQE_CT_SHFT = 2,
+ SLI4_IWR_WQE_BS_SHFT = 4,
+ SLI4_IWR_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_IWR_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_iwrite64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+ __le16 total_transfer_length;
+ __le16 initial_transfer_length;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 remote_n_port_id_dword;
+ struct sli4_bde first_data_bde;
+};
+
+struct sli4_fcp_128byte_wqe {
+ u32 dw[32];
+};
+
+/* WQE used to create an FCP target receive */
+enum sli4_trcv_wqe_flags {
+ SLI4_TRCV_WQE_DBDE = 0x40,
+ SLI4_TRCV_WQE_XBL = 0x8,
+ SLI4_TRCV_WQE_AR = 0x8,
+ SLI4_TRCV_WQE_XC = 0x20,
+ SLI4_TRCV_WQE_IOD = 0x20,
+ SLI4_TRCV_WQE_HLM = 0x10,
+ SLI4_TRCV_WQE_DNRX = 0x10,
+ SLI4_TRCV_WQE_CCPE = 0x80,
+ SLI4_TRCV_WQE_EAT = 0x10,
+ SLI4_TRCV_WQE_APPID = 0x10,
+ SLI4_TRCV_WQE_WQES = 0x80,
+ SLI4_TRCV_WQE_PU_SHFT = 4,
+ SLI4_TRCV_WQE_CT_SHFT = 2,
+ SLI4_TRCV_WQE_BS_SHFT = 4,
+ SLI4_TRCV_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_treceive64_wqe {
+ struct sli4_bde bde;
+ __le32 payload_offset_length;
+ __le32 relative_offset;
+ union {
+ __le16 sec_xri_tag;
+ __le16 rsvd;
+ __le32 dword;
+ } dword5;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_ar_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 lloc1_appid;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 fcp_data_receive_length;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create an FCP target response */
+enum sli4_trsp_wqe_flags {
+ SLI4_TRSP_WQE_AG = 0x8,
+ SLI4_TRSP_WQE_DBDE = 0x40,
+ SLI4_TRSP_WQE_XBL = 0x8,
+ SLI4_TRSP_WQE_XC = 0x20,
+ SLI4_TRSP_WQE_HLM = 0x10,
+ SLI4_TRSP_WQE_DNRX = 0x10,
+ SLI4_TRSP_WQE_CCPE = 0x80,
+ SLI4_TRSP_WQE_EAT = 0x10,
+ SLI4_TRSP_WQE_APPID = 0x10,
+ SLI4_TRSP_WQE_WQES = 0x80,
+};
+
+struct sli4_fcp_trsp64_wqe {
+ struct sli4_bde bde;
+ __le32 fcp_response_length;
+ __le32 rsvd12;
+ __le32 dword5;
+ __le16 xri_tag;
+ __le16 rpi;
+ u8 ct_dnrx_byte;
+ u8 command;
+ u8 class_ag_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 lloc1_appid;
+ u8 qosd_xbl_hlm_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create an FCP target send (DATA IN). */
+enum sli4_tsend_wqe_flags {
+ SLI4_TSEND_WQE_XBL = 0x8,
+ SLI4_TSEND_WQE_DBDE = 0x40,
+ SLI4_TSEND_WQE_IOD = 0x20,
+ SLI4_TSEND_WQE_QOSD = 0x2,
+ SLI4_TSEND_WQE_HLM = 0x10,
+ SLI4_TSEND_WQE_PU_SHFT = 4,
+ SLI4_TSEND_WQE_AR = 0x8,
+ SLI4_TSEND_CT_SHFT = 2,
+ SLI4_TSEND_BS_SHFT = 4,
+ SLI4_TSEND_LEN_LOC_BIT2 = 0x1,
+ SLI4_TSEND_CCPE = 0x80,
+ SLI4_TSEND_APPID_VALID = 0x20,
+ SLI4_TSEND_WQES = 0x80,
+ SLI4_TSEND_XC = 0x20,
+ SLI4_TSEND_EAT = 0x10,
+};
+
+struct sli4_fcp_tsend64_wqe {
+ struct sli4_bde bde;
+ __le32 payload_offset_length;
+ __le32 relative_offset;
+ __le32 dword5;
+ __le16 xri_tag;
+ __le16 rpi;
+ u8 ct_byte;
+ u8 command;
+ u8 class_pu_ar_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 dw10byte0;
+ u8 ll_qd_xbl_hlm_iod_dbde;
+ u8 dw10byte2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le32 fcp_data_transmit_length;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create a general request. */
+enum sli4_gen_req_wqe_flags {
+ SLI4_GEN_REQ64_WQE_XBL = 0x8,
+ SLI4_GEN_REQ64_WQE_DBDE = 0x40,
+ SLI4_GEN_REQ64_WQE_IOD = 0x20,
+ SLI4_GEN_REQ64_WQE_QOSD = 0x2,
+ SLI4_GEN_REQ64_WQE_HLM = 0x10,
+ SLI4_GEN_REQ64_CT_SHFT = 2,
+};
+
+struct sli4_gen_request64_wqe {
+ struct sli4_bde bde;
+ __le32 request_payload_length;
+ __le32 relative_offset;
+ u8 rsvd17;
+ u8 df_ctl;
+ u8 type;
+ u8 r_ctl;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 dw10flags0;
+ u8 dw10flags1;
+ u8 dw10flags2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 remote_n_port_id_dword;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 max_response_payload_length;
+};
+
+/* WQE used to create a send frame request */
+enum sli4_sf_wqe_flags {
+ SLI4_SF_WQE_DBDE = 0x40,
+ SLI4_SF_PU = 0x30,
+ SLI4_SF_CT = 0xc,
+ SLI4_SF_QOSD = 0x2,
+ SLI4_SF_LEN_LOC_BIT1 = 0x80,
+ SLI4_SF_LEN_LOC_BIT2 = 0x1,
+ SLI4_SF_XC = 0x20,
+ SLI4_SF_XBL = 0x8,
+};
+
+struct sli4_send_frame_wqe {
+ struct sli4_bde bde;
+ __le32 frame_length;
+ __le32 fc_header_0_1[2];
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 dw7flags0;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ u8 eof;
+ u8 sof;
+ u8 dw10flags0;
+ u8 dw10flags1;
+ u8 dw10flags2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 fc_header_2_5[4];
+};
+
+/* WQE used to create a transmit sequence */
+enum sli4_seq_wqe_flags {
+ SLI4_SEQ_WQE_DBDE = 0x4000,
+ SLI4_SEQ_WQE_XBL = 0x800,
+ SLI4_SEQ_WQE_SI = 0x4,
+ SLI4_SEQ_WQE_FT = 0x8,
+ SLI4_SEQ_WQE_XO = 0x40,
+ SLI4_SEQ_WQE_LS = 0x80,
+ SLI4_SEQ_WQE_DIF = 0x3,
+ SLI4_SEQ_WQE_BS = 0x70,
+ SLI4_SEQ_WQE_PU = 0x30,
+ SLI4_SEQ_WQE_HLM = 0x1000,
+ SLI4_SEQ_WQE_IOD_SHIFT = 13,
+ SLI4_SEQ_WQE_CT_SHIFT = 2,
+ SLI4_SEQ_WQE_LEN_LOC_SHIFT = 7,
+};
+
+struct sli4_xmit_sequence64_wqe {
+ struct sli4_bde bde;
+ __le32 remote_n_port_id_dword;
+ __le32 relative_offset;
+ u8 dw5flags0;
+ u8 df_ctl;
+ u8 type;
+ u8 r_ctl;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dw7flags0;
+ u8 command;
+ u8 dw7flags1;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ __le16 dw10w0;
+ u8 dw10flags0;
+ u8 ccp;
+ u8 cmd_type_wqec_byte;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le32 sequence_payload_len;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/*
+ * WQE used to unblock the specified XRI and release
+ * it to the SLI Port's free pool.
+ */
+enum sli4_requeue_wqe_flags {
+ SLI4_REQU_XRI_WQE_XC = 0x20,
+ SLI4_REQU_XRI_WQE_QOSD = 0x2,
+};
+
+struct sli4_requeue_xri_wqe {
+ __le32 rsvd0;
+ __le32 rsvd4;
+ __le32 rsvd8;
+ __le32 rsvd12;
+ __le32 rsvd16;
+ __le32 rsvd20;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 rsvd32;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 flags0;
+ __le16 flags1;
+ __le16 flags2;
+ u8 ccp;
+ u8 cmd_type_wqec_byte;
+ u8 rsvd42;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create a BLS response */
+enum sli4_bls_rsp_wqe_flags {
+ SLI4_BLS_RSP_RID = 0xffffff,
+ SLI4_BLS_RSP_WQE_AR = 0x40000000,
+ SLI4_BLS_RSP_WQE_CT_SHFT = 2,
+ SLI4_BLS_RSP_WQE_QOSD = 0x2,
+ SLI4_BLS_RSP_WQE_HLM = 0x10,
+};
+
+struct sli4_xmit_bls_rsp_wqe {
+ __le32 payload_word0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
+ __le32 rsvd12;
+ __le32 local_n_port_id_dword;
+ __le32 remote_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dw8flags0;
+ u8 command;
+ u8 dw8flags1;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd38;
+ u8 dw11flags0;
+ u8 dw11flags1;
+ u8 dw11flags2;
+ u8 ccp;
+ u8 dw12flags0;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le16 temporary_rpi;
+ u8 rsvd50;
+ u8 rsvd51;
+ __le32 rsvd52;
+ __le32 rsvd56;
+ __le32 rsvd60;
+};
+
+enum sli_bls_type {
+ SLI4_SLI_BLS_ACC,
+ SLI4_SLI_BLS_RJT,
+ SLI4_SLI_BLS_MAX
+};
+
+struct sli_bls_payload {
+ enum sli_bls_type type;
+ __le16 ox_id;
+ __le16 rx_id;
+ union {
+ struct {
+ u8 seq_id_validity;
+ u8 seq_id_last;
+ u8 rsvd2;
+ u8 rsvd3;
+ u16 ox_id;
+ u16 rx_id;
+ __le16 low_seq_cnt;
+ __le16 high_seq_cnt;
+ } acc;
+ struct {
+ u8 vendor_unique;
+ u8 reason_explanation;
+ u8 reason_code;
+ u8 rsvd3;
+ } rjt;
+ } u;
+};
+
+/* WQE used to create an ELS response */
+
+enum sli4_els_rsp_flags {
+ SLI4_ELS_SID = 0xffffff,
+ SLI4_ELS_RID = 0xffffff,
+ SLI4_ELS_DBDE = 0x40,
+ SLI4_ELS_XBL = 0x8,
+ SLI4_ELS_IOD = 0x20,
+ SLI4_ELS_QOSD = 0x2,
+ SLI4_ELS_XC = 0x20,
+ SLI4_ELS_CT_OFFSET = 0x2,
+ SLI4_ELS_SP = 0x1000000,
+ SLI4_ELS_HLM = 0x10,
+};
+
+struct sli4_xmit_els_rsp64_wqe {
+ struct sli4_bde els_response_payload;
+ __le32 els_response_payload_length;
+ __le32 sid_dw;
+ __le32 rid_dw;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 ox_id;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ u8 flags4;
+ u8 cmd_type_wqec;
+ u8 rsvd34;
+ __le16 cq_id;
+ __le16 temporary_rpi;
+ __le16 rsvd38;
+ u32 rsvd40;
+ u32 rsvd44;
+ u32 rsvd48;
+};
+
+/* Local Reject Reason Codes */
+enum sli4_fc_local_rej_codes {
+ SLI4_FC_LOCAL_REJECT_UNKNOWN,
+ SLI4_FC_LOCAL_REJECT_MISSING_CONTINUE,
+ SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT,
+ SLI4_FC_LOCAL_REJECT_INTERNAL_ERROR,
+ SLI4_FC_LOCAL_REJECT_INVALID_RPI,
+ SLI4_FC_LOCAL_REJECT_NO_XRI,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_COMMAND,
+ SLI4_FC_LOCAL_REJECT_XCHG_DROPPED,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_FIELD,
+ SLI4_FC_LOCAL_REJECT_RPI_SUSPENDED,
+ SLI4_FC_LOCAL_REJECT_RSVD,
+ SLI4_FC_LOCAL_REJECT_RSVD1,
+ SLI4_FC_LOCAL_REJECT_NO_ABORT_MATCH,
+ SLI4_FC_LOCAL_REJECT_TX_DMA_FAILED,
+ SLI4_FC_LOCAL_REJECT_RX_DMA_FAILED,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_FRAME,
+ SLI4_FC_LOCAL_REJECT_RSVD2,
+ SLI4_FC_LOCAL_REJECT_NO_RESOURCES, /* 0x11 */
+ SLI4_FC_LOCAL_REJECT_FCP_CONF_FAILURE,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_LENGTH,
+ SLI4_FC_LOCAL_REJECT_UNSUPPORTED_FEATURE,
+ SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS,
+ SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED,
+ SLI4_FC_LOCAL_REJECT_RCV_BUFFER_TIMEOUT,
+ SLI4_FC_LOCAL_REJECT_LOOP_OPEN_FAILURE,
+ SLI4_FC_LOCAL_REJECT_RSVD3,
+ SLI4_FC_LOCAL_REJECT_LINK_DOWN,
+ SLI4_FC_LOCAL_REJECT_CORRUPTED_DATA,
+ SLI4_FC_LOCAL_REJECT_CORRUPTED_RPI,
+ SLI4_FC_LOCAL_REJECT_OUTOFORDER_DATA,
+ SLI4_FC_LOCAL_REJECT_OUTOFORDER_ACK,
+ SLI4_FC_LOCAL_REJECT_DUP_FRAME,
+ SLI4_FC_LOCAL_REJECT_LINK_CONTROL_FRAME, /* 0x20 */
+ SLI4_FC_LOCAL_REJECT_BAD_HOST_ADDRESS,
+ SLI4_FC_LOCAL_REJECT_RSVD4,
+ SLI4_FC_LOCAL_REJECT_MISSING_HDR_BUFFER,
+ SLI4_FC_LOCAL_REJECT_MSEQ_CHAIN_CORRUPTED,
+ SLI4_FC_LOCAL_REJECT_ABORTMULT_REQUESTED,
+ SLI4_FC_LOCAL_REJECT_BUFFER_SHORTAGE = 0x28,
+ SLI4_FC_LOCAL_REJECT_RCV_XRIBUF_WAITING,
+ SLI4_FC_LOCAL_REJECT_INVALID_VPI = 0x2e,
+ SLI4_FC_LOCAL_REJECT_NO_FPORT_DETECTED,
+ SLI4_FC_LOCAL_REJECT_MISSING_XRIBUF,
+ SLI4_FC_LOCAL_REJECT_RSVD5,
+ SLI4_FC_LOCAL_REJECT_INVALID_XRI,
+ SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET = 0x40,
+ SLI4_FC_LOCAL_REJECT_MISSING_RELOFFSET,
+ SLI4_FC_LOCAL_REJECT_INSUFF_BUFFERSPACE,
+ SLI4_FC_LOCAL_REJECT_MISSING_SI,
+ SLI4_FC_LOCAL_REJECT_MISSING_ES,
+ SLI4_FC_LOCAL_REJECT_INCOMPLETE_XFER,
+ SLI4_FC_LOCAL_REJECT_SLER_FAILURE,
+ SLI4_FC_LOCAL_REJECT_SLER_CMD_RCV_FAILURE,
+ SLI4_FC_LOCAL_REJECT_SLER_REC_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_REC_SRR_RETRY_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_SRR_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_RSVD6,
+ SLI4_FC_LOCAL_REJECT_SLER_RRQ_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_RRQ_RETRY_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_ABTS_ERR,
+};
+
+enum sli4_async_rcqe_flags {
+ SLI4_RACQE_RQ_EL_INDX = 0xfff,
+ SLI4_RACQE_FCFI = 0x3f,
+ SLI4_RACQE_HDPL = 0x3f,
+ SLI4_RACQE_RQ_ID = 0xffc0,
+};
+
+struct sli4_fc_async_rcqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ __le32 rsvd4;
+ __le16 fcfi_rq_id_word;
+ __le16 data_placement_length;
+ u8 sof_byte;
+ u8 eof_byte;
+ u8 code;
+ u8 hdpl_byte;
+};
+
+struct sli4_fc_async_rcqe_v1 {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ u8 fcfi_byte;
+ u8 rsvd5;
+ __le16 rsvd6;
+ __le16 rq_id;
+ __le16 data_placement_length;
+ u8 sof_byte;
+ u8 eof_byte;
+ u8 code;
+ u8 hdpl_byte;
+};
+
+enum sli4_fc_async_rq_status {
+ SLI4_FC_ASYNC_RQ_SUCCESS = 0x10,
+ SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED,
+ SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED,
+ SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC,
+ SLI4_FC_ASYNC_RQ_DMA_FAILURE,
+};
+
+#define SLI4_RCQE_RQ_EL_INDX 0xfff
+
+struct sli4_fc_coalescing_rcqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ __le32 rsvd4;
+ __le16 rq_id;
+ __le16 seq_placement_length;
+ __le16 rsvd14;
+ u8 code;
+ u8 vld_byte;
+};
+
+#define SLI4_FC_COALESCE_RQ_SUCCESS 0x10
+#define SLI4_FC_COALESCE_RQ_INSUFF_XRI_NEEDED 0x18
+
+enum sli4_optimized_write_cmd_cqe_flags {
+ SLI4_OCQE_RQ_EL_INDX = 0x7f, /* DW0 bits 16:30 */
+ SLI4_OCQE_FCFI = 0x3f, /* DW1 bits 0:6 */
+ SLI4_OCQE_OOX = 1 << 6, /* DW1 bit 15 */
+ SLI4_OCQE_AGXR = 1 << 7, /* DW1 bit 16 */
+ SLI4_OCQE_HDPL = 0x3f, /* DW3 bits 24:29 */
+};
+
+struct sli4_fc_optimized_write_cmd_cqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 w1;
+ u8 flags0;
+ u8 flags1;
+ __le16 xri;
+ __le16 rq_id;
+ __le16 data_placement_length;
+ __le16 rpi;
+ u8 code;
+ u8 hdpl_vld;
+};
+
+#define SLI4_OCQE_XB 0x10
+
+struct sli4_fc_optimized_write_data_cqe {
+ u8 hw_status;
+ u8 status;
+ __le16 xri;
+ __le32 total_data_placed;
+ __le32 extended_status;
+ __le16 rsvd12;
+ u8 code;
+ u8 flags;
+};
+
+struct sli4_fc_xri_aborted_cqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rsvd2;
+ __le32 extended_status;
+ __le16 xri;
+ __le16 remote_xid;
+ __le16 rsvd12;
+ u8 code;
+ u8 flags;
+};
+
+enum sli4_generic_ctx {
+ SLI4_GENERIC_CONTEXT_RPI,
+ SLI4_GENERIC_CONTEXT_VPI,
+ SLI4_GENERIC_CONTEXT_VFI,
+ SLI4_GENERIC_CONTEXT_FCFI,
+};
+
+#define SLI4_GENERIC_CLASS_CLASS_2 0x1
+#define SLI4_GENERIC_CLASS_CLASS_3 0x2
+
+#define SLI4_ELS_REQUEST64_DIR_WRITE 0x0
+#define SLI4_ELS_REQUEST64_DIR_READ 0x1
+
+enum sli4_els_request {
+ SLI4_ELS_REQUEST64_OTHER,
+ SLI4_ELS_REQUEST64_LOGO,
+ SLI4_ELS_REQUEST64_FDISC,
+ SLI4_ELS_REQUEST64_FLOGIN,
+ SLI4_ELS_REQUEST64_PLOGI,
+};
+
+enum sli4_els_cmd_type {
+ SLI4_ELS_REQUEST64_CMD_GEN = 0x08,
+ SLI4_ELS_REQUEST64_CMD_NON_FABRIC = 0x0c,
+ SLI4_ELS_REQUEST64_CMD_FABRIC = 0x0d,
+};
+
+#define SLI_PAGE_SIZE SZ_4K
+
+#define SLI4_BMBX_TIMEOUT_MSEC 30000
+#define SLI4_FW_READY_TIMEOUT_MSEC 30000
+
+#define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
+#define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */
+
+static inline u32
+sli_page_count(size_t bytes, u32 page_size)
+{
+ if (!page_size)
+ return 0;
+
+ return (bytes + (page_size - 1)) >> __ffs(page_size);
+}
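+
+/*
+ * Example (illustrative): for a power-of-two page_size the helper
+ * rounds up, so sli_page_count(SZ_4K, SZ_4K) == 1 and
+ * sli_page_count(SZ_4K + 1, SZ_4K) == 2. The __ffs()-based shift is
+ * only a correct division when page_size is a power of two.
+ */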
+
+/*************************************************************************
+ * SLI-4 mailbox command formats and definitions
+ */
+
+struct sli4_mbox_command_header {
+ u8 resvd0;
+ u8 command;
+ __le16 status; /* Port writes to indicate success/fail */
+};
+
+enum sli4_mbx_cmd_value {
+ SLI4_MBX_CMD_CONFIG_LINK = 0x07,
+ SLI4_MBX_CMD_DUMP = 0x17,
+ SLI4_MBX_CMD_DOWN_LINK = 0x06,
+ SLI4_MBX_CMD_INIT_LINK = 0x05,
+ SLI4_MBX_CMD_INIT_VFI = 0xa3,
+ SLI4_MBX_CMD_INIT_VPI = 0xa4,
+ SLI4_MBX_CMD_POST_XRI = 0xa7,
+ SLI4_MBX_CMD_RELEASE_XRI = 0xac,
+ SLI4_MBX_CMD_READ_CONFIG = 0x0b,
+ SLI4_MBX_CMD_READ_STATUS = 0x0e,
+ SLI4_MBX_CMD_READ_NVPARMS = 0x02,
+ SLI4_MBX_CMD_READ_REV = 0x11,
+ SLI4_MBX_CMD_READ_LNK_STAT = 0x12,
+ SLI4_MBX_CMD_READ_SPARM64 = 0x8d,
+ SLI4_MBX_CMD_READ_TOPOLOGY = 0x95,
+ SLI4_MBX_CMD_REG_FCFI = 0xa0,
+ SLI4_MBX_CMD_REG_FCFI_MRQ = 0xaf,
+ SLI4_MBX_CMD_REG_RPI = 0x93,
+ SLI4_MBX_CMD_REG_RX_RQ = 0xa6,
+ SLI4_MBX_CMD_REG_VFI = 0x9f,
+ SLI4_MBX_CMD_REG_VPI = 0x96,
+ SLI4_MBX_CMD_RQST_FEATURES = 0x9d,
+ SLI4_MBX_CMD_SLI_CONFIG = 0x9b,
+ SLI4_MBX_CMD_UNREG_FCFI = 0xa2,
+ SLI4_MBX_CMD_UNREG_RPI = 0x14,
+ SLI4_MBX_CMD_UNREG_VFI = 0xa1,
+ SLI4_MBX_CMD_UNREG_VPI = 0x97,
+ SLI4_MBX_CMD_WRITE_NVPARMS = 0x03,
+ SLI4_MBX_CMD_CFG_AUTO_XFER_RDY = 0xad,
+};
+
+enum sli4_mbx_status {
+ SLI4_MBX_STATUS_SUCCESS = 0x0000,
+ SLI4_MBX_STATUS_FAILURE = 0x0001,
+ SLI4_MBX_STATUS_RPI_NOT_REG = 0x1400,
+};
+
+/* CONFIG_LINK - configure link-oriented parameters,
+ * such as default N_Port_ID address and various timers
+ */
+enum sli4_cmd_config_link_flags {
+ SLI4_CFG_LINK_BBSCN = 0xf00,
+ SLI4_CFG_LINK_CSCN = 0x1000,
+};
+
+struct sli4_cmd_config_link {
+ struct sli4_mbox_command_header hdr;
+ u8 maxbbc;
+ u8 rsvd5;
+ u8 rsvd6;
+ u8 rsvd7;
+ u8 alpa;
+ __le16 n_port_id;
+ u8 rsvd11;
+ __le32 rsvd12;
+ __le32 e_d_tov;
+ __le32 lp_tov;
+ __le32 r_a_tov;
+ __le32 r_t_tov;
+ __le32 al_tov;
+ __le32 rsvd36;
+ __le32 bbscn_dword;
+};
+
+#define SLI4_DUMP4_TYPE 0xf
+
+#define SLI4_WKI_TAG_SAT_TEM 0x1040
+
+struct sli4_cmd_dump4 {
+ struct sli4_mbox_command_header hdr;
+ __le32 type_dword;
+ __le16 wki_selection;
+ __le16 rsvd10;
+ __le32 rsvd12;
+ __le32 returned_byte_cnt;
+ __le32 resp_data[59];
+};
+
+/* INIT_LINK - initialize the link for a FC port */
+enum sli4_init_link_flags {
+ SLI4_INIT_LINK_F_LOOPBACK = 1 << 0,
+
+ SLI4_INIT_LINK_F_P2P_ONLY = 1 << 1,
+ SLI4_INIT_LINK_F_FCAL_ONLY = 2 << 1,
+ SLI4_INIT_LINK_F_FCAL_FAIL_OVER = 0 << 1,
+ SLI4_INIT_LINK_F_P2P_FAIL_OVER = 1 << 1,
+
+ SLI4_INIT_LINK_F_UNFAIR = 1 << 6,
+ SLI4_INIT_LINK_F_NO_LIRP = 1 << 7,
+ SLI4_INIT_LINK_F_LOOP_VALID_CHK = 1 << 8,
+ SLI4_INIT_LINK_F_NO_LISA = 1 << 9,
+ SLI4_INIT_LINK_F_FAIL_OVER = 1 << 10,
+ SLI4_INIT_LINK_F_FIXED_SPEED = 1 << 11,
+ SLI4_INIT_LINK_F_PICK_HI_ALPA = 1 << 15,
+};
+
+enum sli4_fc_link_speed {
+ SLI4_LINK_SPEED_1G = 1,
+ SLI4_LINK_SPEED_2G,
+ SLI4_LINK_SPEED_AUTO_1_2,
+ SLI4_LINK_SPEED_4G,
+ SLI4_LINK_SPEED_AUTO_4_1,
+ SLI4_LINK_SPEED_AUTO_4_2,
+ SLI4_LINK_SPEED_AUTO_4_2_1,
+ SLI4_LINK_SPEED_8G,
+ SLI4_LINK_SPEED_AUTO_8_1,
+ SLI4_LINK_SPEED_AUTO_8_2,
+ SLI4_LINK_SPEED_AUTO_8_2_1,
+ SLI4_LINK_SPEED_AUTO_8_4,
+ SLI4_LINK_SPEED_AUTO_8_4_1,
+ SLI4_LINK_SPEED_AUTO_8_4_2,
+ SLI4_LINK_SPEED_10G,
+ SLI4_LINK_SPEED_16G,
+ SLI4_LINK_SPEED_AUTO_16_8_4,
+ SLI4_LINK_SPEED_AUTO_16_8,
+ SLI4_LINK_SPEED_32G,
+ SLI4_LINK_SPEED_AUTO_32_16_8,
+ SLI4_LINK_SPEED_AUTO_32_16,
+ SLI4_LINK_SPEED_64G,
+ SLI4_LINK_SPEED_AUTO_64_32_16,
+ SLI4_LINK_SPEED_AUTO_64_32,
+ SLI4_LINK_SPEED_128G,
+ SLI4_LINK_SPEED_AUTO_128_64_32,
+ SLI4_LINK_SPEED_AUTO_128_64,
+};
+
+struct sli4_cmd_init_link {
+ struct sli4_mbox_command_header hdr;
+ __le32 sel_reset_al_pa_dword;
+ __le32 flags0;
+ __le32 link_speed_sel_code;
+};
+
+/* INIT_VFI - initialize the VFI resource */
+enum sli4_init_vfi_flags {
+ SLI4_INIT_VFI_FLAG_VP = 0x1000,
+ SLI4_INIT_VFI_FLAG_VF = 0x2000,
+ SLI4_INIT_VFI_FLAG_VT = 0x4000,
+ SLI4_INIT_VFI_FLAG_VR = 0x8000,
+
+ SLI4_INIT_VFI_VFID = 0x1fff,
+ SLI4_INIT_VFI_PRI = 0xe000,
+
+ SLI4_INIT_VFI_HOP_COUNT = 0xff000000,
+};
+
+struct sli4_cmd_init_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vfi;
+ __le16 flags0_word;
+ __le16 fcfi;
+ __le16 vpi;
+ __le32 vf_id_pri_dword;
+ __le32 hop_cnt_dword;
+};
+
+/* INIT_VPI - initialize the VPI resource */
+struct sli4_cmd_init_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vpi;
+ __le16 vfi;
+};
+
+/* POST_XRI - post XRI resources to the SLI Port */
+enum sli4_post_xri_flags {
+ SLI4_POST_XRI_COUNT = 0xfff,
+ SLI4_POST_XRI_FLAG_ENX = 0x1000,
+ SLI4_POST_XRI_FLAG_DL = 0x2000,
+ SLI4_POST_XRI_FLAG_DI = 0x4000,
+ SLI4_POST_XRI_FLAG_VAL = 0x8000,
+};
+
+struct sli4_cmd_post_xri {
+ struct sli4_mbox_command_header hdr;
+ __le16 xri_base;
+ __le16 xri_count_flags;
+};
+
+/* RELEASE_XRI - Release XRI resources from the SLI Port */
+enum sli4_release_xri_flags {
+ SLI4_RELEASE_XRI_REL_XRI_CNT = 0x1f,
+ SLI4_RELEASE_XRI_COUNT = 0x1f,
+};
+
+struct sli4_cmd_release_xri {
+ struct sli4_mbox_command_header hdr;
+ __le16 rel_xri_count_word;
+ __le16 xri_count_word;
+
+ struct {
+ __le16 xri_tag0;
+ __le16 xri_tag1;
+ } xri_tbl[62];
+};
+
+/* READ_CONFIG - read SLI port configuration parameters */
+struct sli4_cmd_read_config {
+ struct sli4_mbox_command_header hdr;
+};
+
+enum sli4_read_cfg_resp_flags {
+ SLI4_READ_CFG_RESP_RESOURCE_EXT = 0x80000000, /* DW1 */
+ SLI4_READ_CFG_RESP_TOPOLOGY = 0xff000000, /* DW2 */
+};
+
+enum sli4_read_cfg_topo {
+ SLI4_READ_CFG_TOPO_FC = 0x1, /* FC topology unknown */
+ SLI4_READ_CFG_TOPO_NON_FC_AL = 0x2, /* FC point-to-point or fabric */
+ SLI4_READ_CFG_TOPO_FC_AL = 0x3, /* FC-AL topology */
+};
+
+/* Link Module Type */
+enum sli4_read_cfg_lmt {
+ SLI4_LINK_MODULE_TYPE_1GB = 0x0004,
+ SLI4_LINK_MODULE_TYPE_2GB = 0x0008,
+ SLI4_LINK_MODULE_TYPE_4GB = 0x0040,
+ SLI4_LINK_MODULE_TYPE_8GB = 0x0080,
+ SLI4_LINK_MODULE_TYPE_16GB = 0x0200,
+ SLI4_LINK_MODULE_TYPE_32GB = 0x0400,
+ SLI4_LINK_MODULE_TYPE_64GB = 0x0800,
+ SLI4_LINK_MODULE_TYPE_128GB = 0x1000,
+};
+
+struct sli4_rsp_read_config {
+ struct sli4_mbox_command_header hdr;
+ __le32 ext_dword;
+ __le32 topology_dword;
+ __le32 resvd8;
+ __le16 e_d_tov;
+ __le16 resvd14;
+ __le32 resvd16;
+ __le16 r_a_tov;
+ __le16 resvd22;
+ __le32 resvd24;
+ __le32 resvd28;
+ __le16 lmt;
+ __le16 resvd34;
+ __le32 resvd36;
+ __le32 resvd40;
+ __le16 xri_base;
+ __le16 xri_count;
+ __le16 rpi_base;
+ __le16 rpi_count;
+ __le16 vpi_base;
+ __le16 vpi_count;
+ __le16 vfi_base;
+ __le16 vfi_count;
+ __le16 resvd60;
+ __le16 fcfi_count;
+ __le16 rq_count;
+ __le16 eq_count;
+ __le16 wq_count;
+ __le16 cq_count;
+ __le32 pad[45];
+};
+
+/* READ_NVPARMS - read SLI port configuration parameters */
+enum sli4_read_nvparms_flags {
+ SLI4_READ_NVPARAMS_HARD_ALPA = 0xff,
+ SLI4_READ_NVPARAMS_PREFERRED_D_ID = 0xffffff00,
+};
+
+struct sli4_cmd_read_nvparms {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ __le32 resvd8;
+ __le32 resvd12;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ __le32 hard_alpa_d_id;
+};
+
+/* WRITE_NVPARMS - write SLI port configuration parameters */
+struct sli4_cmd_write_nvparms {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ __le32 resvd8;
+ __le32 resvd12;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ __le32 hard_alpa_d_id;
+};
+
+/* READ_REV - read the Port revision levels */
+enum {
+ SLI4_READ_REV_FLAG_SLI_LEVEL = 0xf,
+ SLI4_READ_REV_FLAG_FCOEM = 0x10,
+ SLI4_READ_REV_FLAG_CEEV = 0x60,
+ SLI4_READ_REV_FLAG_VPD = 0x2000,
+
+ SLI4_READ_REV_AVAILABLE_LENGTH = 0xffffff,
+};
+
+struct sli4_cmd_read_rev {
+ struct sli4_mbox_command_header hdr;
+ __le16 resvd0;
+ __le16 flags0_word;
+ __le32 first_hw_rev;
+ __le32 second_hw_rev;
+ __le32 resvd12;
+ __le32 third_hw_rev;
+ u8 fc_ph_low;
+ u8 fc_ph_high;
+ u8 feature_level_low;
+ u8 feature_level_high;
+ __le32 resvd24;
+ __le32 first_fw_id;
+ u8 first_fw_name[16];
+ __le32 second_fw_id;
+ u8 second_fw_name[16];
+ __le32 rsvd18[30];
+ __le32 available_length_dword;
+ struct sli4_dmaaddr hostbuf;
+ __le32 returned_vpd_length;
+ __le32 actual_vpd_length;
+};
+
+/* READ_SPARM64 - read the Port service parameters */
+#define SLI4_READ_SPARM64_WWPN_OFFSET (4 * sizeof(u32))
+#define SLI4_READ_SPARM64_WWNN_OFFSET (6 * sizeof(u32))
+
+struct sli4_cmd_read_sparm64 {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ struct sli4_bde bde_64;
+ __le16 vpi;
+ __le16 resvd22;
+ __le16 port_name_start;
+ __le16 port_name_len;
+ __le16 node_name_start;
+ __le16 node_name_len;
+};
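+
+/*
+ * Illustrative extraction (a sketch; 'sparm' is assumed to point at
+ * the service-parameter buffer described by bde_64 above):
+ *
+ *	u64 wwpn = get_unaligned_be64(sparm + SLI4_READ_SPARM64_WWPN_OFFSET);
+ *	u64 wwnn = get_unaligned_be64(sparm + SLI4_READ_SPARM64_WWNN_OFFSET);
+ */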
+
+/* READ_TOPOLOGY - read the link event information */
+enum sli4_read_topo_e {
+ SLI4_READTOPO_ATTEN_TYPE = 0xff,
+ SLI4_READTOPO_FLAG_IL = 0x100,
+ SLI4_READTOPO_FLAG_PB_RECVD = 0x200,
+
+ SLI4_READTOPO_LINKSTATE_RECV = 0x3,
+ SLI4_READTOPO_LINKSTATE_TRANS = 0xc,
+ SLI4_READTOPO_LINKSTATE_MACHINE = 0xf0,
+ SLI4_READTOPO_LINKSTATE_SPEED = 0xff00,
+ SLI4_READTOPO_LINKSTATE_TF = 0x40000000,
+ SLI4_READTOPO_LINKSTATE_LU = 0x80000000,
+
+ SLI4_READTOPO_SCN_BBSCN = 0xf,
+ SLI4_READTOPO_SCN_CBBSCN = 0xf0,
+
+ SLI4_READTOPO_R_T_TOV = 0x1ff,
+ SLI4_READTOPO_AL_TOV = 0xf000,
+
+ SLI4_READTOPO_PB_FLAG = 0x80,
+
+ SLI4_READTOPO_INIT_N_PORTID = 0xffffff,
+};
+
+#define SLI4_MIN_LOOP_MAP_BYTES 128
+
+struct sli4_cmd_read_topology {
+ struct sli4_mbox_command_header hdr;
+ __le32 event_tag;
+ __le32 dw2_attentype;
+ u8 topology;
+ u8 lip_type;
+ u8 lip_al_ps;
+ u8 al_pa_granted;
+ struct sli4_bde bde_loop_map;
+ __le32 linkdown_state;
+ __le32 currlink_state;
+ u8 max_bbc;
+ u8 init_bbc;
+ u8 scn_flags;
+ u8 rsvd39;
+ __le16 dw10w0_al_rt_tov;
+ __le16 lp_tov;
+ u8 acquired_al_pa;
+ u8 pb_flags;
+ __le16 specified_al_pa;
+ __le32 dw12_init_n_port_id;
+};
+
+enum sli4_read_topo_link {
+ SLI4_READ_TOPOLOGY_LINK_UP = 0x1,
+ SLI4_READ_TOPOLOGY_LINK_DOWN,
+ SLI4_READ_TOPOLOGY_LINK_NO_ALPA,
+};
+
+enum sli4_read_topo {
+ SLI4_READ_TOPO_UNKNOWN = 0x0,
+ SLI4_READ_TOPO_NON_FC_AL,
+ SLI4_READ_TOPO_FC_AL,
+};
+
+enum sli4_read_topo_speed {
+ SLI4_READ_TOPOLOGY_SPEED_NONE = 0x00,
+ SLI4_READ_TOPOLOGY_SPEED_1G = 0x04,
+ SLI4_READ_TOPOLOGY_SPEED_2G = 0x08,
+ SLI4_READ_TOPOLOGY_SPEED_4G = 0x10,
+ SLI4_READ_TOPOLOGY_SPEED_8G = 0x20,
+ SLI4_READ_TOPOLOGY_SPEED_10G = 0x40,
+ SLI4_READ_TOPOLOGY_SPEED_16G = 0x80,
+ SLI4_READ_TOPOLOGY_SPEED_32G = 0x90,
+ SLI4_READ_TOPOLOGY_SPEED_64G = 0xa0,
+ SLI4_READ_TOPOLOGY_SPEED_128G = 0xb0,
+};
+
+/* REG_FCFI - activate a FC Forwarder */
+struct sli4_cmd_reg_fcfi_rq_cfg {
+ u8 r_ctl_mask;
+ u8 r_ctl_match;
+ u8 type_mask;
+ u8 type_match;
+};
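+
+/*
+ * Illustrative filter setup (a sketch; it assumes the usual
+ * mask/match semantics, i.e. a frame is steered to this RQ when
+ * (r_ctl & r_ctl_mask) == r_ctl_match and likewise for type; the
+ * FC_RCTL_/FC_TYPE_ values come from scsi/fc/fc_fs.h):
+ *
+ *	struct sli4_cmd_reg_fcfi_rq_cfg cfg = {
+ *		.r_ctl_mask = 0xff,
+ *		.r_ctl_match = FC_RCTL_DD_UNSOL_CMD,
+ *		.type_mask = 0xff,
+ *		.type_match = FC_TYPE_FCP,
+ *	};
+ */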
+
+enum sli4_regfcfi_tag {
+ SLI4_REGFCFI_VLAN_TAG = 0xfff,
+ SLI4_REGFCFI_VLANTAG_VALID = 0x1000,
+};
+
+#define SLI4_CMD_REG_FCFI_NUM_RQ_CFG 4
+struct sli4_cmd_reg_fcfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 fcf_index;
+ __le16 fcfi;
+ __le16 rqid1;
+ __le16 rqid0;
+ __le16 rqid3;
+ __le16 rqid2;
+ struct sli4_cmd_reg_fcfi_rq_cfg
+ rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ __le32 dw8_vlan;
+};
+
+#define SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG 4
+#define SLI4_CMD_REG_FCFI_MRQ_MAX_NUM_RQ 32
+#define SLI4_CMD_REG_FCFI_SET_FCFI_MODE 0
+#define SLI4_CMD_REG_FCFI_SET_MRQ_MODE 1
+
+enum sli4_reg_fcfi_mrq {
+ SLI4_REGFCFI_MRQ_VLAN_TAG = 0xfff,
+ SLI4_REGFCFI_MRQ_VLANTAG_VALID = 0x1000,
+ SLI4_REGFCFI_MRQ_MODE = 0x2000,
+
+ SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS = 0xff,
+ SLI4_REGFCFI_MRQ_FILTER_BITMASK = 0xf00,
+ SLI4_REGFCFI_MRQ_RQ_SEL_POLICY = 0xf000,
+};
+
+struct sli4_cmd_reg_fcfi_mrq {
+ struct sli4_mbox_command_header hdr;
+ __le16 fcf_index;
+ __le16 fcfi;
+ __le16 rqid1;
+ __le16 rqid0;
+ __le16 rqid3;
+ __le16 rqid2;
+ struct sli4_cmd_reg_fcfi_rq_cfg
+ rq_cfg[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ __le32 dw8_vlan;
+ __le32 dw9_mrqflags;
+};
+
+struct sli4_cmd_rq_cfg {
+ __le16 rq_id;
+ u8 r_ctl_mask;
+ u8 r_ctl_match;
+ u8 type_mask;
+ u8 type_match;
+};
+
+/* REG_RPI - register a Remote Port Indicator */
+enum sli4_reg_rpi {
+ SLI4_REGRPI_REMOTE_N_PORTID = 0xffffff, /* DW2 */
+ SLI4_REGRPI_UPD = 0x1000000,
+ SLI4_REGRPI_ETOW = 0x8000000,
+ SLI4_REGRPI_TERP = 0x20000000,
+ SLI4_REGRPI_CI = 0x80000000,
+};
+
+struct sli4_cmd_reg_rpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 rpi;
+ __le16 rsvd2;
+ __le32 dw2_rportid_flags;
+ struct sli4_bde bde_64;
+ __le16 vpi;
+ __le16 rsvd26;
+};
+
+#define SLI4_REG_RPI_BUF_LEN 0x70
+
+/* REG_VFI - register a Virtual Fabric Indicator */
+enum sli_reg_vfi {
+ SLI4_REGVFI_VP = 0x1000, /* DW1 */
+ SLI4_REGVFI_UPD = 0x2000,
+
+ SLI4_REGVFI_LOCAL_N_PORTID = 0xffffff, /* DW10 */
+};
+
+struct sli4_cmd_reg_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vfi;
+ __le16 dw0w1_flags;
+ __le16 fcfi;
+ __le16 vpi;
+ u8 wwpn[8];
+ struct sli4_bde sparm;
+ __le32 e_d_tov;
+ __le32 r_a_tov;
+ __le32 dw10_lportid_flags;
+};
+
+/* REG_VPI - register a Virtual Port Indicator */
+enum sli4_reg_vpi {
+ SLI4_REGVPI_LOCAL_N_PORTID = 0xffffff,
+ SLI4_REGVPI_UPD = 0x1000000,
+};
+
+struct sli4_cmd_reg_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 dw2_lportid_flags;
+ u8 wwpn[8];
+ __le32 rsvd12;
+ __le16 vpi;
+ __le16 vfi;
+};
+
+/* REQUEST_FEATURES - request / query SLI features */
+enum sli4_req_features_flags {
+ SLI4_REQFEAT_QRY = 0x1, /* Dw1 */
+
+ SLI4_REQFEAT_IAAB = 1 << 0, /* DW2 & DW3 */
+ SLI4_REQFEAT_NPIV = 1 << 1,
+ SLI4_REQFEAT_DIF = 1 << 2,
+ SLI4_REQFEAT_VF = 1 << 3,
+ SLI4_REQFEAT_FCPI = 1 << 4,
+ SLI4_REQFEAT_FCPT = 1 << 5,
+ SLI4_REQFEAT_FCPC = 1 << 6,
+ SLI4_REQFEAT_RSVD = 1 << 7,
+ SLI4_REQFEAT_RQD = 1 << 8,
+ SLI4_REQFEAT_IAAR = 1 << 9,
+ SLI4_REQFEAT_HLM = 1 << 10,
+ SLI4_REQFEAT_PERFH = 1 << 11,
+ SLI4_REQFEAT_RXSEQ = 1 << 12,
+ SLI4_REQFEAT_RXRI = 1 << 13,
+ SLI4_REQFEAT_DCL2 = 1 << 14,
+ SLI4_REQFEAT_RSCO = 1 << 15,
+ SLI4_REQFEAT_MRQP = 1 << 16,
+};
+
+struct sli4_cmd_request_features {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_qry;
+ __le32 cmd;
+ __le32 resp;
+};
+
+/*
+ * SLI_CONFIG - submit a configuration command to Port
+ *
+ * Command is either embedded as part of the payload (embed) or located
+ * in a separate memory buffer (mem)
+ */
+enum sli4_sli_config {
+ SLI4_SLICONF_EMB = 0x1, /* DW1 */
+ SLI4_SLICONF_PMDCMD_SHIFT = 3,
+ SLI4_SLICONF_PMDCMD_MASK = 0xf8,
+ SLI4_SLICONF_PMDCMD_VAL_1 = 8,
+ SLI4_SLICONF_PMDCNT = 0xf8,
+
+ SLI4_SLICONF_PMD_LEN = 0x00ffffff,
+};
+
+struct sli4_cmd_sli_config {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 payload_len;
+ __le32 rsvd12[3];
+ union {
+ u8 embed[58 * sizeof(u32)];
+ struct sli4_bufptr mem;
+ } payload;
+};
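+
+/*
+ * Illustrative check (a sketch): the EMB bit selects between the two
+ * payload forms, e.g.
+ *
+ *	if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
+ *		... sub-command lives in payload.embed ...
+ *	else
+ *		... sub-command lives in the buffer at payload.mem ...
+ */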
+
+/* READ_STATUS - read tx/rx status of a particular port */
+#define SLI4_READSTATUS_CLEAR_COUNTERS 0x1
+
+struct sli4_cmd_read_status {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 rsvd4;
+ __le32 trans_kbyte_cnt;
+ __le32 recv_kbyte_cnt;
+ __le32 trans_frame_cnt;
+ __le32 recv_frame_cnt;
+ __le32 trans_seq_cnt;
+ __le32 recv_seq_cnt;
+ __le32 tot_exchanges_orig;
+ __le32 tot_exchanges_resp;
+ __le32 recv_p_bsy_cnt;
+ __le32 recv_f_bsy_cnt;
+ __le32 no_rq_buf_dropped_frames_cnt;
+ __le32 empty_rq_timeout_cnt;
+ __le32 no_xri_dropped_frames_cnt;
+ __le32 empty_xri_pool_cnt;
+};
+
+/* READ_LNK_STAT - read link status of a particular port */
+enum sli4_read_link_stats_flags {
+ SLI4_READ_LNKSTAT_REC = 1u << 0,
+ SLI4_READ_LNKSTAT_GEC = 1u << 1,
+ SLI4_READ_LNKSTAT_W02OF = 1u << 2,
+ SLI4_READ_LNKSTAT_W03OF = 1u << 3,
+ SLI4_READ_LNKSTAT_W04OF = 1u << 4,
+ SLI4_READ_LNKSTAT_W05OF = 1u << 5,
+ SLI4_READ_LNKSTAT_W06OF = 1u << 6,
+ SLI4_READ_LNKSTAT_W07OF = 1u << 7,
+ SLI4_READ_LNKSTAT_W08OF = 1u << 8,
+ SLI4_READ_LNKSTAT_W09OF = 1u << 9,
+ SLI4_READ_LNKSTAT_W10OF = 1u << 10,
+ SLI4_READ_LNKSTAT_W11OF = 1u << 11,
+ SLI4_READ_LNKSTAT_W12OF = 1u << 12,
+ SLI4_READ_LNKSTAT_W13OF = 1u << 13,
+ SLI4_READ_LNKSTAT_W14OF = 1u << 14,
+ SLI4_READ_LNKSTAT_W15OF = 1u << 15,
+ SLI4_READ_LNKSTAT_W16OF = 1u << 16,
+ SLI4_READ_LNKSTAT_W17OF = 1u << 17,
+ SLI4_READ_LNKSTAT_W18OF = 1u << 18,
+ SLI4_READ_LNKSTAT_W19OF = 1u << 19,
+ SLI4_READ_LNKSTAT_W20OF = 1u << 20,
+ SLI4_READ_LNKSTAT_W21OF = 1u << 21,
+ SLI4_READ_LNKSTAT_CLRC = 1u << 30,
+ SLI4_READ_LNKSTAT_CLOF = 1u << 31,
+};
+
+struct sli4_cmd_read_link_stats {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 linkfail_errcnt;
+ __le32 losssync_errcnt;
+ __le32 losssignal_errcnt;
+ __le32 primseq_errcnt;
+ __le32 inval_txword_errcnt;
+ __le32 crc_errcnt;
+ __le32 primseq_eventtimeout_cnt;
+ __le32 elastic_bufoverrun_errcnt;
+ __le32 arbit_fc_al_timeout_cnt;
+ __le32 adv_rx_buftor_to_buf_credit;
+ __le32 curr_rx_buf_to_buf_credit;
+ __le32 adv_tx_buf_to_buf_credit;
+ __le32 curr_tx_buf_to_buf_credit;
+ __le32 rx_eofa_cnt;
+ __le32 rx_eofdti_cnt;
+ __le32 rx_eofni_cnt;
+ __le32 rx_soff_cnt;
+ __le32 rx_dropped_no_aer_cnt;
+ __le32 rx_dropped_no_avail_rpi_rescnt;
+ __le32 rx_dropped_no_avail_xri_rescnt;
+};
+
+/* Format a WQE with WQ_ID Association performance hint */
+static inline void
+sli_set_wq_id_association(void *entry, u16 q_id)
+{
+ u32 *wqe = entry;
+
+ /*
+ * Set Word 10, bit 0 to zero
+ * Set Word 10, bits 15:1 to the WQ ID
+ */
+ wqe[10] &= ~0xffff;
+ wqe[10] |= q_id << 1;
+}
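+
+/*
+ * Usage sketch (hypothetical caller, for illustration): after a WQE
+ * is formatted and before it is posted, tie it to its work queue so
+ * the port can apply the performance hint:
+ *
+ *	sli_set_wq_id_association(wqe_buf, wq->id);
+ */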
+
+/* UNREG_FCFI - unregister a FCFI */
+struct sli4_cmd_unreg_fcfi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 fcfi;
+ __le16 rsvd6;
+};
+
+/* UNREG_RPI - unregister one or more RPI */
+enum sli4_unreg_rpi {
+ SLI4_UNREG_RPI_DP = 0x2000,
+ SLI4_UNREG_RPI_II_SHIFT = 14,
+ SLI4_UNREG_RPI_II_MASK = 0xc000,
+ SLI4_UNREG_RPI_II_RPI = 0x0000,
+ SLI4_UNREG_RPI_II_VPI = 0x4000,
+ SLI4_UNREG_RPI_II_VFI = 0x8000,
+ SLI4_UNREG_RPI_II_FCFI = 0xc000,
+
+ SLI4_UNREG_RPI_DEST_N_PORTID_MASK = 0x00ffffff,
+};
+
+struct sli4_cmd_unreg_rpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 index;
+ __le16 dw1w1_flags;
+ __le32 dw2_dest_n_portid;
+};
+
+/* UNREG_VFI - unregister one or more VFI */
+enum sli4_unreg_vfi {
+ SLI4_UNREG_VFI_II_SHIFT = 14,
+ SLI4_UNREG_VFI_II_MASK = 0xc000,
+ SLI4_UNREG_VFI_II_VFI = 0x0000,
+ SLI4_UNREG_VFI_II_FCFI = 0xc000,
+};
+
+struct sli4_cmd_unreg_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 index;
+ __le16 dw2_flags;
+};
+
+enum sli4_unreg_type {
+ SLI4_UNREG_TYPE_PORT,
+ SLI4_UNREG_TYPE_DOMAIN,
+ SLI4_UNREG_TYPE_FCF,
+ SLI4_UNREG_TYPE_ALL
+};
+
+/* UNREG_VPI - unregister one or more VPI */
+enum sli4_unreg_vpi {
+ SLI4_UNREG_VPI_II_SHIFT = 14,
+ SLI4_UNREG_VPI_II_MASK = 0xc000,
+ SLI4_UNREG_VPI_II_VPI = 0x0000,
+ SLI4_UNREG_VPI_II_VFI = 0x8000,
+ SLI4_UNREG_VPI_II_FCFI = 0xc000,
+};
+
+struct sli4_cmd_unreg_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 index;
+ __le16 dw2w0_flags;
+};
+
+/* AUTO_XFER_RDY - Configure the auto-generate XFER-RDY feature */
+struct sli4_cmd_config_auto_xfer_rdy {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 max_burst_len;
+};
+
+#define SLI4_CONFIG_AUTO_XFERRDY_BLKSIZE 0xffff
+
+struct sli4_cmd_config_auto_xfer_rdy_hp {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 max_burst_len;
+ __le32 dw3_esoc_flags;
+ __le16 block_size;
+ __le16 rsvd14;
+};
+
+/*************************************************************************
+ * SLI-4 common configuration command formats and definitions
+ */
+
+/*
+ * Subsystem values.
+ */
+enum sli4_subsystem {
+ SLI4_SUBSYSTEM_COMMON = 0x01,
+ SLI4_SUBSYSTEM_LOWLEVEL = 0x0b,
+ SLI4_SUBSYSTEM_FC = 0x0c,
+ SLI4_SUBSYSTEM_DMTF = 0x11,
+};
+
+#define SLI4_OPC_LOWLEVEL_SET_WATCHDOG 0x36
+
+/*
+ * Common opcode (OPC) values.
+ */
+enum sli4_cmn_opcode {
+ SLI4_CMN_FUNCTION_RESET = 0x3d,
+ SLI4_CMN_CREATE_CQ = 0x0c,
+ SLI4_CMN_CREATE_CQ_SET = 0x1d,
+ SLI4_CMN_DESTROY_CQ = 0x36,
+ SLI4_CMN_MODIFY_EQ_DELAY = 0x29,
+ SLI4_CMN_CREATE_EQ = 0x0d,
+ SLI4_CMN_DESTROY_EQ = 0x37,
+ SLI4_CMN_CREATE_MQ_EXT = 0x5a,
+ SLI4_CMN_DESTROY_MQ = 0x35,
+ SLI4_CMN_GET_CNTL_ATTRIBUTES = 0x20,
+ SLI4_CMN_NOP = 0x21,
+ SLI4_CMN_GET_RSC_EXTENT_INFO = 0x9a,
+ SLI4_CMN_GET_SLI4_PARAMS = 0xb5,
+ SLI4_CMN_QUERY_FW_CONFIG = 0x3a,
+ SLI4_CMN_GET_PORT_NAME = 0x4d,
+
+ SLI4_CMN_WRITE_FLASHROM = 0x07,
+ /* TRANSCEIVER Data */
+ SLI4_CMN_READ_TRANS_DATA = 0x49,
+ SLI4_CMN_GET_CNTL_ADDL_ATTRS = 0x79,
+ SLI4_CMN_GET_FUNCTION_CFG = 0xa0,
+ SLI4_CMN_GET_PROFILE_CFG = 0xa4,
+ SLI4_CMN_SET_PROFILE_CFG = 0xa5,
+ SLI4_CMN_GET_PROFILE_LIST = 0xa6,
+ SLI4_CMN_GET_ACTIVE_PROFILE = 0xa7,
+ SLI4_CMN_SET_ACTIVE_PROFILE = 0xa8,
+ SLI4_CMN_READ_OBJECT = 0xab,
+ SLI4_CMN_WRITE_OBJECT = 0xac,
+ SLI4_CMN_DELETE_OBJECT = 0xae,
+ SLI4_CMN_READ_OBJECT_LIST = 0xad,
+ SLI4_CMN_SET_DUMP_LOCATION = 0xb8,
+ SLI4_CMN_SET_FEATURES = 0xbf,
+ SLI4_CMN_GET_RECFG_LINK_INFO = 0xc9,
+ SLI4_CMN_SET_RECNG_LINK_ID = 0xca,
+};
+
+/* DMTF opcode (OPC) values */
+#define DMTF_EXEC_CLP_CMD 0x01
+
+/*
+ * COMMON_FUNCTION_RESET
+ *
+ * Resets the Port, returning it to a power-on state. This configuration
+ * command does not have a payload and should set/expect the lengths to
+ * be zero.
+ */
+struct sli4_rqst_cmn_function_reset {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_function_reset {
+ struct sli4_rsp_hdr hdr;
+};
+
+/*
+ * COMMON_GET_CNTL_ATTRIBUTES
+ *
+ * Query for information about the SLI Port
+ */
+enum sli4_cntrl_attr_flags {
+ SLI4_CNTL_ATTR_PORTNUM = 0x3f,
+ SLI4_CNTL_ATTR_PORTTYPE = 0xc0,
+};
+
+struct sli4_rsp_cmn_get_cntl_attributes {
+ struct sli4_rsp_hdr hdr;
+ u8 version_str[32];
+ u8 manufacturer_name[32];
+ __le32 supported_modes;
+ u8 eprom_version_lo;
+ u8 eprom_version_hi;
+ __le16 rsvd17;
+ __le32 mbx_ds_version;
+ __le32 ep_fw_ds_version;
+ u8 ncsi_version_str[12];
+ __le32 def_extended_timeout;
+ u8 model_number[32];
+ u8 description[64];
+ u8 serial_number[32];
+ u8 ip_version_str[32];
+ u8 fw_version_str[32];
+ u8 bios_version_str[32];
+ u8 redboot_version_str[32];
+ u8 driver_version_str[32];
+ u8 fw_on_flash_version_str[32];
+ __le32 functionalities_supported;
+ __le16 max_cdb_length;
+ u8 asic_revision;
+ u8 generational_guid0;
+ __le32 generational_guid1_12[3];
+ __le16 generational_guid13_14;
+ u8 generational_guid15;
+ u8 hba_port_count;
+ __le16 default_link_down_timeout;
+ u8 iscsi_version_min_max;
+ u8 multifunctional_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 port_num_type_flags;
+ __le32 firmware_post_status;
+ __le32 hba_mtu;
+ u8 iscsi_features;
+ u8 rsvd121[3];
+ __le16 pci_vendor_id;
+ __le16 pci_device_id;
+ __le16 pci_sub_vendor_id;
+ __le16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ __le64 unique_identifier;
+ u8 number_of_netfilters;
+ u8 rsvd122[3];
+};
+
+/*
+ * COMMON_GET_CNTL_ADDL_ATTRIBUTES
+ *
+ * This command queries the controller information from the Flash ROM.
+ */
+struct sli4_rqst_cmn_get_cntl_addl_attributes {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_cntl_addl_attributes {
+ struct sli4_rsp_hdr hdr;
+ __le16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd4;
+ u8 on_die_temperature;
+ u8 rsvd5[3];
+ __le32 driver_advanced_features_supported;
+ __le32 rsvd7[4];
+ char universal_bios_version[32];
+ char x86_bios_version[32];
+ char efi_bios_version[32];
+ char fcode_version[32];
+ char uefi_bios_version[32];
+ char uefi_nic_version[32];
+ char uefi_fcode_version[32];
+ char uefi_iscsi_version[32];
+ char iscsi_x86_bios_version[32];
+ char pxe_x86_bios_version[32];
+ u8 default_wwpn[8];
+ u8 ext_phy_version[32];
+ u8 fc_universal_bios_version[32];
+ u8 fc_x86_bios_version[32];
+ u8 fc_efi_bios_version[32];
+ u8 fc_fcode_version[32];
+ u8 ext_phy_crc_label[8];
+ u8 ipl_file_name[16];
+ u8 rsvd139[72];
+};
+
+/*
+ * COMMON_NOP
+ *
+ * This command does not do anything; it only returns
+ * the payload in the completion.
+ */
+struct sli4_rqst_cmn_nop {
+ struct sli4_rqst_hdr hdr;
+ __le32 context[2];
+};
+
+struct sli4_rsp_cmn_nop {
+ struct sli4_rsp_hdr hdr;
+ __le32 context[2];
+};
+
+struct sli4_rqst_cmn_get_resource_extent_info {
+ struct sli4_rqst_hdr hdr;
+ __le16 resource_type;
+ __le16 rsvd16;
+};
+
+enum sli4_rsc_type {
+ SLI4_RSC_TYPE_VFI = 0x20,
+ SLI4_RSC_TYPE_VPI = 0x21,
+ SLI4_RSC_TYPE_RPI = 0x22,
+ SLI4_RSC_TYPE_XRI = 0x23,
+};
+
+struct sli4_rsp_cmn_get_resource_extent_info {
+ struct sli4_rsp_hdr hdr;
+ __le16 resource_extent_count;
+ __le16 resource_extent_size;
+};
+
+#define SLI4_128BYTE_WQE_SUPPORT 0x02
+
+#define GET_Q_CNT_METHOD(m) \
+ (((m) & SLI4_PARAM_Q_CNT_MTHD_MASK) >> SLI4_PARAM_Q_CNT_MTHD_SHFT)
+#define GET_Q_CREATE_VERSION(v) \
+ (((v) & SLI4_PARAM_QV_MASK) >> SLI4_PARAM_QV_SHIFT)
+
+enum sli4_rsp_get_params_e {
+ /* GENERIC */
+ SLI4_PARAM_Q_CNT_MTHD_SHFT = 24,
+ SLI4_PARAM_Q_CNT_MTHD_MASK = 0xf << 24,
+ SLI4_PARAM_QV_SHIFT = 14,
+ SLI4_PARAM_QV_MASK = 3 << 14,
+
+ /* DW4 */
+ SLI4_PARAM_PROTO_TYPE_MASK = 0xff,
+ /* DW5 */
+ SLI4_PARAM_FT = 1 << 0,
+ SLI4_PARAM_SLI_REV_MASK = 0xf << 4,
+ SLI4_PARAM_SLI_FAM_MASK = 0xf << 8,
+ SLI4_PARAM_IF_TYPE_MASK = 0xf << 12,
+ SLI4_PARAM_SLI_HINT1_MASK = 0xff << 16,
+ SLI4_PARAM_SLI_HINT2_MASK = 0x1f << 24,
+ /* DW6 */
+ SLI4_PARAM_EQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_EQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_EQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW8 */
+ SLI4_PARAM_CQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_CQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_CQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW10 */
+ SLI4_PARAM_MQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_MQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW12 */
+ SLI4_PARAM_WQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_WQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_WQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW14 */
+ SLI4_PARAM_RQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_RQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_RQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW15W1 */
+ SLI4_PARAM_RQ_DB_WINDOW_MASK = 0xf000,
+ /* DW16 */
+ SLI4_PARAM_FC = 1 << 0,
+ SLI4_PARAM_EXT = 1 << 1,
+ SLI4_PARAM_HDRR = 1 << 2,
+ SLI4_PARAM_SGLR = 1 << 3,
+ SLI4_PARAM_FBRR = 1 << 4,
+ SLI4_PARAM_AREG = 1 << 5,
+ SLI4_PARAM_TGT = 1 << 6,
+ SLI4_PARAM_TERP = 1 << 7,
+ SLI4_PARAM_ASSI = 1 << 8,
+ SLI4_PARAM_WCHN = 1 << 9,
+ SLI4_PARAM_TCCA = 1 << 10,
+ SLI4_PARAM_TRTY = 1 << 11,
+ SLI4_PARAM_TRIR = 1 << 12,
+ SLI4_PARAM_PHOFF = 1 << 13,
+ SLI4_PARAM_PHON = 1 << 14,
+ SLI4_PARAM_PHWQ = 1 << 15,
+ SLI4_PARAM_BOUND_4GA = 1 << 16,
+ SLI4_PARAM_RXC = 1 << 17,
+ SLI4_PARAM_HLM = 1 << 18,
+ SLI4_PARAM_IPR = 1 << 19,
+ SLI4_PARAM_RXRI = 1 << 20,
+ SLI4_PARAM_SGLC = 1 << 21,
+ SLI4_PARAM_TIMM = 1 << 22,
+ SLI4_PARAM_TSMM = 1 << 23,
+ SLI4_PARAM_OAS = 1 << 25,
+ SLI4_PARAM_LC = 1 << 26,
+ SLI4_PARAM_AGXF = 1 << 27,
+ SLI4_PARAM_LOOPBACK_MASK = 0xf << 28,
+ /* DW18 */
+ SLI4_PARAM_SGL_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_SGL_PAGE_SZS_MASK = 0xff << 8,
+ SLI4_PARAM_SGL_PP_ALIGN_MASK = 0xff << 16,
+};
+
+struct sli4_rqst_cmn_get_sli4_params {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_sli4_params {
+ struct sli4_rsp_hdr hdr;
+ __le32 dw4_protocol_type;
+ __le32 dw5_sli;
+ __le32 dw6_eq_page_cnt;
+ __le16 eqe_count_mask;
+ __le16 rsvd26;
+ __le32 dw8_cq_page_cnt;
+ __le16 cqe_count_mask;
+ __le16 rsvd34;
+ __le32 dw10_mq_page_cnt;
+ __le16 mqe_count_mask;
+ __le16 rsvd42;
+ __le32 dw12_wq_page_cnt;
+ __le16 wqe_count_mask;
+ __le16 rsvd50;
+ __le32 dw14_rq_page_cnt;
+ __le16 rqe_count_mask;
+ __le16 dw15w1_rq_db_window;
+ __le32 dw16_loopback_scope;
+ __le32 sge_supported_length;
+ __le32 dw18_sgl_page_cnt;
+ __le16 min_rq_buffer_size;
+ __le16 rsvd75;
+ __le32 max_rq_buffer_size;
+ __le16 physical_xri_max;
+ __le16 physical_rpi_max;
+ __le16 physical_vpi_max;
+ __le16 physical_vfi_max;
+ __le32 rsvd88;
+ __le16 frag_num_field_offset;
+ __le16 frag_num_field_size;
+ __le16 sgl_index_field_offset;
+ __le16 sgl_index_field_size;
+ __le32 chain_sge_initial_value_lo;
+ __le32 chain_sge_initial_value_hi;
+};
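+
+/*
+ * Illustrative use of GET_Q_CNT_METHOD/GET_Q_CREATE_VERSION above (a
+ * sketch; it assumes the version and count-method bits sit in the
+ * per-queue page-count dwords of this response):
+ *
+ *	cq_ver = GET_Q_CREATE_VERSION(le32_to_cpu(rsp->dw8_cq_page_cnt));
+ *	method = GET_Q_CNT_METHOD(le32_to_cpu(rsp->dw8_cq_page_cnt));
+ */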
+
+/* Port Types */
+enum sli4_port_types {
+ SLI4_PORT_TYPE_ETH = 0,
+ SLI4_PORT_TYPE_FC = 1,
+};
+
+struct sli4_rqst_cmn_get_port_name {
+ struct sli4_rqst_hdr hdr;
+ u8 port_type;
+ u8 rsvd4[3];
+};
+
+struct sli4_rsp_cmn_get_port_name {
+ struct sli4_rsp_hdr hdr;
+ char port_name[4];
+};
+
+struct sli4_rqst_cmn_write_flashrom {
+ struct sli4_rqst_hdr hdr;
+ __le32 flash_rom_access_opcode;
+ __le32 flash_rom_access_operation_type;
+ __le32 data_buffer_size;
+ __le32 offset;
+ u8 data_buffer[4];
+};
+
+/*
+ * COMMON_READ_TRANSCEIVER_DATA
+ *
+ * This command reads SFF transceiver data (the format is defined
+ * by the SFF-8472 specification).
+ */
+struct sli4_rqst_cmn_read_transceiver_data {
+ struct sli4_rqst_hdr hdr;
+ __le32 page_number;
+ __le32 port;
+};
+
+struct sli4_rsp_cmn_read_transceiver_data {
+ struct sli4_rsp_hdr hdr;
+ __le32 page_number;
+ __le32 port;
+ u8 page_data[128];
+ u8 page_data_2[128];
+};
+
+#define SLI4_REQ_DESIRE_READLEN 0xffffff
+
+struct sli4_rqst_cmn_read_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_read_length_dword;
+ __le32 read_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+#define RSP_COM_READ_OBJ_EOF 0x80000000
+
+struct sli4_rsp_cmn_read_object {
+ struct sli4_rsp_hdr hdr;
+ __le32 actual_read_length;
+ __le32 eof_dword;
+};
+
+enum sli4_rqst_write_object_flags {
+ SLI4_RQ_DES_WRITE_LEN = 0xffffff,
+ SLI4_RQ_DES_WRITE_LEN_NOC = 0x40000000,
+ SLI4_RQ_DES_WRITE_LEN_EOF = 0x80000000,
+};
+
+struct sli4_rqst_cmn_write_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_write_len_dword;
+ __le32 write_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+#define RSP_CHANGE_STATUS 0xff
+
+struct sli4_rsp_cmn_write_object {
+ struct sli4_rsp_hdr hdr;
+ __le32 actual_write_length;
+ __le32 change_status_dword;
+};
+
+struct sli4_rqst_cmn_delete_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ u8 object_name[104];
+};
+
+#define SLI4_RQ_OBJ_LIST_READ_LEN 0xffffff
+
+struct sli4_rqst_cmn_read_object_list {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_read_length_dword;
+ __le32 read_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+enum sli4_rqst_set_dump_flags {
+ SLI4_CMN_SET_DUMP_BUFFER_LEN = 0xffffff,
+ SLI4_CMN_SET_DUMP_FDB = 0x20000000,
+ SLI4_CMN_SET_DUMP_BLP = 0x40000000,
+ SLI4_CMN_SET_DUMP_QRY = 0x80000000,
+};
+
+struct sli4_rqst_cmn_set_dump_location {
+ struct sli4_rqst_hdr hdr;
+ __le32 buffer_length_dword;
+ __le32 buf_addr_low;
+ __le32 buf_addr_high;
+};
+
+struct sli4_rsp_cmn_set_dump_location {
+ struct sli4_rsp_hdr hdr;
+ __le32 buffer_length_dword;
+};
+
+enum sli4_dump_level {
+ SLI4_DUMP_LEVEL_NONE,
+ SLI4_CHIP_LEVEL_DUMP,
+ SLI4_FUNC_DESC_DUMP,
+};
+
+enum sli4_dump_state {
+ SLI4_DUMP_STATE_NONE,
+ SLI4_CHIP_DUMP_STATE_VALID,
+ SLI4_FUNC_DUMP_STATE_VALID,
+};
+
+enum sli4_dump_status {
+ SLI4_DUMP_READY_STATUS_NOT_READY,
+ SLI4_DUMP_READY_STATUS_DD_PRESENT,
+ SLI4_DUMP_READY_STATUS_FDB_PRESENT,
+ SLI4_DUMP_READY_STATUS_SKIP_DUMP,
+ SLI4_DUMP_READY_STATUS_FAILED = -1,
+};
+
+enum sli4_set_features {
+ SLI4_SET_FEATURES_DIF_SEED = 0x01,
+ SLI4_SET_FEATURES_XRI_TIMER = 0x03,
+ SLI4_SET_FEATURES_MAX_PCIE_SPEED = 0x04,
+ SLI4_SET_FEATURES_FCTL_CHECK = 0x05,
+ SLI4_SET_FEATURES_FEC = 0x06,
+ SLI4_SET_FEATURES_PCIE_RECV_DETECT = 0x07,
+ SLI4_SET_FEATURES_DIF_MEMORY_MODE = 0x08,
+ SLI4_SET_FEATURES_DISABLE_SLI_PORT_PAUSE_STATE = 0x09,
+ SLI4_SET_FEATURES_ENABLE_PCIE_OPTIONS = 0x0a,
+ SLI4_SET_FEAT_CFG_AUTO_XFER_RDY_T10PI = 0x0c,
+ SLI4_SET_FEATURES_ENABLE_MULTI_RECEIVE_QUEUE = 0x0d,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT = 0x0f,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK = 0x11,
+};
+
+struct sli4_rqst_cmn_set_features {
+ struct sli4_rqst_hdr hdr;
+ __le32 feature;
+ __le32 param_len;
+ __le32 params[8];
+};
+
+struct sli4_rqst_cmn_set_features_dif_seed {
+ __le16 seed;
+ __le16 rsvd16;
+};
+
+enum sli4_rqst_set_mrq_features {
+ SLI4_RQ_MULTIRQ_ISR = 0x1,
+ SLI4_RQ_MULTIRQ_AUTOGEN_XFER_RDY = 0x2,
+
+ SLI4_RQ_MULTIRQ_NUM_RQS = 0xff,
+ SLI4_RQ_MULTIRQ_RQ_SELECT = 0xf00,
+};
+
+struct sli4_rqst_cmn_set_features_multirq {
+ __le32 auto_gen_xfer_dword;
+ __le32 num_rqs_dword;
+};
+
+enum sli4_rqst_health_check_flags {
+ SLI4_RQ_HEALTH_CHECK_ENABLE = 0x1,
+ SLI4_RQ_HEALTH_CHECK_QUERY = 0x2,
+};
+
+struct sli4_rqst_cmn_set_features_health_check {
+ __le32 health_check_dword;
+};
+
+struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint {
+ __le32 fdt_xfer_hint;
+};
+
+struct sli4_rqst_dmtf_exec_clp_cmd {
+ struct sli4_rqst_hdr hdr;
+ __le32 cmd_buf_length;
+ __le32 resp_buf_length;
+ __le32 cmd_buf_addr_low;
+ __le32 cmd_buf_addr_high;
+ __le32 resp_buf_addr_low;
+ __le32 resp_buf_addr_high;
+};
+
+struct sli4_rsp_dmtf_exec_clp_cmd {
+ struct sli4_rsp_hdr hdr;
+ __le32 rsvd4;
+ __le32 resp_length;
+ __le32 rsvd6;
+ __le32 rsvd7;
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 clp_status;
+ __le32 clp_detailed_status;
+};
+
+#define SLI4_PROTOCOL_FC 0x10
+#define SLI4_PROTOCOL_DEFAULT 0xff
+
+struct sli4_rspource_descriptor_v1 {
+ u8 descriptor_type;
+ u8 descriptor_length;
+ __le16 rsvd16;
+ __le32 type_specific[];
+};
+
+enum sli4_pcie_desc_flags {
+ SLI4_PCIE_DESC_IMM = 0x4000,
+ SLI4_PCIE_DESC_NOSV = 0x8000,
+
+ SLI4_PCIE_DESC_PF_NO = 0x3ff0000,
+
+ SLI4_PCIE_DESC_MISSN_ROLE = 0xff,
+ SLI4_PCIE_DESC_PCHG = 0x8000000,
+ SLI4_PCIE_DESC_SCHG = 0x10000000,
+ SLI4_PCIE_DESC_XCHG = 0x20000000,
+ SLI4_PCIE_DESC_XROM = 0xc0000000
+};
+
+struct sli4_pcie_resource_descriptor_v1 {
+ u8 descriptor_type;
+ u8 descriptor_length;
+ __le16 imm_nosv_dword;
+ __le32 pf_number_dword;
+ __le32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ __le16 number_of_vfs;
+ __le16 rsvd5;
+ __le32 mission_roles_dword;
+ __le32 rsvd7[16];
+};
+
+struct sli4_rqst_cmn_get_function_config {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_function_config {
+ struct sli4_rsp_hdr hdr;
+ __le32 desc_count;
+ __le32 desc[54];
+};
+
+/* Link Config Descriptor for link config functions */
+struct sli4_link_config_descriptor {
+ u8 link_config_id;
+ u8 rsvd1[3];
+ __le32 config_description[8];
+};
+
+#define MAX_LINK_DES 10
+
+struct sli4_rqst_cmn_get_reconfig_link_info {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_reconfig_link_info {
+ struct sli4_rsp_hdr hdr;
+ u8 active_link_config_id;
+ u8 rsvd17;
+ u8 next_link_config_id;
+ u8 rsvd19;
+ __le32 link_configuration_descriptor_count;
+ struct sli4_link_config_descriptor
+ desc[MAX_LINK_DES];
+};
+
+enum sli4_set_reconfig_link_flags {
+ SLI4_SET_RECONFIG_LINKID_NEXT = 0xff,
+ SLI4_SET_RECONFIG_LINKID_FD = 1u << 31,
+};
+
+struct sli4_rqst_cmn_set_reconfig_link_id {
+ struct sli4_rqst_hdr hdr;
+ __le32 dw4_flags;
+};
+
+struct sli4_rsp_cmn_set_reconfig_link_id {
+ struct sli4_rsp_hdr hdr;
+};
+
+struct sli4_rqst_lowlevel_set_watchdog {
+ struct sli4_rqst_hdr hdr;
+ __le16 watchdog_timeout;
+ __le16 rsvd18;
+};
+
+struct sli4_rsp_lowlevel_set_watchdog {
+ struct sli4_rsp_hdr hdr;
+ __le32 rsvd;
+};
+
+/* FC opcode (OPC) values */
+enum sli4_fc_opcodes {
+ SLI4_OPC_WQ_CREATE = 0x1,
+ SLI4_OPC_WQ_DESTROY = 0x2,
+ SLI4_OPC_POST_SGL_PAGES = 0x3,
+ SLI4_OPC_RQ_CREATE = 0x5,
+ SLI4_OPC_RQ_DESTROY = 0x6,
+ SLI4_OPC_READ_FCF_TABLE = 0x8,
+ SLI4_OPC_POST_HDR_TEMPLATES = 0xb,
+ SLI4_OPC_REDISCOVER_FCF = 0x10,
+};
+
+/* Use the default CQ associated with the WQ */
+#define SLI4_CQ_DEFAULT 0xffff
+
+/*
+ * POST_SGL_PAGES
+ *
+ * Register the scatter gather list (SGL) memory and
+ * associate it with an XRI.
+ */
+struct sli4_rqst_post_sgl_pages {
+ struct sli4_rqst_hdr hdr;
+ __le16 xri_start;
+ __le16 xri_count;
+ struct {
+ __le32 page0_low;
+ __le32 page0_high;
+ __le32 page1_low;
+ __le32 page1_high;
+ } page_set[10];
+};
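+
+/*
+ * Illustrative fill (a sketch; it assumes each page_set entry carries
+ * up to two SGL pages backing one XRI, with an unused second page
+ * left zero):
+ *
+ *	req->page_set[i].page0_low = cpu_to_le32(lower_32_bits(phys));
+ *	req->page_set[i].page0_high = cpu_to_le32(upper_32_bits(phys));
+ */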
+
+struct sli4_rsp_post_sgl_pages {
+ struct sli4_rsp_hdr hdr;
+};
+
+struct sli4_rqst_post_hdr_templates {
+ struct sli4_rqst_hdr hdr;
+ __le16 rpi_offset;
+ __le16 page_count;
+ struct sli4_dmaaddr page_descriptor[];
+};
+
+#define SLI4_HDR_TEMPLATE_SIZE 64
+
+enum sli4_io_flags {
+/* The XRI associated with this IO is already active */
+ SLI4_IO_CONTINUATION = 1 << 0,
+/* Automatically generate a good RSP frame */
+ SLI4_IO_AUTO_GOOD_RESPONSE = 1 << 1,
+ SLI4_IO_NO_ABORT = 1 << 2,
+/* Set the DNRX bit because no auto xfer rdy buffer is posted */
+ SLI4_IO_DNRX = 1 << 3,
+};
+
+enum sli4_callback {
+ SLI4_CB_LINK,
+ SLI4_CB_MAX,
+};
+
+enum sli4_link_status {
+ SLI4_LINK_STATUS_UP,
+ SLI4_LINK_STATUS_DOWN,
+ SLI4_LINK_STATUS_NO_ALPA,
+ SLI4_LINK_STATUS_MAX,
+};
+
+enum sli4_link_topology {
+ SLI4_LINK_TOPO_NON_FC_AL = 1,
+ SLI4_LINK_TOPO_FC_AL,
+ SLI4_LINK_TOPO_LOOPBACK_INTERNAL,
+ SLI4_LINK_TOPO_LOOPBACK_EXTERNAL,
+ SLI4_LINK_TOPO_NONE,
+ SLI4_LINK_TOPO_MAX,
+};
+
+enum sli4_link_medium {
+ SLI4_LINK_MEDIUM_ETHERNET,
+ SLI4_LINK_MEDIUM_FC,
+ SLI4_LINK_MEDIUM_MAX,
+};
+/****** Driver specific structures ******/
+
+struct sli4_queue {
+ /* Common to all queue types */
+ struct efc_dma dma;
+ spinlock_t lock; /* Lock to protect the doorbell register
+ * writes and queue reads
+ */
+ u32 index; /* current host entry index */
+ u16 size; /* entry size */
+ u16 length; /* number of entries */
+ u16 n_posted; /* number of entries posted for CQ, EQ */
+ u16 id; /* Port assigned xQ_ID */
+ u8 type; /* queue type ie EQ, CQ, ... */
+ void __iomem *db_regaddr; /* register address for the doorbell */
+ u16 phase; /* For if_type = 6, this value toggles
+ * on each pass through the queue;
+ * a CQE is valid when its valid
+ * bit matches this value
+ */
+ u32 proc_limit; /* limit of CQEs processed per iteration */
+ u32 posted_limit; /* CQEs/EQEs processed before ringing db */
+ u32 max_num_processed;
+ u64 max_process_time;
+ union {
+ u32 r_idx; /* "read" index (MQ only) */
+ u32 flag;
+ } u;
+};
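
The phase field above implements the if_type 6 valid-bit scheme: the port writes a valid bit into every completion entry, and an entry belongs to the current pass only while that bit matches the queue's phase, which the host flips on each wrap. A minimal consumer-side sketch (the helper name and bit position are illustrative assumptions, not part of this patch):

static bool sli4_entry_valid(struct sli4_queue *q, __le32 dw3)
{
 /* illustrative: assume the valid bit lives in bit 31 of dword 3 */
 return ((le32_to_cpu(dw3) >> 31) & 1) == q->phase;
}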
+
+/* Parameters used to populate a WQE */
+struct sli_bls_params {
+ u32 s_id;
+ u32 d_id;
+ u16 ox_id;
+ u16 rx_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u8 payload[12];
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_els_params {
+ u32 s_id;
+ u32 d_id;
+ u16 ox_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u32 xmit_len;
+ u32 rsp_len;
+ u8 timeout;
+ u8 cmd;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+ u32 d_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u32 xmit_len;
+ u32 rsp_len;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_fcp_tgt_params {
+ u32 s_id;
+ u32 d_id;
+ u32 rpi;
+ u32 vpi;
+ u32 offset;
+ u16 ox_id;
+ u16 flags;
+ u8 cs_ctl;
+ u8 timeout;
+ u32 app_id;
+ u32 xmit_len;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli4_link_event {
+ enum sli4_link_status status;
+ enum sli4_link_topology topology;
+ enum sli4_link_medium medium;
+ u32 speed;
+ u8 *loop_map;
+ u32 fc_id;
+};
+
+enum sli4_resource {
+ SLI4_RSRC_VFI,
+ SLI4_RSRC_VPI,
+ SLI4_RSRC_RPI,
+ SLI4_RSRC_XRI,
+ SLI4_RSRC_FCFI,
+ SLI4_RSRC_MAX,
+};
+
+struct sli4_extent {
+ u32 number;
+ u32 size;
+ u32 n_alloc;
+ u32 *base;
+ unsigned long *use_map;
+ u32 map_size;
+};
+
+struct sli4_queue_info {
+ u16 max_qcount[SLI4_QTYPE_MAX];
+ u32 max_qentries[SLI4_QTYPE_MAX];
+ u16 count_mask[SLI4_QTYPE_MAX];
+ u16 count_method[SLI4_QTYPE_MAX];
+ u32 qpage_count[SLI4_QTYPE_MAX];
+};
+
+struct sli4_params {
+ u8 has_extents;
+ u8 auto_reg;
+ u8 auto_xfer_rdy;
+ u8 hdr_template_req;
+ u8 perf_hint;
+ u8 perf_wq_id_association;
+ u8 cq_create_version;
+ u8 mq_create_version;
+ u8 high_login_mode;
+ u8 sgl_pre_registered;
+ u8 sgl_pre_reg_required;
+ u8 t10_dif_inline_capable;
+ u8 t10_dif_separate_capable;
+};
+
+struct sli4 {
+ void *os;
+ struct pci_dev *pci;
+ void __iomem *reg[PCI_STD_NUM_BARS];
+
+ u32 sli_rev;
+ u32 sli_family;
+ u32 if_type;
+
+ u16 asic_type;
+ u16 asic_rev;
+
+ u16 e_d_tov;
+ u16 r_a_tov;
+ struct sli4_queue_info qinfo;
+ u16 link_module_type;
+ u8 rq_batch;
+ u8 port_number;
+ char port_name[2];
+ u16 rq_min_buf_size;
+ u32 rq_max_buf_size;
+ u8 topology;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ u32 fw_rev[2];
+ u8 fw_name[2][16];
+ char ipl_name[16];
+ u32 hw_rev[3];
+ char modeldesc[64];
+ char bios_version_string[32];
+ u32 wqe_size;
+ u32 vpd_length;
+ /*
+ * Tracks the port resources using the extents metaphor. For
+ * devices that don't implement extents (i.e.,
+ * has_extents == FALSE), the code models each resource as
+ * a single large extent.
+ */
+ struct sli4_extent ext[SLI4_RSRC_MAX];
+ u32 features;
+ struct sli4_params params;
+ u32 sge_supported_length;
+ u32 sgl_page_sizes;
+ u32 max_sgl_pages;
+
+ /*
+ * Callback functions
+ */
+ int (*link)(void *ctx, void *event);
+ void *link_arg;
+
+ struct efc_dma bmbx;
+
+ /* Save pointer to physical memory descriptor for non-embedded
+ * SLI_CONFIG commands for BMBX dumping purposes
+ */
+ struct efc_dma *bmbx_non_emb_pmd;
+
+ struct efc_dma vpd_data;
+};
+
+static inline void
+sli_cmd_fill_hdr(struct sli4_rqst_hdr *hdr, u8 opc, u8 sub, u32 ver, __le32 len)
+{
+ hdr->opcode = opc;
+ hdr->subsystem = sub;
+ hdr->dw3_version = cpu_to_le32(ver);
+ hdr->request_length = len;
+}
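
As a hedged usage sketch of sli_cmd_fill_hdr(): a caller points one of the request structures above at the command buffer and fills the header first. SLI4_CMN_GET_FUNCTION_CONFIG and SLI4_SUBSYSTEM_COMMON are assumed constants for illustration; they are not defined in this hunk.

static void example_fill(void *buf)
{
 struct sli4_rqst_cmn_get_function_config *req = buf;

 /* version 0; length counts the payload after the request header */
 sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_GET_FUNCTION_CONFIG,
   SLI4_SUBSYSTEM_COMMON, 0,
   cpu_to_le32(sizeof(*req) - sizeof(req->hdr)));
}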
+
+/* Get / set parameter functions */
+
+static inline u32
+sli_get_max_sge(struct sli4 *sli4)
+{
+ return sli4->sge_supported_length;
+}
+
+static inline u32
+sli_get_max_sgl(struct sli4 *sli4)
+{
+ if (sli4->sgl_page_sizes != 1) {
+ efc_log_err(sli4, "unsupported SGL page sizes %#x\n",
+ sli4->sgl_page_sizes);
+ return 0;
+ }
+
+ return (sli4->max_sgl_pages * SLI_PAGE_SIZE) / sizeof(struct sli4_sge);
+}
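
For a concrete feel for the arithmetic (numbers illustrative): assuming a 4096-byte SLI_PAGE_SIZE and a 16-byte struct sli4_sge, a port reporting max_sgl_pages == 2 yields (2 * 4096) / 16 = 512 SGL entries per IO.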
+
+static inline enum sli4_link_medium
+sli_get_medium(struct sli4 *sli4)
+{
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ return SLI4_LINK_MEDIUM_FC;
+ default:
+ return SLI4_LINK_MEDIUM_MAX;
+ }
+}
+
+static inline u32
+sli_get_lmt(struct sli4 *sli4)
+{
+ return sli4->link_module_type;
+}
+
+static inline int
+sli_set_topology(struct sli4 *sli4, u32 value)
+{
+ int rc = 0;
+
+ switch (value) {
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ sli4->topology = value;
+ break;
+ default:
+ efc_log_err(sli4, "unsupported topology %#x\n", value);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static inline u32
+sli_convert_mask_to_count(u32 method, u32 mask)
+{
+ u32 count = 0;
+
+ if (method) {
+ count = 1 << (31 - __builtin_clz(mask));
+ count *= 16;
+ } else {
+ count = mask;
+ }
+
+ return count;
+}
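
Worked example (values illustrative): with method == 1 and mask == 0x8, the highest set bit is bit 3, so count = (1 << 3) * 16 = 128 queue entries; with method == 0 the mask is already the count, so mask == 256 simply yields 256.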
+
+static inline u32
+sli_reg_read_status(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_STATUS_REGOFF);
+}
+
+static inline int
+sli_fw_error_status(struct sli4 *sli4)
+{
+ return (sli_reg_read_status(sli4) & SLI4_PORT_STATUS_ERR) ? 1 : 0;
+}
+
+static inline u32
+sli_reg_read_err1(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_ERROR1);
+}
+
+static inline u32
+sli_reg_read_err2(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_ERROR2);
+}
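
A hedged sketch of how the three register helpers above might be combined in an error path (the log call is illustrative, not part of this patch):

if (sli_fw_error_status(sli4))
 dev_warn(&sli4->pci->dev, "port error: err1=%#x err2=%#x\n",
   sli_reg_read_err1(sli4), sli_reg_read_err2(sli4));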
+
+static inline int
+sli_fc_rqe_length(struct sli4 *sli4, void *cqe, u32 *len_hdr,
+ u32 *len_data)
+{
+ struct sli4_fc_async_rcqe *rcqe = cqe;
+
+ *len_hdr = *len_data = 0;
+
+ if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ *len_hdr = rcqe->hdpl_byte & SLI4_RACQE_HDPL;
+ *len_data = le16_to_cpu(rcqe->data_placement_length);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static inline u8
+sli_fc_rqe_fcfi(struct sli4 *sli4, void *cqe)
+{
+ u8 code = ((u8 *)cqe)[SLI4_CQE_CODE_OFFSET];
+ u8 fcfi = U8_MAX;
+
+ switch (code) {
+ case SLI4_CQE_CODE_RQ_ASYNC: {
+ struct sli4_fc_async_rcqe *rcqe = cqe;
+
+ fcfi = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_FCFI;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1: {
+ struct sli4_fc_async_rcqe_v1 *rcqev1 = cqe;
+
+ fcfi = rcqev1->fcfi_byte & SLI4_RACQE_FCFI;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: {
+ struct sli4_fc_optimized_write_cmd_cqe *opt_wr = cqe;
+
+ fcfi = opt_wr->flags0 & SLI4_OCQE_FCFI;
+ break;
+ }
+ }
+
+ return fcfi;
+}
+
+/****************************************************************************
+ * Function prototypes
+ */
+int
+sli_cmd_config_link(struct sli4 *sli4, void *buf);
+int
+sli_cmd_down_link(struct sli4 *sli4, void *buf);
+int
+sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki);
+int
+sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf,
+ u32 page_num, struct efc_dma *dma);
+int
+sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_stats,
+ u8 clear_overflow_flags, u8 clear_all_counters);
+int
+sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear);
+int
+sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed,
+ u8 reset_alpa);
+int
+sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi,
+ u16 vpi);
+int
+sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi);
+int
+sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 base, u16 cnt);
+int
+sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri);
+int
+sli_cmd_read_sparm64(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma, u16 vpi);
+int
+sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma);
+int
+sli_cmd_read_nvparms(struct sli4 *sli4, void *buf);
+int
+sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn,
+ u8 *wwnn, u8 hard_alpa, u32 preferred_d_id);
+int
+sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
+ struct sli4_cmd_rq_cfg *rq_cfg);
+int
+sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 index,
+ u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
+ struct sli4_cmd_rq_cfg *rq_cfg);
+int
+sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
+ struct efc_dma *dma, u8 update, u8 enable_t10_pi);
+int
+sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator);
+int
+sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
+ enum sli4_resource which, u32 fc_id);
+int
+sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id,
+ __be64 sli_wwpn, u16 vpi, u16 vfi, bool update);
+int
+sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
+ u16 vfi, u16 fcfi, struct efc_dma dma,
+ u16 vpi, __be64 sli_wwpn, u32 fc_id);
+int
+sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 id, u32 type);
+int
+sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 idx, u32 type);
+int
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context);
+int
+sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf,
+ u16 rtype);
+int
+sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf);
+int
+sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
+ u16 eof, u32 len, u32 offset, char *name, struct efc_dma *dma);
+int
+sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *object_name);
+int
+sli_cmd_common_read_object(struct sli4 *sli4, void *buf,
+ u32 length, u32 offset, char *name, struct efc_dma *dma);
+int
+sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf,
+ struct efc_dma *cmd, struct efc_dma *resp);
+int
+sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf,
+ bool query, bool is_buffer_list, struct efc_dma *dma, u8 fdb);
+int
+sli_cmd_common_set_features(struct sli4 *sli4, void *buf,
+ u32 feature, u32 param_len, void *parameter);
+
+int sli_cqe_mq(struct sli4 *sli4, void *buf);
+int sli_cqe_async(struct sli4 *sli4, void *buf);
+
+int
+sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, void __iomem *r[]);
+void sli_calc_max_qentries(struct sli4 *sli4);
+int sli_init(struct sli4 *sli4);
+int sli_reset(struct sli4 *sli4);
+int sli_fw_reset(struct sli4 *sli4);
+void sli_teardown(struct sli4 *sli4);
+int
+sli_callback(struct sli4 *sli4, enum sli4_callback cb, void *func, void *arg);
+int
+sli_bmbx_command(struct sli4 *sli4);
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+ size_t size, u32 n_entries, u32 align);
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q);
+int
+sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq, u32 num_eq,
+ u32 shift, u32 delay_mult);
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype, struct sli4_queue *q,
+ u32 n_entries, struct sli4_queue *assoc);
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], u32 num_cqs,
+ u32 n_entries, struct sli4_queue *eqs[]);
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype);
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, u32 destroy_queues,
+ u32 free_memory);
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype, u32 *rid,
+ u32 *index);
+int
+sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid);
+int
+sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype);
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id);
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+ enum sli4_qentry *etype, u16 *q_id);
+
+int sli_raise_ue(struct sli4 *sli4, u8 dump);
+int sli_dump_is_ready(struct sli4 *sli4);
+bool sli_reset_required(struct sli4 *sli4);
+bool sli_fw_ready(struct sli4 *sli4);
+
+int
+sli_fc_process_link_attention(struct sli4 *sli4, void *acqe);
+int
+sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
+ u8 *cqe, enum sli4_qentry *etype,
+ u16 *rid);
+u32 sli_fc_response_length(struct sli4 *sli4, u8 *cqe);
+u32 sli_fc_io_length(struct sli4 *sli4, u8 *cqe);
+int sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id);
+u32 sli_fc_ext_status(struct sli4 *sli4, u8 *cqe);
+int
+sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index);
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf,
+ struct efc_dma *qmem, u16 cq_id);
+int sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
+ u32 xri_count, struct efc_dma *page0[], struct efc_dma *page1[],
+ struct efc_dma *dma);
+int
+sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma, u16 rpi, struct efc_dma *payload_dma);
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, u32 n_entries,
+ u32 buffer_size, struct sli4_queue *cq, bool is_hdr);
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, struct sli4_queue *q[],
+ u32 base_cq_id, u32 num, u32 hdr_buf_size, u32 data_buf_size);
+u32 sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi);
+int
+sli_abort_wqe(struct sli4 *sli4, void *buf, enum sli4_abort_type type,
+ bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id);
+
+int
+sli_send_frame_wqe(struct sli4 *sli4, void *buf, u8 sof, u8 eof,
+ u32 *hdr, struct efc_dma *payload, u32 req_len, u8 timeout,
+ u16 xri, u16 req_tag);
+
+int
+sli_xmit_els_rsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *rsp,
+ struct sli_els_params *params);
+
+int
+sli_els_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_els_params *params);
+
+int
+sli_fcp_icmnd64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout);
+
+int
+sli_fcp_iread64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 dif, u8 bs,
+ u8 timeout);
+
+int
+sli_fcp_iwrite64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len,
+ u32 first_burst, u16 xri, u16 tag, u16 cq_id, u32 rpi,
+ u32 rnode_fcid, u8 dif, u8 bs, u8 timeout);
+
+int
+sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params);
+int
+sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 sec_xri, u16 cq_id, u8 dif,
+ u8 bs, struct sli_fcp_tgt_params *params);
+
+int
+sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params);
+
+int
+sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params);
+int
+sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_ct_params *params);
+
+int
+sli_xmit_bls_rsp64_wqe(struct sli4 *sli4, void *buf,
+ struct sli_bls_payload *payload, struct sli_bls_params *params);
+
+int
+sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
+ struct sli_ct_params *params);
+
+int
+sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id);
+void
+sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf, size_t size,
+ u16 timeout);
+
+const char *sli_fc_get_status_string(u32 status);
+
+#endif /* !_SLI4_H */
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
index 19f6d3029658..c9b43f7fce31 100644
--- a/drivers/scsi/esas2r/Kconfig
+++ b/drivers/scsi/esas2r/Kconfig
@@ -2,5 +2,5 @@
config SCSI_ESAS2R
tristate "ATTO Technology's ExpressSAS RAID adapter driver"
depends on PCI && SCSI
- ---help---
+ help
This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
index 4aca3d52c851..dd3437412ffc 100644
--- a/drivers/scsi/esas2r/atioctl.h
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -831,6 +831,7 @@ struct __packed atto_hba_trace {
u32 total_length;
u32 trace_mask;
u8 reserved2[48];
+ u8 contents[];
};
#define ATTO_FUNC_SCSI_PASS_THRU 0x04
@@ -1141,7 +1142,7 @@ struct __packed atto_ioctl_vda_gsv_cmd {
u8 rsp_len;
u8 reserved[7];
- u8 version_info[1];
+ u8 version_info[];
#define ATTO_VDA_VER_UNSUPPORTED 0xFF
};
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 7f43b95f4e94..ed63f7a9ea54 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -996,8 +996,9 @@ void esas2r_adapter_tasklet(unsigned long context);
irqreturn_t esas2r_interrupt(int irq, void *dev_id);
irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
void esas2r_kickoff_timer(struct esas2r_adapter *a);
-int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
-int esas2r_resume(struct pci_dev *pcid);
+
+extern const struct dev_pm_ops esas2r_pm_ops;
+
void esas2r_fw_event_off(struct esas2r_adapter *a);
void esas2r_fw_event_on(struct esas2r_adapter *a);
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
@@ -1225,8 +1226,9 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
/* req_table entry should be NULL at this point - if not, halt */
- if (a->req_table[LOWORD(vrq->scsi.handle)])
+ if (a->req_table[LOWORD(vrq->scsi.handle)]) {
esas2r_bugon();
+ }
/* fill in the table for this handle so we can get back to the
* request.
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
index 1c079f4300a5..ba42536d1e87 100644
--- a/drivers/scsi/esas2r/esas2r_disc.c
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -1031,8 +1031,9 @@ static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = sgc->adapter;
- if (sgc->length > ESAS2R_DISC_BUF_LEN)
+ if (sgc->length > ESAS2R_DISC_BUF_LEN) {
esas2r_bugon();
+ }
*addr = a->uncached_phys
+ (u64)((u8 *)a->disc_buffer - a->uncached);
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index b02ac389e6c6..f910e2553fbb 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -232,7 +232,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
*/
rq->req_stat = RS_PENDING;
if (test_bit(AF_DEGRADED_MODE, &a->flags))
- /* not suppported for now */;
+ /* not supported for now */;
else
build_flash_msg(a, rq);
@@ -1500,7 +1500,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
}
- /* fall through */
+ fallthrough;
case FI_ACT_UP: /* Upload the components */
default:
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index eb7d139ffc00..c1a5ab662dc8 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -412,10 +412,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_disable_chip_interrupts(a);
esas2r_check_adapter(a);
- if (!esas2r_init_adapter_hw(a, true))
+ if (!esas2r_init_adapter_hw(a, true)) {
esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
- else
+ } else {
esas2r_debug("esas2r_init_adapter ok");
+ }
esas2r_claim_interrupts(a);
@@ -640,53 +641,27 @@ void esas2r_kill_adapter(int i)
}
}
-int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused esas2r_suspend(struct device *dev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- u32 device_state;
+ struct Scsi_Host *host = dev_get_drvdata(dev);
struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+ esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
if (!a)
return -ENODEV;
esas2r_adapter_power_down(a, 1);
- device_state = pci_choose_state(pdev, state);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_save_state() called");
- pci_save_state(pdev);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_disable_device() called");
- pci_disable_device(pdev);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_set_power_state() called");
- pci_set_power_state(pdev, device_state);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+ esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
return 0;
}
-int esas2r_resume(struct pci_dev *pdev)
+static int __maybe_unused esas2r_resume(struct device *dev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct Scsi_Host *host = dev_get_drvdata(dev);
struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
- int rez;
-
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_set_power_state(PCI_D0) "
- "called");
- pci_set_power_state(pdev, PCI_D0);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_enable_wake(PCI_D0, 0) "
- "called");
- pci_enable_wake(pdev, PCI_D0, 0);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_restore_state() called");
- pci_restore_state(pdev);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_enable_device() called");
- rez = pci_enable_device(pdev);
- pci_set_master(pdev);
+ int rez = 0;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");
if (!a) {
rez = -ENODEV;
@@ -730,11 +705,13 @@ int esas2r_resume(struct pci_dev *pdev)
}
error_exit:
- esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+ esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
rez);
return rez;
}
+SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
+
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
set_bit(AF_DEGRADED_MODE, &a->flags);
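
For reference, SIMPLE_DEV_PM_OPS() above expands, roughly, to a const struct dev_pm_ops that routes every system-sleep transition through the same callback pair (simplified sketch of the generic macro, not driver code):

const struct dev_pm_ops esas2r_pm_ops = {
 .suspend = esas2r_suspend, .resume = esas2r_resume,
 .freeze = esas2r_suspend, .thaw = esas2r_resume,
 .poweroff = esas2r_suspend, .restore = esas2r_resume,
};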
@@ -1236,7 +1213,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
break;
}
- /* fall through */
+ fallthrough;
case ESAS2R_INIT_MSG_GET_INIT:
if (msg == ESAS2R_INIT_MSG_GET_INIT) {
@@ -1250,7 +1227,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
esas2r_hdebug("FAILED");
}
}
- /* fall through */
+ fallthrough;
default:
rq->req_stat = RS_SUCCESS;
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
index f16d6bcf9bb6..5281d9356327 100644
--- a/drivers/scsi/esas2r/esas2r_int.c
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -688,8 +688,9 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
esas2r_local_reset_adapter(a);
}
- if (!(doorbell & DRBL_FORCE_INT))
+ if (!(doorbell & DRBL_FORCE_INT)) {
esas2r_trace_exit();
+ }
}
void esas2r_force_interrupt(struct esas2r_adapter *a)
@@ -862,10 +863,11 @@ void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
ae.byflags = 0;
ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
- if (pwr_mgt)
+ if (pwr_mgt) {
esas2r_hdebug("*** sending power management AE ***");
- else
+ } else {
esas2r_hdebug("*** sending reset AE ***");
+ }
esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
sizeof(union atto_vda_ae));
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 442c5e70a7b4..e003d923acbf 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -947,10 +947,9 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
break;
}
- memcpy(trc + 1,
+ memcpy(trc->contents,
a->fw_coredump_buff + offset,
len);
-
hi->data_length = len;
} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
memset(a->fw_coredump_buff, 0,
@@ -1510,7 +1509,7 @@ ioctl_done:
}
/* Always copy the buffer back, if only to pick up the status */
- err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+ err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
if (err != 0) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
@@ -1548,11 +1547,10 @@ static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
a->firmware.orig_len = length;
- a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
- (size_t)length,
- (dma_addr_t *)&a->firmware.
- phys,
- GFP_KERNEL);
+ a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)length,
+ (dma_addr_t *)&a->firmware.phys,
+ GFP_KERNEL);
if (!a->firmware.data) {
esas2r_debug("buffer alloc failed!");
@@ -1895,11 +1893,11 @@ int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
if (!a->vda_buffer) {
dma_addr_t dma_addr;
- a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
- (size_t)
- VDA_MAX_BUFFER_SIZE,
- &dma_addr,
- GFP_KERNEL);
+ a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ VDA_MAX_BUFFER_SIZE,
+ &dma_addr,
+ GFP_KERNEL);
a->ppvda_buffer = dma_addr;
}
@@ -2064,11 +2062,10 @@ int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
re_allocate_buffer:
a->fs_api_buffer_size = length;
- a->fs_api_buffer = (u8 *)dma_alloc_coherent(
- &a->pcid->dev,
- (size_t)a->fs_api_buffer_size,
- (dma_addr_t *)&a->ppfs_api_buffer,
- GFP_KERNEL);
+ a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ (dma_addr_t *)&a->ppfs_api_buffer,
+ GFP_KERNEL);
}
}
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index 65fdf22b0ba9..d6c87a0bae09 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -75,7 +75,7 @@ static char event_buffer[EVENT_LOG_BUFF_SIZE];
/* A lock to protect the shared buffer used for formatting messages. */
static DEFINE_SPINLOCK(event_buffer_lock);
-/**
+/*
* translates an esas2r-defined logging event level to a kernel logging level.
*
* @param [in] level the esas2r-defined logging event level to translate
@@ -101,7 +101,12 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
}
}
-/**
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
+/*
* the master logging function. this function will format the message as
* outlined by the formatting string, the input device information and the
* substitution arguments and output the resulting string to the system log.
@@ -170,7 +175,9 @@ static int esas2r_log_master(const long level,
return 0;
}
-/**
+#pragma GCC diagnostic pop
+
+/*
* formats and logs a message to the system log.
*
* @param [in] level the event level of the message
@@ -193,7 +200,7 @@ int esas2r_log(const long level, const char *format, ...)
return retval;
}
-/**
+/*
* formats and logs a message to the system log. this message will include
* device information.
*
@@ -221,7 +228,7 @@ int esas2r_log_dev(const long level,
return retval;
}
-/**
+/*
* formats and logs a message to the system log. this message will include
* device information.
*
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 7b49e2e9fcde..7a4eadad23d7 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -249,7 +249,6 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun =
ESAS2R_DEFAULT_CMD_PER_LUN,
.present = 0,
- .unchecked_isa_dma = 0,
.emulated = 0,
.proc_name = ESAS2R_DRVR_NAME,
.change_queue_depth = scsi_change_queue_depth,
@@ -346,8 +345,7 @@ static struct pci_driver
.id_table = esas2r_pci_table,
.probe = esas2r_probe,
.remove = esas2r_remove,
- .suspend = esas2r_suspend,
- .resume = esas2r_resume,
+ .driver.pm = &esas2r_pm_ops,
};
static int esas2r_probe(struct pci_dev *pcid,
@@ -618,6 +616,7 @@ static const struct file_operations esas2r_proc_fops = {
};
static const struct proc_ops esas2r_proc_ops = {
+ .proc_lseek = default_llseek,
.proc_ioctl = esas2r_proc_ioctl,
#ifdef CONFIG_COMPAT
.proc_compat_ioctl = compat_ptr_ioctl,
@@ -829,7 +828,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -894,15 +893,11 @@ static void complete_task_management_request(struct esas2r_adapter *a,
esas2r_free_request(a, rq);
}
-/**
+/*
* Searches the specified queue for the command to abort.
*
- * @param [in] a
- * @param [in] abort_request
- * @param [in] cmd
- * t
- * @return 0 on failure, 1 if command was not found, 2 if command was found
+ * Return 0 on failure, 1 if command was not found, 2 if command was found
*/
static int esas2r_check_active_queue(struct esas2r_adapter *a,
struct esas2r_request **abort_request,
@@ -993,7 +988,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
scsi_set_resid(cmd, 0);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
@@ -1059,7 +1054,7 @@ check_active_queue:
scsi_set_resid(cmd, 0);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
@@ -1530,7 +1525,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,
rq->cmd->result =
((esas2r_req_status_to_error(rq->req_stat) << 16)
- | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+ | rq->func_rsp.scsi_rsp.scsi_stat);
if (rq->req_stat == RS_UNDERRUN)
scsi_set_resid(rq->cmd,
@@ -1540,7 +1535,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,
scsi_set_resid(rq->cmd, 0);
}
- rq->cmd->scsi_done(rq->cmd);
+ scsi_done(rq->cmd);
esas2r_free_request(a, rq);
}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 89afa31e33cb..64ec6bb84550 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -307,7 +307,7 @@ static void esp_reset_esp(struct esp *esp)
case FASHME:
esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
- /* fallthrough... */
+ fallthrough;
case FAS236:
case PCSCSI:
@@ -647,7 +647,7 @@ static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
ent->sense_ptr = NULL;
}
-/* When a contingent allegiance conditon is created, we force feed a
+/* When a contingent allegiance condition is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer
* to send out the REQUEST_SENSE automatically, but this was difficult
@@ -896,7 +896,7 @@ static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
}
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
- struct scsi_cmnd *cmd, unsigned int result)
+ struct scsi_cmnd *cmd, unsigned char host_byte)
{
struct scsi_device *dev = cmd->device;
int tgt = dev->id;
@@ -905,7 +905,10 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
esp->active_cmd = NULL;
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, dev->hostdata);
- cmd->result = result;
+ cmd->result = 0;
+ set_host_byte(cmd, host_byte);
+ if (host_byte == DID_OK)
+ set_status_byte(cmd, ent->status);
if (ent->eh_done) {
complete(ent->eh_done);
@@ -919,10 +922,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
* saw originally. Also, report that we are providing
* the sense data.
*/
- cmd->result = ((DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (COMMAND_COMPLETE << 8) |
- (SAM_STAT_CHECK_CONDITION << 0));
+ cmd->result = SAM_STAT_CHECK_CONDITION;
ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
if (esp_debug & ESP_DEBUG_AUTOSENSE) {
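
esp_cmd_is_done() now takes just a host byte and composes cmd->result through the midlayer accessors instead of open-coded shifts; roughly:

/* old: cmd->result = compose_result(ent->status, ent->message, DID_OK);
 * new: set_host_byte(cmd, DID_OK);
 *      set_status_byte(cmd, ent->status);
 */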
@@ -936,7 +936,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
}
}
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
@@ -944,12 +944,6 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
esp_maybe_execute_command(esp);
}
-static unsigned int compose_result(unsigned int status, unsigned int message,
- unsigned int driver_code)
-{
- return (status | (message << 8) | (driver_code << 16));
-}
-
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_device *dev = ent->cmd->device;
@@ -958,7 +952,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
scsi_track_queue_full(dev, lp->num_tagged - 1);
}
-static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int esp_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct scsi_device *dev = cmd->device;
struct esp *esp = shost_priv(dev->host);
@@ -971,8 +965,6 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
ent->cmd = cmd;
- cmd->scsi_done = done;
-
spriv = ESP_CMD_PRIV(cmd);
spriv->num_sg = 0;
@@ -1244,7 +1236,7 @@ static int esp_finish_select(struct esp *esp)
* all bets are off.
*/
esp_schedule_reset(esp);
- esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
+ esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
return 0;
}
@@ -1289,7 +1281,7 @@ static int esp_finish_select(struct esp *esp)
esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
scsi_esp_cmd(esp, ESP_CMD_ESEL);
- esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
+ esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
return 1;
}
@@ -1345,7 +1337,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent -= esp->send_cmd_residual;
/*
- * The am53c974 has a DMA 'pecularity'. The doc states:
+ * The am53c974 has a DMA 'peculiarity'. The doc states:
* In some odd byte conditions, one residual byte will
* be left in the SCSI FIFO, and the FIFO Flags will
* never count to '0 '. When this happens, the residual
@@ -1741,7 +1733,7 @@ again:
case ESP_EVENT_DATA_IN:
write = 1;
- /* fallthru */
+ fallthrough;
case ESP_EVENT_DATA_OUT: {
struct esp_cmd_entry *ent = esp->active_cmd;
@@ -1874,10 +1866,7 @@ again:
ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
esp_autosense(esp, ent);
} else {
- esp_cmd_is_done(esp, ent, cmd,
- compose_result(ent->status,
- ent->message,
- DID_OK));
+ esp_cmd_is_done(esp, ent, cmd, DID_OK);
}
} else if (ent->message == DISCONNECT) {
esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
@@ -2047,7 +2036,7 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
esp_unmap_sense(esp, ent);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
}
@@ -2070,7 +2059,7 @@ static void esp_reset_cleanup(struct esp *esp)
list_del(&ent->list);
cmd->result = DID_RESET << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
esp_put_ent(esp, ent);
}
@@ -2544,7 +2533,7 @@ static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
list_del(&ent->list);
cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
esp_put_ent(esp, ent);
@@ -2689,6 +2678,7 @@ struct scsi_host_template scsi_esp_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.skip_settle_delay = 1,
+ .cmd_size = sizeof(struct esp_cmd_priv),
};
EXPORT_SYMBOL(scsi_esp_template);
@@ -2750,9 +2740,6 @@ static struct spi_function_template esp_transport_ops = {
static int __init esp_init(void)
{
- BUILD_BUG_ON(sizeof(struct scsi_pointer) <
- sizeof(struct esp_cmd_priv));
-
esp_transport_template = spi_attach_transport(&esp_transport_ops);
if (!esp_transport_template)
return -ENODEV;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 446a3d18c022..c73760d3cf83 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -262,7 +262,8 @@ struct esp_cmd_priv {
struct scatterlist *cur_sg;
int tot_residue;
};
-#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
+
+#define ESP_CMD_PRIV(cmd) ((struct esp_cmd_priv *)scsi_cmd_priv(cmd))
/* NOTE: this enum is ordered based on chip features! */
enum esp_rev {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 25dae9f0b205..6ec296321ffc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct libfc_cmd_priv),
};
/**
@@ -293,7 +294,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
- u8 flogi_maddr[ETH_ALEN];
+ static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
const struct net_device_ops *ops;
fcoe->netdev = netdev;
@@ -307,7 +308,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
}
/* Do not support for bonding device */
- if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
+ if (netif_is_bond_master(netdev)) {
FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
return -EOPNOTSUPP;
}
@@ -336,7 +337,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* or enter promiscuous mode if not capable of listening
* for multiple unicast MACs.
*/
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_uc_add(netdev, flogi_maddr);
if (fip->spma)
dev_uc_add(netdev, fip->ctl_src_addr);
@@ -442,7 +442,7 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
- u8 flogi_maddr[ETH_ALEN];
+ static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
const struct net_device_ops *ops;
/*
@@ -458,7 +458,6 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
synchronize_net();
/* Delete secondary MAC addresses */
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_uc_del(netdev, flogi_maddr);
if (fip->spma)
dev_uc_del(netdev, fip->ctl_src_addr);
@@ -645,7 +644,7 @@ static int fcoe_lport_config(struct fc_lport *lport)
return 0;
}
-/**
+/*
* fcoe_netdev_features_change - Updates the lport's offload flags based
* on the LLD netdev's FCoE feature flags
*/
@@ -668,7 +667,7 @@ static void fcoe_netdev_features_change(struct fc_lport *lport,
if (netdev->features & NETIF_F_FSO) {
lport->seq_offload = 1;
- lport->lso_max = netdev->gso_max_size;
+ lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE);
FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
lport->lso_max);
} else {
@@ -1435,8 +1434,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return NET_RX_SUCCESS;
err:
- per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
- put_cpu();
+ this_cpu_inc(lport->stats->ErrorFrames);
err2:
kfree_skb(skb);
return NET_RX_DROP;
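
The stats conversions in these hunks all follow one pattern: an explicit get_cpu()/put_cpu() window around a per-CPU pointer becomes a single this_cpu_* operation, which handles preemption safety internally. Before/after sketch:

/* old: stats = per_cpu_ptr(lport->stats, get_cpu());
 *      stats->ErrorFrames++;
 *      put_cpu();
 * new: this_cpu_inc(lport->stats->ErrorFrames);
 */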
@@ -1454,9 +1452,10 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
struct fcoe_percpu_s *fps;
int rc;
- fps = &get_cpu_var(fcoe_percpu);
+ local_lock(&fcoe_percpu.lock);
+ fps = this_cpu_ptr(&fcoe_percpu);
rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
- put_cpu_var(fcoe_percpu);
+ local_unlock(&fcoe_percpu.lock);
return rc;
}
@@ -1475,7 +1474,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct ethhdr *eh;
struct fcoe_crc_eof *cp;
struct sk_buff *skb;
- struct fc_stats *stats;
struct fc_frame_header *fh;
unsigned int hlen; /* header length implies the version */
unsigned int tlen; /* trailer length */
@@ -1490,7 +1488,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
- wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lport->link_up) {
kfree_skb(skb);
@@ -1586,10 +1583,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* send down to lld */
fr_dev(fp) = lport;
@@ -1611,7 +1606,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
- struct fc_stats *stats;
/*
* We only check CRC if no offload is available and if it is
@@ -1641,11 +1635,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->InvalidCRCCount++;
- if (stats->InvalidCRCCount < 5)
+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
- put_cpu();
return -EINVAL;
}
@@ -1658,7 +1649,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
u32 fr_len;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
- struct fc_stats *stats;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_hdr *hp;
@@ -1686,9 +1676,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- stats = per_cpu_ptr(lport->stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
- if (stats->ErrorFrames < 5)
+ struct fc_stats *stats;
+
+ stats = per_cpu_ptr(lport->stats, raw_smp_processor_id());
+ if (READ_ONCE(stats->ErrorFrames) < 5)
printk(KERN_WARNING "fcoe: FCoE version "
"mismatch: The frame has "
"version %x, but the "
@@ -1701,8 +1693,8 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ this_cpu_inc(lport->stats->RxFrames);
+ this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
@@ -1718,13 +1710,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
- put_cpu();
fc_exch_recv(lport, fp);
return;
}
drop:
- stats->ErrorFrames++;
- put_cpu();
+ this_cpu_inc(lport->stats->ErrorFrames);
kfree_skb(skb);
}
@@ -1848,7 +1838,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
- struct fc_stats *stats;
u32 link_possible = 1;
u32 mfs;
int rc = NOTIFY_OK;
@@ -1894,7 +1883,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
mutex_unlock(&fcoe_config_mutex);
fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr));
goto out;
- break;
case NETDEV_FEAT_CHANGE:
fcoe_netdev_features_change(lport, netdev);
break;
@@ -1915,7 +1903,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -1923,11 +1911,9 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->LinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->LinkFailureCount);
fcoe_clean_pending_queue(lport);
- };
+ }
}
out:
return rc;
@@ -2024,12 +2010,12 @@ static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
case FCOE_CTLR_UNUSED:
default:
return -ENOTSUPP;
- };
+ }
}
/**
* fcoe_ctlr_mode() - Switch FIP mode
- * @cdev: The FCoE Controller that is being modified
+ * @ctlr_dev: The FCoE Controller that is being modified
*
* When the FIP mode has been changed we need to update
* the multicast addresses to ensure we get the correct
@@ -2136,9 +2122,7 @@ static bool fcoe_match(struct net_device *netdev)
/**
* fcoe_dcb_create() - Initialize DCB attributes and hooks
- * @netdev: The net_device object of the L2 link that should be queried
- * @port: The fcoe_port to bind FCoE APP priority with
- * @
+ * @fcoe: The new FCoE interface
*/
static void fcoe_dcb_create(struct fcoe_interface *fcoe)
{
@@ -2492,6 +2476,7 @@ static int __init fcoe_init(void)
p = per_cpu_ptr(&fcoe_percpu, cpu);
INIT_WORK(&p->work, fcoe_receive_work);
skb_queue_head_init(&p->fcoe_rx_list);
+ local_lock_init(&p->lock);
}
/* Setup link change notification */
@@ -2584,7 +2569,7 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
/* pre-FIP */
if (is_zero_ether_addr(mac))
fcoe_ctlr_recv_flogi(fip, lport, fp);
- if (!is_zero_ether_addr(mac))
+ else
fcoe_update_src_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
@@ -2609,7 +2594,7 @@ static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fc_lport_logo_resp(seq, fp, lport);
}
-/**
+/*
* fcoe_elsct_send - FCoE specific ELS handler
*
* This does special case handling of FIP encapsulated ELS exchanges for FCoE,
@@ -2774,7 +2759,7 @@ static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
}
/**
- * fcoe_vport_set_symbolic_name() - append vport string to symbolic name
+ * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
* @vport: fc_vport with a new symbolic name string
*
* After generating a new symbolic name string, a new RSPN_ID request is
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 1791a393795d..ddc048069af2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -134,6 +134,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
/**
* fcoe_ctlr_init() - Initialize the FCoE Controller instance
* @fip: The FCoE controller to initialize
+ * @mode: FIP mode to set
*/
void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode)
{
@@ -255,9 +256,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
WARN_ON(!fcf_dev);
new->fcf_dev = NULL;
fcoe_fcf_device_delete(fcf_dev);
- kfree(new);
mutex_unlock(&cdev->lock);
}
+ kfree(new);
}
/**
@@ -336,7 +337,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
printk(KERN_NOTICE "libfcoe: host%d: "
"FIP Fibre-Channel Forwarder MAC %pM deselected\n",
fip->lp->host->host_no, fip->dest_addr);
- memset(fip->dest_addr, 0, ETH_ALEN);
+ eth_zero_addr(fip->dest_addr);
}
if (sel) {
printk(KERN_INFO "libfcoe: host%d: FIP selected "
@@ -449,10 +450,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
switch (fip->mode) {
default:
LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
- /* fall-through */
+ fallthrough;
case FIP_MODE_AUTO:
LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
- /* fall-through */
+ fallthrough;
case FIP_MODE_FABRIC:
case FIP_MODE_NON_FIP:
mutex_unlock(&fip->ctlr_mutex);
@@ -587,6 +588,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it
* @fip: The FCoE controller for the ELS frame
+ * @lport: The local port
* @dtype: The FIP descriptor type for the frame
* @skb: The FCoE ELS frame including FC header but no FCoE headers
* @d_id: The destination port ID.
@@ -771,7 +773,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
fc_fcoe_set_mac(mac, fh->fh_d_id);
fip->update_mac(lport, mac);
}
- /* fall through */
+ fallthrough;
case ELS_LS_RJT:
op = fr_encaps(fp);
if (op)
@@ -822,22 +824,21 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long deadline;
unsigned long sel_time = 0;
struct list_head del_list;
- struct fc_stats *stats;
INIT_LIST_HEAD(&del_list);
- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
-
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
if (fip->sel_fcf == fcf) {
if (time_after(jiffies, deadline)) {
- stats->MissDiscAdvCount++;
+ u64 miss_cnt;
+
+ miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount);
printk(KERN_INFO "libfcoe: host%d: "
"Missing Discovery Advertisement "
"for fab %16.16llx count %lld\n",
fip->lp->host->host_no, fcf->fabric_name,
- stats->MissDiscAdvCount);
+ miss_cnt);
} else if (time_after(next_timer, deadline))
next_timer = deadline;
}
@@ -853,7 +854,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
*/
list_del(&fcf->list);
list_add(&fcf->list, &del_list);
- stats->VLinkFailureCount++;
+ this_cpu_inc(fip->lp->stats->VLinkFailureCount);
} else {
if (time_after(next_timer, deadline))
next_timer = deadline;
@@ -862,7 +863,6 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
- put_cpu();
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
@@ -1140,7 +1140,6 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_desc *desc;
struct fip_encaps *els;
struct fcoe_fcf *sel;
- struct fc_stats *stats;
enum fip_desc_type els_dtype = 0;
u8 els_op;
u8 sub;
@@ -1284,10 +1283,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_dev(fp) = lport;
fr_encaps(fp) = els_dtype;
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->RxFrames++;
- stats->RxWords += skb->len / FIP_BPW;
- put_cpu();
+ this_cpu_inc(lport->stats->RxFrames);
+ this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW);
fc_exch_recv(lport, fp);
return;
@@ -1300,9 +1297,9 @@ drop:
}
/**
- * fcoe_ctlr_recv_els() - Handle an incoming link reset frame
+ * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
* @fip: The FCoE controller that received the frame
- * @fh: The received FIP header
+ * @skb: The received FIP packet
*
* There may be multiple VN_Port descriptors.
* The overall length has already been checked.
@@ -1425,9 +1422,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
ntoh24(vp->fd_fc_id));
if (vn_port && (vn_port == lport)) {
mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->stats,
- get_cpu())->VLinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
}
@@ -1455,8 +1450,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* followed by physical port
*/
mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
@@ -1775,7 +1769,7 @@ unlock:
/**
* fcoe_ctlr_timeout() - FIP timeout handler
- * @arg: The FCoE controller that timed out
+ * @t: Timer context used to obtain the controller reference
*/
static void fcoe_ctlr_timeout(struct timer_list *t)
{
@@ -1887,6 +1881,7 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
/**
* fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response
* @fip: The FCoE controller
+ * @lport: The local port
* @fp: The FC frame to snoop
*
* Snoop potential response to FLOGI or even incoming FLOGI.
@@ -1966,7 +1961,7 @@ EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
*
* Returns: u64 fc world wide name
*/
-u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN],
unsigned int scheme, unsigned int port)
{
u64 wwn;
@@ -2158,7 +2153,7 @@ static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = {
/**
* fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode
- * @fip: The FCoE controller
+ * @lport: The local port
*
* Called with ctlr_mutex held.
*/
@@ -2179,7 +2174,7 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
/**
* fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode
- * @fip: The FCoE controller
+ * @lport: The local port
*
* Called through the local port template for discovery.
* Called without the ctlr_mutex held.
@@ -2195,7 +2190,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
/**
* fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode
- * @fip: The FCoE controller
+ * @lport: The local port
*
* Called through the local port template for discovery.
* Called without the ctlr_mutex held.
@@ -2238,7 +2233,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
fip->probe_tries++;
- wait = prandom_u32() % FIP_VN_PROBE_WAIT;
+ wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
} else
wait = FIP_VN_RLIM_INT;
mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
@@ -2262,7 +2257,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
* fcoe_ctlr_vn_parse - parse probe request or response
* @fip: The FCoE controller
* @skb: incoming packet
- * @rdata: buffer for resulting parsed VN entry plus fcoe_rport
+ * @frport: parsed FCoE rport from the probe request
*
* Returns non-zero error number on error.
* Does not consume the packet.
@@ -2436,7 +2431,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
frport->enode_mac, 0);
break;
}
- /* fall through */
+ fallthrough;
case FIP_ST_VNMP_START:
LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
"restart VN2VN negotiation\n");
@@ -2793,7 +2788,7 @@ drop:
* fcoe_ctlr_vlan_parse - parse vlan discovery request or response
* @fip: The FCoE controller
* @skb: incoming packet
- * @rdata: buffer for resulting parsed VLAN entry plus fcoe_rport
+ * @frport: parsed FCoE rport from the probe request
*
* Returns non-zero error number on error.
* Does not consume the packet.
@@ -2892,7 +2887,6 @@ len_err:
* @fip: The FCoE controller
* @sub: sub-opcode for vlan notification or vn2vn vlan notification
* @dest: The destination Ethernet MAC address
- * @min_len: minimum size of the Ethernet payload to be sent
*/
static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
enum fip_vlan_subcode sub,
@@ -2950,7 +2944,7 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
}
/**
- * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
+ * fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
* @fip: The FCoE controller
* @frport: The newly-parsed FCoE rport from the Discovery Request
*
@@ -2969,9 +2963,8 @@ static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vlan_recv - vlan request receive handler for VN2VN mode.
- * @lport: The local port
- * @fp: The received frame
- *
+ * @fip: The FCoE controller
+ * @skb: The received FIP packet
*/
static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
@@ -3015,9 +3008,8 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
fc_frame_free(fp);
}
-/**
- * fcoe_ctlr_disc_recv - start discovery for VN2VN mode.
- * @fip: The FCoE controller
+/*
+ * fcoe_ctlr_disc_start - start discovery for VN2VN mode.
*
* This sets a flag indicating that remote ports should be created
* and started for the peers we discover. We use the disc_callback
@@ -3133,7 +3125,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_BEACON_INT +
- (prandom_u32() % FIP_VN_BEACON_FUZZ));
+ prandom_u32_max(FIP_VN_BEACON_FUZZ));
}
if (time_before(fip->port_ka_time, next_time))
next_time = fip->port_ka_time;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 2cb7a8c93a15..af658aa38fed 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -312,7 +312,7 @@ static ssize_t store_ctlr_mode(struct device *dev,
default:
LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
return -ENOTSUPP;
- };
+ }
}
static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
@@ -346,7 +346,7 @@ static ssize_t store_ctlr_enabled(struct device *dev,
break;
case FCOE_CTLR_UNUSED:
return -ENOTSUPP;
- };
+ }
rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
if (rc)
@@ -1053,16 +1053,10 @@ EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
int __init fcoe_sysfs_setup(void)
{
- int error;
-
atomic_set(&ctlr_num, 0);
atomic_set(&fcf_num, 0);
- error = bus_register(&fcoe_bus_type);
- if (error)
- return error;
-
- return 0;
+ return bus_register(&fcoe_bus_type);
}
void __exit fcoe_sysfs_teardown(void)
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index a20ddc301c89..62341c6353a7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/crc32.h>
#include <scsi/libfcoe.h>
@@ -182,9 +183,9 @@ void __fcoe_get_lesb(struct fc_lport *lport,
memset(lesb, 0, sizeof(*lesb));
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(lport->stats, cpu);
- lfc += stats->LinkFailureCount;
- vlfc += stats->VLinkFailureCount;
- mdac += stats->MissDiscAdvCount;
+ lfc += READ_ONCE(stats->LinkFailureCount);
+ vlfc += READ_ONCE(stats->VLinkFailureCount);
+ mdac += READ_ONCE(stats->MissDiscAdvCount);
}
lesb->lesb_link_fail = htonl(lfc);
lesb->lesb_vlink_fail = htonl(vlfc);
@@ -382,6 +383,7 @@ EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
/**
* fcoe_check_wait_queue() - Attempt to clear the transmit backlog
* @lport: The local port whose backlog is to be cleared
+ * @skb: The packet, if any, to add to the backlog before flushing
*
* This empties the wait_queue, dequeues the head of the wait_queue queue
* and calls fcoe_start_io() for each packet. If all skb have been
@@ -439,7 +441,7 @@ EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
/**
* fcoe_queue_timer() - The fcoe queue timer
- * @lport: The local port
+ * @t: Timer context used to obtain the FCoE port
*
* Calls fcoe_check_wait_queue on timeout
*/
@@ -672,6 +674,7 @@ static void fcoe_del_netdev_mapping(struct net_device *netdev)
/**
* fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which
* it was created
+ * @netdev: The net device that the FCoE interface is on
*
* Returns : ptr to the fcoe transport that supports this netdev or NULL
* if not found.
@@ -860,7 +863,7 @@ static int fcoe_transport_create(const char *buffer,
int rc = -ENODEV;
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
- enum fip_mode fip_mode = (enum fip_mode)kp->arg;
+ enum fip_mode fip_mode = (enum fip_mode)(uintptr_t)kp->arg;
mutex_lock(&ft_mutex);
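The (enum fip_mode)(uintptr_t)kp->arg double cast above is how the module-parameter slot, a void * carrying a small integer, is narrowed without tripping newer compilers' pointer-to-enum-cast warning: bounce through uintptr_t so the width change is explicit. A self-contained sketch of the idiom; arg_to_mode() is an illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

enum fip_mode { FIP_MODE_AUTO, FIP_MODE_FABRIC, FIP_MODE_VN2VN };

/* A void * parameter slot carrying a small integer: cast to
 * uintptr_t first, then narrow to the enum. */
static enum fip_mode arg_to_mode(void *arg)
{
	return (enum fip_mode)(uintptr_t)arg;
}

int main(void)
{
	void *arg = (void *)(uintptr_t)FIP_MODE_VN2VN;

	printf("fip_mode = %d\n", (int)arg_to_mode(arg));
	return 0;
}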
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 772bdc93930a..444eac9b2466 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -115,6 +115,11 @@ struct fdomain {
struct work_struct work;
};
+static struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline void fdomain_make_bus_idle(struct fdomain *fd)
{
outb(0, fd->base + REG_BCTL);
@@ -202,12 +207,11 @@ static int fdomain_select(struct Scsi_Host *sh, int target)
return 1;
}
-static void fdomain_finish_cmd(struct fdomain *fd, int result)
+static void fdomain_finish_cmd(struct fdomain *fd)
{
outb(0, fd->base + REG_ICTL);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->result = result;
- fd->cur_cmd->scsi_done(fd->cur_cmd);
+ scsi_done(fd->cur_cmd);
fd->cur_cmd = NULL;
}
@@ -264,19 +268,21 @@ static void fdomain_work(struct work_struct *work)
struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host,
hostdata);
struct scsi_cmnd *cmd = fd->cur_cmd;
+ struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
unsigned long flags;
int status;
int done = 0;
spin_lock_irqsave(sh->host_lock, flags);
- if (cmd->SCp.phase & in_arbitration) {
+ if (scsi_pointer->phase & in_arbitration) {
status = inb(fd->base + REG_ASTAT);
if (!(status & ASTAT_ARB)) {
- fdomain_finish_cmd(fd, DID_BUS_BUSY << 16);
+ set_host_byte(cmd, DID_BUS_BUSY);
+ fdomain_finish_cmd(fd);
goto out;
}
- cmd->SCp.phase = in_selection;
+ scsi_pointer->phase = in_selection;
outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL);
outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
@@ -285,85 +291,87 @@ static void fdomain_work(struct work_struct *work)
/* Stop arbitration and enable parity */
outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
goto out;
- } else if (cmd->SCp.phase & in_selection) {
+ } else if (scsi_pointer->phase & in_selection) {
status = inb(fd->base + REG_BSTAT);
if (!(status & BSTAT_BSY)) {
/* Try again, for slow devices */
if (fdomain_select(cmd->device->host, scmd_id(cmd))) {
- fdomain_finish_cmd(fd, DID_NO_CONNECT << 16);
+ set_host_byte(cmd, DID_NO_CONNECT);
+ fdomain_finish_cmd(fd);
goto out;
}
/* Stop arbitration and enable parity */
outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
}
- cmd->SCp.phase = in_other;
+ scsi_pointer->phase = in_other;
outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL);
outb(BCTL_BUSEN, fd->base + REG_BCTL);
goto out;
}
- /* cur_cmd->SCp.phase == in_other: this is the body of the routine */
+ /* fdomain_scsi_pointer(cur_cmd)->phase == in_other: this is the body of the routine */
status = inb(fd->base + REG_BSTAT);
if (status & BSTAT_REQ) {
switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
case BSTAT_CMD: /* COMMAND OUT */
- outb(cmd->cmnd[cmd->SCp.sent_command++],
+ outb(cmd->cmnd[scsi_pointer->sent_command++],
fd->base + REG_SCSI_DATA);
break;
case 0: /* DATA OUT -- tmc18c50/tmc18c30 only */
- if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
- cmd->SCp.have_data_in = -1;
+ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+ scsi_pointer->have_data_in = -1;
outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
PARITY_MASK, fd->base + REG_ACTL);
}
break;
case BSTAT_IO: /* DATA IN -- tmc18c50/tmc18c30 only */
- if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
- cmd->SCp.have_data_in = 1;
+ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+ scsi_pointer->have_data_in = 1;
outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
fd->base + REG_ACTL);
}
break;
case BSTAT_CMD | BSTAT_IO: /* STATUS IN */
- cmd->SCp.Status = inb(fd->base + REG_SCSI_DATA);
+ scsi_pointer->Status = inb(fd->base + REG_SCSI_DATA);
break;
case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */
outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
break;
case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
- cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
- if (!cmd->SCp.Message)
+ scsi_pointer->Message = inb(fd->base + REG_SCSI_DATA);
+ if (scsi_pointer->Message == COMMAND_COMPLETE)
++done;
break;
}
}
- if (fd->chip == tmc1800 && !cmd->SCp.have_data_in &&
- cmd->SCp.sent_command >= cmd->cmd_len) {
+ if (fd->chip == tmc1800 && !scsi_pointer->have_data_in &&
+ scsi_pointer->sent_command >= cmd->cmd_len) {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd->SCp.have_data_in = -1;
+ scsi_pointer->have_data_in = -1;
outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
PARITY_MASK, fd->base + REG_ACTL);
} else {
- cmd->SCp.have_data_in = 1;
+ scsi_pointer->have_data_in = 1;
outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
fd->base + REG_ACTL);
}
}
- if (cmd->SCp.have_data_in == -1) /* DATA OUT */
+ if (scsi_pointer->have_data_in == -1) /* DATA OUT */
fdomain_write_data(cmd);
- if (cmd->SCp.have_data_in == 1) /* DATA IN */
+ if (scsi_pointer->have_data_in == 1) /* DATA IN */
fdomain_read_data(cmd);
if (done) {
- fdomain_finish_cmd(fd, (cmd->SCp.Status & 0xff) |
- ((cmd->SCp.Message & 0xff) << 8) |
- (DID_OK << 16));
+ set_status_byte(cmd, scsi_pointer->Status);
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ fdomain_finish_cmd(fd);
} else {
- if (cmd->SCp.phase & disconnect) {
+ if (scsi_pointer->phase & disconnect) {
outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
fd->base + REG_ICTL);
outb(0, fd->base + REG_BCTL);
@@ -396,14 +404,15 @@ static irqreturn_t fdomain_irq(int irq, void *dev_id)
static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
struct fdomain *fd = shost_priv(cmd->device->host);
unsigned long flags;
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
- cmd->SCp.have_data_in = 0;
- cmd->SCp.sent_command = 0;
- cmd->SCp.phase = in_arbitration;
+ scsi_pointer->Status = 0;
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = 0;
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->phase = in_arbitration;
scsi_set_resid(cmd, scsi_bufflen(cmd));
spin_lock_irqsave(sh->host_lock, flags);
@@ -438,11 +447,11 @@ static int fdomain_abort(struct scsi_cmnd *cmd)
spin_lock_irqsave(sh->host_lock, flags);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->SCp.phase |= aborted;
- fd->cur_cmd->result = DID_ABORT << 16;
+ fdomain_scsi_pointer(fd->cur_cmd)->phase |= aborted;
/* Aborts are not done well. . . */
- fdomain_finish_cmd(fd, DID_ABORT << 16);
+ set_host_byte(fd->cur_cmd, DID_ABORT);
+ fdomain_finish_cmd(fd);
spin_unlock_irqrestore(sh->host_lock, flags);
return SUCCESS;
}
@@ -499,6 +508,7 @@ static struct scsi_host_template fdomain_template = {
.this_id = 7,
.sg_tablesize = 64,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
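The fdomain conversion above follows the common midlayer pattern: per-command driver state moves out of the shared cmd->SCp scratchpad into private data that the SCSI core allocates alongside each command once the host template sets .cmd_size, with scsi_cmd_priv() returning the region right behind the scsi_cmnd. A toy model of that contract, using illustrative types rather than the real midlayer ones:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the .cmd_size contract: the command and the driver's
 * private area are one allocation, and the private pointer is simply
 * "just past the command". */
struct toy_scsi_cmnd {
	int tag;
	/* driver-private data follows immediately */
};

struct toy_cmd_priv {
	int phase;
	int sent_command;
};

static struct toy_cmd_priv *toy_cmd_priv(struct toy_scsi_cmnd *cmd)
{
	return (struct toy_cmd_priv *)(cmd + 1);
}

int main(void)
{
	struct toy_scsi_cmnd *cmd =
		calloc(1, sizeof(*cmd) + sizeof(struct toy_cmd_priv));
	struct toy_cmd_priv *priv = toy_cmd_priv(cmd);

	priv->phase = 1;	/* e.g. in_arbitration */
	printf("priv at offset %zu, phase %d\n",
	       (size_t)((char *)priv - (char *)cmd), priv->phase);
	free(cmd);
	return 0;
}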
diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h
index 6f63fc6b0d12..93afcee207ae 100644
--- a/drivers/scsi/fdomain.h
+++ b/drivers/scsi/fdomain.h
@@ -103,7 +103,7 @@ enum {
#define REG_FIFO_COUNT 14 /* R: FIFO Data Count */
#ifdef CONFIG_PM_SLEEP
-static const struct dev_pm_ops fdomain_pm_ops;
+static const struct dev_pm_ops __maybe_unused fdomain_pm_ops;
#define FDOMAIN_PM_OPS (&fdomain_pm_ops)
#else
#define FDOMAIN_PM_OPS NULL
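Likewise, the fdomain.c hunks above stop open-coding cmd->result as (DID_xxx << 16) | status and use the byte-wise helpers instead. Toy versions of set_host_byte()/set_status_byte() showing the packed-word positions they poke; these are sketches of the idea, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define DID_OK				0x00
#define SAM_STAT_CHECK_CONDITION	0x02

/* Host byte lives at bits 23:16 of the packed result word, the
 * SCSI status byte at bits 7:0. */
static void set_host_byte(uint32_t *result, uint8_t host)
{
	*result = (*result & 0xff00ffffu) | ((uint32_t)host << 16);
}

static void set_status_byte(uint32_t *result, uint8_t status)
{
	*result = (*result & 0xffffff00u) | status;
}

int main(void)
{
	uint32_t result = 0;

	set_status_byte(&result, SAM_STAT_CHECK_CONDITION);
	set_host_byte(&result, DID_OK);
	printf("result = 0x%08x\n", result);
	return 0;
}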
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
index f2da4fa382e8..2b4280a43a53 100644
--- a/drivers/scsi/fdomain_isa.c
+++ b/drivers/scsi/fdomain_isa.c
@@ -111,12 +111,11 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev)
base = readb(p + sig->base_offset) +
(readb(p + sig->base_offset + 1) << 8);
iounmap(p);
- if (base)
+ if (base) {
dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n",
bios_base, base);
- else
+ } else { /* no I/O base in BIOS area */
dev_info(dev, "BIOS at 0x%lx\n", bios_base);
- if (!base) { /* no I/O base in BIOS area */
/* save BIOS signature for later use in port probing */
saved_sig = sig;
return 0;
@@ -176,7 +175,7 @@ static int fdomain_isa_param_match(struct device *dev, unsigned int ndev)
return 1;
}
-static int fdomain_isa_remove(struct device *dev, unsigned int ndev)
+static void fdomain_isa_remove(struct device *dev, unsigned int ndev)
{
struct Scsi_Host *sh = dev_get_drvdata(dev);
int base = sh->io_port;
@@ -184,7 +183,6 @@ static int fdomain_isa_remove(struct device *dev, unsigned int ndev)
fdomain_destroy(sh);
release_region(base, FDOMAIN_REGION_SIZE);
dev_set_drvdata(dev, NULL);
- return 0;
}
static struct isa_driver fdomain_isa_driver = {
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
index d1225cf6320e..0eb4ba277264 100644
--- a/drivers/scsi/fnic/cq_desc.h
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
index a9fa26f82ddd..b6113291cf68 100644
--- a/drivers/scsi/fnic/cq_enet_desc.h
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
index 501660cfe228..4d94329c8ef5 100644
--- a/drivers/scsi/fnic/cq_exch_desc.h
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_EXCH_DESC_H_
#define _CQ_EXCH_DESC_H_
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
index 12d770d885c5..54a0b2ba8f6f 100644
--- a/drivers/scsi/fnic/fcpio.h
+++ b/drivers/scsi/fnic/fcpio.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FCPIO_H_
#define _FCPIO_H_
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 477513dc23b7..d82de34f6fd7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_H_
#define _FNIC_H_
@@ -39,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.47"
+#define DRV_VERSION "1.6.0.54"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -89,15 +77,28 @@
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
/*
- * Usage of the scsi_cmnd scratchpad.
+ * fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
*/
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
-#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
-#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
-#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
-#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
+struct fnic_cmd_priv {
+ struct fnic_io_req *io_req;
+ enum fnic_ioreq_state state;
+ u32 flags;
+ u16 abts_status;
+ u16 lr_status;
+};
+
+static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
+{
+ struct fnic_cmd_priv *fcmd = fnic_priv(cmd);
+
+ return ((u64)fcmd->flags << 32) | fcmd->state;
+}
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
@@ -245,6 +246,7 @@ struct fnic {
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
+ u32 link_events:1; /* set when we get any link event*/
struct completion *remove_wait; /* device remove thread blocks */
@@ -321,7 +323,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
-extern struct device_attribute *fnic_attrs[];
+extern const struct attribute_group *fnic_host_groups[];
void fnic_clear_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode(struct fnic *fnic);
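fnic_flags_and_state() above exists purely for the FNIC_TRACE call sites: the old macros were combined by hand as ((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc), and the helper packs the new struct fields the same way so one trace argument carries both. A stand-alone sketch of the packing and its inverse:

#include <stdint.h>
#include <stdio.h>

/* Flags in the high half of a u64, state in the low half. */
static uint64_t pack_flags_and_state(uint32_t flags, uint32_t state)
{
	return ((uint64_t)flags << 32) | state;
}

int main(void)
{
	uint64_t v = pack_flags_and_state(0x200 /* flags */, 3 /* state */);

	printf("flags=0x%x state=%u\n",
	       (uint32_t)(v >> 32), (uint32_t)v);
	return 0;
}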
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index aea0c3becfd4..a61e0c5e6506 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
@@ -48,9 +36,18 @@ static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
-struct device_attribute *fnic_attrs[] = {
- &dev_attr_fnic_state,
- &dev_attr_drv_version,
- &dev_attr_link_state,
+static struct attribute *fnic_host_attrs[] = {
+ &dev_attr_fnic_state.attr,
+ &dev_attr_drv_version.attr,
+ &dev_attr_link_state.attr,
NULL,
};
+
+static const struct attribute_group fnic_host_attr_group = {
+ .attrs = fnic_host_attrs
+};
+
+const struct attribute_group *fnic_host_groups[] = {
+ &fnic_host_attr_group,
+ NULL
+};
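The fnic_attrs.c change follows the tree-wide shost_attrs to shost_groups conversion: a flat NULL-terminated array of device_attribute pointers becomes a NULL-terminated array of groups, each wrapping a NULL-terminated attribute list. A toy walk over that two-level shape; the struct names are illustrative, not the kernel's:

#include <stdio.h>

struct toy_attribute {
	const char *name;
};

struct toy_attribute_group {
	struct toy_attribute **attrs;
};

static struct toy_attribute attr_state = { "fnic_state" };
static struct toy_attribute attr_version = { "drv_version" };

static struct toy_attribute *host_attrs[] = {
	&attr_state,
	&attr_version,
	NULL,
};

static struct toy_attribute_group host_group = { host_attrs };

static struct toy_attribute_group *host_groups[] = {
	&host_group,
	NULL,
};

int main(void)
{
	for (struct toy_attribute_group **g = host_groups; *g; g++)
		for (struct toy_attribute **a = (*g)->attrs; *a; a++)
			printf("attr: %s\n", (*a)->name);
	return 0;
}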
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 13f7d88d6e57..6fedc3b7d1ab 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
@@ -58,8 +44,7 @@ int fnic_debugfs_init(void)
fnic_trace_debugfs_root);
/* Allocate memory to structure */
- fc_trc_flag = (struct fc_trace_flag_type *)
- vmalloc(sizeof(struct fc_trace_flag_type));
+ fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type));
if (fc_trc_flag) {
fc_trc_flag->fc_row_file = 0;
@@ -87,8 +72,7 @@ void fnic_debugfs_terminate(void)
debugfs_remove(fnic_trace_debugfs_root);
fnic_trace_debugfs_root = NULL;
- if (fc_trc_flag)
- vfree(fc_trc_flag);
+ vfree(fc_trc_flag);
}
/*
@@ -120,11 +104,11 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
len = 0;
trace_type = (u8 *)filp->private_data;
if (*trace_type == fc_trc_flag->fnic_trace)
- len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+ len = sprintf(buf, "%d\n", fnic_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_trace)
- len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+ len = sprintf(buf, "%d\n", fnic_fc_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_clear)
- len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+ len = sprintf(buf, "%d\n", fnic_fc_trace_cleared);
else
pr_err("fnic: Cannot read to any debugfs file\n");
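Both fnic_debugfs.c cleanups above rest on the same C conventions: vmalloc() returns void *, which converts to any object pointer without a cast, and vfree(NULL) is a defined no-op, so the if (fc_trc_flag) guard was dead weight. The libc analogue:

#include <stdlib.h>

int main(void)
{
	char *p = malloc(64);	/* void * converts without a cast */

	free(p);
	free(NULL);		/* defined no-op, like vfree(NULL) */
	return 0;
}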
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 673887e383cc..79ddfaaf71a4 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
@@ -56,6 +44,8 @@ void fnic_handle_link(struct work_struct *work)
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->link_events = 1; /* less work to just set every time */
+
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
@@ -73,7 +63,7 @@ void fnic_handle_link(struct work_struct *work)
atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
new_port_speed);
if (old_port_speed != new_port_speed)
- shost_printk(KERN_INFO, fnic->lport->host,
+ FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
"Current vnic speed set to : %llu\n",
new_port_speed);
@@ -294,7 +284,7 @@ void fnic_handle_event(struct work_struct *work)
}
/**
- * Check if the Received FIP FLOGI frame is rejected
+ * is_fnic_fip_flogi_reject() - Check if the received FIP FLOGI frame is rejected
* @fip: The FCoE controller that received the frame
* @skb: The received FIP frame
*
@@ -309,12 +299,10 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
struct fc_frame_header *fh = NULL;
struct fip_desc *desc;
struct fip_encaps *els;
- enum fip_desc_type els_dtype = 0;
u16 op;
u8 els_op;
u8 sub;
- size_t els_len = 0;
size_t rlen;
size_t dlen = 0;
@@ -346,10 +334,8 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
return 0;
- els_len = dlen - sizeof(*els);
els = (struct fip_encaps *)desc;
fh = (struct fc_frame_header *)(els + 1);
- els_dtype = desc->fip_dtype;
if (!fh)
return 0;
@@ -376,7 +362,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb;
char *eth_fr;
- int fr_len;
struct fip_vlan *vlan;
u64 vlan_tov;
@@ -391,7 +376,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
if (!skb)
return;
- fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
@@ -837,7 +821,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
struct sk_buff *skb;
struct fc_frame *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
@@ -867,7 +850,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
&ingress_port, &packet_error,
&fcoe_enc_error, &fcs_ok, &vlan_stripped,
&vlan);
- eth_hdrs_stripped = 1;
skb_trim(skb, fcp_bytes_written);
fr_sof(fp) = sof;
fr_eof(fp) = eof;
@@ -884,7 +866,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
&tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4,
&ipv4_fragment, &fcs_ok);
- eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written);
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
@@ -1350,15 +1331,16 @@ void fnic_handle_fip_timer(struct fnic *fnic)
if (list_empty(&fnic->vlans)) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* no vlans available, try again */
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
+ if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
+ if (printk_ratelimit())
+ shost_printk(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- shost_printk(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fip_timer: vlan %d state %d sol_count %d\n",
vlan->vid, vlan->state, vlan->sol_count);
switch (vlan->state) {
@@ -1370,9 +1352,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
case FIP_VLAN_FAILED:
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* if all vlans are in failed state, restart vlan disc */
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
+ if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
+ if (printk_ratelimit())
+ shost_printk(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
break;
case FIP_VLAN_SENT:
@@ -1381,7 +1364,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
* no response on this vlan, remove from the list.
* Try the next vlan
*/
- shost_printk(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Dequeue this VLAN ID %d from list\n",
vlan->vid);
list_del(&vlan->list);
@@ -1391,7 +1374,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
/* we exhausted all vlans, restart vlan disc */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
- shost_printk(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"fip_timer: vlan list empty, "
"trigger vlan disc\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
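The reordering in fnic_handle_fip_timer() above is subtle: printk_ratelimit() consumes a shared, global ratelimit budget on every call, printed or not, so the log-level check now runs first and a disabled debug message costs nothing. A toy of the gated control flow, with a stateful limiter standing in for printk_ratelimit():

#include <stdbool.h>
#include <stdio.h>

#define LOG_FCS 0x2

static unsigned int log_level;	/* FCS debug logging disabled */

/* Every call consumes shared budget, printed or not. */
static bool ratelimit(void)
{
	static int budget = 3;

	return budget-- > 0;
}

int main(void)
{
	/* Level first, limiter second: a disabled debug message no
	 * longer burns the shared ratelimit budget. */
	if (log_level & LOG_FCS)
		if (ratelimit())
			puts("Start VLAN Discovery");
	return 0;
}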
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
index 7761f33ab5d4..79f53029737b 100644
--- a/drivers/scsi/fnic/fnic_fip.h
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_FIP_H_
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 1cb6a68c8e4e..f4c8769df312 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_IO_H_
#define _FNIC_IO_H_
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 2fb2731f50fb..8896758fed8c 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
@@ -332,4 +320,3 @@ void fnic_clear_intr_mode(struct fnic *fnic)
pci_free_irq_vectors(fnic->pdev);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
-
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 18584ab27c32..1077110ab273 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
@@ -49,8 +37,8 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
-LIST_HEAD(fnic_list);
-DEFINE_SPINLOCK(fnic_list_lock);
+static LIST_HEAD(fnic_list);
+static DEFINE_SPINLOCK(fnic_list_lock);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
@@ -122,8 +110,9 @@ static struct scsi_host_template fnic_host_template = {
.can_queue = FNIC_DFLT_IO_REQ,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
- .shost_attrs = fnic_attrs,
+ .shost_groups = fnic_host_groups,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct fnic_cmd_priv),
};
static void
@@ -443,7 +432,7 @@ static void fnic_notify_timer_start(struct fnic *fnic)
default:
/* Using intr for notification for INTx/MSI-X */
break;
- };
+ }
}
static int fnic_dev_wait(struct vnic_dev *vdev,
@@ -552,8 +541,40 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
- u16 old_vlan;
- old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+ vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
+static int fnic_scsi_drv_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+
+ /* Configure maximum outstanding IO reqs*/
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+
+ fnic->fnic_max_tag_id = host->can_queue;
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ host->nr_hw_queues = fnic->wq_copy_count;
+ if (host->nr_hw_queues > 1)
+ shost_printk(KERN_ERR, host,
+ "fnic: blk-mq is not supported");
+
+ host->nr_hw_queues = fnic->wq_copy_count = 1;
+
+ shost_printk(KERN_INFO, host,
+ "fnic: can_queue: %d max_lun: %llu",
+ host->can_queue, host->max_lun);
+
+ shost_printk(KERN_INFO, host,
+ "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ host->max_id, host->max_cmd_len, host->nr_hw_queues);
+
+ return 0;
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -581,6 +602,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->lport = lp;
fnic->ctlr.lp = lp;
+ fnic->link_events = 0;
+
snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
host->host_no);
@@ -610,10 +633,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 64-bit first, and
- * fail to 32-bit.
+ * limitation for the device. Try 47-bit first, and
+ * fall back to 32-bit. Cisco VIC supports 47 bits only.
*/
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
@@ -694,17 +717,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
- /* Configure Maximum Outstanding IO reqs*/
- if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
- host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
- max_t(u32, FNIC_MIN_IO_REQ,
- fnic->config.io_throttle_count));
- }
- fnic->fnic_max_tag_id = host->can_queue;
-
- host->max_lun = fnic->config.luns_per_tgt;
- host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ fnic_scsi_drv_init(fnic);
fnic_get_res_counts(fnic);
@@ -741,6 +754,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
+ err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
goto err_out_free_resources;
@@ -1098,9 +1112,6 @@ static int __init fnic_init_module(void)
goto err_create_fnic_workq;
}
- spin_lock_init(&fnic_list_lock);
- INIT_LIST_HEAD(&fnic_list);
-
fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
if (!fnic_fip_queue) {
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
@@ -1146,10 +1157,8 @@ static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
- if (fnic_fip_queue) {
- flush_workqueue(fnic_fip_queue);
+ if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
- }
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
@@ -1161,4 +1170,3 @@ static void __exit fnic_cleanup_module(void)
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);
-
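The 64-bit to 47-bit DMA mask change in fnic_probe() above keeps the usual try-then-fall-back shape: ask for the device's real addressing limit first, retry with 32 bits if the platform refuses. A sketch of that probe-path pattern, with a stand-in for dma_set_mask_and_coherent():

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Pretend the platform only reaches `supported` bits of address space. */
static int toy_set_dma_mask(int supported, uint64_t mask)
{
	return mask <= DMA_BIT_MASK(supported) ? 0 : -5 /* -EIO */;
}

int main(void)
{
	int err = toy_set_dma_mask(40, DMA_BIT_MASK(47));

	if (err)	/* fall back exactly as the probe path does */
		err = toy_set_dma_mask(40, DMA_BIT_MASK(32));
	printf("dma mask %s\n", err ? "failed" : "set");
	return 0;
}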
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 50488f8e169d..a1c9cfcace7f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
index ef8aaf2156dd..92a2fcfd3ea9 100644
--- a/drivers/scsi/fnic/fnic_res.h
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_RES_H_
#define _FNIC_RES_H_
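The fnic_scsi.c rework below replaces the open-coded walk over every possible tag (one scsi_host_find_tag() lookup per index) with scsi_host_busy_iter(), which invokes a driver callback once per in-flight command and stops when the callback returns false. A toy of that callback-iterator shape; the types are illustrative, only the control flow matches:

#include <stdbool.h>
#include <stdio.h>

struct toy_cmd {
	int tag;
};

typedef bool (*iter_fn)(struct toy_cmd *cmd, void *data);

/* Walk only the busy commands; returning false stops the walk. */
static void toy_busy_iter(struct toy_cmd *cmds, int n, iter_fn fn, void *data)
{
	for (int i = 0; i < n; i++)
		if (!fn(&cmds[i], data))
			break;
}

static bool cleanup_iter(struct toy_cmd *cmd, void *data)
{
	printf("cleaning tag %d on %s\n", cmd->tag, (const char *)data);
	return true;
}

int main(void)
{
	struct toy_cmd busy[] = { { 1 }, { 7 } };

	toy_busy_iter(busy, 2, cleanup_iter, (void *)"fnic0");
	return 0;
}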
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index b60795893994..26dbd347156e 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/mempool.h>
#include <linux/errno.h>
@@ -23,6 +11,7 @@
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
+#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
@@ -101,12 +90,12 @@ static const char *fnic_fcpio_status_to_str(unsigned int status)
return fcpio_status_str[status];
}
-static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+static void fnic_cleanup_io(struct fnic *fnic);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
struct scsi_cmnd *sc)
{
- u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+ u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
return &fnic->io_req_lock[hash];
}
@@ -172,7 +161,7 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
}
-/**
+/*
* __fnic_set_state_flags
* Sets/Clears bits in fnic's state_flags
**/
@@ -275,7 +264,7 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
}
if (fnic->ctlr.map_dest) {
- memset(gw_mac, 0xff, ETH_ALEN);
+ eth_broadcast_addr(gw_mac);
format = FCPIO_FLOGI_REG_DEF_DEST;
} else {
memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
@@ -389,7 +378,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
(rp->flags & FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
- fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+ fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
0, exch_flags, io_req->sgl_cnt,
SCSI_SENSE_BUFFERSIZE,
io_req->sgl_list_pa,
@@ -419,8 +408,10 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
* Routine to send a scsi cdb
* Called with host_lock held and interrupts disabled.
*/
-static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
+ const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
@@ -494,8 +485,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
- CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
- CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+ fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
+ fnic_priv(sc)->flags = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
@@ -510,8 +501,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, 0, sc->cmnd[0],
- sg_count, CMD_STATE(sc));
+ tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -556,10 +546,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
- CMD_SP(sc) = (char *)io_req;
- CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
- sc->scsi_done = done;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+ fnic_priv(sc)->io_req = io_req;
+ fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
/* create copy wq desc and enqueue it */
wq = &fnic->wq_copy[0];
@@ -570,11 +559,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, 0, 0, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- CMD_SP(sc) = NULL;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
+ io_req = fnic_priv(sc)->io_req;
+ fnic_priv(sc)->io_req = NULL;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
fnic_release_ioreq_buf(fnic, io_req, sc);
@@ -593,7 +581,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
atomic64_read(&fnic_stats->io_stats.active_ios));
/* REVISIT: Use per IO lock in the final code */
- CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
}
out:
cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
@@ -602,9 +590,8 @@ out:
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- sc->request->tag, sc, io_req,
- sg_count, cmd_trace,
- (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ tag, sc, io_req, sg_count, cmd_trace,
+ fnic_flags_and_state(sc));
/* if only we issued IO, will we have the io lock */
if (io_lock_acquired)
@@ -637,7 +624,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
atomic64_inc(&reset_stats->fw_reset_completions);
/* Clean up all outstanding io requests */
- fnic_cleanup_io(fnic, SCSI_NO_TAG);
+ fnic_cleanup_io(fnic);
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
@@ -867,11 +854,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
- CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl io_req is null - "
@@ -888,17 +875,17 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
* if SCSI-ML has already issued abort on this command,
* set completion of the IO. The abts path will clean it up
*/
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
/*
* set the FNIC_IO_DONE so that this doesn't get
* flagged as 'out of order' if it was not aborted
*/
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
- CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_IO_DONE;
+ fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
spin_unlock_irqrestore(io_lock, flags);
if(FCPIO_ABORTED == hdr_status)
- CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
@@ -912,7 +899,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
}
/* Mark the IO as complete */
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
icmnd_cmpl = &desc->u.icmnd_cmpl;
@@ -920,10 +907,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
case FCPIO_SUCCESS:
sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
xfer_len = scsi_bufflen(sc);
- scsi_set_resid(sc, icmnd_cmpl->residual);
- if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
+ if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
xfer_len -= icmnd_cmpl->residual;
+ scsi_set_resid(sc, icmnd_cmpl->residual);
+ }
if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
atomic64_inc(&fnic_stats->misc_stats.check_condition);
@@ -982,10 +970,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
}
/* Break link with the SCSI command */
- CMD_SP(sc) = NULL;
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
-
- spin_unlock_irqrestore(io_lock, flags);
+ fnic_priv(sc)->io_req = NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_DONE;
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
@@ -995,8 +981,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
-
cmd_trace = ((u64)hdr_status << 56) |
(u64)icmnd_cmpl->scsi_status << 48 |
(u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
@@ -1008,8 +992,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
((u64)icmnd_cmpl->_resvd0[1] << 56 |
(u64)icmnd_cmpl->_resvd0[0] << 48 |
jiffies_to_msecs(jiffies - start_time)),
- desc, cmd_trace,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
@@ -1020,6 +1003,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;
+ /* Call SCSI completion function to complete the IO */
+ scsi_done(sc);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ mempool_free(io_req, fnic->io_req_pool);
+
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1048,10 +1037,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
}
-
- /* Call SCSI completion function to complete the IO */
- if (sc->scsi_done)
- sc->scsi_done(sc);
}
/* fnic_fcpio_itmf_cmpl_handler
@@ -1095,12 +1080,12 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
@@ -1115,9 +1100,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset abts cmpl recd. id %x status %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
- CMD_ABTS_STATUS(sc) = hdr_status;
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->abts_status = hdr_status;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
@@ -1127,7 +1112,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
case FCPIO_SUCCESS:
break;
case FCPIO_TIMEOUT:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_fw_timeouts);
else
atomic64_inc(
@@ -1139,34 +1124,34 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
(int)(id & FNIC_TAG_MASK));
break;
case FCPIO_IO_NOT_FOUND:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found);
else
atomic64_inc(
&term_stats->terminate_io_not_found);
break;
default:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_failures);
else
atomic64_inc(
&term_stats->terminate_failures);
break;
}
- if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
return;
}
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
- CMD_ABTS_STATUS(sc) = hdr_status;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
+ fnic_priv(sc)->abts_status = hdr_status;
/* If the status is IO not found consider it as success */
if (hdr_status == FCPIO_IO_NOT_FOUND)
- CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
+ fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
- if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -1185,46 +1170,41 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
} else {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl, completing IO\n");
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
sc->result = (DID_ERROR << 16);
spin_unlock_irqrestore(io_lock, flags);
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done) {
- FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
- sc->device->host->host_no, id,
- sc,
- jiffies_to_msecs(jiffies - start_time),
- desc,
- (((u64)hdr_status << 40) |
- (u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) |
- CMD_STATE(sc)));
- sc->scsi_done(sc);
- atomic64_dec(&fnic_stats->io_stats.active_ios);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
- }
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ fnic_flags_and_state(sc));
+ scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
}
-
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
- CMD_LR_STATUS(sc) = hdr_status;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ fnic_priv(sc)->lr_status = hdr_status;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
- desc, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Terminate pending "
"dev reset cmpl recd. id %d status %s\n",
@@ -1232,14 +1212,13 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_fcpio_status_to_str(hdr_status));
return;
}
- if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
/* Need to wait for terminate completion */
spin_unlock_irqrestore(io_lock, flags);
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
- desc, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
@@ -1247,8 +1226,8 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_fcpio_status_to_str(hdr_status));
return;
}
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1260,7 +1239,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
} else {
shost_printk(KERN_ERR, fnic->lport->host,
"Unexpected itmf io state %s tag %x\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
spin_unlock_irqrestore(io_lock, flags);
}
@@ -1359,94 +1338,89 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
return wq_work_done;
}
-static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
+static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
- int i;
+ const int tag = scsi_cmd_to_rq(sc)->tag;
+ struct fnic *fnic = data;
struct fnic_io_req *io_req;
unsigned long flags = 0;
- struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- for (i = 0; i < fnic->fnic_max_tag_id; i++) {
- if (i == exclude_id)
- continue;
-
- io_lock = fnic_io_lock_tag(fnic, i);
- spin_lock_irqsave(io_lock, flags);
- sc = scsi_host_find_tag(fnic->lport->host, i);
- if (!sc) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
- /*
- * We will be here only when FW completes reset
- * without sending completions for outstanding ios.
- */
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
- if (io_req && io_req->dr_done)
- complete(io_req->dr_done);
- else if (io_req && io_req->abts_done)
- complete(io_req->abts_done);
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
- if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
- goto cleanup_scsi_cmd;
- }
+ io_req = fnic_priv(sc)->io_req;
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will be here only when FW completes reset
+ * without sending completions for outstanding ios.
+ */
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto cleanup_scsi_cmd;
+ }
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(io_lock, flags);
- /*
- * If there is a scsi_cmnd associated with this io_req, then
- * free the corresponding state
- */
- start_time = io_req->start_time;
- fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
+ /*
+ * If there is a scsi_cmnd associated with this io_req, then
+ * free the corresponding state
+ */
+ start_time = io_req->start_time;
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
cleanup_scsi_cmd:
- sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
- __func__, sc->request->tag, sc,
- (jiffies - start_time));
+ sc->result = DID_TRANSPORT_DISRUPTED << 16;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ tag, sc, jiffies - start_time);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
- /* Complete the command to SCSI */
- if (sc->scsi_done) {
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
- shost_printk(KERN_ERR, fnic->lport->host,
- "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
- sc->request->tag, sc);
+ /* Complete the command to SCSI */
+ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
+ tag, sc);
- FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, i, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ fnic_flags_and_state(sc));
- sc->scsi_done(sc);
- }
- }
+ scsi_done(sc);
+
+ return true;
+}
+
+static void fnic_cleanup_io(struct fnic *fnic)
+{
+ scsi_host_busy_iter(fnic->lport->host,
+ fnic_cleanup_io_iter, fnic);
}
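This conversion is the template for the rest of the patch: the manual 0..fnic_max_tag_id walk with scsi_host_find_tag() becomes scsi_host_busy_iter(), which calls the iterator once per in-flight command, passes the opaque pointer through as the second argument, and stops the walk as soon as the callback returns false. A self-contained usage sketch (hypothetical names, not from this driver):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    /* Hypothetical example: count the commands currently in flight. */
    static bool count_busy_iter(struct scsi_cmnd *sc, void *data)
    {
            unsigned int *count = data;

            (*count)++;
            return true;    /* true = keep walking, false = stop early */
    }

    static unsigned int count_busy(struct Scsi_Host *shost)
    {
            unsigned int count = 0;

            scsi_host_busy_iter(shost, count_busy_iter, &count);
            return count;
    }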
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -1475,7 +1449,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
spin_lock_irqsave(io_lock, flags);
/* Get the IO context which this desc refers to */
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
/* fnic interrupts are turned off by now */
@@ -1484,7 +1458,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
goto wq_copy_cleanup_scsi_cmd;
}
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -1497,17 +1471,15 @@ wq_copy_cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done) {
- FNIC_TRACE(fnic_wq_copy_cleanup_handler,
- sc->device->host->host_no, id, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ fnic_flags_and_state(sc));
- sc->scsi_done(sc);
- }
+ scsi_done(sc);
}
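The same hunk also shows the completion-path change that recurs through this patch: the per-command sc->scsi_done pointer (and its NULL guard) gives way to the always-valid midlayer entry point scsi_done(), so finishing a command reduces to the following pair (illustration only, using the DID_NO_CONNECT result from the debug message above):

    /* The old code had to wrap this in "if (sc->scsi_done)";
     * scsi_done() needs no such check. */
    sc->result = DID_NO_CONNECT << 16;
    scsi_done(sc);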
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1557,143 +1529,140 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
return 0;
}
-static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+struct fnic_rport_abort_io_iter_data {
+ struct fnic *fnic;
+ u32 port_id;
+ int term_cnt;
+};
+
+static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
- int tag;
- int abt_tag;
- int term_cnt = 0;
+ struct fnic_rport_abort_io_iter_data *iter_data = data;
+ struct fnic *fnic = iter_data->fnic;
+ int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
- struct scsi_cmnd *sc;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
- "fnic_rport_exch_reset called portid 0x%06x\n",
- port_id);
-
- if (fnic->in_remove)
- return;
-
- for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
- abt_tag = tag;
- io_lock = fnic_io_lock_tag(fnic, tag);
- spin_lock_irqsave(io_lock, flags);
- sc = scsi_host_find_tag(fnic->lport->host, tag);
- if (!sc) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ io_lock = fnic_io_lock_tag(fnic, abt_tag);
+ spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
- if (!io_req || io_req->port_id != port_id) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ if (!io_req || io_req->port_id != iter_data->port_id) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
sc);
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
- /*
- * Found IO that is still pending with firmware and
- * belongs to rport that went away
- */
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
- if (io_req->abts_done) {
- shost_printk(KERN_ERR, fnic->lport->host,
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to rport that went away
+ */
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
"fnic_rport_exch_reset: io_req->abts_done is set "
"state is %s\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
- }
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ }
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "rport_exch_reset "
- "IO not yet issued %p tag 0x%x flags "
- "%x state %d\n",
- sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
- }
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
- atomic64_inc(&reset_stats->device_reset_terminates);
- abt_tag = (tag | FNIC_TAG_DEV_RST);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_rport_exch_reset dev rst sc 0x%p\n",
- sc);
- }
+ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
+ }
+ old_ioreq_state = fnic_priv(sc)->state;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
+ abt_tag |= FNIC_TAG_DEV_RST;
+ }
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
+ BUG_ON(io_req->abts_done);
- BUG_ON(io_req->abts_done);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_reset_exch: Issuing abts\n");
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_rport_reset_exch: Issuing abts\n");
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ /*
+ * Revert the cmd state back to old state, if
+ * it hasn't changed in between. This cmd will get
+ * aborted later by scsi_eh, or cleaned up during
+ * lun reset
+ */
+ spin_lock_irqsave(io_lock, flags);
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ iter_data->term_cnt++;
+ }
+ return true;
+}
- /* Now queue the abort command to firmware */
- int_to_scsilun(sc->device->lun, &fc_lun);
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+{
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+ struct fnic_rport_abort_io_iter_data iter_data = {
+ .fnic = fnic,
+ .port_id = port_id,
+ .term_cnt = 0,
+ };
- if (fnic_queue_abort_io_req(fnic, abt_tag,
- FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req)) {
- /*
- * Revert the cmd state back to old state, if
- * it hasn't changed in between. This cmd will get
- * aborted later by scsi_eh, or cleaned up during
- * lun reset
- */
- spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
- spin_unlock_irqrestore(io_lock, flags);
- } else {
- spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
- else
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
- spin_unlock_irqrestore(io_lock, flags);
- atomic64_inc(&term_stats->terminates);
- term_cnt++;
- }
- }
- if (term_cnt > atomic64_read(&term_stats->max_terminates))
- atomic64_set(&term_stats->max_terminates, term_cnt);
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic_rport_exch_reset called portid 0x%06x\n",
+ port_id);
+
+ if (fnic->in_remove)
+ return;
+
+ scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
+ &iter_data);
+ if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
}
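The abort paths above lean on the driver's tag encoding: the blk-mq tag is ORed with marker bits such as FNIC_TAG_DEV_RST so the ITMF completion handler can classify the request (id & FNIC_TAG_DEV_RST) and recover the base tag (id & FNIC_TAG_MASK). The real constants live in fnic.h; the bit positions below are assumptions for illustration only:

    /* Assumed layout, for illustration; see fnic.h for the real bits. */
    #define FNIC_TAG_MASK           (BIT(24) - 1)   /* low bits: blk-mq tag */
    #define FNIC_TAG_DEV_RST        BIT(29)         /* marks a LUN-reset ITMF */

    static inline u32 fnic_base_tag(u32 id)
    {
            return id & FNIC_TAG_MASK;              /* hypothetical helper */
    }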
void fnic_terminate_rport_io(struct fc_rport *rport)
{
- int tag;
- int abt_tag;
- int term_cnt = 0;
- struct fnic_io_req *io_req;
- spinlock_t *io_lock;
- unsigned long flags;
- struct scsi_cmnd *sc;
- struct scsi_lun fc_lun;
struct fc_rport_libfc_priv *rdata;
struct fc_lport *lport;
struct fnic *fnic;
- struct fc_rport *cmd_rport;
- struct reset_stats *reset_stats;
- struct terminate_stats *term_stats;
- enum fnic_ioreq_state old_ioreq_state;
if (!rport) {
printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
@@ -1721,109 +1690,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove)
return;
- reset_stats = &fnic->fnic_stats.reset_stats;
- term_stats = &fnic->fnic_stats.term_stats;
-
- for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
- abt_tag = tag;
- io_lock = fnic_io_lock_tag(fnic, tag);
- spin_lock_irqsave(io_lock, flags);
- sc = scsi_host_find_tag(fnic->lport->host, tag);
- if (!sc) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
-
- cmd_rport = starget_to_rport(scsi_target(sc->device));
- if (rport != cmd_rport) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
-
- io_req = (struct fnic_io_req *)CMD_SP(sc);
-
- if (!io_req || rport != cmd_rport) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
-
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
- sc);
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
- /*
- * Found IO that is still pending with firmware and
- * belongs to rport that went away
- */
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
- if (io_req->abts_done) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_terminate_rport_io: io_req->abts_done is set "
- "state is %s\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
- }
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "fnic_terminate_rport_io "
- "IO not yet issued %p tag 0x%x flags "
- "%x state %d\n",
- sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
- }
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
- atomic64_inc(&reset_stats->device_reset_terminates);
- abt_tag = (tag | FNIC_TAG_DEV_RST);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
- }
-
- BUG_ON(io_req->abts_done);
-
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
- "fnic_terminate_rport_io: Issuing abts\n");
-
- spin_unlock_irqrestore(io_lock, flags);
-
- /* Now queue the abort command to firmware */
- int_to_scsilun(sc->device->lun, &fc_lun);
-
- if (fnic_queue_abort_io_req(fnic, abt_tag,
- FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req)) {
- /*
- * Revert the cmd state back to old state, if
- * it hasn't changed in between. This cmd will get
- * aborted later by scsi_eh, or cleaned up during
- * lun reset
- */
- spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
- spin_unlock_irqrestore(io_lock, flags);
- } else {
- spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
- else
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
- spin_unlock_irqrestore(io_lock, flags);
- atomic64_inc(&term_stats->terminates);
- term_cnt++;
- }
- }
- if (term_cnt > atomic64_read(&term_stats->max_terminates))
- atomic64_set(&term_stats->max_terminates, term_cnt);
-
+ fnic_rport_exch_reset(fnic, rport->port_id);
}
/*
@@ -1833,6 +1700,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
*/
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
@@ -1847,7 +1715,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
- int tag;
+ const int tag = rq->tag;
unsigned long abt_issued_time;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1863,13 +1731,12 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
term_stats = &fnic->fnic_stats.term_stats;
rport = starget_to_rport(scsi_target(sc->device));
- tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
- rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+ rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);
- CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+ fnic_priv(sc)->flags = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
@@ -1886,11 +1753,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* happened, the completion won't actually complete the command
* and it will be considered as an aborted command
*
- * The CMD_SP will not be cleared except while holding io_req_lock.
+ * .io_req will not be cleared except while holding io_req_lock.
*/
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
@@ -1898,7 +1765,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req->abts_done = &tm_done;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
goto wait_pending;
}
@@ -1927,9 +1794,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* the completion won't be done till mid-layer, since abort
* has already started.
*/
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ old_ioreq_state = fnic_priv(sc)->state;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
@@ -1948,12 +1815,12 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
- fc_lun.scsi_lun, io_req)) {
+ if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
+ io_req)) {
spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -1961,10 +1828,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
if (task_req == FCPIO_ITMF_ABT_TASK) {
- CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
atomic64_inc(&fnic_stats->abts_stats.aborts);
} else {
- CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
atomic64_inc(&fnic_stats->term_stats.terminates);
}
@@ -1982,32 +1849,32 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Check the abort status */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
io_req->abts_done = NULL;
/* fw did not complete abort, timed out */
- if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
atomic64_inc(&term_stats->terminate_drv_timeouts);
}
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
/* IO out of order */
- if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issuing Host reset due to out of order IO\n");
@@ -2016,7 +1883,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
start_time = io_req->start_time;
/*
@@ -2024,9 +1891,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* free the io_req if successful. If abort fails,
* Device reset will clean the I/O.
*/
- if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
- CMD_SP(sc) = NULL;
- else {
+ if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
+ fnic_priv(sc)->io_req = NULL;
+ } else {
ret = FAILED;
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
@@ -2037,25 +1904,22 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done) {
/* Call SCSI completion function to complete the IO */
- sc->result = (DID_ABORT << 16);
- sc->scsi_done(sc);
- atomic64_dec(&fnic_stats->io_stats.active_ios);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
- }
+ sc->result = DID_ABORT << 16;
+ scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
fnic_abort_cmd_end:
- FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
- sc->request->tag, sc,
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from abort cmd type %x %s\n", task_req,
@@ -2100,7 +1964,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
/* fill in the lun info */
int_to_scsilun(sc->device->lun, &fc_lun);
- fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+ fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
@@ -2118,165 +1982,179 @@ lr_io_req_end:
return ret;
}
-/*
- * Clean up any pending aborts on the lun
- * For each outstanding IO on this lun, whose abort is not completed by fw,
- * issue a local abort. Wait for abort to complete. Return 0 if all commands
- * successfully aborted, 1 otherwise
- */
-static int fnic_clean_pending_aborts(struct fnic *fnic,
- struct scsi_cmnd *lr_sc,
- bool new_sc)
+struct fnic_pending_aborts_iter_data {
+ struct fnic *fnic;
+ struct scsi_cmnd *lr_sc;
+ struct scsi_device *lun_dev;
+ int ret;
+};
+static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
- int tag, abt_tag;
+ struct fnic_pending_aborts_iter_data *iter_data = data;
+ struct fnic *fnic = iter_data->fnic;
+ struct scsi_device *lun_dev = iter_data->lun_dev;
+ int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
- int ret = 0;
- struct scsi_cmnd *sc;
struct scsi_lun fc_lun;
- struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
enum fnic_ioreq_state old_ioreq_state;
- for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
- io_lock = fnic_io_lock_tag(fnic, tag);
- spin_lock_irqsave(io_lock, flags);
- sc = scsi_host_find_tag(fnic->lport->host, tag);
- /*
- * ignore this lun reset cmd if issued using new SC
- * or cmds that do not belong to this lun
- */
- if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ if (sc == iter_data->lr_sc || sc->device != lun_dev)
+ return true;
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_lock = fnic_io_lock_tag(fnic, abt_tag);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = fnic_priv(sc)->io_req;
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
- if (!io_req || sc->device != lun_dev) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
- /*
- * Found IO that is still pending with firmware and
- * belongs to the LUN that we are resetting
- */
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "%s dev rst not pending sc 0x%p\n", __func__,
- sc);
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ old_ioreq_state = fnic_priv(sc)->state;
+ /*
+ * Any pending IO issued prior to reset is expected to be
+	 * in the abts pending state; if not, we need to set
+	 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
+	 * When the IO completes, it will be handed over and
+ * handled in this function.
+ */
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
- if (io_req->abts_done)
- shost_printk(KERN_ERR, fnic->lport->host,
- "%s: io_req->abts_done is set state is %s\n",
- __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
- old_ioreq_state = CMD_STATE(sc);
- /*
- * Any pending IO issued prior to reset is expected to be
- * in abts pending state, if not we need to set
- * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
- * When IO is completed, the IO will be handed over and
- * handled in this function.
- */
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ BUG_ON(io_req->abts_done);
- BUG_ON(io_req->abts_done);
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
- abt_tag = tag;
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
- abt_tag |= FNIC_TAG_DEV_RST;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "%s: dev rst sc 0x%p\n", __func__, sc);
- }
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
- io_req->abts_done = &tm_done;
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = fnic_priv(sc)->io_req;
+ if (io_req)
+ io_req->abts_done = NULL;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ iter_data->ret = FAILED;
+ return false;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ }
+ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
- /* Now queue the abort command to firmware */
- int_to_scsilun(sc->device->lun, &fc_lun);
+ wait_for_completion_timeout(&tm_done, msecs_to_jiffies
+ (fnic->config.ed_tov));
- if (fnic_queue_abort_io_req(fnic, abt_tag,
- FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req)) {
- spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- if (io_req)
- io_req->abts_done = NULL;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
- spin_unlock_irqrestore(io_lock, flags);
- ret = 1;
- goto clean_pending_aborts_end;
- } else {
- spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
- spin_unlock_irqrestore(io_lock, flags);
- }
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ /* Recheck cmd state to check if it is now aborted */
+ spin_lock_irqsave(io_lock, flags);
+ io_req = fnic_priv(sc)->io_req;
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
+ return true;
+ }
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies
- (fnic->config.ed_tov));
+ io_req->abts_done = NULL;
- /* Recheck cmd state to check if it is now aborted */
- spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
- continue;
- }
+ /* if abort is still pending with fw, fail */
+ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
+ spin_unlock_irqrestore(io_lock, flags);
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
+ iter_data->ret = FAILED;
+ return false;
+ }
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
- io_req->abts_done = NULL;
+ /* original sc used for lr is handled by dev reset code */
+ if (sc != iter_data->lr_sc)
+ fnic_priv(sc)->io_req = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
- /* if abort is still pending with fw, fail */
- if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
- spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
- ret = 1;
- goto clean_pending_aborts_end;
- }
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ /* original sc used for lr is handled by dev reset code */
+ if (sc != iter_data->lr_sc) {
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
- /* original sc used for lr is handled by dev reset code */
- if (sc != lr_sc)
- CMD_SP(sc) = NULL;
- spin_unlock_irqrestore(io_lock, flags);
+	/*
+	 * Any IO returned during reset needs to call scsi_done
+	 * to hand the scsi_cmnd back to the upper layer.
+	 */
+ /* Set result to let upper SCSI layer retry */
+ sc->result = DID_RESET << 16;
+ scsi_done(sc);
- /* original sc used for lr is handled by dev reset code */
- if (sc != lr_sc) {
- fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
- }
+ return true;
+}
- /*
- * Any IO is returned during reset, it needs to call scsi_done
- * to return the scsi_cmnd to upper layer.
- */
- if (sc->scsi_done) {
- /* Set result to let upper SCSI layer retry */
- sc->result = DID_RESET << 16;
- sc->scsi_done(sc);
- }
- }
+/*
+ * Clean up any pending aborts on the lun.
+ * For each outstanding IO on this lun whose abort is not completed by fw,
+ * issue a local abort. Wait for the abort to complete. Return SUCCESS if
+ * all commands were aborted successfully, FAILED otherwise.
+ */
+static int fnic_clean_pending_aborts(struct fnic *fnic,
+ struct scsi_cmnd *lr_sc,
+ bool new_sc)
+{
+ int ret = SUCCESS;
+ struct fnic_pending_aborts_iter_data iter_data = {
+ .fnic = fnic,
+ .lun_dev = lr_sc->device,
+ .ret = SUCCESS,
+ };
+
+ if (new_sc)
+ iter_data.lr_sc = lr_sc;
+ scsi_host_busy_iter(fnic->lport->host,
+ fnic_pending_aborts_iter, &iter_data);
+ if (iter_data.ret == FAILED) {
+ ret = iter_data.ret;
+ goto clean_pending_aborts_end;
+ }
schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
/* walk again to check, if IOs are still pending in fw */
@@ -2287,27 +2165,28 @@ clean_pending_aborts_end:
return ret;
}
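One subtlety of the iterator form: returning false from the callback aborts the walk, which is how fnic_pending_aborts_iter() replaces the old "ret = 1; goto clean_pending_aborts_end" with a FAILED result carried out through the shared context. Stripped to its essentials (hypothetical failure test):

    struct fail_ctx {
            int ret;                        /* SUCCESS until proven otherwise */
    };

    static bool first_failure_iter(struct scsi_cmnd *sc, void *data)
    {
            struct fail_ctx *ctx = data;

            if (scsi_cmd_to_rq(sc)->tag < 0) {      /* hypothetical condition */
                    ctx->ret = FAILED;
                    return false;                   /* stop the walk early */
            }
            return true;
    }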
-/**
+/*
* fnic_scsi_host_start_tag
* Allocates tagid from host's tag list
**/
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct request_queue *q = sc->request->q;
+ struct request *rq = scsi_cmd_to_rq(sc);
+ struct request_queue *q = rq->q;
struct request *dummy;
dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(dummy))
return SCSI_NO_TAG;
- sc->tag = sc->request->tag = dummy->tag;
+ rq->tag = dummy->tag;
sc->host_scribble = (unsigned char *)dummy;
return dummy->tag;
}
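scsi_cmd_to_rq() replaces the direct sc->request dereferences throughout this patch. In mainline it is a trivial accessor (a scsi_cmnd is allocated as the PDU of its request), roughly:

    static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
    {
            return blk_mq_rq_from_pdu(scmd);
    }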
-/**
+/*
* fnic_scsi_host_end_tag
* frees tag allocated by fnic_scsi_host_start_tag.
**/
@@ -2326,6 +2205,7 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
*/
int fnic_device_reset(struct scsi_cmnd *sc)
{
+ struct request *rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
@@ -2338,7 +2218,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct reset_stats *reset_stats;
- int tag = 0;
+ int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /* to track tags allocated by fnic driver */
bool new_sc = 0;
@@ -2369,10 +2249,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
goto fnic_device_reset_end;
}
- CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
- tag = sc->request->tag;
if (unlikely(tag < 0)) {
/*
* Really should fix the midlayer to pass in a proper
@@ -2386,7 +2265,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
/*
* If there is a io_req attached to this command, then use it,
@@ -2400,11 +2279,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
- CMD_SP(sc) = (char *)io_req;
+ fnic_priv(sc)->io_req = io_req;
}
io_req->dr_done = &tm_done;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
- CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+ fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
@@ -2415,13 +2294,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
spin_lock_irqsave(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
/*
@@ -2432,7 +2311,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2441,7 +2320,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
io_req->dr_done = NULL;
- status = CMD_LR_STATUS(sc);
+ status = fnic_priv(sc)->lr_status;
/*
* If lun reset not completed, bail out with failed. io_req
@@ -2451,7 +2330,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
spin_unlock_irqrestore(io_lock, flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
@@ -2460,7 +2339,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
while (1) {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
spin_unlock_irqrestore(io_lock, flags);
break;
}
@@ -2473,8 +2352,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
} else {
spin_lock_irqsave(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2485,13 +2364,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
while (1) {
spin_lock_irqsave(io_lock, flags);
- if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
spin_unlock_irqrestore(io_lock, flags);
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
break;
} else {
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
io_req->abts_done = NULL;
goto fnic_device_reset_clean;
}
@@ -2506,7 +2385,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Device reset completed - failed\n");
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
}
@@ -2519,7 +2398,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset failed"
" since could not abort all IOs\n");
@@ -2528,14 +2407,14 @@ int fnic_device_reset(struct scsi_cmnd *sc)
/* Clean lun reset command */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
/* Completed, and successful */
ret = SUCCESS;
fnic_device_reset_clean:
if (io_req)
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2546,13 +2425,12 @@ fnic_device_reset_clean:
}
fnic_device_reset_end:
- FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
- sc->request->tag, sc,
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
/* free tag if it is allocated */
if (unlikely(tag_gen_flag))
@@ -2624,8 +2502,8 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->internal_reset_inprogress == 0) {
- fnic->internal_reset_inprogress = 1;
+ if (!fnic->internal_reset_inprogress) {
+ fnic->internal_reset_inprogress = true;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2654,7 +2532,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = 0;
+ fnic->internal_reset_inprogress = false;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
@@ -2673,7 +2551,8 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
+ fnic->link_events) {
/* fw reset is in progress, poll for its completion */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
schedule_timeout(msecs_to_jiffies(100));
@@ -2774,58 +2653,71 @@ call_fc_exch_mgr_reset:
}
-/*
- * fnic_is_abts_pending() is a helper function that
- * walks through tag map to check if there is any IOs pending,if there is one,
- * then it returns 1 (true), otherwise 0 (false)
- * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
- * otherwise, it checks for all IOs.
- */
-int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
- int tag;
+ struct fnic_pending_aborts_iter_data *iter_data = data;
+ struct fnic *fnic = iter_data->fnic;
+ int cmd_state;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
- int ret = 0;
- struct scsi_cmnd *sc;
- struct scsi_device *lun_dev = NULL;
- if (lr_sc)
- lun_dev = lr_sc->device;
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (iter_data->lr_sc && sc == iter_data->lr_sc)
+ return true;
+ if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
+ return true;
- /* walk again to check, if IOs are still pending in fw */
- for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
- sc = scsi_host_find_tag(fnic->lport->host, tag);
- /*
- * ignore this lun reset cmd or cmds that do not belong to
- * this lun
- */
- if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
- continue;
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ io_req = fnic_priv(sc)->io_req;
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return true;
+ }
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ cmd_state = fnic_priv(sc)->state;
+ spin_unlock_irqrestore(io_lock, flags);
+ if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
+ iter_data->ret = 1;
- if (!io_req || sc->device != lun_dev) {
- spin_unlock_irqrestore(io_lock, flags);
- continue;
- }
+ return iter_data->ret ? false : true;
+}
- /*
- * Found IO that is still pending with firmware and
- * belongs to the LUN that we are resetting
- */
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks the tag map to check whether any IOs are pending; if one is
+ * found, it returns 1 (true), otherwise 0 (false).
+ * If @lr_sc is non-NULL, it checks only IOs specific to that LUN;
+ * otherwise it checks all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ struct fnic_pending_aborts_iter_data iter_data = {
+ .fnic = fnic,
+ .lun_dev = NULL,
+ .ret = 0,
+ };
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- ret = 1;
- spin_unlock_irqrestore(io_lock, flags);
+ if (lr_sc) {
+ iter_data.lun_dev = lr_sc->device;
+ iter_data.lr_sc = lr_sc;
}
- return ret;
+ /* walk again to check, if IOs are still pending in fw */
+ scsi_host_busy_iter(fnic->lport->host,
+ fnic_abts_pending_iter, &iter_data);
+
+ return iter_data.ret;
}
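A hedged sketch of the caller shape for the rebuilt helper; the real call site is in fnic_clean_pending_aborts(), whose tail is elided from this hunk:

    /* Assumed caller: after issuing terminates, give firmware time and
     * then re-check whether aborts are still outstanding on the LUN. */
    schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
    if (fnic_is_abts_pending(fnic, lr_sc))
            ret = FAILED;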
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 086f729f3c46..bdf639eef8cf 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index a0d01aea28f7..e03967463561 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
@@ -138,7 +124,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
@@ -153,7 +139,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
if (rd_idx > (fnic_max_trace_entries-1))
rd_idx = 0;
/*
- * Continure dumpping trace buffer entries into
+ * Continue dumping trace buffer entries into
* memory file till rd_idx reaches write index
*/
if (rd_idx == wr_idx)
@@ -180,7 +166,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
@@ -189,7 +175,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
tbp->data[3], tbp->data[4]);
rd_idx++;
/*
- * Continue dumpping trace buffer entries into
+ * Continue dumping trace buffer entries into
* memory file till rd_idx reaches write index
*/
if (rd_idx == wr_idx)
@@ -220,12 +206,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
struct timespec64 val1, val2;
ktime_get_real_ts64(&val1);
- len = snprintf(debug->debug_buffer + len, buf_size - len,
+ len = scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Current time : [%lld:%ld]\n"
"Last stats reset time: [%lld:%09ld]\n"
"Last stats read time: [%lld:%ld]\n"
@@ -243,11 +229,11 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
stats->stats_timestamps.last_read_time = val1;
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
@@ -280,16 +266,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\nCurrent Max IO time : %lld\n",
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tAbort Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Aborts: %lld\n"
"Number of Abort Failures: %lld\n"
"Number of Abort Driver Timeouts: %lld\n"
@@ -318,12 +304,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tTerminate Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Terminates: %lld\n"
"Maximum Terminates: %lld\n"
"Number of Terminate Driver Timeouts: %lld\n"
@@ -337,12 +323,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
(u64)atomic64_read(&stats->term_stats.terminate_failures));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tReset Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Device Resets: %lld\n"
"Number of Device Reset Failures: %lld\n"
"Number of Device Reset Aborts: %lld\n"
@@ -368,12 +354,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
&stats->reset_stats.fnic_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tFirmware Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active FW Requests %lld\n"
"Maximum FW Requests: %lld\n"
"Number of FW out of resources: %lld\n"
@@ -383,12 +369,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
(u64)atomic64_read(&stats->fw_stats.io_fw_errs));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tVlan Discovery Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Vlan Discovery Requests Sent %lld\n"
"Vlan Response Received with no FCF VLAN ID: %lld\n"
"No solicitations recvd after vlan set, expiry count: %lld\n"
@@ -398,7 +384,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
(u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
@@ -406,7 +392,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8llu.%09lu)\n"
"Last ACK time: %llu (%8llu.%09lu)\n"
"Max ISR jiffies: %llu\n"
@@ -452,7 +438,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Firmware reported port speed: %llu\n",
(u64)atomic64_read(
&stats->misc_stats.current_port_speed));
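The blanket snprintf()-to-scnprintf() conversion in this file hardens the "len +=" accumulation pattern: snprintf() returns the length it would have written, so a truncated write can push len past the buffer and make the next "buf_size - len" underflow, whereas scnprintf() returns the bytes actually stored. A minimal sketch of the difference:

    #include <linux/kernel.h>

    static size_t fill_report(char *buf, size_t size)
    {
            size_t len = 0;

            /* scnprintf() never returns more than the space left, so
             * len cannot run past size even when output is truncated;
             * snprintf() would return the full untruncated length. */
            len += scnprintf(buf + len, size - len, "header\n");
            len += scnprintf(buf + len, size - len, "%d entries\n", 42);
            return len;
    }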
@@ -632,7 +618,7 @@ void fnic_fc_trace_free(void)
* fnic_fc_ctlr_set_trace_data:
* Maintain rd & wr idx accordingly and set data
* Passed parameters:
- * host_no: host number accociated with fnic
+ * host_no: host number associated with fnic
* frame_type: send_frame, rece_frame or link event
* fc_frame: pointer to fc_frame
* frame_len: Length of the fc_frame
@@ -715,13 +701,13 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
* fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
* Passed parameter:
* @fnic_dbgfs_t: pointer to debugfs trace buffer
- * rdata_flag: 1 => Unformated file
- * 0 => formated file
+ * rdata_flag: 1 => Unformatted file
+ * 0 => formatted file
* Description:
* This routine will copy the trace data to memory file with
* proper formatting and also copy to another memory
- * file without formatting for further procesing.
- * Retrun Value:
+ * file without formatting for further processing.
+ * Return Value:
* Number of bytes that were dumped into fnic_dbgfs_t
*/
@@ -742,7 +728,7 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
rd_idx = fc_trace_entries.rd_idx;
wr_idx = fc_trace_entries.wr_idx;
if (rdata_flag == 0) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"Time Stamp (UTC)\t\t"
"Host No: F Type: len: FCoE_FRAME:\n");
@@ -762,11 +748,11 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
} else {
fc_trace = (char *)tdata;
for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
- len, "%02x", fc_trace[j] & 0xff);
} /* for loop */
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"\n");
}
@@ -785,10 +771,10 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
* @fc_trace_hdr_t: pointer to trace data
* @fnic_dbgfs_t: pointer to debugfs trace buffer
* @orig_len: pointer to len
- * rdata_flag: 0 => Formated file, 1 => Unformated file
+ * rdata_flag: 0 => Formatted file, 1 => Unformatted file
* Description:
* This routine will format and copy the passed trace data
- * for formated file or unformated file accordingly.
+ * for formatted file or unformatted file accordingly.
*/
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
@@ -810,7 +796,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len,
fmt,
tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
@@ -823,25 +809,25 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
for (j = 0; j < min_t(u8, tdata->frame_len,
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
if (tdata->frame_type == FNIC_FC_LE) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%c", fc_trace[j]);
} else {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%02x", fc_trace[j] & 0xff);
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, " ");
if (j == ethhdr_len ||
j == ethhdr_len + fcoehdr_len ||
j == ethhdr_len + fcoehdr_len + fchdr_len ||
(i > 3 && j%fchdr_len == 0)) {
- len += snprintf(fnic_dbgfs_prt->buffer
+ len += scnprintf(fnic_dbgfs_prt->buffer
+ len, max_size - len,
"\n\t\t\t\t\t\t\t\t");
i++;
}
} /* end of else*/
} /* End of for loop*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "\n");
*orig_len = len;
}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index 8aa55c1e2025..d1c301bf3fde 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2012 Cisco Systems, Inc. All rights reserved. */
#ifndef __FNIC_TRACE_H__
#define __FNIC_TRACE_H__
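
This hunk sets the pattern for the license-header churn in the rest of the series: the block of GPL and warranty boilerplate collapses into a machine-readable SPDX identifier on the first line, with the copyright notice kept. Note the two comment forms, which follow the kernel's license-rules convention (C sources take the C99 form; headers keep the classic form so they remain usable from non-C99 contexts):

// SPDX-License-Identifier: GPL-2.0-only        <- first line of a .c file
/* SPDX-License-Identifier: GPL-2.0-only */     <- first line of a .h file
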
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
index 92e80ae6b725..9bc509d355c4 100644
--- a/drivers/scsi/fnic/rq_enet_desc.h
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
index c5db32eda5ef..ed3dd443fe3e 100644
--- a/drivers/scsi/fnic/vnic_cq.c
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
index 4ede6809fb1e..e7cc1f165390 100644
--- a/drivers/scsi/fnic/vnic_cq.h
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
index 7901ce255a81..1b198ee59dd6 100644
--- a/drivers/scsi/fnic/vnic_cq_copy.h
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_CQ_COPY_H_
#define _VNIC_CQ_COPY_H_
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1b88a3b53eee..3e5b437c0492 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
@@ -254,7 +242,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
}
}
-int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
+static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
@@ -316,7 +304,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
return -ETIMEDOUT;
}
-int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
@@ -411,7 +399,7 @@ int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
-int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
@@ -422,7 +410,7 @@ int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
}
-int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
int err;
unsigned int fetch_index;
@@ -444,7 +432,8 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_free_wq;
}
/*
@@ -460,7 +449,7 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
- goto err_free_wq;
+ goto err_disable_wq;
vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +470,9 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
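
The two goto changes and the label reorder above fix a pair of unwind bugs in vnic_dev_init_devcmd2(): the "hardware gone" check used to bail out with a bare `return -ENODEV`, leaking the work queue allocated just before it, and the single err_free_wq label always disabled the queue, even on paths where it had never been brought up. Splitting the label restores the usual inverse-order unwind, where each label undoes one setup step and falls through into the labels for earlier steps. A self-contained sketch of that pattern; the stub functions are hypothetical stand-ins for the vnic_wq_*() calls, not the driver's API:

#include <stdio.h>

static int  alloc_wq(void)   { puts("alloc wq");   return 0; }
static int  check_hw(void)   { puts("hw gone");    return -1; }
static void enable_wq(void)  { puts("enable wq"); }
static int  alloc_ring(void) { puts("alloc ring"); return 0; }
static void disable_wq(void) { puts("disable wq"); }
static void free_wq(void)    { puts("free wq"); }

static int setup(void)
{
	int err;

	err = alloc_wq();
	if (err)
		return err;

	err = check_hw();		/* fails before the wq is enabled */
	if (err)
		goto err_free_wq;	/* so skip the disable step */

	enable_wq();

	err = alloc_ring();
	if (err)
		goto err_disable_wq;	/* wq is live here: disable first */

	return 0;

err_disable_wq:
	disable_wq();
	/* fall through: later labels always run the earlier cleanups */
err_free_wq:
	free_wq();
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}
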
@@ -492,7 +482,7 @@ err_free_devcmd2:
}
-void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq);
@@ -503,7 +493,7 @@ void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
}
-int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
int err;
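
The remaining vnic_dev.c hunks (and the matching vnic_wq.c ones below) only add internal linkage: these helpers are called from nowhere outside the file, so `static` removes them from the global symbol namespace, satisfies -Wmissing-prototypes without a header declaration, and lets the compiler warn if one ever becomes unused. Trivial illustration:

#include <stdio.h>

/* Internal linkage: add_one() is invisible to other translation units,
 * so no shared prototype is needed and the symbol table stays clean. */
static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", add_one(41));
	return 0;
}
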
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index ef5309a5df5d..7a568d141cde 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index c5dde556dc7c..f876d223b2b4 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
@@ -442,7 +430,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/*
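
The `data[0]` -> `data[]` hunk swaps a GNU zero-length array for a C99 flexible array member. The layout is unchanged and sizeof the struct still excludes the trailing array, but the standard form lets the compiler and fortify-source checks reason about the bounds instead of treating every access as in range. The allocation idiom, modeled in plain C; the struct mirrors vnic_devcmd_provinfo but the sizes are illustrative, and kernel code would compute the total with struct_size():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct provinfo {
	uint8_t oui[3];
	uint8_t type;
	uint8_t data[];		/* flexible array member: adds 0 to sizeof */
};

int main(void)
{
	size_t n = 16;
	/* one allocation covers the 4-byte header plus n payload bytes */
	struct provinfo *p = malloc(sizeof(*p) + n);

	if (!p)
		return 1;
	memset(p->data, 0xab, n);
	printf("header=%zu payload=%zu\n", sizeof(*p), n);
	free(p);
	return 0;
}
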
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
index 4f4dc8793d23..df7f63acd879 100644
--- a/drivers/scsi/fnic/vnic_intr.c
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
index d5fb40e7c98e..acc194c0f522 100644
--- a/drivers/scsi/fnic/vnic_intr.h
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
index f15b83eeaced..6896f16d564b 100644
--- a/drivers/scsi/fnic/vnic_nic.h
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
index 7c6163f73bd3..3d260b831fc5 100644
--- a/drivers/scsi/fnic/vnic_resource.h
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
index 6a35b1be0032..350607d13c9a 100644
--- a/drivers/scsi/fnic/vnic_rq.c
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -191,4 +179,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
index aebdfbd6ad3c..1066255de808 100644
--- a/drivers/scsi/fnic/vnic_rq.h
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index e343e1d0f801..4e12f7b32d9d 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_SCSI_H_
#define _VNIC_SCSI_H_
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
index 5372e23c1cb3..4396397b089d 100644
--- a/drivers/scsi/fnic/vnic_stats.h
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 015af2cdabaf..426b901c80f3 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -25,7 +13,7 @@
#include "vnic_wq.h"
-int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int index, enum vnic_res_type res_type)
{
wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
@@ -37,7 +25,7 @@ int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
}
-int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
index 5d1e0a44d94a..041618e13ce2 100644
--- a/drivers/scsi/fnic/vnic_wq.h
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
index 9eab7e7caf38..96569d4ccc58 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.c
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -79,8 +67,6 @@ int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
unsigned int index, unsigned int desc_count,
unsigned int desc_size)
{
- int err;
-
wq->index = index;
wq->vdev = vdev;
wq->to_use_index = wq->to_clean_index = 0;
@@ -92,11 +78,7 @@ int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
vnic_wq_copy_disable(wq);
- err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
}
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
@@ -114,4 +96,3 @@ void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
}
-
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
index 6aff9740c3df..2f8340144e79 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.h
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
index b121cbad18b8..9a933a5dee79 100644
--- a/drivers/scsi/fnic/wq_enet_desc.h
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 2ab774e62e40..0c768e7d06b9 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -20,7 +20,7 @@
* Added ISAPNP support for DTC436 adapters,
* Thomas Sailer, sailer@ife.ee.ethz.ch
*
- * See Documentation/scsi/g_NCR5380.txt for more info.
+ * See Documentation/scsi/g_NCR5380.rst for more info.
*/
#include <asm/io.h>
@@ -340,7 +340,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
break;
case BOARD_DTC3181E:
hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
+ fallthrough;
case BOARD_NCR53C400A:
case BOARD_HP_C2502:
hostdata->c400_ctl_status = 9;
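
Replacing the /* fall through */ comment with the fallthrough pseudo-keyword gives -Wimplicit-fallthrough something it can actually check: a comment can drift or be deleted silently, while the statement attribute is verified on every build. A userspace model of the kernel macro from <linux/compiler_attributes.h>; the board ids here are made up:

#include <stdio.h>

#define fallthrough __attribute__((__fallthrough__))

static int io_width(int board)
{
	int width = 1;

	switch (board) {
	case 2:			/* hypothetical 16-bit PDMA board */
		width = 2;
		fallthrough;	/* deliberately share the code below */
	case 1:
		printf("width=%d\n", width);
		break;
	default:
		break;
	}
	return width;
}

int main(void)
{
	return io_width(2) == 2 ? 0 : 1;
}
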
@@ -529,14 +529,14 @@ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
if (start == len - 128) {
/* Ignore End of DMA interrupt for the final buffer */
if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status,
- CSR_HOST_BUF_NOT_RDY, 0, HZ / 64) < 0)
+ CSR_HOST_BUF_NOT_RDY, 0, 0) < 0)
break;
} else {
if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
CSR_HOST_BUF_NOT_RDY, 0,
hostdata->c400_ctl_status,
CSR_GATED_53C80_IRQ,
- CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+ CSR_GATED_53C80_IRQ, 0) < 0 ||
NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
break;
}
@@ -565,7 +565,7 @@ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_END_DMA_TRANSFER,
BASR_END_DMA_TRANSFER,
- HZ / 64) < 0)
+ 0) < 0)
scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
__func__);
@@ -597,7 +597,7 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
CSR_HOST_BUF_NOT_RDY, 0,
hostdata->c400_ctl_status,
CSR_GATED_53C80_IRQ,
- CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+ CSR_GATED_53C80_IRQ, 0) < 0 ||
NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) {
/* Both 128 B buffers are in use */
if (start >= 128)
@@ -644,13 +644,13 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
if (residual == 0) {
if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT,
- HZ / 64) < 0)
+ 0) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: Last Byte Sent timeout\n", __func__);
if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
- HZ / 64) < 0)
+ 0) < 0)
scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
__func__);
}
@@ -663,7 +663,7 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int transfersize = cmd->SCp.this_residual;
+ int transfersize = NCR5380_to_ncmd(cmd)->this_residual;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
return 0;
@@ -675,7 +675,7 @@ static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
/* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */
if (hostdata->board == BOARD_DTC3181E &&
cmd->sc_data_direction == DMA_TO_DEVICE)
- transfersize = min(cmd->SCp.this_residual, 512);
+ transfersize = min(transfersize, 512);
return min(transfersize, DMA_MAX_SIZE);
}
@@ -702,7 +702,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
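
The SCp and cmd_size hunks track the midlayer's retirement of the legacy scsi_pointer scratch area: the host template now requests sizeof(struct NCR5380_cmd) bytes of per-command private data, and NCR5380_to_ncmd() returns that region in place of cmd->SCp. A rough model of the layout; the type names are placeholders rather than the real midlayer structs, and the real accessor is scsi_cmd_priv():

#include <stdio.h>
#include <stdlib.h>

struct scsi_cmnd_model {	/* stands in for struct scsi_cmnd */
	int tag;
};

struct ncmd_model {		/* stands in for struct NCR5380_cmd */
	int this_residual;
};

/* The midlayer allocates cmd_size extra bytes directly after each
 * command, so private data is just "one past the end" of the command. */
static struct ncmd_model *to_ncmd(struct scsi_cmnd_model *cmd)
{
	return (struct ncmd_model *)(cmd + 1);
}

int main(void)
{
	struct scsi_cmnd_model *cmd =
		calloc(1, sizeof(*cmd) + sizeof(struct ncmd_model));

	if (!cmd)
		return 1;
	to_ncmd(cmd)->this_residual = 512;
	printf("residual=%d\n", to_ncmd(cmd)->this_residual);
	free(cmd);
	return 0;
}
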
@@ -720,12 +720,11 @@ static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int generic_NCR5380_isa_remove(struct device *pdev,
- unsigned int ndev)
+static void generic_NCR5380_isa_remove(struct device *pdev,
+ unsigned int ndev)
{
generic_NCR5380_release_resources(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
- return 0;
}
static struct isa_driver generic_NCR5380_isa_driver = {
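
The isa_remove() conversion above follows the bus-core change that made remove() callbacks return void: the ISA core never acted on the return value, so the `return 0;` was dead weight and a nonzero return could only mislead readers. Minimal sketch of the resulting shape, with a hypothetical device type:

#include <stdio.h>

struct dev_model {
	int id;
};

static void release_resources(struct dev_model *dev)
{
	printf("released device %d\n", dev->id);
}

/* remove() cannot fail from the bus core's point of view, so the
 * callback is typed void and performs unconditional teardown. */
static void demo_remove(struct dev_model *dev)
{
	release_resources(dev);
}

int main(void)
{
	struct dev_model d = { .id = 7 };

	demo_remove(&d);
	return 0;
}
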
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
deleted file mode 100644
index fe03410268e6..000000000000
--- a/drivers/scsi/gdth.c
+++ /dev/null
@@ -1,4323 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/************************************************************************
- * Linux driver for *
- * ICP vortex GmbH: GDT PCI Disk Array Controllers *
- * Intel Corporation: Storage RAID Controllers *
- * *
- * gdth.c *
- * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
- * Copyright (C) 2002-04 Intel Corporation *
- * Copyright (C) 2003-06 Adaptec Inc. *
- * <achim_leubner@adaptec.com> *
- * *
- * Additions/Fixes: *
- * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
- * Johannes Dinner <johannes_dinner@adaptec.com> *
- * *
- * *
- * Linux kernel 2.6.x supported *
- * *
- ************************************************************************/
-
-/* All GDT Disk Array Controllers are fully supported by this driver.
- * This includes the PCI SCSI Disk Array Controllers and the
- * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
- * list of all controller types.
- *
- * After the optional list of IRQ values, other possible
- * command line options are:
- * disable:Y disable driver
- * disable:N enable driver
- * reserve_mode:0 reserve no drives for the raw service
- * reserve_mode:1 reserve all not init., removable drives
- * reserve_mode:2 reserve all not init. drives
- * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
- * h- controller no., b- channel no.,
- * t- target ID, l- LUN
- * reverse_scan:Y reverse scan order for PCI controllers
- * reverse_scan:N scan PCI controllers like BIOS
- * max_ids:x x - target ID count per channel (1..MAXID)
- * rescan:Y rescan all channels/IDs
- * rescan:N use all devices found until now
- * hdr_channel:x x - number of virtual bus for host drives
- * shared_access:Y disable driver reserve/release protocol to
- * access a shared resource from several nodes,
- * appropriate controller firmware required
- * shared_access:N enable driver reserve/release protocol
- * force_dma32:Y use only 32 bit DMA mode
- * force_dma32:N use 64 bit DMA mode, if supported
- *
- * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
- * max_ids:127,rescan:N,hdr_channel:0,
- * shared_access:Y,force_dma32:N".
- * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
- *
- * When loading the gdth driver as a module, the same options are available.
- * You can set the IRQs with "IRQ=...". However, the syntax to specify the
- * options changes slightly. You must replace all ',' between options
- * with ' ' and all ':' with '=' and you must use
- * '1' in place of 'Y' and '0' in place of 'N'.
- *
- * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
- * max_ids=127 rescan=0 hdr_channel=0 shared_access=0
- * force_dma32=0"
- * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
- */
-
-/* The meaning of the Scsi_Pointer members in this driver is as follows:
- * ptr: Chaining
- * this_residual: unused
- * buffer: unused
- * dma_handle: unused
- * buffers_residual: unused
- * Status: unused
- * Message: unused
- * have_data_in: unused
- * sent_command: unused
- * phase: unused
- */
-
-/* statistics */
-#define GDTH_STATISTICS
-
-#include <linux/module.h>
-
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/proc_fs.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/reboot.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/spinlock.h>
-#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "gdth.h"
-
-static DEFINE_MUTEX(gdth_mutex);
-static void gdth_delay(int milliseconds);
-static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
-static irqreturn_t gdth_interrupt(int irq, void *dev_id);
-static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
- int gdth_from_wait, int* pIndex);
-static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- struct scsi_cmnd *scp);
-static int gdth_async_event(gdth_ha_str *ha);
-static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
-
-static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
-static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
-static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
- u16 idx, gdth_evt_data *evt);
-static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
-static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
- gdth_evt_str *estr);
-static void gdth_clear_events(void);
-
-static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
- char *buffer, u16 count);
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
- u16 hdrive);
-
-static void gdth_enable_int(gdth_ha_str *ha);
-static int gdth_test_busy(gdth_ha_str *ha);
-static int gdth_get_cmd_index(gdth_ha_str *ha);
-static void gdth_release_event(gdth_ha_str *ha);
-static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
-static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
- u32 p1, u64 p2,u64 p3);
-static int gdth_search_drives(gdth_ha_str *ha);
-static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
-
-static const char *gdth_ctr_name(gdth_ha_str *ha);
-
-static int gdth_open(struct inode *inode, struct file *filep);
-static int gdth_close(struct inode *inode, struct file *filep);
-static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg);
-
-static void gdth_flush(gdth_ha_str *ha);
-static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
- struct gdth_cmndinfo *cmndinfo);
-static void gdth_scsi_done(struct scsi_cmnd *scp);
-
-#ifdef DEBUG_GDTH
-static u8 DebugState = DEBUG_GDTH;
-#define TRACE(a) {if (DebugState==1) {printk a;}}
-#define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
-#define TRACE3(a) {if (DebugState!=0) {printk a;}}
-#else /* !DEBUG */
-#define TRACE(a)
-#define TRACE2(a)
-#define TRACE3(a)
-#endif
-
-#ifdef GDTH_STATISTICS
-static u32 max_rq=0, max_index=0, max_sg=0;
-static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
-static struct timer_list gdth_timer;
-#endif
-
-#define PTR2USHORT(a) (u16)(unsigned long)(a)
-#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
-#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
-
-#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
-
-static u8 gdth_polling; /* polling if TRUE */
-static int gdth_ctr_count = 0; /* controller count */
-static LIST_HEAD(gdth_instances); /* controller list */
-static u8 gdth_write_through = FALSE; /* write through */
-static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
-static int elastidx;
-static int eoldidx;
-static int major;
-
-#define DIN 1 /* IN data direction */
-#define DOU 2 /* OUT data direction */
-#define DNO DIN /* no data transfer */
-#define DUN DIN /* unknown data direction */
-static u8 gdth_direction_tab[0x100] = {
- DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
- DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
- DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
- DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
- DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
- DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
- DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
-};
-
-/* LILO and modprobe/insmod parameters */
-/* disable driver flag */
-static int disable __initdata = 0;
-/* reserve flag */
-static int reserve_mode = 1;
-/* reserve list */
-static int reserve_list[MAX_RES_ARGS] =
-{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
-/* scan order for PCI controllers */
-static int reverse_scan = 0;
-/* virtual channel for the host drives */
-static int hdr_channel = 0;
-/* max. IDs per channel */
-static int max_ids = MAXID;
-/* rescan all IDs */
-static int rescan = 0;
-/* shared access */
-static int shared_access = 1;
-/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
-static int force_dma32 = 0;
-
-/* parameters for modprobe/insmod */
-module_param(disable, int, 0);
-module_param(reserve_mode, int, 0);
-module_param_array(reserve_list, int, NULL, 0);
-module_param(reverse_scan, int, 0);
-module_param(hdr_channel, int, 0);
-module_param(max_ids, int, 0);
-module_param(rescan, int, 0);
-module_param(shared_access, int, 0);
-module_param(force_dma32, int, 0);
-MODULE_AUTHOR("Achim Leubner");
-MODULE_LICENSE("GPL");
-
-/* ioctl interface */
-static const struct file_operations gdth_fops = {
- .unlocked_ioctl = gdth_unlocked_ioctl,
- .open = gdth_open,
- .release = gdth_close,
- .llseek = noop_llseek,
-};
-
-#include "gdth_proc.h"
-#include "gdth_proc.c"
-
-static gdth_ha_str *gdth_find_ha(int hanum)
-{
- gdth_ha_str *ha;
-
- list_for_each_entry(ha, &gdth_instances, list)
- if (hanum == ha->hanum)
- return ha;
-
- return NULL;
-}
-
-static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
-{
- struct gdth_cmndinfo *priv = NULL;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (i=0; i<GDTH_MAXCMDS; ++i) {
- if (ha->cmndinfo[i].index == 0) {
- priv = &ha->cmndinfo[i];
- memset(priv, 0, sizeof(*priv));
- priv->index = i+1;
- break;
- }
- }
-
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- return priv;
-}
-
-static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
-{
- BUG_ON(!priv);
- priv->index = 0;
-}
-
-static void gdth_delay(int milliseconds)
-{
- if (milliseconds == 0) {
- udelay(1);
- } else {
- mdelay(milliseconds);
- }
-}
-
-static void gdth_scsi_done(struct scsi_cmnd *scp)
-{
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- int internal_command = cmndinfo->internal_command;
-
- TRACE2(("gdth_scsi_done()\n"));
-
- gdth_put_cmndinfo(cmndinfo);
- scp->host_scribble = NULL;
-
- if (internal_command)
- complete((struct completion *)scp->request);
- else
- scp->scsi_done(scp);
-}
-
-int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info)
-{
- gdth_ha_str *ha = shost_priv(sdev->host);
- struct scsi_cmnd *scp;
- struct gdth_cmndinfo cmndinfo;
- DECLARE_COMPLETION_ONSTACK(wait);
- int rval;
-
- scp = kzalloc(sizeof(*scp), GFP_KERNEL);
- if (!scp)
- return -ENOMEM;
-
- scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (!scp->sense_buffer) {
- kfree(scp);
- return -ENOMEM;
- }
-
- scp->device = sdev;
- memset(&cmndinfo, 0, sizeof(cmndinfo));
-
- /* use request field to save the ptr. to completion struct. */
- scp->request = (struct request *)&wait;
- scp->cmd_len = 12;
- scp->cmnd = cmnd;
- cmndinfo.priority = IOCTL_PRI;
- cmndinfo.internal_cmd_str = gdtcmd;
- cmndinfo.internal_command = 1;
-
- TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
- __gdth_queuecommand(ha, scp, &cmndinfo);
-
- wait_for_completion(&wait);
-
- rval = cmndinfo.status;
- if (info)
- *info = cmndinfo.info;
- kfree(scp->sense_buffer);
- kfree(scp);
- return rval;
-}
-
-int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info)
-{
- struct scsi_device *sdev = scsi_get_host_dev(shost);
- int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
-
- scsi_free_host_dev(sdev);
- return rval;
-}
-
-static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
-{
- *cyls = size /HEADS/SECS;
- if (*cyls <= MAXCYLS) {
- *heads = HEADS;
- *secs = SECS;
- } else { /* too high for 64*32 */
- *cyls = size /MEDHEADS/MEDSECS;
- if (*cyls <= MAXCYLS) {
- *heads = MEDHEADS;
- *secs = MEDSECS;
- } else { /* too high for 127*63 */
- *cyls = size /BIGHEADS/BIGSECS;
- *heads = BIGHEADS;
- *secs = BIGSECS;
- }
- }
-}
-
-static bool gdth_search_vortex(u16 device)
-{
- if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
- return true;
- if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
- device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
- return true;
- if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
- device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
- return true;
- return false;
-}
-
-static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
-static int gdth_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void gdth_pci_remove_one(struct pci_dev *pdev);
-static void gdth_remove_one(gdth_ha_str *ha);
-
-/* Vortex only makes RAID controllers.
- * We do not really want to specify all 550 ids here, so wildcard match.
- */
-static const struct pci_device_id gdthtable[] = {
- { PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, gdthtable);
-
-static struct pci_driver gdth_pci_driver = {
- .name = "gdth",
- .id_table = gdthtable,
- .probe = gdth_pci_init_one,
- .remove = gdth_pci_remove_one,
-};
-
-static void gdth_pci_remove_one(struct pci_dev *pdev)
-{
- gdth_ha_str *ha = pci_get_drvdata(pdev);
-
- list_del(&ha->list);
- gdth_remove_one(ha);
-
- pci_disable_device(pdev);
-}
-
-static int gdth_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u16 vendor = pdev->vendor;
- u16 device = pdev->device;
- unsigned long base0, base1, base2;
- int rc;
- gdth_pci_str gdth_pcistr;
- gdth_ha_str *ha = NULL;
-
- TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
- gdth_ctr_count, vendor, device));
-
- memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
-
- if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
- return -ENODEV;
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- if (gdth_ctr_count >= MAXHA)
- return -EBUSY;
-
- /* GDT PCI controller found, resources are already in pdev */
- gdth_pcistr.pdev = pdev;
- base0 = pci_resource_flags(pdev, 0);
- base1 = pci_resource_flags(pdev, 1);
- base2 = pci_resource_flags(pdev, 2);
- if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */
- device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
- if (!(base0 & IORESOURCE_MEM))
- return -ENODEV;
- gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
- } else { /* GDT6110, GDT6120, .. */
- if (!(base0 & IORESOURCE_MEM) ||
- !(base2 & IORESOURCE_MEM) ||
- !(base1 & IORESOURCE_IO))
- return -ENODEV;
- gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
- gdth_pcistr.io = pci_resource_start(pdev, 1);
- }
- TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
- gdth_pcistr.pdev->bus->number,
- PCI_SLOT(gdth_pcistr.pdev->devfn),
- gdth_pcistr.irq,
- gdth_pcistr.dpmem));
-
- rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
- gdth_ha_str *ha)
-{
- register gdt6_dpram_str __iomem *dp6_ptr;
- register gdt6c_dpram_str __iomem *dp6c_ptr;
- register gdt6m_dpram_str __iomem *dp6m_ptr;
- u32 retries;
- u8 prot_ver;
- u16 command;
- int i, found = FALSE;
-
- TRACE(("gdth_init_pci()\n"));
-
- if (pdev->vendor == PCI_VENDOR_ID_INTEL)
- ha->oem_id = OEM_ID_INTEL;
- else
- ha->oem_id = OEM_ID_ICP;
- ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
- ha->stype = (u32)pdev->device;
- ha->irq = pdev->irq;
- ha->pdev = pdev;
-
- if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */
- TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
- ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- /* check and reset interface area */
- dp6_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6_ptr->u);
- if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
- printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
- pcistr->dpmem);
- found = FALSE;
- for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
- iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(u16));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- if (readw(ha->brd) != 0xffff) {
- TRACE2(("init_pci_old() address 0x%x busy\n", i));
- continue;
- }
- iounmap(ha->brd);
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
- ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp6_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6_ptr->u);
- if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
- printk("GDT-PCI: Use free address at 0x%x\n", i);
- found = TRUE;
- break;
- }
- }
- if (!found) {
- printk("GDT-PCI: No free address found!\n");
- iounmap(ha->brd);
- return 0;
- }
- }
- memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
- if (readl(&dp6_ptr->u) != 0) {
- printk("GDT-PCI: Initialization error (DPMEM write error)\n");
- iounmap(ha->brd);
- return 0;
- }
-
- /* disable board interrupts, deinit services */
- writeb(0xff, &dp6_ptr->io.irqdel);
- writeb(0x00, &dp6_ptr->io.irqen);
- writeb(0x00, &dp6_ptr->u.ic.S_Status);
- writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
-
- writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
- writeb(0, &dp6_ptr->io.event);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
- writeb(0, &dp6_ptr->u.ic.S_Status);
- writeb(0xff, &dp6_ptr->io.irqdel);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-PCI: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->type = GDT_PCI;
- ha->ic_all_size = sizeof(dp6_ptr->u);
-
- /* special command to controller BIOS */
- writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
- writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
- writeb(0, &dp6_ptr->io.event);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp6_ptr->u.ic.S_Status);
- writeb(0xff, &dp6_ptr->io.irqdel);
-
- ha->dma64_support = 0;
-
- } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
- ha->plx = (gdt6c_plx_regs *)pcistr->io;
- TRACE2(("init_pci_new() dpmem %lx irq %d\n",
- pcistr->dpmem,ha->irq));
- ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- iounmap(ha->brd);
- return 0;
- }
- /* check and reset interface area */
- dp6c_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6c_ptr->u);
- if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
- printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
- pcistr->dpmem);
- found = FALSE;
- for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
- iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(u16));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- if (readw(ha->brd) != 0xffff) {
- TRACE2(("init_pci_plx() address 0x%x busy\n", i));
- continue;
- }
- iounmap(ha->brd);
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
- ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp6c_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6c_ptr->u);
- if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
- printk("GDT-PCI: Use free address at 0x%x\n", i);
- found = TRUE;
- break;
- }
- }
- if (!found) {
- printk("GDT-PCI: No free address found!\n");
- iounmap(ha->brd);
- return 0;
- }
- }
- memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
- if (readl(&dp6c_ptr->u) != 0) {
- printk("GDT-PCI: Initialization error (DPMEM write error)\n");
- iounmap(ha->brd);
- return 0;
- }
-
- /* disable board interrupts, deinit services */
- outb(0x00,PTR2USHORT(&ha->plx->control1));
- outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
-
- writeb(0x00, &dp6c_ptr->u.ic.S_Status);
- writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
-
- writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
-
- outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
-
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
- writeb(0, &dp6c_ptr->u.ic.Status);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-PCI: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->type = GDT_PCINEW;
- ha->ic_all_size = sizeof(dp6c_ptr->u);
-
- /* special command to controller BIOS */
- writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
- writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
-
- outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
-
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp6c_ptr->u.ic.S_Status);
-
- ha->dma64_support = 0;
-
- } else { /* MPR */
- TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
- ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
-
- /* manipulate config. space to enable DPMEM, start RP controller */
- pci_read_config_word(pdev, PCI_COMMAND, &command);
- command |= 6;
- pci_write_config_word(pdev, PCI_COMMAND, command);
- gdth_delay(1);
-
- dp6m_ptr = ha->brd;
-
- /* Ensure that it is safe to access the non HW portions of DPMEM.
- * Aditional check needed for Xscale based RAID controllers */
- while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
- gdth_delay(1);
-
- /* check and reset interface area */
- writel(DPMEM_MAGIC, &dp6m_ptr->u);
- if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
- printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
- pcistr->dpmem);
- found = FALSE;
- for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
- iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(u16));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- if (readw(ha->brd) != 0xffff) {
- TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
- continue;
- }
- iounmap(ha->brd);
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
- ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp6m_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6m_ptr->u);
- if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
- printk("GDT-PCI: Use free address at 0x%x\n", i);
- found = TRUE;
- break;
- }
- }
- if (!found) {
- printk("GDT-PCI: No free address found!\n");
- iounmap(ha->brd);
- return 0;
- }
- }
- memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));
-
- /* disable board interrupts, deinit services */
- writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
- &dp6m_ptr->i960r.edoor_en_reg);
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(0x00, &dp6m_ptr->u.ic.S_Status);
- writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
-
- writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
- writeb(1, &dp6m_ptr->i960r.ldoor_reg);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
- writeb(0, &dp6m_ptr->u.ic.S_Status);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-PCI: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->type = GDT_PCIMPR;
- ha->ic_all_size = sizeof(dp6m_ptr->u);
-
- /* special command to controller BIOS */
- writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
- writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
- writeb(1, &dp6m_ptr->i960r.ldoor_reg);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp6m_ptr->u.ic.S_Status);
-
- /* read FW version to detect 64-bit DMA support */
- writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
- writeb(1, &dp6m_ptr->i960r.ldoor_reg);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
- writeb(0, &dp6m_ptr->u.ic.S_Status);
- if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
- ha->dma64_support = 0;
- else
- ha->dma64_support = 1;
- }
-
- return 1;
-}
-
-/* controller protocol functions */
-
-static void gdth_enable_int(gdth_ha_str *ha)
-{
- unsigned long flags;
- gdt6_dpram_str __iomem *dp6_ptr;
- gdt6m_dpram_str __iomem *dp6m_ptr;
-
- TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- if (ha->type == GDT_PCI) {
- dp6_ptr = ha->brd;
- writeb(1, &dp6_ptr->io.irqdel);
- writeb(0, &dp6_ptr->u.ic.Cmd_Index);
- writeb(1, &dp6_ptr->io.irqen);
- } else if (ha->type == GDT_PCINEW) {
- outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
- outb(0x03, PTR2USHORT(&ha->plx->control1));
- } else if (ha->type == GDT_PCIMPR) {
- dp6m_ptr = ha->brd;
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
- &dp6m_ptr->i960r.edoor_en_reg);
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-/* return IStatus if interrupt was from this card else 0 */
-static u8 gdth_get_status(gdth_ha_str *ha)
-{
- u8 IStatus = 0;
-
- TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
-
- if (ha->type == GDT_PCI)
- IStatus =
- readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
- else if (ha->type == GDT_PCINEW)
- IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
- else if (ha->type == GDT_PCIMPR)
- IStatus =
- readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
-
- return IStatus;
-}
-
-static int gdth_test_busy(gdth_ha_str *ha)
-{
- register int gdtsema0 = 0;
-
- TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
-
- if (ha->type == GDT_PCI)
- gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
- else if (ha->type == GDT_PCINEW)
- gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
- else if (ha->type == GDT_PCIMPR)
- gdtsema0 =
- (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
-
- return (gdtsema0 & 1);
-}
-
-
-static int gdth_get_cmd_index(gdth_ha_str *ha)
-{
- int i;
-
- TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
-
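- /* slot i is reported to the firmware as command index i+2; the low
- index values are reserved for special interrupt status codes (see
- the ASYNCINDEX/SPEZINDEX checks and the cmd_tab[IStatus-2] lookup
- in the interrupt handler) */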
- for (i=0; i<GDTH_MAXCMDS; ++i) {
- if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
- ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
- ha->cmd_tab[i].service = ha->pccb->Service;
- ha->pccb->CommandIndex = (u32)i+2;
- return (i+2);
- }
- }
- return 0;
-}
-
-
-static void gdth_set_sema0(gdth_ha_str *ha)
-{
- TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
-
- if (ha->type == GDT_PCI) {
- writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
- } else if (ha->type == GDT_PCINEW) {
- outb(1, PTR2USHORT(&ha->plx->sema0_reg));
- } else if (ha->type == GDT_PCIMPR) {
- writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
- }
-}
-
-
-static void gdth_copy_command(gdth_ha_str *ha)
-{
- register gdth_cmd_str *cmd_ptr;
- register gdt6m_dpram_str __iomem *dp6m_ptr;
- register gdt6c_dpram_str __iomem *dp6c_ptr;
- gdt6_dpram_str __iomem *dp6_ptr;
- u16 cp_count,dp_offset,cmd_no;
-
- TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
-
- cp_count = ha->cmd_len;
- dp_offset= ha->cmd_offs_dpmem;
- cmd_no = ha->cmd_cnt;
- cmd_ptr = ha->pccb;
-
- ++ha->cmd_cnt;
-
- /* round cp_count up to the next dword boundary */
- if (cp_count & 3)
- cp_count += (4 - (cp_count & 3));
-
- ha->cmd_offs_dpmem += cp_count;
-
- /* set offset and service, copy command to DPMEM */
- if (ha->type == GDT_PCI) {
- dp6_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- } else if (ha->type == GDT_PCINEW) {
- dp6c_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- } else if (ha->type == GDT_PCIMPR) {
- dp6m_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- }
-}
-
-
-static void gdth_release_event(gdth_ha_str *ha)
-{
- TRACE(("gdth_release_event() hanum %d\n", ha->hanum));
-
-#ifdef GDTH_STATISTICS
- {
- u32 i,j;
- for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
- if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
- ++i;
- }
- if (max_index < i) {
- max_index = i;
- TRACE3(("GDT: max_index = %d\n",(u16)i));
- }
- }
-#endif
-
- if (ha->pccb->OpCode == GDT_INIT)
- ha->pccb->Service |= 0x80;
-
- if (ha->type == GDT_PCI) {
- writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
- } else if (ha->type == GDT_PCINEW) {
- outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
- } else if (ha->type == GDT_PCIMPR) {
- writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
- }
-}
-
-static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
-{
- int answer_found = FALSE;
- int wait_index = 0;
-
- TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));
-
- if (index == 0)
- return 1; /* no wait required */
-
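- /* poll the interrupt handler until the answer for this command
- index arrives or the time budget runs out, then wait for the
- controller to go idle again */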
- do {
- __gdth_interrupt(ha, true, &wait_index);
- if (wait_index == index) {
- answer_found = TRUE;
- break;
- }
- gdth_delay(1);
- } while (--time);
-
- while (gdth_test_busy(ha))
- gdth_delay(0);
-
- return (answer_found);
-}
-
-
-static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
- u32 p1, u64 p2, u64 p3)
-{
- register gdth_cmd_str *cmd_ptr;
- int retries,index;
-
- TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
-
- cmd_ptr = ha->pccb;
- memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
-
- /* make command */
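- /* build and issue the command, retrying as long as the controller
- answers S_BSY */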
- for (retries = INIT_RETRIES;;) {
- cmd_ptr->Service = service;
- cmd_ptr->RequestBuffer = INTERNAL_CMND;
- if (!(index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
- gdth_set_sema0(ha);
- cmd_ptr->OpCode = opcode;
- cmd_ptr->BoardNode = LOCALBOARD;
- if (service == CACHESERVICE) {
- if (opcode == GDT_IOCTL) {
- cmd_ptr->u.ioctl.subfunc = p1;
- cmd_ptr->u.ioctl.channel = (u32)p2;
- cmd_ptr->u.ioctl.param_size = (u16)p3;
- cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
- } else {
- if (ha->cache_feat & GDT_64BIT) {
- cmd_ptr->u.cache64.DeviceNo = (u16)p1;
- cmd_ptr->u.cache64.BlockNo = p2;
- } else {
- cmd_ptr->u.cache.DeviceNo = (u16)p1;
- cmd_ptr->u.cache.BlockNo = (u32)p2;
- }
- }
- } else if (service == SCSIRAWSERVICE) {
- if (ha->raw_feat & GDT_64BIT) {
- cmd_ptr->u.raw64.direction = p1;
- cmd_ptr->u.raw64.bus = (u8)p2;
- cmd_ptr->u.raw64.target = (u8)p3;
- cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
- } else {
- cmd_ptr->u.raw.direction = p1;
- cmd_ptr->u.raw.bus = (u8)p2;
- cmd_ptr->u.raw.target = (u8)p3;
- cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
- }
- } else if (service == SCREENSERVICE) {
- if (opcode == GDT_REALTIME) {
- *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
- *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
- *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
- }
- }
- ha->cmd_len = sizeof(gdth_cmd_str);
- ha->cmd_offs_dpmem = 0;
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- gdth_release_event(ha);
- gdth_delay(20);
- if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
- printk("GDT: Initialization error (timeout service %d)\n",service);
- return 0;
- }
- if (ha->status != S_BSY || --retries == 0)
- break;
- gdth_delay(1);
- }
-
- return (ha->status != S_OK ? 0:1);
-}
-
-
-/* search for devices */
-
-static int gdth_search_drives(gdth_ha_str *ha)
-{
- u16 cdev_cnt, i;
- int ok;
- u32 bus_no, drv_cnt, drv_no, j;
- gdth_getch_str *chn;
- gdth_drlist_str *drl;
- gdth_iochan_str *ioc;
- gdth_raw_iochan_str *iocr;
- gdth_arcdl_str *alst;
- gdth_alist_str *alst2;
- gdth_oem_str_ioctl *oemstr;
-
- TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
- ok = 0;
-
- /* initialize controller services, at first: screen service */
- ha->screen_feat = 0;
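- /* try the 64-bit init (GDT_X_INIT_SCR) first; if the firmware
- answers S_NOFUNC, fall back to the 32-bit GDT_INIT */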
- if (!force_dma32) {
- ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
- if (ok)
- ha->screen_feat = GDT_64BIT;
- }
- if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
- ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
- if (!ok) {
- printk("GDT-HA %d: Initialization error screen service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
-
- /* unfreeze all IOs */
- gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
-
- /* initialize cache service */
- ha->cache_feat = 0;
- if (!force_dma32) {
- ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
- 0, 0);
- if (ok)
- ha->cache_feat = GDT_64BIT;
- }
- if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
- ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
- if (!ok) {
- printk("GDT-HA %d: Initialization error cache service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
- cdev_cnt = (u16)ha->info;
- ha->fw_vers = ha->service;
-
- /* detect number of buses - try new IOCTL */
- iocr = (gdth_raw_iochan_str *)ha->pscratch;
- iocr->hdr.version = 0xffffffff;
- iocr->hdr.list_entries = MAXBUS;
- iocr->hdr.first_chan = 0;
- iocr->hdr.last_chan = MAXBUS-1;
- iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
- INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
- TRACE2(("IOCHAN_RAW_DESC supported!\n"));
- ha->bus_cnt = iocr->hdr.chan_count;
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- if (iocr->list[bus_no].proc_id < MAXID)
- ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
- else
- ha->bus_id[bus_no] = 0xff;
- }
- } else {
- /* old method */
- chn = (gdth_getch_str *)ha->pscratch;
- for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
- chn->channel_no = bus_no;
- if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- SCSI_CHAN_CNT | L_CTRL_PATTERN,
- IO_CHANNEL | INVALID_CHANNEL,
- sizeof(gdth_getch_str))) {
- if (bus_no == 0) {
- printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
- ha->hanum, ha->status);
- return 0;
- }
- break;
- }
- if (chn->siop_id < MAXID)
- ha->bus_id[bus_no] = chn->siop_id;
- else
- ha->bus_id[bus_no] = 0xff;
- }
- ha->bus_cnt = (u8)bus_no;
- }
- TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
-
- /* read cache configuration */
- if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
- INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
- printk("GDT-HA %d: Initialization error cache service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
- TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
- ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
- ha->cpar.write_back,ha->cpar.block_size));
-
- /* read board info and features */
- ha->more_proc = FALSE;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
- INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
- memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
- sizeof(gdth_binfo_str));
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
- INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
- TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
- ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
- ha->more_proc = TRUE;
- }
- } else {
- TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
- strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
- }
- TRACE2(("Controller name: %s\n",ha->binfo.type_string));
-
- /* read more information */
- if (ha->more_proc) {
- /* physical drives, channel addresses */
- ioc = (gdth_iochan_str *)ha->pscratch;
- ioc->hdr.version = 0xffffffff;
- ioc->hdr.list_entries = MAXBUS;
- ioc->hdr.first_chan = 0;
- ioc->hdr.last_chan = MAXBUS-1;
- ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
- INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- ha->raw[bus_no].address = ioc->list[bus_no].address;
- ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
- }
- } else {
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- ha->raw[bus_no].address = IO_CHANNEL;
- ha->raw[bus_no].local_no = bus_no;
- }
- }
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- chn = (gdth_getch_str *)ha->pscratch;
- chn->channel_no = ha->raw[bus_no].local_no;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- SCSI_CHAN_CNT | L_CTRL_PATTERN,
- ha->raw[bus_no].address | INVALID_CHANNEL,
- sizeof(gdth_getch_str))) {
- ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
- TRACE2(("Channel %d: %d phys. drives\n",
- bus_no,chn->drive_cnt));
- }
- if (ha->raw[bus_no].pdev_cnt > 0) {
- drl = (gdth_drlist_str *)ha->pscratch;
- drl->sc_no = ha->raw[bus_no].local_no;
- drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- SCSI_DR_LIST | L_CTRL_PATTERN,
- ha->raw[bus_no].address | INVALID_CHANNEL,
- sizeof(gdth_drlist_str))) {
- for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
- ha->raw[bus_no].id_list[j] = drl->sc_list[j];
- } else {
- ha->raw[bus_no].pdev_cnt = 0;
- }
- }
- }
-
- /* logical drives */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
- INVALID_CHANNEL,sizeof(u32))) {
- drv_cnt = *(u32 *)ha->pscratch;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
- INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
- for (j = 0; j < drv_cnt; ++j) {
- drv_no = ((u32 *)ha->pscratch)[j];
- if (drv_no < MAX_LDRIVES) {
- ha->hdr[drv_no].is_logdrv = TRUE;
- TRACE2(("Drive %d is log. drive\n",drv_no));
- }
- }
- }
- alst = (gdth_arcdl_str *)ha->pscratch;
- alst->entries_avail = MAX_LDRIVES;
- alst->first_entry = 0;
- alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
- INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
- (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
- for (j = 0; j < alst->entries_init; ++j) {
- ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
- ha->hdr[j].is_master = alst->list[j].is_master;
- ha->hdr[j].is_parity = alst->list[j].is_parity;
- ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
- ha->hdr[j].master_no = alst->list[j].cd_handle;
- }
- } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- ARRAY_DRV_LIST | LA_CTRL_PATTERN,
- 0, 35 * sizeof(gdth_alist_str))) {
- for (j = 0; j < 35; ++j) {
- alst2 = &((gdth_alist_str *)ha->pscratch)[j];
- ha->hdr[j].is_arraydrv = alst2->is_arrayd;
- ha->hdr[j].is_master = alst2->is_master;
- ha->hdr[j].is_parity = alst2->is_parity;
- ha->hdr[j].is_hotfix = alst2->is_hotfix;
- ha->hdr[j].master_no = alst2->cd_handle;
- }
- }
- }
- }
-
- /* initialize raw service */
- ha->raw_feat = 0;
- if (!force_dma32) {
- ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
- if (ok)
- ha->raw_feat = GDT_64BIT;
- }
- if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
- ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
- if (!ok) {
- printk("GDT-HA %d: Initialization error raw service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
-
- /* set/get features raw service (scatter/gather) */
- if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
- 0, 0)) {
- TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
- if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
- TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
- ha->info));
- ha->raw_feat |= (u16)ha->info;
- }
- }
-
- /* set/get features cache service (equal to raw service) */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
- SCATTER_GATHER,0)) {
- TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
- TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
- ha->info));
- ha->cache_feat |= (u16)ha->info;
- }
- }
-
- /* reserve drives for raw service */
- if (reserve_mode != 0) {
- gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
- reserve_mode == 1 ? 1 : 3, 0, 0);
- TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
- ha->status));
- }
- for (i = 0; i < MAX_RES_ARGS; i += 4) {
- if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
- reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
- TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
- reserve_list[i], reserve_list[i+1],
- reserve_list[i+2], reserve_list[i+3]));
- if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
- reserve_list[i+1], reserve_list[i+2] |
- (reserve_list[i+3] << 8))) {
- printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
- ha->hanum, ha->status);
- }
- }
- }
-
- /* Determine OEM string using IOCTL */
- oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
- oemstr->params.ctl_version = 0x01;
- oemstr->params.buffer_size = sizeof(oemstr->text);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
- sizeof(gdth_oem_str_ioctl))) {
- TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
- printk("GDT-HA %d: Vendor: %s Name: %s\n",
- ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
- /* Save the Host Drive inquiry data */
- strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
- sizeof(ha->oem_name));
- } else {
- /* Old method, based on PCI ID */
- TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
- printk("GDT-HA %d: Name: %s\n",
- ha->hanum, ha->binfo.type_string);
- if (ha->oem_id == OEM_ID_INTEL)
- strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name));
- else
- strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name));
- }
-
- /* scanning for host drives */
- for (i = 0; i < cdev_cnt; ++i)
- gdth_analyse_hdrive(ha, i);
-
- TRACE(("gdth_search_drives() OK\n"));
- return 1;
-}
-
-static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
-{
- u32 drv_cyls;
- int drv_hds, drv_secs;
-
- TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
- if (hdrive >= MAX_HDRIVES)
- return 0;
-
- if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
- return 0;
- ha->hdr[hdrive].present = TRUE;
- ha->hdr[hdrive].size = ha->info;
-
- /* evaluate mapping (sectors per head, heads per cylinder) */
- ha->hdr[hdrive].size &= ~SECS32;
- if (ha->info2 == 0) {
- gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
- } else {
- drv_hds = ha->info2 & 0xff;
- drv_secs = (ha->info2 >> 8) & 0xff;
- drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
- }
- ha->hdr[hdrive].heads = (u8)drv_hds;
- ha->hdr[hdrive].secs = (u8)drv_secs;
- /* round size */
- ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
-
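- /* a 64-bit cache service reports the full drive size via
- GDT_X_INFO as info2:info */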
- if (ha->cache_feat & GDT_64BIT) {
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
- && ha->info2 != 0) {
- ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
- }
- }
- TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
- hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));
-
- /* get information about the device */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
- TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
- hdrive,ha->info));
- ha->hdr[hdrive].devtype = (u16)ha->info;
- }
-
- /* cluster info */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
- TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
- hdrive,ha->info));
- if (!shared_access)
- ha->hdr[hdrive].cluster_type = (u8)ha->info;
- }
-
- /* R/W attributes */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
- TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
- hdrive,ha->info));
- ha->hdr[hdrive].rw_attribs = (u8)ha->info;
- }
-
- return 1;
-}
-
-
-/* command queueing/sending functions */
-
-static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
-{
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- register struct scsi_cmnd *pscp;
- register struct scsi_cmnd *nscp;
- unsigned long flags;
-
- TRACE(("gdth_putq() priority %d\n",priority));
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- if (!cmndinfo->internal_command)
- cmndinfo->priority = priority;
-
- if (ha->req_first==NULL) {
- ha->req_first = scp; /* queue was empty */
- scp->SCp.ptr = NULL;
- } else { /* queue not empty */
- pscp = ha->req_first;
- nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
- /* priority: 0-highest,..,0xff-lowest */
- while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
- pscp = nscp;
- nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
- }
- pscp->SCp.ptr = (char *)scp;
- scp->SCp.ptr = (char *)nscp;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
-#ifdef GDTH_STATISTICS
- flags = 0;
- for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
- ++flags;
- if (max_rq < flags) {
- max_rq = flags;
- TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
- }
-#endif
-}
-
-static void gdth_next(gdth_ha_str *ha)
-{
- register struct scsi_cmnd *pscp;
- register struct scsi_cmnd *nscp;
- u8 b, t, l, firsttime;
- u8 this_cmd, next_cmd;
- unsigned long flags = 0;
- int cmd_index;
-
- TRACE(("gdth_next() hanum %d\n", ha->hanum));
- if (!gdth_polling)
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
- this_cmd = firsttime = TRUE;
- next_cmd = gdth_polling ? FALSE:TRUE;
- cmd_index = 0;
-
- for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
- struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
- if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
- pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
- if (!nscp_cmndinfo->internal_command) {
- b = nscp->device->channel;
- t = nscp->device->id;
- l = nscp->device->lun;
- if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
- (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
- continue;
- }
- } else
- b = t = l = 0;
-
- if (firsttime) {
- if (gdth_test_busy(ha)) { /* controller busy ? */
- TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
- if (!gdth_polling) {
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return;
- }
- while (gdth_test_busy(ha))
- gdth_delay(1);
- }
- firsttime = FALSE;
- }
-
- if (!nscp_cmndinfo->internal_command) {
- if (nscp_cmndinfo->phase == -1) {
- nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. */
- if (nscp->cmnd[0] == TEST_UNIT_READY) {
- TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
- b, t, l));
- /* TEST_UNIT_READY -> set scan mode */
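- /* scan-mode state machine: a TUR to 0/0/0 arms the scan (mode 1),
- the next TUR issues GDT_SCAN_START (mode 0x12), and the TUR to
- the last bus/target ID issues GDT_SCAN_END */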
- if ((ha->scan_mode & 0x0f) == 0) {
- if (b == 0 && t == 0 && l == 0) {
- ha->scan_mode |= 1;
- TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
- }
- } else if ((ha->scan_mode & 0x0f) == 1) {
- if (b == 0 && ((t == 0 && l == 1) ||
- (t == 1 && l == 0))) {
- nscp_cmndinfo->OpCode = GDT_SCAN_START;
- nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
- | SCSIRAWSERVICE;
- ha->scan_mode = 0x12;
- TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
- ha->scan_mode));
- } else {
- ha->scan_mode &= 0x10;
- TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
- }
- } else if (ha->scan_mode == 0x12) {
- if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
- nscp_cmndinfo->phase = SCSIRAWSERVICE;
- nscp_cmndinfo->OpCode = GDT_SCAN_END;
- ha->scan_mode &= 0x10;
- TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
- ha->scan_mode));
- }
- }
- }
- if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
- nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
- (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
- /* cluster drive: always issue GDT_CLUST_INFO first! */
- nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
- }
- }
- }
-
- if (nscp_cmndinfo->OpCode != -1) {
- if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
- if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- next_cmd = FALSE;
- } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
- if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
- this_cmd = FALSE;
- next_cmd = FALSE;
- } else {
- memset((char*)nscp->sense_buffer,0,16);
- nscp->sense_buffer[0] = 0x70;
- nscp->sense_buffer[2] = NOT_READY;
- nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- }
- } else if (gdth_cmnd_priv(nscp)->internal_command) {
- if (!(cmd_index=gdth_special_cmd(ha, nscp)))
- this_cmd = FALSE;
- next_cmd = FALSE;
- } else if (b != ha->virt_bus) {
- if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
- !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
- this_cmd = FALSE;
- else
- ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
- } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
- TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
- nscp->cmnd[0], b, t, l));
- nscp->result = DID_BAD_TARGET << 16;
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else {
- switch (nscp->cmnd[0]) {
- case TEST_UNIT_READY:
- case INQUIRY:
- case REQUEST_SENSE:
- case READ_CAPACITY:
- case VERIFY:
- case START_STOP:
- case MODE_SENSE:
- case SERVICE_ACTION_IN_16:
- TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
- nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
- nscp->cmnd[4],nscp->cmnd[5]));
- if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
- /* return UNIT_ATTENTION */
- TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
- nscp->cmnd[0], t));
- ha->hdr[t].media_changed = FALSE;
- memset((char*)nscp->sense_buffer,0,16);
- nscp->sense_buffer[0] = 0x70;
- nscp->sense_buffer[2] = UNIT_ATTENTION;
- nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else if (gdth_internal_cache_cmd(ha, nscp))
- gdth_scsi_done(nscp);
- break;
-
- case ALLOW_MEDIUM_REMOVAL:
- TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
- nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
- nscp->cmnd[4],nscp->cmnd[5]));
- if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
- TRACE(("Prevent r. nonremov. drive->do nothing\n"));
- nscp->result = DID_OK << 16;
- nscp->sense_buffer[0] = 0;
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else {
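- /* stash the removable flag in cmnd[3]; gdth_fill_cache_cmd
- uses it to pick GDT_UNMOUNT vs. GDT_FLUSH */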
- nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
- TRACE(("Prevent/allow r. %d rem. drive %d\n",
- nscp->cmnd[4],nscp->cmnd[3]));
- if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- }
- break;
-
- case RESERVE:
- case RELEASE:
- TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
- "RESERVE" : "RELEASE"));
- if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- break;
-
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_16:
- case WRITE_16:
- if (ha->hdr[t].media_changed) {
- /* return UNIT_ATTENTION */
- TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
- nscp->cmnd[0], t));
- ha->hdr[t].media_changed = FALSE;
- memset((char*)nscp->sense_buffer,0,16);
- nscp->sense_buffer[0] = 0x70;
- nscp->sense_buffer[2] = UNIT_ATTENTION;
- nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- break;
-
- default:
- TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
- nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
- nscp->cmnd[4],nscp->cmnd[5]));
- printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
- ha->hanum, nscp->cmnd[0]);
- nscp->result = DID_ABORT << 16;
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- break;
- }
- }
-
- if (!this_cmd)
- break;
- if (nscp == ha->req_first)
- ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
- else
- pscp->SCp.ptr = nscp->SCp.ptr;
- if (!next_cmd)
- break;
- }
-
- if (ha->cmd_cnt > 0) {
- gdth_release_event(ha);
- }
-
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- if (gdth_polling && ha->cmd_cnt > 0) {
- if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
- printk("GDT-HA %d: Command %d timed out !\n",
- ha->hanum, cmd_index);
- }
-}
-
-/*
- * gdth_copy_internal_data() - copy a driver buffer into a scsi_cmnd's
- * scatter/gather buffers, using kmap_atomic() as needed.
- */
-static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
- char *buffer, u16 count)
-{
- u16 cpcount,i, max_sg = scsi_sg_count(scp);
- u16 cpsum,cpnow;
- struct scatterlist *sl;
- char *address;
-
- cpcount = min_t(u16, count, scsi_bufflen(scp));
-
- if (cpcount) {
- cpsum=0;
- scsi_for_each_sg(scp, sl, max_sg, i) {
- unsigned long flags;
- cpnow = (u16)sl->length;
- TRACE(("copy_internal() now %d sum %d count %d %d\n",
- cpnow, cpsum, cpcount, scsi_bufflen(scp)));
- if (cpsum+cpnow > cpcount)
- cpnow = cpcount - cpsum;
- cpsum += cpnow;
- if (!sg_page(sl)) {
- printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
- ha->hanum);
- return;
- }
- local_irq_save(flags);
- address = kmap_atomic(sg_page(sl)) + sl->offset;
- memcpy(address, buffer, cpnow);
- flush_dcache_page(sg_page(sl));
- kunmap_atomic(address);
- local_irq_restore(flags);
- if (cpsum == cpcount)
- break;
- buffer += cpnow;
- }
- } else if (count) {
- printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
- ha->hanum);
- WARN_ON(1);
- }
-}
-
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
-{
- u8 t;
- gdth_inq_data inq;
- gdth_rdcap_data rdc;
- gdth_sense_data sd;
- gdth_modep_data mpd;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-
- t = scp->device->id;
- TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
- scp->cmnd[0],t));
-
- scp->result = DID_OK << 16;
- scp->sense_buffer[0] = 0;
-
- switch (scp->cmnd[0]) {
- case TEST_UNIT_READY:
- case VERIFY:
- case START_STOP:
- TRACE2(("Test/Verify/Start hdrive %d\n",t));
- break;
-
- case INQUIRY:
- TRACE2(("Inquiry hdrive %d devtype %d\n",
- t,ha->hdr[t].devtype));
- inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
- /* you can set all disks to removable here if you want to do
- a flush using the ALLOW_MEDIUM_REMOVAL command */
- inq.modif_rmb = 0x00;
- if ((ha->hdr[t].devtype & 1) ||
- (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
- inq.modif_rmb = 0x80;
- inq.version = 2;
- inq.resp_aenc = 2;
- inq.add_length= 32;
- strcpy(inq.vendor,ha->oem_name);
- snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d",t);
- strcpy(inq.revision," ");
- gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
- break;
-
- case REQUEST_SENSE:
- TRACE2(("Request sense hdrive %d\n",t));
- sd.errorcode = 0x70;
- sd.segno = 0x00;
- sd.key = NO_SENSE;
- sd.info = 0;
- sd.add_length= 0;
- gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
- break;
-
- case MODE_SENSE:
- TRACE2(("Mode sense hdrive %d\n",t));
- memset((char*)&mpd,0,sizeof(gdth_modep_data));
- mpd.hd.data_length = sizeof(gdth_modep_data);
- mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;
- mpd.hd.bd_length = sizeof(mpd.bd);
- mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
- mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
- mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
- gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
- break;
-
- case READ_CAPACITY:
- TRACE2(("Read capacity hdrive %d\n",t));
- if (ha->hdr[t].size > (u64)0xffffffff)
- rdc.last_block_no = 0xffffffff;
- else
- rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
- rdc.block_length = cpu_to_be32(SECTOR_SIZE);
- gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
- break;
-
- case SERVICE_ACTION_IN_16:
- if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
- (ha->cache_feat & GDT_64BIT)) {
- gdth_rdcap16_data rdc16;
-
- TRACE2(("Read capacity (16) hdrive %d\n",t));
- rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
- rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
- gdth_copy_internal_data(ha, scp, (char*)&rdc16,
- sizeof(gdth_rdcap16_data));
- } else {
- scp->result = DID_ABORT << 16;
- }
- break;
-
- default:
- TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
- break;
- }
-
- if (!cmndinfo->wait_for_completion)
- cmndinfo->wait_for_completion++;
- else
- return 1;
-
- return 0;
-}
-
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
- u16 hdrive)
-{
- register gdth_cmd_str *cmdp;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- u32 cnt, blockcnt;
- u64 no, blockno;
- int i, cmd_index, read_write, sgcnt, mode64;
-
- cmdp = ha->pccb;
- TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
- scp->cmnd[0],scp->cmd_len,hdrive));
-
- mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
- /* no need to test for READ_16/WRITE_16 when !mode64: they cannot
- occur, because READ_CAPACITY_16 already returns an error in that
- case */
-
- cmdp->Service = CACHESERVICE;
- cmdp->RequestBuffer = scp;
- /* search free command index */
- if (!(cmd_index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
- /* if it's the first command, set command semaphore */
- if (ha->cmd_cnt == 0)
- gdth_set_sema0(ha);
-
- /* fill command */
- read_write = 0;
- if (cmndinfo->OpCode != -1)
- cmdp->OpCode = cmndinfo->OpCode; /* special cache cmd. */
- else if (scp->cmnd[0] == RESERVE)
- cmdp->OpCode = GDT_RESERVE_DRV;
- else if (scp->cmnd[0] == RELEASE)
- cmdp->OpCode = GDT_RELEASE_DRV;
- else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
- if (scp->cmnd[4] & 1) /* prevent ? */
- cmdp->OpCode = GDT_MOUNT;
- else if (scp->cmnd[3] & 1) /* removable drive ? */
- cmdp->OpCode = GDT_UNMOUNT;
- else
- cmdp->OpCode = GDT_FLUSH;
- } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
- scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
- ) {
- read_write = 1;
- if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
- (ha->cache_feat & GDT_WR_THROUGH)))
- cmdp->OpCode = GDT_WRITE_THR;
- else
- cmdp->OpCode = GDT_WRITE;
- } else {
- read_write = 2;
- cmdp->OpCode = GDT_READ;
- }
-
- cmdp->BoardNode = LOCALBOARD;
- if (mode64) {
- cmdp->u.cache64.DeviceNo = hdrive;
- cmdp->u.cache64.BlockNo = 1;
- cmdp->u.cache64.sg_canz = 0;
- } else {
- cmdp->u.cache.DeviceNo = hdrive;
- cmdp->u.cache.BlockNo = 1;
- cmdp->u.cache.sg_canz = 0;
- }
-
- if (read_write) {
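- /* decode LBA and transfer length from the CDB: 16-byte (64-bit
- LBA, 32-bit count), 10-byte (32-bit LBA, 16-bit count) or
- 6-byte (21-bit LBA, 8-bit count, where 0 means 256 blocks) */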
- if (scp->cmd_len == 16) {
- memcpy(&no, &scp->cmnd[2], sizeof(u64));
- blockno = be64_to_cpu(no);
- memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
- blockcnt = be32_to_cpu(cnt);
- } else if (scp->cmd_len == 10) {
- memcpy(&no, &scp->cmnd[2], sizeof(u32));
- blockno = be32_to_cpu(no);
- memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
- blockcnt = be16_to_cpu(cnt);
- } else {
- memcpy(&no, &scp->cmnd[0], sizeof(u32));
- blockno = be32_to_cpu(no) & 0x001fffffUL;
- blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
- }
- if (mode64) {
- cmdp->u.cache64.BlockNo = blockno;
- cmdp->u.cache64.BlockCnt = blockcnt;
- } else {
- cmdp->u.cache.BlockNo = (u32)blockno;
- cmdp->u.cache.BlockCnt = blockcnt;
- }
-
- if (scsi_bufflen(scp)) {
- cmndinfo->dma_dir = (read_write == 1 ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
- scsi_sg_count(scp), cmndinfo->dma_dir);
- if (mode64) {
- struct scatterlist *sl;
-
- cmdp->u.cache64.DestAddr= (u64)-1;
- cmdp->u.cache64.sg_canz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- } else {
- struct scatterlist *sl;
-
- cmdp->u.cache.DestAddr= 0xffffffff;
- cmdp->u.cache.sg_canz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- }
-
-#ifdef GDTH_STATISTICS
- if (max_sg < (u32)sgcnt) {
- max_sg = (u32)sgcnt;
- TRACE3(("GDT: max_sg = %d\n",max_sg));
- }
-#endif
-
- }
- }
- /* evaluate command size, check space */
- if (mode64) {
- TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
- cmdp->u.cache64.sg_lst[0].sg_ptr,
- cmdp->u.cache64.sg_lst[0].sg_len));
- TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
- cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
- (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
- } else {
- TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
- cmdp->u.cache.sg_lst[0].sg_ptr,
- cmdp->u.cache.sg_lst[0].sg_len));
- TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
- cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
- (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
- }
- if (ha->cmd_len & 3)
- ha->cmd_len += (4 - (ha->cmd_len & 3));
-
- if (ha->cmd_cnt > 0) {
- if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
- ha->ic_all_size) {
- TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
- ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
- return 0;
- }
- }
-
- /* copy command */
- gdth_copy_command(ha);
- return cmd_index;
-}
-
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
-{
- register gdth_cmd_str *cmdp;
- u16 i;
- dma_addr_t sense_paddr;
- int cmd_index, sgcnt, mode64;
- u8 t,l;
- struct gdth_cmndinfo *cmndinfo;
-
- t = scp->device->id;
- l = scp->device->lun;
- cmdp = ha->pccb;
- TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
- scp->cmnd[0],b,t,l));
-
- mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
-
- cmdp->Service = SCSIRAWSERVICE;
- cmdp->RequestBuffer = scp;
- /* search free command index */
- if (!(cmd_index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
- /* if it's the first command, set command semaphore */
- if (ha->cmd_cnt == 0)
- gdth_set_sema0(ha);
-
- cmndinfo = gdth_cmnd_priv(scp);
- /* fill command */
- if (cmndinfo->OpCode != -1) {
- cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */
- cmdp->BoardNode = LOCALBOARD;
- if (mode64) {
- cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
- TRACE2(("special raw cmd 0x%x param 0x%x\n",
- cmdp->OpCode, cmdp->u.raw64.direction));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
- } else {
- cmdp->u.raw.direction = (cmndinfo->phase >> 8);
- TRACE2(("special raw cmd 0x%x param 0x%x\n",
- cmdp->OpCode, cmdp->u.raw.direction));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
- }
-
- } else {
- sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
- DMA_FROM_DEVICE);
-
- cmndinfo->sense_paddr = sense_paddr;
- cmdp->OpCode = GDT_WRITE; /* always */
- cmdp->BoardNode = LOCALBOARD;
- if (mode64) {
- cmdp->u.raw64.reserved = 0;
- cmdp->u.raw64.mdisc_time = 0;
- cmdp->u.raw64.mcon_time = 0;
- cmdp->u.raw64.clen = scp->cmd_len;
- cmdp->u.raw64.target = t;
- cmdp->u.raw64.lun = l;
- cmdp->u.raw64.bus = b;
- cmdp->u.raw64.priority = 0;
- cmdp->u.raw64.sdlen = scsi_bufflen(scp);
- cmdp->u.raw64.sense_len = 16;
- cmdp->u.raw64.sense_data = sense_paddr;
- cmdp->u.raw64.direction =
- gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
- memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
- cmdp->u.raw64.sg_ranz = 0;
- } else {
- cmdp->u.raw.reserved = 0;
- cmdp->u.raw.mdisc_time = 0;
- cmdp->u.raw.mcon_time = 0;
- cmdp->u.raw.clen = scp->cmd_len;
- cmdp->u.raw.target = t;
- cmdp->u.raw.lun = l;
- cmdp->u.raw.bus = b;
- cmdp->u.raw.priority = 0;
- cmdp->u.raw.link_p = 0;
- cmdp->u.raw.sdlen = scsi_bufflen(scp);
- cmdp->u.raw.sense_len = 16;
- cmdp->u.raw.sense_data = sense_paddr;
- cmdp->u.raw.direction =
- gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
- memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
- cmdp->u.raw.sg_ranz = 0;
- }
-
- if (scsi_bufflen(scp)) {
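- /* raw commands may transfer data in either direction (set per
- opcode above), so map the scatterlist bidirectionally */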
- cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
- sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
- scsi_sg_count(scp), cmndinfo->dma_dir);
- if (mode64) {
- struct scatterlist *sl;
-
- cmdp->u.raw64.sdata = (u64)-1;
- cmdp->u.raw64.sg_ranz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- } else {
- struct scatterlist *sl;
-
- cmdp->u.raw.sdata = 0xffffffff;
- cmdp->u.raw.sg_ranz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- }
-
-#ifdef GDTH_STATISTICS
- if (max_sg < sgcnt) {
- max_sg = sgcnt;
- TRACE3(("GDT: max_sg = %d\n",sgcnt));
- }
-#endif
-
- }
- if (mode64) {
- TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
- cmdp->u.raw64.sg_lst[0].sg_ptr,
- cmdp->u.raw64.sg_lst[0].sg_len));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
- (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
- } else {
- TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
- cmdp->u.raw.sg_lst[0].sg_ptr,
- cmdp->u.raw.sg_lst[0].sg_len));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
- (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
- }
- }
- /* check space */
- if (ha->cmd_len & 3)
- ha->cmd_len += (4 - (ha->cmd_len & 3));
-
- if (ha->cmd_cnt > 0) {
- if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
- ha->ic_all_size) {
- TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
- ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
- return 0;
- }
- }
-
- /* copy command */
- gdth_copy_command(ha);
- return cmd_index;
-}
-
-static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
-{
- register gdth_cmd_str *cmdp;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- int cmd_index;
-
- cmdp= ha->pccb;
- TRACE2(("gdth_special_cmd(): "));
-
- *cmdp = *cmndinfo->internal_cmd_str;
- cmdp->RequestBuffer = scp;
-
- /* search free command index */
- if (!(cmd_index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
-
- /* if it's the first command, set command semaphore */
- if (ha->cmd_cnt == 0)
- gdth_set_sema0(ha);
-
- /* evaluate command size, check space */
- if (cmdp->OpCode == GDT_IOCTL) {
- TRACE2(("IOCTL\n"));
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
- } else if (cmdp->Service == CACHESERVICE) {
- TRACE2(("cache command %d\n",cmdp->OpCode));
- if (ha->cache_feat & GDT_64BIT)
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
- else
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
- } else if (cmdp->Service == SCSIRAWSERVICE) {
- TRACE2(("raw command %d\n",cmdp->OpCode));
- if (ha->raw_feat & GDT_64BIT)
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
- else
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
- }
-
- if (ha->cmd_len & 3)
- ha->cmd_len += (4 - (ha->cmd_len & 3));
-
- if (ha->cmd_cnt > 0) {
- if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
- ha->ic_all_size) {
- TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
- ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
- return 0;
- }
- }
-
- /* copy command */
- gdth_copy_command(ha);
- return cmd_index;
-}
-
-
-/* Controller event handling functions */
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
- u16 idx, gdth_evt_data *evt)
-{
- gdth_evt_str *e;
-
- /* no GDTH_LOCK_HA() ! */
- TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
- if (source == 0) /* no source -> no event */
- return NULL;
-
- if (ebuffer[elastidx].event_source == source &&
- ebuffer[elastidx].event_idx == idx &&
- ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
- !memcmp((char *)&ebuffer[elastidx].event_data.eu,
- (char *)&evt->eu, evt->size)) ||
- (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
- !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
- (char *)&evt->event_string)))) {
- e = &ebuffer[elastidx];
- e->last_stamp = (u32)ktime_get_real_seconds();
- ++e->same_count;
- } else {
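- /* new event: advance elastidx through the ring buffer; when the
- buffer is full, eoldidx is pushed ahead and the oldest entry
- is overwritten */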
- if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
- ++elastidx;
- if (elastidx == MAX_EVENTS)
- elastidx = 0;
- if (elastidx == eoldidx) { /* reached mark ? */
- ++eoldidx;
- if (eoldidx == MAX_EVENTS)
- eoldidx = 0;
- }
- }
- e = &ebuffer[elastidx];
- e->event_source = source;
- e->event_idx = idx;
- e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
- e->same_count = 1;
- e->event_data = *evt;
- e->application = 0;
- }
- return e;
-}
-
-static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
-{
- gdth_evt_str *e;
- int eindex;
- unsigned long flags;
-
- TRACE2(("gdth_read_event() handle %d\n", handle));
- spin_lock_irqsave(&ha->smp_lock, flags);
- if (handle == -1)
- eindex = eoldidx;
- else
- eindex = handle;
- estr->event_source = 0;
-
- if (eindex < 0 || eindex >= MAX_EVENTS) {
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return eindex;
- }
- e = &ebuffer[eindex];
- if (e->event_source != 0) {
- if (eindex != elastidx) {
- if (++eindex == MAX_EVENTS)
- eindex = 0;
- } else {
- eindex = -1;
- }
- memcpy(estr, e, sizeof(gdth_evt_str));
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return eindex;
-}
-
-static void gdth_readapp_event(gdth_ha_str *ha,
- u8 application, gdth_evt_str *estr)
-{
- gdth_evt_str *e;
- int eindex;
- unsigned long flags;
- u8 found = FALSE;
-
- TRACE2(("gdth_readapp_event() app. %d\n", application));
- spin_lock_irqsave(&ha->smp_lock, flags);
- eindex = eoldidx;
- for (;;) {
- e = &ebuffer[eindex];
- if (e->event_source == 0)
- break;
- if ((e->application & application) == 0) {
- e->application |= application;
- found = TRUE;
- break;
- }
- if (eindex == elastidx)
- break;
- if (++eindex == MAX_EVENTS)
- eindex = 0;
- }
- if (found)
- memcpy(estr, e, sizeof(gdth_evt_str));
- else
- estr->event_source = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_clear_events(void)
-{
- TRACE(("gdth_clear_events()"));
-
- eoldidx = elastidx = 0;
- ebuffer[0].event_source = 0;
-}
-
-
-/* SCSI interface functions */
-
-static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
- int gdth_from_wait, int* pIndex)
-{
- gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
- gdt6_dpram_str __iomem *dp6_ptr;
- struct scsi_cmnd *scp;
- int rval, i;
- u8 IStatus;
- u16 Service;
- unsigned long flags = 0;
-
- TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
-
- /* if polling and not from gdth_wait() -> return */
- if (gdth_polling) {
- if (!gdth_from_wait) {
- return IRQ_HANDLED;
- }
- }
-
- if (!gdth_polling)
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- /* search controller */
- IStatus = gdth_get_status(ha);
- if (IStatus == 0) {
- /* spurious interrupt */
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
-
-#ifdef GDTH_STATISTICS
- ++act_ints;
-#endif
-
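- /* read status, info and service words from the board-specific
- registers, then acknowledge the interrupt */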
- if (ha->type == GDT_PCI) {
- dp6_ptr = ha->brd;
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = readw(&dp6_ptr->u.ic.Status);
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else /* no error */
- ha->status = S_OK;
- ha->info = readl(&dp6_ptr->u.ic.Info[0]);
- ha->service = readw(&dp6_ptr->u.ic.Service);
- ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);
-
- writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
- writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
- writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */
- } else if (ha->type == GDT_PCINEW) {
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = inw(PTR2USHORT(&ha->plx->status));
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else
- ha->status = S_OK;
- ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
- ha->service = inw(PTR2USHORT(&ha->plx->service));
- ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));
-
- outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
- outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
- } else if (ha->type == GDT_PCIMPR) {
- dp6m_ptr = ha->brd;
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = readw(&dp6m_ptr->i960r.status);
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else /* no error */
- ha->status = S_OK;
-
- ha->info = readl(&dp6m_ptr->i960r.info[0]);
- ha->service = readw(&dp6m_ptr->i960r.service);
- ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
-
- /* event string */
- if (IStatus == ASYNCINDEX) {
- if (ha->service != SCREENSERVICE &&
- (ha->fw_vers & 0xff) >= 0x1a) {
- ha->dvr.severity = readb
- (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
- for (i = 0; i < 256; ++i) {
- ha->dvr.event_string[i] = readb
- (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
- if (ha->dvr.event_string[i] == 0)
- break;
- }
- }
- }
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(0, &dp6m_ptr->i960r.sema1_reg);
- } else {
- TRACE2(("gdth_interrupt() unknown controller type\n"));
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
-
- TRACE(("gdth_interrupt() index %d stat %d info %d\n",
- IStatus,ha->status,ha->info));
-
- if (gdth_from_wait) {
- *pIndex = (int)IStatus;
- }
-
- if (IStatus == ASYNCINDEX) {
- TRACE2(("gdth_interrupt() async. event\n"));
- gdth_async_event(ha);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_next(ha);
- return IRQ_HANDLED;
- }
-
- if (IStatus == SPEZINDEX) {
- TRACE2(("Service unknown or not initialized !\n"));
- ha->dvr.size = sizeof(ha->dvr.eu.driver);
- ha->dvr.eu.driver.ionode = ha->hanum;
- gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
- scp = ha->cmd_tab[IStatus-2].cmnd;
- Service = ha->cmd_tab[IStatus-2].service;
- ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
- if (scp == UNUSED_CMND) {
- TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
- ha->dvr.size = sizeof(ha->dvr.eu.driver);
- ha->dvr.eu.driver.ionode = ha->hanum;
- ha->dvr.eu.driver.index = IStatus;
- gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
- if (scp == INTERNAL_CMND) {
- TRACE(("gdth_interrupt() answer to internal command\n"));
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
-
- TRACE(("gdth_interrupt() sync. status\n"));
- rval = gdth_sync_event(ha,Service,IStatus,scp);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- if (rval == 2) {
- gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
- } else if (rval == 1) {
- gdth_scsi_done(scp);
- }
-
- gdth_next(ha);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t gdth_interrupt(int irq, void *dev_id)
-{
- gdth_ha_str *ha = dev_id;
-
- return __gdth_interrupt(ha, false, NULL);
-}
-
-static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- struct scsi_cmnd *scp)
-{
- gdth_msg_str *msg;
- gdth_cmd_str *cmdp;
- u8 b, t;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-
- cmdp = ha->pccb;
- TRACE(("gdth_sync_event() serv %d status %d\n",
- service,ha->status));
-
- if (service == SCREENSERVICE) {
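- /* console message pump: print the message text, then either
- fetch the continuation with GDT_READ or post a default answer
- with GDT_WRITE */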
- msg = ha->pmsg;
- TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
- msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
- if (msg->msg_len > MSGLEN+1)
- msg->msg_len = MSGLEN+1;
- if (msg->msg_len && !(msg->msg_answer && msg->msg_ext)) {
- msg->msg_text[msg->msg_len] = '\0';
- printk("%s",msg->msg_text);
- }
-
- if (msg->msg_ext && !msg->msg_answer) {
- while (gdth_test_busy(ha))
- gdth_delay(0);
- cmdp->Service = SCREENSERVICE;
- cmdp->RequestBuffer = SCREEN_CMND;
- gdth_get_cmd_index(ha);
- gdth_set_sema0(ha);
- cmdp->OpCode = GDT_READ;
- cmdp->BoardNode = LOCALBOARD;
- cmdp->u.screen.reserved = 0;
- cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
- cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
- ha->cmd_offs_dpmem = 0;
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(u64);
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- gdth_release_event(ha);
- return 0;
- }
-
- if (msg->msg_answer && msg->msg_alen) {
- /* default answers (getchar() not possible) */
- if (msg->msg_alen == 1) {
- msg->msg_alen = 0;
- msg->msg_len = 1;
- msg->msg_text[0] = 0;
- } else {
- msg->msg_alen -= 2;
- msg->msg_len = 2;
- msg->msg_text[0] = 1;
- msg->msg_text[1] = 0;
- }
- msg->msg_ext = 0;
- msg->msg_answer = 0;
- while (gdth_test_busy(ha))
- gdth_delay(0);
- cmdp->Service = SCREENSERVICE;
- cmdp->RequestBuffer = SCREEN_CMND;
- gdth_get_cmd_index(ha);
- gdth_set_sema0(ha);
- cmdp->OpCode = GDT_WRITE;
- cmdp->BoardNode = LOCALBOARD;
- cmdp->u.screen.reserved = 0;
- cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
- cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
- ha->cmd_offs_dpmem = 0;
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(u64);
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- gdth_release_event(ha);
- return 0;
- }
- printk("\n");
-
- } else {
- b = scp->device->channel;
- t = scp->device->id;
- if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
- ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
- }
- /* cache or raw service */
- if (ha->status == S_BSY) {
- TRACE2(("Controller busy -> retry !\n"));
- if (cmndinfo->OpCode == GDT_MOUNT)
- cmndinfo->OpCode = GDT_CLUST_INFO;
- /* retry */
- return 2;
- }
- if (scsi_bufflen(scp))
- dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
- cmndinfo->dma_dir);
-
- if (cmndinfo->sense_paddr)
- dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
- DMA_FROM_DEVICE);
-
- if (ha->status == S_OK) {
- cmndinfo->status = S_OK;
- cmndinfo->info = ha->info;
- if (cmndinfo->OpCode != -1) {
- TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
- cmndinfo->OpCode));
- /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
- if (cmndinfo->OpCode == GDT_CLUST_INFO) {
- ha->hdr[t].cluster_type = (u8)ha->info;
- if (!(ha->hdr[t].cluster_type &
- CLUSTER_MOUNTED)) {
- /* NOT MOUNTED -> MOUNT */
- cmndinfo->OpCode = GDT_MOUNT;
- if (ha->hdr[t].cluster_type &
- CLUSTER_RESERVED) {
- /* cluster drive RESERVED (on the other node) */
- cmndinfo->phase = -2; /* reservation conflict */
- }
- } else {
- cmndinfo->OpCode = -1;
- }
- } else {
- if (cmndinfo->OpCode == GDT_MOUNT) {
- ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
- ha->hdr[t].media_changed = TRUE;
- } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
- ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
- ha->hdr[t].media_changed = TRUE;
- }
- cmndinfo->OpCode = -1;
- }
- /* retry */
- cmndinfo->priority = HIGH_PRI;
- return 2;
- } else {
- /* RESERVE/RELEASE ? */
- if (scp->cmnd[0] == RESERVE) {
- ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
- } else if (scp->cmnd[0] == RELEASE) {
- ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
- }
- scp->result = DID_OK << 16;
- scp->sense_buffer[0] = 0;
- }
- } else {
- cmndinfo->status = ha->status;
- cmndinfo->info = ha->info;
-
- if (cmndinfo->OpCode != -1) {
- TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
- cmndinfo->OpCode, ha->status));
- if (cmndinfo->OpCode == GDT_SCAN_START ||
- cmndinfo->OpCode == GDT_SCAN_END) {
- cmndinfo->OpCode = -1;
- /* retry */
- cmndinfo->priority = HIGH_PRI;
- return 2;
- }
- memset((char*)scp->sense_buffer,0,16);
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = NOT_READY;
- scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- } else if (service == CACHESERVICE) {
- if (ha->status == S_CACHE_UNKNOWN &&
- (ha->hdr[t].cluster_type &
- CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
- /* bus reset -> force GDT_CLUST_INFO */
- ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
- }
- memset((char*)scp->sense_buffer,0,16);
- if (ha->status == (u16)S_CACHE_RESERV) {
- scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
- } else {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = NOT_READY;
- scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- }
- if (!cmndinfo->internal_command) {
- ha->dvr.size = sizeof(ha->dvr.eu.sync);
- ha->dvr.eu.sync.ionode = ha->hanum;
- ha->dvr.eu.sync.service = service;
- ha->dvr.eu.sync.status = ha->status;
- ha->dvr.eu.sync.info = ha->info;
- ha->dvr.eu.sync.hostdrive = t;
- if (ha->status >= 0x8000)
- gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
- else
- gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
- }
- } else {
- /* sense buffer filled from controller firmware (DMA) */
- if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
- scp->result = DID_BAD_TARGET << 16;
- } else {
- scp->result = (DID_OK << 16) | ha->info;
- }
- }
- }
- if (!cmndinfo->wait_for_completion)
- cmndinfo->wait_for_completion++;
- else
- return 1;
- }
-
- return 0;
-}
-
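-/*
- * Table of asynchronous cache-service event messages. Each entry
- * begins with an encoded descriptor: byte 0 is the offset of the
- * printk() format string within the entry, and each following byte
- * pair is an (offset, size) reference into the event's eu.stream[]
- * data, where size 4 reads a u32, 2 a u16 and 1 a u8.
- * gdth_log_event() below walks these pairs to collect the arguments
- * for the format string. For example, "\005\000\002\006\004" means:
- * the format starts at byte 5, the arguments are a u16 at stream
- * offset 0 (the HA number) and a u32 at stream offset 6.
- */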
-static char *async_cache_tab[] = {
-/* 0*/ "\011\000\002\002\002\004\002\006\004"
- "GDT HA %u, service %u, async. status %u/%lu unknown",
-/* 1*/ "\011\000\002\002\002\004\002\006\004"
- "GDT HA %u, service %u, async. status %u/%lu unknown",
-/* 2*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu not ready",
-/* 3*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
-/* 4*/ "\005\000\002\006\004"
- "GDT HA %u, mirror update on Host Drive %lu failed",
-/* 5*/ "\005\000\002\006\004"
- "GDT HA %u, Mirror Drive %lu failed",
-/* 6*/ "\005\000\002\006\004"
- "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
-/* 7*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu write protected",
-/* 8*/ "\005\000\002\006\004"
- "GDT HA %u, media changed in Host Drive %lu",
-/* 9*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu is offline",
-/*10*/ "\005\000\002\006\004"
- "GDT HA %u, media change of Mirror Drive %lu",
-/*11*/ "\005\000\002\006\004"
- "GDT HA %u, Mirror Drive %lu is write protected",
-/*12*/ "\005\000\002\006\004"
- "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
-/*13*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Array Drive %u: Cache Drive %u failed",
-/*14*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: FAIL state entered",
-/*15*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: error",
-/*16*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
-/*17*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity build failed",
-/*18*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild failed",
-/*19*/ "\005\000\002\010\002"
- "GDT HA %u, Test of Hot Fix %u failed",
-/*20*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive build finished successfully",
-/*21*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
-/*22*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Array Drive %u: Hot Fix %u activated",
-/*23*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
-/*24*/ "\005\000\002\010\002"
- "GDT HA %u, mirror update on Cache Drive %u completed",
-/*25*/ "\005\000\002\010\002"
- "GDT HA %u, mirror update on Cache Drive %lu failed",
-/*26*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild started",
-/*27*/ "\005\000\002\012\001"
- "GDT HA %u, Fault bus %u: SHELF OK detected",
-/*28*/ "\005\000\002\012\001"
- "GDT HA %u, Fault bus %u: SHELF not OK detected",
-/*29*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
-/*30*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: new disk detected",
-/*31*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: old disk detected",
-/*32*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
-/*33*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
-/*34*/ "\011\000\002\012\001\013\001\006\004"
- "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
-/*35*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: disk write protected",
-/*36*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: disk not available",
-/*37*/ "\007\000\002\012\001\006\004"
- "GDT HA %u, Fault bus %u: swap detected (%lu)",
-/*38*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
-/*39*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
-/*40*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
-/*41*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
-/*42*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive build started",
-/*43*/ "\003\000\002"
- "GDT HA %u, DRAM parity error detected",
-/*44*/ "\005\000\002\006\002"
- "GDT HA %u, Mirror Drive %u: update started",
-/*45*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
-/*46*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
-/*47*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
-/*48*/ "\005\000\002\006\002"
- "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
-/*49*/ "\005\000\002\006\002"
- "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
-/*50*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
-/*51*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand started",
-/*52*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand finished successfully",
-/*53*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand failed",
-/*54*/ "\003\000\002"
- "GDT HA %u, CPU temperature critical",
-/*55*/ "\003\000\002"
- "GDT HA %u, CPU temperature OK",
-/*56*/ "\005\000\002\006\004"
- "GDT HA %u, Host drive %lu created",
-/*57*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand restarted",
-/*58*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand stopped",
-/*59*/ "\005\000\002\010\002"
- "GDT HA %u, Mirror Drive %u: drive build quited",
-/*60*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity build quited",
-/*61*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild quited",
-/*62*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify started",
-/*63*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify done",
-/*64*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify failed",
-/*65*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity error detected",
-/*66*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify quited",
-/*67*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u reserved",
-/*68*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u mounted and released",
-/*69*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u released",
-/*70*/ "\003\000\002"
- "GDT HA %u, DRAM error detected and corrected with ECC",
-/*71*/ "\003\000\002"
- "GDT HA %u, Uncorrectable DRAM error detected with ECC",
-/*72*/ "\011\000\002\012\001\013\001\014\001"
- "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
-/*73*/ "\005\000\002\006\002"
- "GDT HA %u, Host drive %u resetted locally",
-/*74*/ "\005\000\002\006\002"
- "GDT HA %u, Host drive %u resetted remotely",
-/*75*/ "\003\000\002"
- "GDT HA %u, async. status 75 unknown",
-};
-
-
-static int gdth_async_event(gdth_ha_str *ha)
-{
- gdth_cmd_str *cmdp;
- int cmd_index;
-
- cmdp = ha->pccb;
- TRACE2(("gdth_async_event() ha %d serv %d\n",
- ha->hanum, ha->service));
-
- if (ha->service == SCREENSERVICE) {
- if (ha->status == MSG_REQUEST) {
- while (gdth_test_busy(ha))
- gdth_delay(0);
- cmdp->Service = SCREENSERVICE;
- cmdp->RequestBuffer = SCREEN_CMND;
- cmd_index = gdth_get_cmd_index(ha);
- gdth_set_sema0(ha);
- cmdp->OpCode = GDT_READ;
- cmdp->BoardNode = LOCALBOARD;
- cmdp->u.screen.reserved = 0;
- cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
- cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
- ha->cmd_offs_dpmem = 0;
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(u64);
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
- (u16)((ha->brd_phys>>3)&0x1f));
- gdth_release_event(ha);
- }
-
- } else {
- if (ha->type == GDT_PCIMPR &&
- (ha->fw_vers & 0xff) >= 0x1a) {
- ha->dvr.size = 0;
- ha->dvr.eu.async.ionode = ha->hanum;
- ha->dvr.eu.async.status = ha->status;
- /* severity and event_string already set! */
- } else {
- ha->dvr.size = sizeof(ha->dvr.eu.async);
- ha->dvr.eu.async.ionode = ha->hanum;
- ha->dvr.eu.async.service = ha->service;
- ha->dvr.eu.async.status = ha->status;
- ha->dvr.eu.async.info = ha->info;
- *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
- }
- gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
- gdth_log_event( &ha->dvr, NULL );
-
- /* new host drive from expand? */
- if (ha->service == CACHESERVICE && ha->status == 56) {
- TRACE2(("gdth_async_event(): new host drive %d created\n",
- (u16)ha->info));
- /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
- }
- }
- return 1;
-}
-
-static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
-{
- gdth_stackframe stack;
- char *f = NULL;
- int i,j;
-
- TRACE2(("gdth_log_event()\n"));
- if (dvr->size == 0) {
- if (buffer == NULL) {
- printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
- } else {
- sprintf(buffer,"Adapter %d: %s\n",
- dvr->eu.async.ionode,dvr->event_string);
- }
- } else if (dvr->eu.async.service == CACHESERVICE &&
- INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
- TRACE2(("GDT: Async. event cache service, event no.: %d\n",
- dvr->eu.async.status));
-
- f = async_cache_tab[dvr->eu.async.status];
-
- /* i: parameter to push, j: stack element to fill */
- for (j=0,i=1; i < f[0]; i+=2) {
- switch (f[i+1]) {
- case 4:
- stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
- break;
- case 2:
- stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
- break;
- case 1:
- stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
- break;
- default:
- break;
- }
- }
-
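- /*
- * Note: the gdth_stackframe is passed by value here, so printk()
- * and sprintf() pick the collected values off it as if they were
- * ordinary varargs (a legacy, non-portable trick).
- */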
- if (buffer == NULL) {
- printk(&f[(int)f[0]],stack);
- printk("\n");
- } else {
- sprintf(buffer,&f[(int)f[0]],stack);
- }
-
- } else {
- if (buffer == NULL) {
- printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
- dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
- } else {
- sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
- dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
- }
- }
-}
-
-#ifdef GDTH_STATISTICS
-static u8 gdth_timer_running;
-
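-/*
- * Statistics timer: every 30 seconds, count the active commands and
- * queued requests on the first adapter, report them together with
- * the interrupt and I/O counters via TRACE2(), and re-arm the timer.
- */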
-static void gdth_timeout(struct timer_list *unused)
-{
- u32 i;
- struct scsi_cmnd *nscp;
- gdth_ha_str *ha;
- unsigned long flags;
-
- if(unlikely(list_empty(&gdth_instances))) {
- gdth_timer_running = 0;
- return;
- }
-
- ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
- if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
- ++act_stats;
-
- for (act_rq=0,
- nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
- ++act_rq;
-
- TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
- act_ints, act_ios, act_stats, act_rq));
- act_ints = act_ios = 0;
-
- gdth_timer.expires = jiffies + 30 * HZ;
- add_timer(&gdth_timer);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_timer_init(void)
-{
- if (gdth_timer_running)
- return;
- gdth_timer_running = 1;
- TRACE2(("gdth_detect(): Initializing timer !\n"));
- gdth_timer.expires = jiffies + HZ;
- add_timer(&gdth_timer);
-}
-#else
-static inline void gdth_timer_init(void)
-{
-}
-#endif
-
-static void __init internal_setup(char *str,int *ints)
-{
- int i;
- char *cur_str, *argv;
-
- TRACE2(("internal_setup() str %s ints[0] %d\n",
- str ? str:"NULL", ints ? ints[0]:0));
-
- /* analyse string */
- argv = str;
- while (argv && (cur_str = strchr(argv, ':'))) {
- int val = 0, c = *++cur_str;
-
- if (c == 'n' || c == 'N')
- val = 0;
- else if (c == 'y' || c == 'Y')
- val = 1;
- else
- val = (int)simple_strtoul(cur_str, NULL, 0);
-
- if (!strncmp(argv, "disable:", 8))
- disable = val;
- else if (!strncmp(argv, "reserve_mode:", 13))
- reserve_mode = val;
- else if (!strncmp(argv, "reverse_scan:", 13))
- reverse_scan = val;
- else if (!strncmp(argv, "hdr_channel:", 12))
- hdr_channel = val;
- else if (!strncmp(argv, "max_ids:", 8))
- max_ids = val;
- else if (!strncmp(argv, "rescan:", 7))
- rescan = val;
- else if (!strncmp(argv, "shared_access:", 14))
- shared_access = val;
- else if (!strncmp(argv, "reserve_list:", 13)) {
- reserve_list[0] = val;
- for (i = 1; i < MAX_RES_ARGS; i++) {
- cur_str = strchr(cur_str, ',');
- if (!cur_str)
- break;
- if (!isdigit((int)*++cur_str)) {
- --cur_str;
- break;
- }
- reserve_list[i] =
- (int)simple_strtoul(cur_str, NULL, 0);
- }
- if (!cur_str)
- break;
- argv = ++cur_str;
- continue;
- }
-
- if ((argv = strchr(argv, ',')))
- ++argv;
- }
-}
-
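-/*
- * Parser for the "gdth=" boot option. Leading numbers are collected
- * into ints[], the rest of the string is handed to internal_setup(),
- * which accepts comma-separated "key:value" pairs for the module
- * parameters above (disable, reserve_mode, reverse_scan, hdr_channel,
- * max_ids, rescan, shared_access, reserve_list). An illustrative
- * invocation: gdth=disable:N,max_ids:127,rescan:Y
- */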
-int __init option_setup(char *str)
-{
- int ints[MAXHA];
- char *cur = str;
- int i = 1;
-
- TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
-
- while (cur && isdigit(*cur) && i < MAXHA) {
- ints[i++] = simple_strtoul(cur, NULL, 0);
- if ((cur = strchr(cur, ',')) != NULL) cur++;
- }
-
- ints[0] = i - 1;
- internal_setup(cur, ints);
- return 1;
-}
-
-static const char *gdth_ctr_name(gdth_ha_str *ha)
-{
- TRACE2(("gdth_ctr_name()\n"));
-
- if (ha->type == GDT_PCI) {
- switch (ha->pdev->device) {
- case PCI_DEVICE_ID_VORTEX_GDT60x0:
- return("GDT6000/6020/6050");
- case PCI_DEVICE_ID_VORTEX_GDT6000B:
- return("GDT6000B/6010");
- }
- }
- /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
-
- return("");
-}
-
-static const char *gdth_info(struct Scsi_Host *shp)
-{
- gdth_ha_str *ha = shost_priv(shp);
-
- TRACE2(("gdth_info()\n"));
- return ((const char *)ha->binfo.type_string);
-}
-
-static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- u8 b, t;
- unsigned long flags;
- enum blk_eh_timer_return retval = BLK_EH_DONE;
-
- TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
- b = scp->device->channel;
- t = scp->device->id;
-
- /*
- * We don't really honor the command timeout, but we try to
- * honor six times the actual command timeout, so reset the
- * timer unless this is already the sixth timeout of this
- * command.
- */
- if (++cmndinfo->timeout_count < 6)
- retval = BLK_EH_RESET_TIMER;
-
- /* Reset the timeout if the I/O is locked */
- spin_lock_irqsave(&ha->smp_lock, flags);
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
- (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
- TRACE2(("%s(): locked IO, reset timeout\n", __func__));
- retval = BLK_EH_RESET_TIMER;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- return retval;
-}
-
-
-static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- int i;
- unsigned long flags;
- struct scsi_cmnd *cmnd;
- u8 b;
-
- TRACE2(("gdth_eh_bus_reset()\n"));
-
- b = scp->device->channel;
-
- /* clear command tab */
- spin_lock_irqsave(&ha->smp_lock, flags);
- for (i = 0; i < GDTH_MAXCMDS; ++i) {
- cmnd = ha->cmd_tab[i].cmnd;
- if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
- ha->cmd_tab[i].cmnd = UNUSED_CMND;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- if (b == ha->virt_bus) {
- /* host drives */
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (ha->hdr[i].present) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- gdth_polling = TRUE;
- while (gdth_test_busy(ha))
- gdth_delay(0);
- if (gdth_internal_cmd(ha, CACHESERVICE,
- GDT_CLUST_RESET, i, 0, 0))
- ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
- gdth_polling = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- }
- }
- } else {
- /* raw devices */
- spin_lock_irqsave(&ha->smp_lock, flags);
- for (i = 0; i < MAXID; ++i)
- ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
- gdth_polling = TRUE;
- while (gdth_test_busy(ha))
- gdth_delay(0);
- gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
- BUS_L2P(ha,b), 0, 0);
- gdth_polling = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- }
- return SUCCESS;
-}
-
-static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
-{
- u8 b, t;
- gdth_ha_str *ha = shost_priv(sdev->host);
- struct scsi_device *sd;
- unsigned capacity;
-
- sd = sdev;
- capacity = cap;
- b = sd->channel;
- t = sd->id;
- TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
-
- if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
- /* raw device or host drive without mapping information */
- TRACE2(("Evaluate mapping\n"));
- gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
- } else {
- ip[0] = ha->hdr[t].heads;
- ip[1] = ha->hdr[t].secs;
- ip[2] = capacity / ip[0] / ip[1];
- }
-
- TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
- ip[0],ip[1],ip[2]));
- return 0;
-}
-
-
-static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
- void (*done)(struct scsi_cmnd *))
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- struct gdth_cmndinfo *cmndinfo;
-
- TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));
-
- cmndinfo = gdth_get_cmndinfo(ha);
- BUG_ON(!cmndinfo);
-
- scp->scsi_done = done;
- cmndinfo->timeout_count = 0;
- cmndinfo->priority = DEFAULT_PRI;
-
- return __gdth_queuecommand(ha, scp, cmndinfo);
-}
-
-static DEF_SCSI_QCMD(gdth_queuecommand)
-
-static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
- struct gdth_cmndinfo *cmndinfo)
-{
- scp->host_scribble = (unsigned char *)cmndinfo;
- cmndinfo->wait_for_completion = 1;
- cmndinfo->phase = -1;
- cmndinfo->OpCode = -1;
-
-#ifdef GDTH_STATISTICS
- ++act_ios;
-#endif
-
- gdth_putq(ha, scp, cmndinfo->priority);
- gdth_next(ha);
- return 0;
-}
-
-
-static int gdth_open(struct inode *inode, struct file *filep)
-{
- gdth_ha_str *ha;
-
- mutex_lock(&gdth_mutex);
- list_for_each_entry(ha, &gdth_instances, list) {
- if (!ha->sdev)
- ha->sdev = scsi_get_host_dev(ha->shost);
- }
- mutex_unlock(&gdth_mutex);
-
- TRACE(("gdth_open()\n"));
- return 0;
-}
-
-static int gdth_close(struct inode *inode, struct file *filep)
-{
- TRACE(("gdth_close()\n"));
- return 0;
-}
-
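-/*
- * GDTIOCTL_EVENT handler. evt.erase selects the action: 0xff stores a
- * new event, 0xfe clears the whole event buffer, 0 reads the next
- * event starting at evt.handle, and any other value reads a
- * per-application event via gdth_readapp_event().
- */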
-static int ioc_event(void __user *arg)
-{
- gdth_ioctl_event evt;
- gdth_ha_str *ha;
- unsigned long flags;
-
- if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
- return -EFAULT;
- ha = gdth_find_ha(evt.ionode);
- if (!ha)
- return -EFAULT;
-
- if (evt.erase == 0xff) {
- if (evt.event.event_source == ES_TEST)
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
- else if (evt.event.event_source == ES_DRIVER)
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
- else if (evt.event.event_source == ES_SYNC)
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
- else
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
- spin_lock_irqsave(&ha->smp_lock, flags);
- gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
- &evt.event.event_data);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- } else if (evt.erase == 0xfe) {
- gdth_clear_events();
- } else if (evt.erase == 0) {
- evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
- } else {
- gdth_readapp_event(ha, evt.erase, &evt.event);
- }
- if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
- return -EFAULT;
- return 0;
-}
-
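-/*
- * GDTIOCTL_LOCKDRV handler: locking a host drive sets its lock flag
- * and waits for outstanding I/O to complete; unlocking clears the
- * flag and restarts the request queue via gdth_next().
- */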
-static int ioc_lockdrv(void __user *arg)
-{
- gdth_ioctl_lockdrv ldrv;
- u8 i, j;
- unsigned long flags;
- gdth_ha_str *ha;
-
- if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
- return -EFAULT;
- ha = gdth_find_ha(ldrv.ionode);
- if (!ha)
- return -EFAULT;
-
- for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
- j = ldrv.drives[i];
- if (j >= MAX_HDRIVES || !ha->hdr[j].present)
- continue;
- if (ldrv.lock) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[j].lock = 1;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_wait_completion(ha, ha->bus_cnt, j);
- } else {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[j].lock = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_next(ha);
- }
- }
- return 0;
-}
-
-static int ioc_resetdrv(void __user *arg, char *cmnd)
-{
- gdth_ioctl_reset res;
- gdth_cmd_str cmd;
- gdth_ha_str *ha;
- int rval;
-
- if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
- res.number >= MAX_HDRIVES)
- return -EFAULT;
- ha = gdth_find_ha(res.ionode);
- if (!ha)
- return -EFAULT;
-
- if (!ha->hdr[res.number].present)
- return 0;
- memset(&cmd, 0, sizeof(gdth_cmd_str));
- cmd.Service = CACHESERVICE;
- cmd.OpCode = GDT_CLUST_RESET;
- if (ha->cache_feat & GDT_64BIT)
- cmd.u.cache64.DeviceNo = res.number;
- else
- cmd.u.cache.DeviceNo = res.number;
-
- rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
- if (rval < 0)
- return rval;
- res.status = rval;
-
- if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
- return -EFAULT;
- return 0;
-}
-
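-/*
- * Rewrite a cache-service IOCTL command for the controller: on
- * 64-bit capable firmware the 32-bit fields are widened to the
- * cache64 layout, and the user buffer is described either as a
- * one-element scatter/gather list (terminated by a zero-length
- * second element) or as a flat destination address.
- */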
-static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
- u64 paddr)
-{
- if (ha->cache_feat & GDT_64BIT) {
- /* copy elements from 32-bit IOCTL structure */
- gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
- gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
- gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
-
- if (ha->cache_feat & SCATTER_GATHER) {
- gen->command.u.cache64.DestAddr = (u64)-1;
- gen->command.u.cache64.sg_canz = 1;
- gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
- gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.cache64.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.cache64.DestAddr = paddr;
- gen->command.u.cache64.sg_canz = 0;
- }
- } else {
- if (ha->cache_feat & SCATTER_GATHER) {
- gen->command.u.cache.DestAddr = 0xffffffff;
- gen->command.u.cache.sg_canz = 1;
- gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
- gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.cache.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.cache.DestAddr = paddr;
- gen->command.u.cache.sg_canz = 0;
- }
- }
-}
-
-static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
- u64 paddr)
-{
- if (ha->raw_feat & GDT_64BIT) {
- /* copy elements from 32-bit IOCTL structure */
- char cmd[16];
-
- gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
- gen->command.u.raw64.bus = gen->command.u.raw.bus;
- gen->command.u.raw64.lun = gen->command.u.raw.lun;
- gen->command.u.raw64.target = gen->command.u.raw.target;
- memcpy(cmd, gen->command.u.raw.cmd, 16);
- memcpy(gen->command.u.raw64.cmd, cmd, 16);
- gen->command.u.raw64.clen = gen->command.u.raw.clen;
- gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
- gen->command.u.raw64.direction = gen->command.u.raw.direction;
-
- /* addresses */
- if (ha->raw_feat & SCATTER_GATHER) {
- gen->command.u.raw64.sdata = (u64)-1;
- gen->command.u.raw64.sg_ranz = 1;
- gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
- gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.raw64.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.raw64.sdata = paddr;
- gen->command.u.raw64.sg_ranz = 0;
- }
-
- gen->command.u.raw64.sense_data = paddr + gen->data_len;
- } else {
- if (ha->raw_feat & SCATTER_GATHER) {
- gen->command.u.raw.sdata = 0xffffffff;
- gen->command.u.raw.sg_ranz = 1;
- gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
- gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.raw.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.raw.sdata = paddr;
- gen->command.u.raw.sg_ranz = 0;
- }
-
- gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
- }
-}
-
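-/*
- * GDTIOCTL_GENERAL handler: data and sense share a single coherent
- * DMA buffer of data_len + sense_len bytes; the data area starts at
- * paddr and the sense area follows at paddr + data_len (see
- * gdth_ioc_scsiraw() above).
- */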
-static int ioc_general(void __user *arg, char *cmnd)
-{
- gdth_ioctl_general gen;
- gdth_ha_str *ha;
- char *buf = NULL;
- dma_addr_t paddr;
- int rval;
-
- if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
- return -EFAULT;
- ha = gdth_find_ha(gen.ionode);
- if (!ha)
- return -EFAULT;
-
- if (gen.data_len > INT_MAX)
- return -EINVAL;
- if (gen.sense_len > INT_MAX)
- return -EINVAL;
- if (gen.data_len + gen.sense_len > INT_MAX)
- return -EINVAL;
-
- if (gen.data_len + gen.sense_len > 0) {
- buf = dma_alloc_coherent(&ha->pdev->dev,
- gen.data_len + gen.sense_len, &paddr,
- GFP_KERNEL);
- if (!buf)
- return -EFAULT;
-
- rval = -EFAULT;
- if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
- gen.data_len + gen.sense_len))
- goto out_free_buf;
-
- if (gen.command.OpCode == GDT_IOCTL)
- gen.command.u.ioctl.p_param = paddr;
- else if (gen.command.Service == CACHESERVICE)
- gdth_ioc_cacheservice(ha, &gen, paddr);
- else if (gen.command.Service == SCSIRAWSERVICE)
- gdth_ioc_scsiraw(ha, &gen, paddr);
- else
- goto out_free_buf;
- }
-
- rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
- &gen.info);
- if (rval < 0)
- goto out_free_buf;
- gen.status = rval;
-
- rval = -EFAULT;
- if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
- gen.data_len + gen.sense_len))
- goto out_free_buf;
- if (copy_to_user(arg, &gen,
- sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
- goto out_free_buf;
-
- rval = 0;
-out_free_buf:
- if (buf)
- dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
- buf, paddr);
- return rval;
-}
-
-static int ioc_hdrlist(void __user *arg, char *cmnd)
-{
- gdth_ioctl_rescan *rsc;
- gdth_cmd_str *cmd;
- gdth_ha_str *ha;
- u8 i;
- int rc = -ENOMEM;
- u32 cluster_type = 0;
-
- rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!rsc || !cmd)
- goto free_fail;
-
- if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
- (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
- rc = -EFAULT;
- goto free_fail;
- }
- memset(cmd, 0, sizeof(gdth_cmd_str));
-
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (!ha->hdr[i].present) {
- rsc->hdr_list[i].bus = 0xff;
- continue;
- }
- rsc->hdr_list[i].bus = ha->virt_bus;
- rsc->hdr_list[i].target = i;
- rsc->hdr_list[i].lun = 0;
- rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
- if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_CLUST_INFO;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
- if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
- rsc->hdr_list[i].cluster_type = cluster_type;
- }
- }
-
- if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
- rc = -EFAULT;
- else
- rc = 0;
-
-free_fail:
- kfree(rsc);
- kfree(cmd);
- return rc;
-}
-
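-/*
- * GDTIOCTL_RESCAN handler. With flag == 0 the cache service is
- * re-initialized and all host drives are enumerated; otherwise only
- * the single drive in hdr_no is refreshed. For each drive the size
- * and mapping, device type, cluster info and R/W attributes are
- * re-queried.
- */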
-static int ioc_rescan(void __user *arg, char *cmnd)
-{
- gdth_ioctl_rescan *rsc;
- gdth_cmd_str *cmd;
- u16 i, status, hdr_cnt;
- u32 info;
- int cyls, hds, secs;
- int rc = -ENOMEM;
- unsigned long flags;
- gdth_ha_str *ha;
-
- rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd || !rsc)
- goto free_fail;
-
- if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
- (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
- rc = -EFAULT;
- goto free_fail;
- }
- memset(cmd, 0, sizeof(gdth_cmd_str));
-
- if (rsc->flag == 0) {
- /* old method: re-init. cache service */
- cmd->Service = CACHESERVICE;
- if (ha->cache_feat & GDT_64BIT) {
- cmd->OpCode = GDT_X_INIT_HOST;
- cmd->u.cache64.DeviceNo = LINUX_OS;
- } else {
- cmd->OpCode = GDT_INIT;
- cmd->u.cache.DeviceNo = LINUX_OS;
- }
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
- i = 0;
- hdr_cnt = (status == S_OK ? (u16)info : 0);
- } else {
- i = rsc->hdr_no;
- hdr_cnt = i + 1;
- }
-
- for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_INFO;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- rsc->hdr_list[i].bus = ha->virt_bus;
- rsc->hdr_list[i].target = i;
- rsc->hdr_list[i].lun = 0;
- if (status != S_OK) {
- ha->hdr[i].present = FALSE;
- } else {
- ha->hdr[i].present = TRUE;
- ha->hdr[i].size = info;
- /* evaluate mapping */
- ha->hdr[i].size &= ~SECS32;
- gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
- ha->hdr[i].heads = hds;
- ha->hdr[i].secs = secs;
- /* round size */
- ha->hdr[i].size = cyls * hds * secs;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- if (status != S_OK)
- continue;
-
- /* extended info, if GDT_64BIT, for drives > 2 TB */
- /* but we need ha->info2, not yet stored in scp->SCp */
-
- /* devtype, cluster info, R/W attribs */
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_DEVTYPE;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_CLUST_INFO;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].cluster_type =
- ((status == S_OK && !shared_access) ? (u16)info : 0);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
-
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_RW_ATTRIBS;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- }
-
- if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
- rc = -EFAULT;
- else
- rc = 0;
-
-free_fail:
- kfree(rsc);
- kfree(cmd);
- return rc;
-}
-
-static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- gdth_ha_str *ha;
- struct scsi_cmnd *scp;
- unsigned long flags;
- char cmnd[MAX_COMMAND_SIZE];
- void __user *argp = (void __user *)arg;
-
- memset(cmnd, 0xff, MAX_COMMAND_SIZE);
-
- TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));
-
- switch (cmd) {
- case GDTIOCTL_CTRCNT:
- {
- int cnt = gdth_ctr_count;
- if (put_user(cnt, (int __user *)argp))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_DRVERS:
- {
- int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
- if (put_user(ver, (int __user *)argp))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_OSVERS:
- {
- gdth_ioctl_osvers osv;
-
- osv.version = (u8)(LINUX_VERSION_CODE >> 16);
- osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
- osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
- if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_CTRTYPE:
- {
- gdth_ioctl_ctrtype ctrt;
-
- if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
- (NULL == (ha = gdth_find_ha(ctrt.ionode))))
- return -EFAULT;
-
- if (ha->type != GDT_PCIMPR) {
- ctrt.type = (u8)((ha->stype<<4) + 6);
- } else {
- ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
- if (ha->stype >= 0x300)
- ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
- else
- ctrt.ext_type = 0x6000 | ha->stype;
- }
- ctrt.device_id = ha->pdev->device;
- ctrt.sub_device_id = ha->pdev->subsystem_device;
- ctrt.info = ha->brd_phys;
- ctrt.oem_id = ha->oem_id;
- if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_GENERAL:
- return ioc_general(argp, cmnd);
-
- case GDTIOCTL_EVENT:
- return ioc_event(argp);
-
- case GDTIOCTL_LOCKDRV:
- return ioc_lockdrv(argp);
-
- case GDTIOCTL_LOCKCHN:
- {
- gdth_ioctl_lockchn lchn;
- u8 i, j;
-
- if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
- (NULL == (ha = gdth_find_ha(lchn.ionode))))
- return -EFAULT;
-
- i = lchn.channel;
- if (i < ha->bus_cnt) {
- if (lchn.lock) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->raw[i].lock = 1;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j)
- gdth_wait_completion(ha, i, j);
- } else {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->raw[i].lock = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j)
- gdth_next(ha);
- }
- }
- break;
- }
-
- case GDTIOCTL_RESCAN:
- return ioc_rescan(argp, cmnd);
-
- case GDTIOCTL_HDRLIST:
- return ioc_hdrlist(argp, cmnd);
-
- case GDTIOCTL_RESET_BUS:
- {
- gdth_ioctl_reset res;
- int rval;
-
- if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
- (NULL == (ha = gdth_find_ha(res.ionode))))
- return -EFAULT;
-
- scp = kzalloc(sizeof(*scp), GFP_KERNEL);
- if (!scp)
- return -ENOMEM;
- scp->device = ha->sdev;
- scp->cmd_len = 12;
- scp->device->channel = res.number;
- rval = gdth_eh_bus_reset(scp);
- res.status = (rval == SUCCESS ? S_OK : S_GENERR);
- kfree(scp);
-
- if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_RESET_DRV:
- return ioc_resetdrv(argp, cmnd);
-
- default:
- break;
- }
- return 0;
-}
-
-static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- mutex_lock(&gdth_mutex);
- ret = gdth_ioctl(file, cmd, arg);
- mutex_unlock(&gdth_mutex);
-
- return ret;
-}
-
-/* flush routine */
-static void gdth_flush(gdth_ha_str *ha)
-{
- int i;
- gdth_cmd_str gdtcmd;
- char cmnd[MAX_COMMAND_SIZE];
- memset(cmnd, 0xff, MAX_COMMAND_SIZE);
-
- TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
-
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (ha->hdr[i].present) {
- gdtcmd.BoardNode = LOCALBOARD;
- gdtcmd.Service = CACHESERVICE;
- gdtcmd.OpCode = GDT_FLUSH;
- if (ha->cache_feat & GDT_64BIT) {
- gdtcmd.u.cache64.DeviceNo = i;
- gdtcmd.u.cache64.BlockNo = 1;
- gdtcmd.u.cache64.sg_canz = 0;
- } else {
- gdtcmd.u.cache.DeviceNo = i;
- gdtcmd.u.cache.BlockNo = 1;
- gdtcmd.u.cache.sg_canz = 0;
- }
- TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i));
-
- gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL);
- }
- }
-}
-
-/* configure lun */
-static int gdth_slave_configure(struct scsi_device *sdev)
-{
- sdev->skip_ms_page_3f = 1;
- sdev->skip_ms_page_8 = 1;
- return 0;
-}
-
-static struct scsi_host_template gdth_template = {
- .name = "GDT SCSI Disk Array Controller",
- .info = gdth_info,
- .queuecommand = gdth_queuecommand,
- .eh_bus_reset_handler = gdth_eh_bus_reset,
- .slave_configure = gdth_slave_configure,
- .bios_param = gdth_bios_param,
- .show_info = gdth_show_info,
- .write_info = gdth_set_info,
- .eh_timed_out = gdth_timed_out,
- .proc_name = "gdth",
- .can_queue = GDTH_MAXCMDS,
- .this_id = -1,
- .sg_tablesize = GDTH_MAXSG,
- .cmd_per_lun = GDTH_MAXC_P_L,
- .unchecked_isa_dma = 1,
- .no_write_same = 1,
-};
-
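-/*
- * PCI probe path: allocate the Scsi_Host, initialize the board, hook
- * up the shared IRQ, allocate coherent scratch and message buffers,
- * and scan for drives. A 64-bit DMA mask is tried only when the
- * cache, raw and screen services all report GDT_64BIT firmware
- * support and dma64_support is set; otherwise the driver falls back
- * to 32-bit DMA. Finally the SCSI host is registered and scanned.
- */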
-static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
-{
- struct Scsi_Host *shp;
- gdth_ha_str *ha;
- dma_addr_t scratch_dma_handle = 0;
- int error, i;
- struct pci_dev *pdev = pcistr->pdev;
-
- *ha_out = NULL;
-
- shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
- if (!shp)
- return -ENOMEM;
- ha = shost_priv(shp);
-
- error = -ENODEV;
- if (!gdth_init_pci(pdev, pcistr, ha))
- goto out_host_put;
-
- /* controller found and initialized */
- printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn),
- ha->irq);
-
- error = request_irq(ha->irq, gdth_interrupt,
- IRQF_SHARED, "gdth", ha);
- if (error) {
- printk("GDT-PCI: Unable to allocate IRQ\n");
- goto out_host_put;
- }
-
- shp->unchecked_isa_dma = 0;
- shp->irq = ha->irq;
- shp->dma_channel = 0xff;
-
- ha->hanum = gdth_ctr_count++;
- ha->shost = shp;
-
- ha->pccb = &ha->cmdext;
- ha->ccb_phys = 0L;
-
- error = -ENOMEM;
-
- ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
- &scratch_dma_handle, GFP_KERNEL);
- if (!ha->pscratch)
- goto out_free_irq;
- ha->scratch_phys = scratch_dma_handle;
-
- ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
- &scratch_dma_handle, GFP_KERNEL);
- if (!ha->pmsg)
- goto out_free_pscratch;
- ha->msg_phys = scratch_dma_handle;
-
- ha->scratch_busy = FALSE;
- ha->req_first = NULL;
- ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
- if (max_ids > 0 && max_ids < ha->tid_cnt)
- ha->tid_cnt = max_ids;
- for (i = 0; i < GDTH_MAXCMDS; ++i)
- ha->cmd_tab[i].cmnd = UNUSED_CMND;
- ha->scan_mode = rescan ? 0x10 : 0;
-
- error = -ENODEV;
- if (!gdth_search_drives(ha)) {
- printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
- goto out_free_pmsg;
- }
-
- if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
- hdr_channel = ha->bus_cnt;
- ha->virt_bus = hdr_channel;
-
- /* 64-bit DMA only supported from FW >= x.43 */
- if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
- !ha->dma64_support) {
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "GDT-PCI %d: "
- "Unable to set 32-bit DMA\n", ha->hanum);
- goto out_free_pmsg;
- }
- } else {
- shp->max_cmd_len = 16;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
- } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "GDT-PCI %d: "
- "Unable to set 64/32-bit DMA\n", ha->hanum);
- goto out_free_pmsg;
- }
- }
-
- shp->max_id = ha->tid_cnt;
- shp->max_lun = MAXLUN;
- shp->max_channel = ha->bus_cnt;
-
- spin_lock_init(&ha->smp_lock);
- gdth_enable_int(ha);
-
- error = scsi_add_host(shp, &pdev->dev);
- if (error)
- goto out_free_pmsg;
- list_add_tail(&ha->list, &gdth_instances);
-
- pci_set_drvdata(ha->pdev, ha);
- gdth_timer_init();
-
- scsi_scan_host(shp);
-
- *ha_out = ha;
-
- return 0;
-
- out_free_pmsg:
- dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
- ha->pmsg, ha->msg_phys);
- out_free_pscratch:
- dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
- ha->pscratch, ha->scratch_phys);
- out_free_irq:
- free_irq(ha->irq, ha);
- gdth_ctr_count--;
- out_host_put:
- scsi_host_put(shp);
- return error;
-}
-
-static void gdth_remove_one(gdth_ha_str *ha)
-{
- struct Scsi_Host *shp = ha->shost;
-
- TRACE2(("gdth_remove_one()\n"));
-
- scsi_remove_host(shp);
-
- gdth_flush(ha);
-
- if (ha->sdev) {
- scsi_free_host_dev(ha->sdev);
- ha->sdev = NULL;
- }
-
- if (shp->irq)
- free_irq(shp->irq,ha);
-
- if (ha->pscratch)
- dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
- ha->pscratch, ha->scratch_phys);
- if (ha->pmsg)
- dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
- ha->pmsg, ha->msg_phys);
- if (ha->ccb_phys)
- dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
- sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);
-
- scsi_host_put(shp);
-}
-
-static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
-{
- gdth_ha_str *ha;
-
- TRACE2(("gdth_halt() event %d\n", (int)event));
- if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
- return NOTIFY_DONE;
-
- list_for_each_entry(ha, &gdth_instances, list)
- gdth_flush(ha);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block gdth_notifier = {
- gdth_halt, NULL, 0
-};
-
-static int __init gdth_init(void)
-{
- if (disable) {
- printk("GDT-HA: Controller driver disabled from"
- " command line !\n");
- return 0;
- }
-
- printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",
- GDTH_VERSION_STR);
-
- /* initializations */
- gdth_polling = TRUE;
- gdth_clear_events();
- timer_setup(&gdth_timer, gdth_timeout, 0);
-
- /* scanning for PCI controllers */
- if (pci_register_driver(&gdth_pci_driver)) {
- gdth_ha_str *ha;
-
- list_for_each_entry(ha, &gdth_instances, list)
- gdth_remove_one(ha);
- return -ENODEV;
- }
-
- TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
-
- major = register_chrdev(0,"gdth", &gdth_fops);
- register_reboot_notifier(&gdth_notifier);
- gdth_polling = FALSE;
- return 0;
-}
-
-static void __exit gdth_exit(void)
-{
- gdth_ha_str *ha;
-
- unregister_chrdev(major, "gdth");
- unregister_reboot_notifier(&gdth_notifier);
-
-#ifdef GDTH_STATISTICS
- del_timer_sync(&gdth_timer);
-#endif
-
- pci_unregister_driver(&gdth_pci_driver);
-
- list_for_each_entry(ha, &gdth_instances, list)
- gdth_remove_one(ha);
-}
-
-module_init(gdth_init);
-module_exit(gdth_exit);
-
-#ifndef MODULE
-__setup("gdth=", option_setup);
-#endif
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
deleted file mode 100644
index 5a13d406d40e..000000000000
--- a/drivers/scsi/gdth.h
+++ /dev/null
@@ -1,981 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GDTH_H
-#define _GDTH_H
-
-/*
- * Header file for the GDT Disk Array/Storage RAID controllers driver for Linux
- *
- * gdth.h Copyright (C) 1995-06 ICP vortex, Achim Leubner
- * See gdth.c for further information and
- * below for supported controller types
- *
- * <achim_leubner@adaptec.com>
- *
- * $Id: gdth.h,v 1.58 2006/01/11 16:14:09 achim Exp $
- */
-
-#include <linux/types.h>
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/* defines, macros */
-
-/* driver version */
-#define GDTH_VERSION_STR "3.05"
-#define GDTH_VERSION 3
-#define GDTH_SUBVERSION 5
-
-/* protocol version */
-#define PROTOCOL_VERSION 1
-
-/* OEM IDs */
-#define OEM_ID_ICP 0x941c
-#define OEM_ID_INTEL 0x8000
-
-/* controller classes */
-#define GDT_PCI 0x03 /* PCI controller */
-#define GDT_PCINEW 0x04 /* new PCI controller */
-#define GDT_PCIMPR 0x05 /* PCI MPR controller */
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
-/* GDT_PCI */
-#define PCI_DEVICE_ID_VORTEX_GDT60x0 0 /* GDT6000/6020/6050 */
-#define PCI_DEVICE_ID_VORTEX_GDT6000B 1 /* GDT6000B/6010 */
-/* GDT_PCINEW */
-#define PCI_DEVICE_ID_VORTEX_GDT6x10 2 /* GDT6110/6510 */
-#define PCI_DEVICE_ID_VORTEX_GDT6x20 3 /* GDT6120/6520 */
-#define PCI_DEVICE_ID_VORTEX_GDT6530 4 /* GDT6530 */
-#define PCI_DEVICE_ID_VORTEX_GDT6550 5 /* GDT6550 */
-/* GDT_PCINEW, wide/ultra SCSI controllers */
-#define PCI_DEVICE_ID_VORTEX_GDT6x17 6 /* GDT6117/6517 */
-#define PCI_DEVICE_ID_VORTEX_GDT6x27 7 /* GDT6127/6527 */
-#define PCI_DEVICE_ID_VORTEX_GDT6537 8 /* GDT6537 */
-#define PCI_DEVICE_ID_VORTEX_GDT6557 9 /* GDT6557/6557-ECC */
-/* GDT_PCINEW, wide SCSI controllers */
-#define PCI_DEVICE_ID_VORTEX_GDT6x15 10 /* GDT6115/6515 */
-#define PCI_DEVICE_ID_VORTEX_GDT6x25 11 /* GDT6125/6525 */
-#define PCI_DEVICE_ID_VORTEX_GDT6535 12 /* GDT6535 */
-#define PCI_DEVICE_ID_VORTEX_GDT6555 13 /* GDT6555/6555-ECC */
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RP
-/* GDT_MPR, RP series, wide/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x100 /* GDT6117RP/GDT6517RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x101 /* GDT6127RP/GDT6527RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x102 /* GDT6537RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x103 /* GDT6557RP */
-/* GDT_MPR, RP series, narrow/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x104 /* GDT6111RP/GDT6511RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x105 /* GDT6121RP/GDT6521RP */
-#endif
-#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RD
-/* GDT_MPR, RD series, wide/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x17RD 0x110 /* GDT6117RD/GDT6517RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x27RD 0x111 /* GDT6127RD/GDT6527RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6537RD 0x112 /* GDT6537RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6557RD 0x113 /* GDT6557RD */
-/* GDT_MPR, RD series, narrow/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x11RD 0x114 /* GDT6111RD/GDT6511RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x21RD 0x115 /* GDT6121RD/GDT6521RD */
-/* GDT_MPR, RD series, wide/ultra2 SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x18RD 0x118 /* GDT6118RD/GDT6518RD/
- GDT6618RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x28RD 0x119 /* GDT6128RD/GDT6528RD/
- GDT6628RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x38RD 0x11A /* GDT6538RD/GDT6638RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x58RD 0x11B /* GDT6558RD/GDT6658RD */
-/* GDT_MPR, RN series (64-bit PCI), wide/ultra2 SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT7x18RN 0x168 /* GDT7118RN/GDT7518RN/
- GDT7618RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x28RN 0x169 /* GDT7128RN/GDT7528RN/
- GDT7628RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x38RN 0x16A /* GDT7538RN/GDT7638RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x58RN 0x16B /* GDT7558RN/GDT7658RN */
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDT6x19RD
-/* GDT_MPR, RD series, Fibre Channel */
-#define PCI_DEVICE_ID_VORTEX_GDT6x19RD 0x210 /* GDT6519RD/GDT6619RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x29RD 0x211 /* GDT6529RD/GDT6629RD */
-/* GDT_MPR, RN series (64-bit PCI), Fibre Channel */
-#define PCI_DEVICE_ID_VORTEX_GDT7x19RN 0x260 /* GDT7519RN/GDT7619RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x29RN 0x261 /* GDT7529RN/GDT7629RN */
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDTMAXRP
-/* GDT_MPR, last device ID */
-#define PCI_DEVICE_ID_VORTEX_GDTMAXRP 0x2ff
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDTNEWRX
-/* new GDT Rx Controller */
-#define PCI_DEVICE_ID_VORTEX_GDTNEWRX 0x300
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDTNEWRX2
-/* new(2) GDT Rx Controller */
-#define PCI_DEVICE_ID_VORTEX_GDTNEWRX2 0x301
-#endif
-
-#ifndef PCI_DEVICE_ID_INTEL_SRC
-/* Intel Storage RAID Controller */
-#define PCI_DEVICE_ID_INTEL_SRC 0x600
-#endif
-
-#ifndef PCI_DEVICE_ID_INTEL_SRC_XSCALE
-/* Intel Storage RAID Controller */
-#define PCI_DEVICE_ID_INTEL_SRC_XSCALE 0x601
-#endif
-
-/* limits */
-#define GDTH_SCRATCH PAGE_SIZE /* one-page scratch buffer */
-#define GDTH_MAXCMDS 120
-#define GDTH_MAXC_P_L 16 /* max. cmds per lun */
-#define GDTH_MAX_RAW 2 /* max. cmds per raw device */
-#define MAXOFFSETS 128
-#define MAXHA 16
-#define MAXID 127
-#define MAXLUN 8
-#define MAXBUS 6
-#define MAX_EVENTS 100 /* event buffer count */
-#define MAX_RES_ARGS 40 /* device reservation,
- must be a multiple of 4 */
-#define MAXCYLS 1024
-#define HEADS 64
-#define SECS 32 /* mapping 64*32 */
-#define MEDHEADS 127
-#define MEDSECS 63 /* mapping 127*63 */
-#define BIGHEADS 255
-#define BIGSECS 63 /* mapping 255*63 */
-
-/* special command ptr. */
-#define UNUSED_CMND ((struct scsi_cmnd *)-1)
-#define INTERNAL_CMND ((struct scsi_cmnd *)-2)
-#define SCREEN_CMND ((struct scsi_cmnd *)-3)
-#define SPECIAL_SCP(p) ((p)==UNUSED_CMND || (p)==INTERNAL_CMND || (p)==SCREEN_CMND)
-
-/* controller services */
-#define SCSIRAWSERVICE 3
-#define CACHESERVICE 9
-#define SCREENSERVICE 11
-
-/* screenservice defines */
-#define MSG_INV_HANDLE -1 /* special message handle */
-#define MSGLEN 16 /* size of message text */
-#define MSG_SIZE 34 /* size of message structure */
-#define MSG_REQUEST 0 /* async. event: message */
-
-/* DPMEM constants */
-#define DPMEM_MAGIC 0xC0FFEE11
-#define IC_HEADER_BYTES 48
-#define IC_QUEUE_BYTES 4
-#define DPMEM_COMMAND_OFFSET (IC_HEADER_BYTES+IC_QUEUE_BYTES*MAXOFFSETS)
-
-/* cluster_type constants */
-#define CLUSTER_DRIVE 1
-#define CLUSTER_MOUNTED 2
-#define CLUSTER_RESERVED 4
-#define CLUSTER_RESERVE_STATE (CLUSTER_DRIVE|CLUSTER_MOUNTED|CLUSTER_RESERVED)
-
-/* commands for all services, cache service */
-#define GDT_INIT 0 /* service initialization */
-#define GDT_READ 1 /* read command */
-#define GDT_WRITE 2 /* write command */
-#define GDT_INFO 3 /* information about devices */
-#define GDT_FLUSH 4 /* flush dirty cache buffers */
-#define GDT_IOCTL 5 /* ioctl command */
-#define GDT_DEVTYPE 9 /* additional information */
-#define GDT_MOUNT 10 /* mount cache device */
-#define GDT_UNMOUNT 11 /* unmount cache device */
-#define GDT_SET_FEAT 12 /* set feat. (scatter/gather) */
-#define GDT_GET_FEAT 13 /* get features */
-#define GDT_WRITE_THR 16 /* write through */
-#define GDT_READ_THR 17 /* read through */
-#define GDT_EXT_INFO 18 /* extended info */
-#define GDT_RESET 19 /* controller reset */
-#define GDT_RESERVE_DRV 20 /* reserve host drive */
-#define GDT_RELEASE_DRV 21 /* release host drive */
-#define GDT_CLUST_INFO 22 /* cluster info */
-#define GDT_RW_ATTRIBS 23 /* R/W attribs (write thru,..)*/
-#define GDT_CLUST_RESET 24 /* releases the cluster drives*/
-#define GDT_FREEZE_IO 25 /* freezes all IOs */
-#define GDT_UNFREEZE_IO 26 /* unfreezes all IOs */
-#define GDT_X_INIT_HOST 29 /* ext. init: 64 bit support */
-#define GDT_X_INFO 30 /* ext. info for drives>2TB */
-
-/* raw service commands */
-#define GDT_RESERVE 14 /* reserve dev. to raw serv. */
-#define GDT_RELEASE 15 /* release device */
-#define GDT_RESERVE_ALL 16 /* reserve all devices */
-#define GDT_RELEASE_ALL 17 /* release all devices */
-#define GDT_RESET_BUS 18 /* reset bus */
-#define GDT_SCAN_START 19 /* start device scan */
-#define GDT_SCAN_END 20 /* stop device scan */
-#define GDT_X_INIT_RAW 21 /* ext. init: 64 bit support */
-
-/* screen service commands */
-#define GDT_REALTIME 3 /* realtime clock to screens. */
-#define GDT_X_INIT_SCR 4 /* ext. init: 64 bit support */
-
-/* IOCTL command defines */
-#define SCSI_DR_INFO 0x00 /* SCSI drive info */
-#define SCSI_CHAN_CNT 0x05 /* SCSI channel count */
-#define SCSI_DR_LIST 0x06 /* SCSI drive list */
-#define SCSI_DEF_CNT 0x15 /* grown/primary defects */
-#define DSK_STATISTICS 0x4b /* SCSI disk statistics */
-#define IOCHAN_DESC 0x5d /* description of IO channel */
-#define IOCHAN_RAW_DESC 0x5e /* description of raw IO chn. */
-#define L_CTRL_PATTERN 0x20000000L /* SCSI IOCTL mask */
-#define ARRAY_INFO 0x12 /* array drive info */
-#define ARRAY_DRV_LIST 0x0f /* array drive list */
-#define ARRAY_DRV_LIST2 0x34 /* array drive list (new) */
-#define LA_CTRL_PATTERN 0x10000000L /* array IOCTL mask */
-#define CACHE_DRV_CNT 0x01 /* cache drive count */
-#define CACHE_DRV_LIST 0x02 /* cache drive list */
-#define CACHE_INFO 0x04 /* cache info */
-#define CACHE_CONFIG 0x05 /* cache configuration */
-#define CACHE_DRV_INFO 0x07 /* cache drive info */
-#define BOARD_FEATURES 0x15 /* controller features */
-#define BOARD_INFO 0x28 /* controller info */
-#define SET_PERF_MODES 0x82 /* set mode (coalescing,..) */
-#define GET_PERF_MODES 0x83 /* get mode */
-#define CACHE_READ_OEM_STRING_RECORD 0x84 /* read OEM string record */
-#define HOST_GET 0x10001L /* get host drive list */
-#define IO_CHANNEL 0x00020000L /* default IO channel */
-#define INVALID_CHANNEL 0x0000ffffL /* invalid channel */
-
-/* service errors */
-#define S_OK 1 /* no error */
-#define S_GENERR 6 /* general error */
-#define S_BSY 7 /* controller busy */
-#define S_CACHE_UNKNOWN 12 /* cache serv.: drive unknown */
-#define S_RAW_SCSI 12 /* raw serv.: target error */
-#define S_RAW_ILL 0xff /* raw serv.: illegal */
-#define S_NOFUNC -2 /* unknown function */
-#define S_CACHE_RESERV -24 /* cache: reserv. conflict */
-
-/* timeout values */
-#define INIT_RETRIES 100000 /* 100000 * 1ms = 100s */
-#define INIT_TIMEOUT 100000 /* 100000 * 1ms = 100s */
-#define POLL_TIMEOUT 10000 /* 10000 * 1ms = 10s */
-
-/* priorities */
-#define DEFAULT_PRI 0x20
-#define IOCTL_PRI 0x10
-#define HIGH_PRI 0x08
-
-/* data directions */
-#define GDTH_DATA_IN 0x01000000L /* data from target */
-#define GDTH_DATA_OUT 0x00000000L /* data to target */
-
-/* other defines */
-#define LINUX_OS 8 /* used for cache optim. */
-#define SECS32 0x1f /* round capacity */
-#define BIOS_ID_OFFS 0x10 /* offset contr-ID in ISABIOS */
-#define LOCALBOARD 0 /* board node always 0 */
-#define ASYNCINDEX 0 /* cmd index async. event */
-#define SPEZINDEX 1 /* cmd index unknown service */
-#define COALINDEX (GDTH_MAXCMDS + 2)
-
-/* features */
-#define SCATTER_GATHER 1 /* s/g feature */
-#define GDT_WR_THROUGH 0x100 /* WRITE_THROUGH supported */
-#define GDT_64BIT 0x200 /* 64bit / drv>2TB support */
-
-#include "gdth_ioctl.h"
-
-/* screenservice message */
-typedef struct {
- u32 msg_handle; /* message handle */
- u32 msg_len; /* size of message */
- u32 msg_alen; /* answer length */
- u8 msg_answer; /* answer flag */
- u8 msg_ext; /* more messages */
- u8 msg_reserved[2];
- char msg_text[MSGLEN+2]; /* the message text */
-} __attribute__((packed)) gdth_msg_str;
-
-
-/* IOCTL data structures */
-
-/* Status coalescing buffer for returning multiple requests per interrupt */
-typedef struct {
- u32 status;
- u32 ext_status;
- u32 info0;
- u32 info1;
-} __attribute__((packed)) gdth_coal_status;
-
-/* performance mode data structure */
-typedef struct {
- u32 version; /* The version of this IOCTL structure. */
- u32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */
- u32 st_buff_addr1; /* physical address of status buffer 1 */
- u32 st_buff_u_addr1; /* reserved for 64 bit addressing */
- u32 st_buff_indx1; /* reserved command idx. for this buffer */
- u32 st_buff_addr2; /* physical address of status buffer 2 */
- u32 st_buff_u_addr2; /* reserved for 64 bit addressing */
- u32 st_buff_indx2; /* reserved command idx. for this buffer */
- u32 st_buff_size; /* size of each buffer in bytes */
- u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
- u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
- u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
- u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
- u32 cmd_buff_addr2; /* physical address of cmd buffer 2 */
- u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
- u32 cmd_buff_indx2; /* cmd buf addr2 unique identifier */
- u32 cmd_buff_size; /* size of each cmd buffer in bytes */
- u32 reserved1;
- u32 reserved2;
-} __attribute__((packed)) gdth_perf_modes;
-
-/* SCSI drive info */
-typedef struct {
- u8 vendor[8]; /* vendor string */
- u8 product[16]; /* product string */
- u8 revision[4]; /* revision */
- u32 sy_rate; /* current rate for sync. tr. */
- u32 sy_max_rate; /* max. rate for sync. tr. */
- u32 no_ldrive; /* belongs to this log. drv.*/
- u32 blkcnt; /* number of blocks */
- u16 blksize; /* size of block in bytes */
- u8 available; /* flag: access is available */
- u8 init; /* medium is initialized */
- u8 devtype; /* SCSI devicetype */
- u8 rm_medium; /* medium is removable */
- u8 wp_medium; /* medium is write protected */
- u8 ansi; /* SCSI I/II or III? */
- u8 protocol; /* same as ansi */
- u8 sync; /* flag: sync. transfer enab. */
- u8 disc; /* flag: disconnect enabled */
- u8 queueing; /* flag: command queuing enab. */
- u8 cached; /* flag: caching enabled */
- u8 target_id; /* target ID of device */
- u8 lun; /* LUN id of device */
- u8 orphan; /* flag: drive fragment */
- u32 last_error; /* sense key or drive state */
- u32 last_result; /* result of last command */
- u32 check_errors; /* err. in last surface check */
- u8 percent; /* progress for surface check */
- u8 last_check; /* IOCTRL operation */
- u8 res[2];
- u32 flags; /* from 1.19/2.19: raw reserv.*/
- u8 multi_bus; /* multi bus dev? (fibre ch.) */
- u8 mb_status; /* status: available? */
- u8 res2[2];
- u8 mb_alt_status; /* status on second bus */
- u8 mb_alt_bid; /* number of second bus */
- u8 mb_alt_tid; /* target id on second bus */
- u8 res3;
- u8 fc_flag; /* from 1.22/2.22: info valid?*/
- u8 res4;
- u16 fc_frame_size; /* frame size (bytes) */
- char wwn[8]; /* world wide name */
-} __attribute__((packed)) gdth_diskinfo_str;
-
-/* get SCSI channel count */
-typedef struct {
- u32 channel_no; /* number of channel */
- u32 drive_cnt; /* drive count */
- u8 siop_id; /* SCSI processor ID */
- u8 siop_state; /* SCSI processor state */
-} __attribute__((packed)) gdth_getch_str;
-
-/* get SCSI drive numbers */
-typedef struct {
- u32 sc_no; /* SCSI channel */
- u32 sc_cnt; /* sc_list[] elements */
- u32 sc_list[MAXID]; /* minor device numbers */
-} __attribute__((packed)) gdth_drlist_str;
-
-/* get grown/primary defect count */
-typedef struct {
- u8 sddc_type; /* 0x08: grown, 0x10: prim. */
- u8 sddc_format; /* list entry format */
- u8 sddc_len; /* list entry length */
- u8 sddc_res;
- u32 sddc_cnt; /* entry count */
-} __attribute__((packed)) gdth_defcnt_str;
-
-/* disk statistics */
-typedef struct {
- u32 bid; /* SCSI channel */
- u32 first; /* first SCSI disk */
- u32 entries; /* number of elements */
- u32 count; /* (R) number of init. el. */
- u32 mon_time; /* time stamp */
- struct {
- u8 tid; /* target ID */
- u8 lun; /* LUN */
- u8 res[2];
- u32 blk_size; /* block size in bytes */
- u32 rd_count; /* bytes read */
- u32 wr_count; /* bytes written */
- u32 rd_blk_count; /* blocks read */
- u32 wr_blk_count; /* blocks written */
- u32 retries; /* retries */
- u32 reassigns; /* reassigns */
- } __attribute__((packed)) list[1];
-} __attribute__((packed)) gdth_dskstat_str;
-
-/* IO channel header */
-typedef struct {
- u32 version; /* version (-1UL: newest) */
- u8 list_entries; /* list entry count */
- u8 first_chan; /* first channel number */
- u8 last_chan; /* last channel number */
- u8 chan_count; /* (R) channel count */
- u32 list_offset; /* offset of list[0] */
-} __attribute__((packed)) gdth_iochan_header;
-
-/* get IO channel description */
-typedef struct {
- gdth_iochan_header hdr;
- struct {
- u32 address; /* channel address */
- u8 type; /* type (SCSI, FCAL) */
- u8 local_no; /* local number */
- u16 features; /* channel features */
- } __attribute__((packed)) list[MAXBUS];
-} __attribute__((packed)) gdth_iochan_str;
-
-/* get raw IO channel description */
-typedef struct {
- gdth_iochan_header hdr;
- struct {
- u8 proc_id; /* processor id */
- u8 proc_defect; /* defect ? */
- u8 reserved[2];
- } __attribute__((packed)) list[MAXBUS];
-} __attribute__((packed)) gdth_raw_iochan_str;
-
-/* array drive component */
-typedef struct {
- u32 al_controller; /* controller ID */
- u8 al_cache_drive; /* cache drive number */
- u8 al_status; /* cache drive state */
- u8 al_res[2];
-} __attribute__((packed)) gdth_arraycomp_str;
-
-/* array drive information */
-typedef struct {
- u8 ai_type; /* array type (RAID0,4,5) */
- u8 ai_cache_drive_cnt; /* active cachedrives */
- u8 ai_state; /* array drive state */
- u8 ai_master_cd; /* master cachedrive */
- u32 ai_master_controller; /* ID of master controller */
- u32 ai_size; /* user capacity [sectors] */
- u32 ai_striping_size; /* striping size [sectors] */
- u32 ai_secsize; /* sector size [bytes] */
- u32 ai_err_info; /* failed cache drive */
- u8 ai_name[8]; /* name of the array drive */
- u8 ai_controller_cnt; /* number of controllers */
- u8 ai_removable; /* flag: removable */
- u8 ai_write_protected; /* flag: write protected */
- u8 ai_devtype; /* type: always direct access */
- gdth_arraycomp_str ai_drives[35]; /* drive components: */
- u8 ai_drive_entries; /* number of drive components */
- u8 ai_protected; /* protection flag */
- u8 ai_verify_state; /* state of a parity verify */
- u8 ai_ext_state; /* extended array drive state */
- u8 ai_expand_state; /* array expand state (>=2.18)*/
- u8 ai_reserved[3];
-} __attribute__((packed)) gdth_arrayinf_str;
-
-/* get array drive list */
-typedef struct {
- u32 controller_no; /* controller no. */
- u8 cd_handle; /* master cachedrive */
- u8 is_arrayd; /* Flag: is array drive? */
- u8 is_master; /* Flag: is array master? */
- u8 is_parity; /* Flag: is parity drive? */
- u8 is_hotfix; /* Flag: is hotfix drive? */
- u8 res[3];
-} __attribute__((packed)) gdth_alist_str;
-
-typedef struct {
- u32 entries_avail; /* allocated entries */
- u32 entries_init; /* returned entries */
- u32 first_entry; /* first entry number */
- u32 list_offset; /* offset of following list */
- gdth_alist_str list[1]; /* list */
-} __attribute__((packed)) gdth_arcdl_str;
-
-/* cache info/config IOCTL */
-typedef struct {
- u32 version; /* firmware version */
- u16 state; /* cache state (on/off) */
- u16 strategy; /* cache strategy */
- u16 write_back; /* write back state (on/off) */
- u16 block_size; /* cache block size */
-} __attribute__((packed)) gdth_cpar_str;
-
-typedef struct {
- u32 csize; /* cache size */
- u32 read_cnt; /* read/write counter */
- u32 write_cnt;
- u32 tr_hits; /* hits */
- u32 sec_hits;
- u32 sec_miss; /* misses */
-} __attribute__((packed)) gdth_cstat_str;
-
-typedef struct {
- gdth_cpar_str cpar;
- gdth_cstat_str cstat;
-} __attribute__((packed)) gdth_cinfo_str;
-
-/* cache drive info */
-typedef struct {
- u8 cd_name[8]; /* cache drive name */
- u32 cd_devtype; /* SCSI devicetype */
- u32 cd_ldcnt; /* number of log. drives */
- u32 cd_last_error; /* last error */
- u8 cd_initialized; /* drive is initialized */
- u8 cd_removable; /* media is removable */
- u8 cd_write_protected; /* write protected */
- u8 cd_flags; /* Pool Hot Fix? */
- u32 ld_blkcnt; /* number of blocks */
- u32 ld_blksize; /* blocksize */
- u32 ld_dcnt; /* number of disks */
- u32 ld_slave; /* log. drive index */
- u32 ld_dtype; /* type of logical drive */
- u32 ld_last_error; /* last error */
- u8 ld_name[8]; /* log. drive name */
- u8 ld_error; /* error */
-} __attribute__((packed)) gdth_cdrinfo_str;
-
-/* OEM string */
-typedef struct {
- u32 ctl_version;
- u32 file_major_version;
- u32 file_minor_version;
- u32 buffer_size;
- u32 cpy_count;
- u32 ext_error;
- u32 oem_id;
- u32 board_id;
-} __attribute__((packed)) gdth_oem_str_params;
-
-typedef struct {
- u8 product_0_1_name[16];
- u8 product_4_5_name[16];
- u8 product_cluster_name[16];
- u8 product_reserved[16];
- u8 scsi_cluster_target_vendor_id[16];
- u8 cluster_raid_fw_name[16];
- u8 oem_brand_name[16];
- u8 oem_raid_type[16];
- u8 bios_type[13];
- u8 bios_title[50];
- u8 oem_company_name[37];
- u32 pci_id_1;
- u32 pci_id_2;
- u8 validation_status[80];
- u8 reserved_1[4];
- u8 scsi_host_drive_inquiry_vendor_id[16];
- u8 library_file_template[16];
- u8 reserved_2[16];
- u8 tool_name_1[32];
- u8 tool_name_2[32];
- u8 tool_name_3[32];
- u8 oem_contact_1[84];
- u8 oem_contact_2[84];
- u8 oem_contact_3[84];
-} __attribute__((packed)) gdth_oem_str;
-
-typedef struct {
- gdth_oem_str_params params;
- gdth_oem_str text;
-} __attribute__((packed)) gdth_oem_str_ioctl;
-
-/* board features */
-typedef struct {
- u8 chaining; /* Chaining supported */
- u8 striping; /* Striping (RAID-0) supp. */
- u8 mirroring; /* Mirroring (RAID-1) supp. */
- u8 raid; /* RAID-4/5/10 supported */
-} __attribute__((packed)) gdth_bfeat_str;
-
-/* board info IOCTL */
-typedef struct {
- u32 ser_no; /* serial no. */
- u8 oem_id[2]; /* OEM ID */
- u16 ep_flags; /* eprom flags */
- u32 proc_id; /* processor ID */
- u32 memsize; /* memory size (bytes) */
- u8 mem_banks; /* memory banks */
- u8 chan_type; /* channel type */
- u8 chan_count; /* channel count */
- u8 rdongle_pres; /* dongle present? */
- u32 epr_fw_ver; /* (eprom) firmware version */
- u32 upd_fw_ver; /* (update) firmware version */
- u32 upd_revision; /* update revision */
- char type_string[16]; /* controller name */
- char raid_string[16]; /* RAID firmware name */
- u8 update_pres; /* update present? */
- u8 xor_pres; /* XOR engine present? */
- u8 prom_type; /* ROM type (eprom/flash) */
- u8 prom_count; /* number of ROM devices */
- u32 dup_pres; /* duplexing module present? */
- u32 chan_pres; /* number of expansion chn. */
- u32 mem_pres; /* memory expansion inst. ? */
- u8 ft_bus_system; /* fault bus supported? */
- u8 subtype_valid; /* board_subtype valid? */
- u8 board_subtype; /* subtype/hardware level */
- u8 ramparity_pres; /* RAM parity check hardware? */
-} __attribute__((packed)) gdth_binfo_str;
-
-/* get host drive info */
-typedef struct {
- char name[8]; /* host drive name */
- u32 size; /* size (sectors) */
- u8 host_drive; /* host drive number */
- u8 log_drive; /* log. drive (master) */
- u8 reserved;
- u8 rw_attribs; /* r/w attribs */
- u32 start_sec; /* start sector */
-} __attribute__((packed)) gdth_hentry_str;
-
-typedef struct {
- u32 entries; /* entry count */
- u32 offset; /* offset of entries */
- u8 secs_p_head; /* sectors/head */
- u8 heads_p_cyl; /* heads/cylinder */
- u8 reserved;
- u8 clust_drvtype; /* cluster drive type */
- u32 location; /* controller number */
- gdth_hentry_str entry[MAX_HDRIVES]; /* entries */
-} __attribute__((packed)) gdth_hget_str;
-
-
-/* DPRAM structures */
-
-/* interface area ISA/PCI */
-typedef struct {
- u8 S_Cmd_Indx; /* special command */
- u8 volatile S_Status; /* status special command */
- u16 reserved1;
- u32 S_Info[4]; /* add. info special command */
- u8 volatile Sema0; /* command semaphore */
- u8 reserved2[3];
- u8 Cmd_Index; /* command number */
- u8 reserved3[3];
- u16 volatile Status; /* command status */
- u16 Service; /* service(for async.events) */
- u32 Info[2]; /* additional info */
- struct {
- u16 offset; /* command offs. in the DPRAM*/
- u16 serv_id; /* service */
- } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */
- u32 bios_reserved[2];
- u8 gdt_dpr_cmd[1]; /* commands */
-} __attribute__((packed)) gdt_dpr_if;
-
-/* SRAM structure PCI controllers */
-typedef struct {
- u32 magic; /* controller ID from BIOS */
- u16 need_deinit; /* switch betw. BIOS/driver */
- u8 switch_support; /* see need_deinit */
- u8 padding[9];
- u8 os_used[16]; /* OS code per service */
- u8 unused[28];
- u8 fw_magic; /* contr. ID from firmware */
-} __attribute__((packed)) gdt_pci_sram;
-
-/* DPRAM ISA controllers */
-typedef struct {
- union {
- struct {
- u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
- u16 need_deinit; /* switch betw. BIOS/driver */
- u8 switch_support; /* see need_deinit */
- u8 padding[9];
- u8 os_used[16]; /* OS code per service */
- } __attribute__((packed)) dp_sram;
- u8 bios_area[0x4000]; /* 16KB reserved for BIOS */
- } bu;
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0x3000]; /* 12KB for interface */
- } u;
- struct {
- u8 memlock; /* write protection DPRAM */
- u8 event; /* release event */
- u8 irqen; /* board interrupts enable */
- u8 irqdel; /* acknowledge board int. */
- u8 volatile Sema1; /* status semaphore */
- u8 rq; /* IRQ/DRQ configuration */
- } __attribute__((packed)) io;
-} __attribute__((packed)) gdt2_dpram_str;
-
-/* DPRAM PCI controllers */
-typedef struct {
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0xff0-sizeof(gdt_pci_sram)];
- } u;
- gdt_pci_sram gdt6sr; /* SRAM structure */
- struct {
- u8 unused0[1];
- u8 volatile Sema1; /* command semaphore */
- u8 unused1[3];
- u8 irqen; /* board interrupts enable */
- u8 unused2[2];
- u8 event; /* release event */
- u8 unused3[3];
- u8 irqdel; /* acknowledge board int. */
- u8 unused4[3];
- } __attribute__((packed)) io;
-} __attribute__((packed)) gdt6_dpram_str;
-
-/* PLX register structure (new PCI controllers) */
-typedef struct {
- u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
- u8 unused1[0x3f];
- u8 volatile sema0_reg; /* command semaphore */
- u8 volatile sema1_reg; /* status semaphore */
- u8 unused2[2];
- u16 volatile status; /* command status */
- u16 service; /* service */
- u32 info[2]; /* additional info */
- u8 unused3[0x10];
- u8 ldoor_reg; /* PCI to local doorbell */
- u8 unused4[3];
- u8 volatile edoor_reg; /* local to PCI doorbell */
- u8 unused5[3];
- u8 control0; /* control0 register(unused) */
- u8 control1; /* board interrupts enable */
- u8 unused6[0x16];
-} __attribute__((packed)) gdt6c_plx_regs;
-
-/* DPRAM new PCI controllers */
-typedef struct {
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0x4000-sizeof(gdt_pci_sram)];
- } u;
- gdt_pci_sram gdt6sr; /* SRAM structure */
-} __attribute__((packed)) gdt6c_dpram_str;
-
-/* i960 register structure (PCI MPR controllers) */
-typedef struct {
- u8 unused1[16];
- u8 volatile sema0_reg; /* command semaphore */
- u8 unused2;
- u8 volatile sema1_reg; /* status semaphore */
- u8 unused3;
- u16 volatile status; /* command status */
- u16 service; /* service */
- u32 info[2]; /* additional info */
- u8 ldoor_reg; /* PCI to local doorbell */
- u8 unused4[11];
- u8 volatile edoor_reg; /* local to PCI doorbell */
- u8 unused5[7];
- u8 edoor_en_reg; /* board interrupts enable */
- u8 unused6[27];
- u32 unused7[939];
- u32 severity;
- char evt_str[256]; /* event string */
-} __attribute__((packed)) gdt6m_i960_regs;
-
-/* DPRAM PCI MPR controllers */
-typedef struct {
- gdt6m_i960_regs i960r; /* 4KB i960 registers */
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0x3000-sizeof(gdt_pci_sram)];
- } u;
- gdt_pci_sram gdt6sr; /* SRAM structure */
-} __attribute__((packed)) gdt6m_dpram_str;
-
-
-/* PCI resources */
-typedef struct {
- struct pci_dev *pdev;
- unsigned long dpmem; /* DPRAM address */
- unsigned long io; /* IO address */
-} gdth_pci_str;
-
-
-/* controller information structure */
-typedef struct {
- struct Scsi_Host *shost;
- struct list_head list;
- u16 hanum;
- u16 oem_id; /* OEM */
- u16 type; /* controller class */
- u32 stype; /* subtype (PCI: device ID) */
- u16 fw_vers; /* firmware version */
- u16 cache_feat; /* feat. cache serv. (s/g,..)*/
- u16 raw_feat; /* feat. raw service (s/g,..)*/
- u16 screen_feat; /* feat. screen serv. (s/g,..)*/
- void __iomem *brd; /* DPRAM address */
- u32 brd_phys; /* slot number/BIOS address */
- gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
- gdth_cmd_str cmdext;
- gdth_cmd_str *pccb; /* address command structure */
- u32 ccb_phys; /* phys. address */
-#ifdef INT_COAL
- gdth_coal_status *coal_stat; /* buffer for coalescing int.*/
- u64 coal_stat_phys; /* phys. address */
-#endif
- char *pscratch; /* scratch (DMA) buffer */
- u64 scratch_phys; /* phys. address */
- u8 scratch_busy; /* in use? */
- u8 dma64_support; /* 64-bit DMA supported? */
- gdth_msg_str *pmsg; /* message buffer */
- u64 msg_phys; /* phys. address */
- u8 scan_mode; /* current scan mode */
- u8 irq; /* IRQ */
- u8 drq; /* DRQ (ISA controllers) */
- u16 status; /* command status */
- u16 service; /* service/firmware ver./.. */
- u32 info;
- u32 info2; /* additional info */
- struct scsi_cmnd *req_first; /* top of request queue */
- struct {
- u8 present; /* Flag: host drive present? */
- u8 is_logdrv; /* Flag: log. drive (master)? */
- u8 is_arraydrv; /* Flag: array drive? */
- u8 is_master; /* Flag: array drive master? */
- u8 is_parity; /* Flag: parity drive? */
- u8 is_hotfix; /* Flag: hotfix drive? */
- u8 master_no; /* number of master drive */
- u8 lock; /* drive locked? (hot plug) */
- u8 heads; /* mapping */
- u8 secs;
- u16 devtype; /* further information */
- u64 size; /* capacity */
- u8 ldr_no; /* log. drive no. */
- u8 rw_attribs; /* r/w attributes */
- u8 cluster_type; /* cluster properties */
- u8 media_changed; /* Flag:MOUNT/UNMOUNT occurred */
- u32 start_sec; /* start sector */
- } hdr[MAX_LDRIVES]; /* host drives */
- struct {
- u8 lock; /* channel locked? (hot plug) */
- u8 pdev_cnt; /* physical device count */
- u8 local_no; /* local channel number */
- u8 io_cnt[MAXID]; /* current IO count */
- u32 address; /* channel address */
- u32 id_list[MAXID]; /* IDs of the phys. devices */
- } raw[MAXBUS]; /* SCSI channels */
- struct {
- struct scsi_cmnd *cmnd; /* pending request */
- u16 service; /* service */
- } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
- struct gdth_cmndinfo { /* per-command private info */
- int index;
- int internal_command; /* don't call scsi_done */
- gdth_cmd_str *internal_cmd_str; /* carrier for internal messages */
- dma_addr_t sense_paddr; /* sense dma-addr */
- u8 priority;
- int timeout_count; /* # of timeout calls */
- volatile int wait_for_completion;
- u16 status;
- u32 info;
- enum dma_data_direction dma_dir;
- int phase; /* ???? */
- int OpCode;
- } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */
- u8 bus_cnt; /* SCSI bus count */
- u8 tid_cnt; /* Target ID count */
- u8 bus_id[MAXBUS]; /* IOP IDs */
- u8 virt_bus; /* number of virtual bus */
- u8 more_proc; /* more /proc info supported */
- u16 cmd_cnt; /* command count in DPRAM */
- u16 cmd_len; /* length of actual command */
- u16 cmd_offs_dpmem; /* actual offset in DPRAM */
- u16 ic_all_size; /* sizeof DPRAM interf. area */
- gdth_cpar_str cpar; /* controller cache par. */
- gdth_bfeat_str bfeat; /* controller features */
- gdth_binfo_str binfo; /* controller info */
- gdth_evt_data dvr; /* event structure */
- spinlock_t smp_lock;
- struct pci_dev *pdev;
- char oem_name[8];
-#ifdef GDTH_DMA_STATISTICS
- unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
-#endif
- struct scsi_device *sdev;
-} gdth_ha_str;
-
-static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd)
-{
- return (struct gdth_cmndinfo *)cmd->host_scribble;
-}
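
A minimal sketch of the setup-side counterpart to gdth_cmnd_priv(), assuming the driver attaches the per-command private data through host_scribble when a command is prepared (helper name illustrative, not the driver's literal code):

static inline void gdth_set_cmnd_priv(struct scsi_cmnd *cmd,
                                      struct gdth_cmndinfo *priv)
{
	/* make the private info retrievable later via gdth_cmnd_priv() */
	cmd->host_scribble = (unsigned char *)priv;
}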
-
-/* INQUIRY data format */
-typedef struct {
- u8 type_qual;
- u8 modif_rmb;
- u8 version;
- u8 resp_aenc;
- u8 add_length;
- u8 reserved1;
- u8 reserved2;
- u8 misc;
- u8 vendor[8];
- u8 product[16];
- u8 revision[4];
-} __attribute__((packed)) gdth_inq_data;
-
-/* READ_CAPACITY data format */
-typedef struct {
- u32 last_block_no;
- u32 block_length;
-} __attribute__((packed)) gdth_rdcap_data;
-
-/* READ_CAPACITY (16) data format */
-typedef struct {
- u64 last_block_no;
- u32 block_length;
-} __attribute__((packed)) gdth_rdcap16_data;
-
-/* REQUEST_SENSE data format */
-typedef struct {
- u8 errorcode;
- u8 segno;
- u8 key;
- u32 info;
- u8 add_length;
- u32 cmd_info;
- u8 adsc;
- u8 adsq;
- u8 fruc;
- u8 key_spec[3];
-} __attribute__((packed)) gdth_sense_data;
-
-/* MODE_SENSE data format */
-typedef struct {
- struct {
- u8 data_length;
- u8 med_type;
- u8 dev_par;
- u8 bd_length;
- } __attribute__((packed)) hd;
- struct {
- u8 dens_code;
- u8 block_count[3];
- u8 reserved;
- u8 block_length[3];
- } __attribute__((packed)) bd;
-} __attribute__((packed)) gdth_modep_data;
-
-/* stack frame */
-typedef struct {
- unsigned long b[10]; /* 32/64 bit compiler ! */
-} __attribute__((packed)) gdth_stackframe;
-
-
-/* function prototyping */
-
-int gdth_show_info(struct seq_file *, struct Scsi_Host *);
-int gdth_set_info(struct Scsi_Host *, char *, int);
-
-#endif
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
deleted file mode 100644
index ee4c9bf1022a..000000000000
--- a/drivers/scsi/gdth_ioctl.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GDTH_IOCTL_H
-#define _GDTH_IOCTL_H
-
-/* gdth_ioctl.h
- * $Id: gdth_ioctl.h,v 1.14 2004/02/19 15:43:15 achim Exp $
- */
-
-/* IOCTLs */
-#define GDTIOCTL_MASK ('J'<<8)
-#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
-#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
-#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
-#define GDTIOCTL_OSVERS (GDTIOCTL_MASK | 3) /* get OS version */
-#define GDTIOCTL_HDRLIST (GDTIOCTL_MASK | 4) /* get host drive list */
-#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
-#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
-#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
-#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
-#define GDTIOCTL_SCSI (GDTIOCTL_MASK | 9) /* SCSI command */
-#define GDTIOCTL_RESET_BUS (GDTIOCTL_MASK |10) /* reset SCSI bus */
-#define GDTIOCTL_RESCAN (GDTIOCTL_MASK |11) /* rescan host drives */
-#define GDTIOCTL_RESET_DRV (GDTIOCTL_MASK |12) /* reset (remote) drv. res. */
-
-#define GDTIOCTL_MAGIC 0xaffe0004
-#define EVENT_SIZE 294
-#define GDTH_MAXSG 32 /* max. s/g elements */
-
-#define MAX_LDRIVES 255 /* max. log. drive count */
-#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
-
-/* scatter/gather element */
-typedef struct {
- u32 sg_ptr; /* address */
- u32 sg_len; /* length */
-} __attribute__((packed)) gdth_sg_str;
-
-/* scatter/gather element - 64bit addresses */
-typedef struct {
- u64 sg_ptr; /* address */
- u32 sg_len; /* length */
-} __attribute__((packed)) gdth_sg64_str;
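
A minimal sketch, assuming a scatterlist already mapped with dma_map_sg(), of how a 64-bit s/g list of this shape can be populated (helper name illustrative):

#include <linux/scatterlist.h>

static void fill_sg64_list(gdth_sg64_str *lst, struct scatterlist *sgl,
                           int nents)
{
	struct scatterlist *sg;
	int i;

	/* nents is the element count returned by dma_map_sg() */
	for_each_sg(sgl, sg, nents, i) {
		lst[i].sg_ptr = sg_dma_address(sg); /* bus address */
		lst[i].sg_len = sg_dma_len(sg);     /* mapped length */
	}
}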
-
-/* command structure */
-typedef struct {
- u32 BoardNode; /* board node (always 0) */
- u32 CommandIndex; /* command number */
- u16 OpCode; /* the command (READ,..) */
- union {
- struct {
- u16 DeviceNo; /* number of cache drive */
- u32 BlockNo; /* block number */
- u32 BlockCnt; /* block count */
- u32 DestAddr; /* dest. addr. (if s/g: -1) */
- u32 sg_canz; /* s/g element count */
- gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) cache; /* cache service cmd. str. */
- struct {
- u16 DeviceNo; /* number of cache drive */
- u64 BlockNo; /* block number */
- u32 BlockCnt; /* block count */
- u64 DestAddr; /* dest. addr. (if s/g: -1) */
- u32 sg_canz; /* s/g element count */
- gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) cache64; /* cache service cmd. str. */
- struct {
- u16 param_size; /* size of p_param buffer */
- u32 subfunc; /* IOCTL function */
- u32 channel; /* device */
- u64 p_param; /* buffer */
- } __attribute__((packed)) ioctl; /* IOCTL command structure */
- struct {
- u16 reserved;
- union {
- struct {
- u32 msg_handle; /* message handle */
- u64 msg_addr; /* message buffer address */
- } __attribute__((packed)) msg;
- u8 data[12]; /* buffer for rtc data, ... */
- } su;
- } __attribute__((packed)) screen; /* screen service cmd. str. */
- struct {
- u16 reserved;
- u32 direction; /* data direction */
- u32 mdisc_time; /* disc. time (0: no timeout)*/
- u32 mcon_time; /* connect time(0: no to.) */
- u32 sdata; /* dest. addr. (if s/g: -1) */
- u32 sdlen; /* data length (bytes) */
- u32 clen; /* SCSI cmd. length(6,10,12) */
- u8 cmd[12]; /* SCSI command */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 bus; /* SCSI bus number */
- u8 priority; /* only 0 used */
- u32 sense_len; /* sense data length */
- u32 sense_data; /* sense data addr. */
- u32 link_p; /* linked cmds (not supp.) */
- u32 sg_ranz; /* s/g element count */
- gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) raw; /* raw service cmd. struct. */
- struct {
- u16 reserved;
- u32 direction; /* data direction */
- u32 mdisc_time; /* disc. time (0: no timeout)*/
- u32 mcon_time; /* connect time(0: no to.) */
- u64 sdata; /* dest. addr. (if s/g: -1) */
- u32 sdlen; /* data length (bytes) */
- u32 clen; /* SCSI cmd. length(6,..,16) */
- u8 cmd[16]; /* SCSI command */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 bus; /* SCSI bus number */
- u8 priority; /* only 0 used */
- u32 sense_len; /* sense data length */
- u64 sense_data; /* sense data addr. */
- u32 sg_ranz; /* s/g element count */
- gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) raw64; /* raw service cmd. struct. */
- } u;
- /* additional variables */
- u8 Service; /* controller service */
- u8 reserved;
- u16 Status; /* command result */
- u32 Info; /* additional information */
- void *RequestBuffer; /* request buffer */
-} __attribute__((packed)) gdth_cmd_str;
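
A minimal sketch of filling the cache64 variant for a read that goes through the s/g list; per the field comments above, a DestAddr of all ones selects s/g mode. CACHESERVICE and GDT_READ are assumed to be the driver's service/opcode constants, and the helper name is illustrative:

static void build_cache64_read(gdth_cmd_str *cmd, u16 hdrive,
                               u64 first_block, u32 block_cnt, u32 sg_cnt)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->Service = CACHESERVICE;          /* assumed constant */
	cmd->OpCode = GDT_READ;               /* assumed constant */
	cmd->u.cache64.DeviceNo = hdrive;
	cmd->u.cache64.BlockNo = first_block;
	cmd->u.cache64.BlockCnt = block_cnt;
	cmd->u.cache64.DestAddr = (u64)-1;    /* data goes via sg_lst[] */
	cmd->u.cache64.sg_canz = sg_cnt;
}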
-
-/* controller event structure */
-#define ES_ASYNC 1
-#define ES_DRIVER 2
-#define ES_TEST 3
-#define ES_SYNC 4
-typedef struct {
- u16 size; /* size of structure */
- union {
- char stream[16];
- struct {
- u16 ionode;
- u16 service;
- u32 index;
- } __attribute__((packed)) driver;
- struct {
- u16 ionode;
- u16 service;
- u16 status;
- u32 info;
- u8 scsi_coord[3];
- } __attribute__((packed)) async;
- struct {
- u16 ionode;
- u16 service;
- u16 status;
- u32 info;
- u16 hostdrive;
- u8 scsi_coord[3];
- u8 sense_key;
- } __attribute__((packed)) sync;
- struct {
- u32 l1, l2, l3, l4;
- } __attribute__((packed)) test;
- } eu;
- u32 severity;
- u8 event_string[256];
-} __attribute__((packed)) gdth_evt_data;
-
-typedef struct {
- u32 first_stamp;
- u32 last_stamp;
- u16 same_count;
- u16 event_source;
- u16 event_idx;
- u8 application;
- u8 reserved;
- gdth_evt_data event_data;
-} __attribute__((packed)) gdth_evt_str;
-
-/* GDTIOCTL_GENERAL */
-typedef struct {
- u16 ionode; /* controller number */
- u16 timeout; /* timeout */
- u32 info; /* error info */
- u16 status; /* status */
- unsigned long data_len; /* data buffer size */
- unsigned long sense_len; /* sense buffer size */
- gdth_cmd_str command; /* command */
-} gdth_ioctl_general;
-
-/* GDTIOCTL_LOCKDRV */
-typedef struct {
- u16 ionode; /* controller number */
- u8 lock; /* lock/unlock */
- u8 drive_cnt; /* drive count */
- u16 drives[MAX_HDRIVES]; /* drives */
-} gdth_ioctl_lockdrv;
-
-/* GDTIOCTL_LOCKCHN */
-typedef struct {
- u16 ionode; /* controller number */
- u8 lock; /* lock/unlock */
- u8 channel; /* channel */
-} gdth_ioctl_lockchn;
-
-/* GDTIOCTL_OSVERS */
-typedef struct {
- u8 version; /* OS version */
- u8 subversion; /* OS subversion */
- u16 revision; /* revision */
-} gdth_ioctl_osvers;
-
-/* GDTIOCTL_CTRTYPE */
-typedef struct {
- u16 ionode; /* controller number */
- u8 type; /* controller type */
- u16 info; /* slot etc. */
- u16 oem_id; /* OEM ID */
- u16 bios_ver; /* not used */
- u16 access; /* not used */
- u16 ext_type; /* extended type */
- u16 device_id; /* device ID */
- u16 sub_device_id; /* sub device ID */
-} gdth_ioctl_ctrtype;
-
-/* GDTIOCTL_EVENT */
-typedef struct {
- u16 ionode;
- int erase; /* erase event? */
- int handle; /* event handle */
- gdth_evt_str event;
-} gdth_ioctl_event;
-
-/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
-typedef struct {
- u16 ionode; /* controller number */
- u8 flag; /* add/remove */
- u16 hdr_no; /* drive no. */
- struct {
- u8 bus; /* SCSI bus */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 cluster_type; /* cluster properties */
- } hdr_list[MAX_HDRIVES]; /* index is host drive number */
-} gdth_ioctl_rescan;
-
-/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
-typedef struct {
- u16 ionode; /* controller number */
- u16 number; /* bus/host drive number */
- u16 status; /* status */
-} gdth_ioctl_reset;
-
-#endif
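
With 'J' == 0x4a, the IOCTL numbers defined above work out to 0x4a00 (GDTIOCTL_GENERAL) through 0x4a0c (GDTIOCTL_RESET_DRV); for example, GDTIOCTL_DRVERS == ('J'<<8) | 1 == 0x4a01.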
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
deleted file mode 100644
index 381d849726ac..000000000000
--- a/drivers/scsi/gdth_proc.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* gdth_proc.c
- * $Id: gdth_proc.c,v 1.43 2006/01/11 16:15:00 achim Exp $
- */
-
-#include <linux/completion.h>
-#include <linux/slab.h>
-
-int gdth_set_info(struct Scsi_Host *host, char *buffer, int length)
-{
- gdth_ha_str *ha = shost_priv(host);
- int ret_val = -EINVAL;
-
- TRACE2(("gdth_set_info() ha %d\n",ha->hanum,));
-
- if (length >= 4) {
- if (strncmp(buffer,"gdth",4) == 0) {
- buffer += 5;
- length -= 5;
- ret_val = gdth_set_asc_info(host, buffer, length, ha);
- }
- }
-
- return ret_val;
-}
-
-static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
- int length, gdth_ha_str *ha)
-{
- int orig_length, drive, wb_mode;
- int i, found;
- gdth_cmd_str gdtcmd;
- gdth_cpar_str *pcpar;
-
- char cmnd[MAX_COMMAND_SIZE];
- memset(cmnd, 0xff, 12);
- memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
-
- TRACE2(("gdth_set_asc_info() ha %d\n",ha->hanum));
- orig_length = length + 5;
- drive = -1;
- wb_mode = 0;
- found = FALSE;
-
- if (length >= 5 && strncmp(buffer,"flush",5)==0) {
- buffer += 6;
- length -= 6;
- if (length && *buffer>='0' && *buffer<='9') {
- drive = (int)(*buffer-'0');
- ++buffer; --length;
- if (length && *buffer>='0' && *buffer<='9') {
- drive = drive*10 + (int)(*buffer-'0');
- ++buffer; --length;
- }
- printk("GDT: Flushing host drive %d .. ",drive);
- } else {
- printk("GDT: Flushing all host drives .. ");
- }
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (ha->hdr[i].present) {
- if (drive != -1 && i != drive)
- continue;
- found = TRUE;
- gdtcmd.Service = CACHESERVICE;
- gdtcmd.OpCode = GDT_FLUSH;
- if (ha->cache_feat & GDT_64BIT) {
- gdtcmd.u.cache64.DeviceNo = i;
- gdtcmd.u.cache64.BlockNo = 1;
- } else {
- gdtcmd.u.cache.DeviceNo = i;
- gdtcmd.u.cache.BlockNo = 1;
- }
-
- gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
- }
- }
- if (!found)
- printk("\nNo host drive found !\n");
- else
- printk("Done.\n");
- return(orig_length);
- }
-
- if (length >= 7 && strncmp(buffer,"wbp_off",7)==0) {
- buffer += 8;
- length -= 8;
- printk("GDT: Disabling write back permanently .. ");
- wb_mode = 1;
- } else if (length >= 6 && strncmp(buffer,"wbp_on",6)==0) {
- buffer += 7;
- length -= 7;
- printk("GDT: Enabling write back permanently .. ");
- wb_mode = 2;
- } else if (length >= 6 && strncmp(buffer,"wb_off",6)==0) {
- buffer += 7;
- length -= 7;
- printk("GDT: Disabling write back commands .. ");
- if (ha->cache_feat & GDT_WR_THROUGH) {
- gdth_write_through = TRUE;
- printk("Done.\n");
- } else {
- printk("Not supported !\n");
- }
- return(orig_length);
- } else if (length >= 5 && strncmp(buffer,"wb_on",5)==0) {
- buffer += 6;
- length -= 6;
- printk("GDT: Enabling write back commands .. ");
- gdth_write_through = FALSE;
- printk("Done.\n");
- return(orig_length);
- }
-
- if (wb_mode) {
- unsigned long flags;
-
- BUILD_BUG_ON(sizeof(gdth_cpar_str) > GDTH_SCRATCH);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- if (ha->scratch_busy) {
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return -EBUSY;
- }
- ha->scratch_busy = TRUE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- pcpar = (gdth_cpar_str *)ha->pscratch;
- memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
- gdtcmd.Service = CACHESERVICE;
- gdtcmd.OpCode = GDT_IOCTL;
- gdtcmd.u.ioctl.p_param = ha->scratch_phys;
- gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
- gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
- gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
- pcpar->write_back = wb_mode==1 ? 0:1;
-
- gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->scratch_busy = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- printk("Done.\n");
- return(orig_length);
- }
-
- printk("GDT: Unknown command: %s Length: %d\n",buffer,length);
- return(-EINVAL);
-}
-
-int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- gdth_ha_str *ha = shost_priv(host);
- int hlen;
- int id, i, j, k, sec, flag;
- int no_mdrv = 0, drv_no, is_mirr;
- u32 cnt;
- dma_addr_t paddr;
- int rc = -ENOMEM;
-
- gdth_cmd_str *gdtcmd;
- gdth_evt_str *estr;
- char hrec[277];
-
- char *buf;
- gdth_dskstat_str *pds;
- gdth_diskinfo_str *pdi;
- gdth_arrayinf_str *pai;
- gdth_defcnt_str *pdef;
- gdth_cdrinfo_str *pcdi;
- gdth_hget_str *phg;
- char cmnd[MAX_COMMAND_SIZE];
-
- gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
- estr = kmalloc(sizeof(*estr), GFP_KERNEL);
- if (!gdtcmd || !estr)
- goto free_fail;
-
- memset(cmnd, 0xff, 12);
- memset(gdtcmd, 0, sizeof(gdth_cmd_str));
-
- TRACE2(("gdth_get_info() ha %d\n",ha->hanum));
-
-
- /* request is e.g. "cat /proc/scsi/gdth/0" */
- /* format: %-15s\t%-10s\t%-15s\t%s */
- /* driver parameters */
- seq_puts(m, "Driver Parameters:\n");
- if (reserve_list[0] == 0xff)
- strcpy(hrec, "--");
- else {
- hlen = sprintf(hrec, "%d", reserve_list[0]);
- for (i = 1; i < MAX_RES_ARGS; i++) {
- if (reserve_list[i] == 0xff)
- break;
- hlen += snprintf(hrec + hlen, 161 - hlen, ",%d", reserve_list[i]);
- }
- }
- seq_printf(m,
- " reserve_mode: \t%d \treserve_list: \t%s\n",
- reserve_mode, hrec);
- seq_printf(m,
- " max_ids: \t%-3d \thdr_channel: \t%d\n",
- max_ids, hdr_channel);
-
- /* controller information */
- seq_puts(m, "\nDisk Array Controller Information:\n");
- seq_printf(m,
- " Number: \t%d \tName: \t%s\n",
- ha->hanum, ha->binfo.type_string);
-
- seq_printf(m,
- " Driver Ver.: \t%-10s\tFirmware Ver.: \t",
- GDTH_VERSION_STR);
- if (ha->more_proc)
- seq_printf(m, "%d.%02d.%02d-%c%03X\n",
- (u8)(ha->binfo.upd_fw_ver>>24),
- (u8)(ha->binfo.upd_fw_ver>>16),
- (u8)(ha->binfo.upd_fw_ver),
- ha->bfeat.raid ? 'R':'N',
- ha->binfo.upd_revision);
- else
- seq_printf(m, "%d.%02d\n", (u8)(ha->cpar.version>>8),
- (u8)(ha->cpar.version));
-
- if (ha->more_proc)
- /* more information: 1. about controller */
- seq_printf(m,
- " Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n",
- ha->binfo.ser_no, ha->binfo.memsize / 1024);
-
- if (ha->more_proc) {
- size_t size = max_t(size_t, GDTH_SCRATCH, sizeof(gdth_hget_str));
-
- /* more information: 2. about physical devices */
- seq_puts(m, "\nPhysical Devices:");
- flag = FALSE;
-
- buf = dma_alloc_coherent(&ha->pdev->dev, size, &paddr, GFP_KERNEL);
- if (!buf)
- goto stop_output;
- for (i = 0; i < ha->bus_cnt; ++i) {
- /* 2.a statistics (and retries/reassigns) */
- TRACE2(("pdr_statistics() chn %d\n",i));
- pds = (gdth_dskstat_str *)(buf + GDTH_SCRATCH/4);
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr + GDTH_SCRATCH/4;
- gdtcmd->u.ioctl.param_size = 3*GDTH_SCRATCH/4;
- gdtcmd->u.ioctl.subfunc = DSK_STATISTICS | L_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel = ha->raw[i].address | INVALID_CHANNEL;
- pds->bid = ha->raw[i].local_no;
- pds->first = 0;
- pds->entries = ha->raw[i].pdev_cnt;
- cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
- sizeof(pds->list[0]);
- if (pds->entries > cnt)
- pds->entries = cnt;
-
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
- pds->count = 0;
-
- /* other IOCTLs must fit into area GDTH_SCRATCH/4 */
- for (j = 0; j < ha->raw[i].pdev_cnt; ++j) {
- /* 2.b drive info */
- TRACE2(("scsi_drv_info() chn %d dev %d\n",
- i, ha->raw[i].id_list[j]));
- pdi = (gdth_diskinfo_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_diskinfo_str);
- gdtcmd->u.ioctl.subfunc = SCSI_DR_INFO | L_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel =
- ha->raw[i].address | ha->raw[i].id_list[j];
-
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- strncpy(hrec,pdi->vendor,8);
- strncpy(hrec+8,pdi->product,16);
- strncpy(hrec+24,pdi->revision,4);
- hrec[28] = 0;
- seq_printf(m,
- "\n Chn/ID/LUN: \t%c/%02d/%d \tName: \t%s\n",
- 'A'+i,pdi->target_id,pdi->lun,hrec);
- flag = TRUE;
- pdi->no_ldrive &= 0xffff;
- if (pdi->no_ldrive == 0xffff)
- strcpy(hrec,"--");
- else
- sprintf(hrec,"%d",pdi->no_ldrive);
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tTo Log. Drive: \t%s\n",
- pdi->blkcnt/(1024*1024/pdi->blksize),
- hrec);
- } else {
- pdi->devtype = 0xff;
- }
-
- if (pdi->devtype == 0) {
- /* search retries/reassigns */
- for (k = 0; k < pds->count; ++k) {
- if (pds->list[k].tid == pdi->target_id &&
- pds->list[k].lun == pdi->lun) {
- seq_printf(m,
- " Retries: \t%-6d \tReassigns: \t%d\n",
- pds->list[k].retries,
- pds->list[k].reassigns);
- break;
- }
- }
- /* 2.c grown defects */
- TRACE2(("scsi_drv_defcnt() chn %d dev %d\n",
- i, ha->raw[i].id_list[j]));
- pdef = (gdth_defcnt_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_defcnt_str);
- gdtcmd->u.ioctl.subfunc = SCSI_DEF_CNT | L_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel =
- ha->raw[i].address | ha->raw[i].id_list[j];
- pdef->sddc_type = 0x08;
-
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- seq_printf(m,
- " Grown Defects:\t%d\n",
- pdef->sddc_cnt);
- }
- }
- }
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
-
- /* 3. about logical drives */
- seq_puts(m, "\nLogical Drives:");
- flag = FALSE;
-
- for (i = 0; i < MAX_LDRIVES; ++i) {
- if (!ha->hdr[i].is_logdrv)
- continue;
- drv_no = i;
- j = k = 0;
- is_mirr = FALSE;
- do {
- /* 3.a log. drive info */
- TRACE2(("cache_drv_info() drive no %d\n",drv_no));
- pcdi = (gdth_cdrinfo_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_cdrinfo_str);
- gdtcmd->u.ioctl.subfunc = CACHE_DRV_INFO;
- gdtcmd->u.ioctl.channel = drv_no;
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
- break;
- pcdi->ld_dtype >>= 16;
- j++;
- if (pcdi->ld_dtype > 2) {
- strcpy(hrec, "missing");
- } else if (pcdi->ld_error & 1) {
- strcpy(hrec, "fault");
- } else if (pcdi->ld_error & 2) {
- strcpy(hrec, "invalid");
- k++; j--;
- } else {
- strcpy(hrec, "ok");
- }
-
- if (drv_no == i) {
- seq_printf(m,
- "\n Number: \t%-2d \tStatus: \t%s\n",
- drv_no, hrec);
- flag = TRUE;
- no_mdrv = pcdi->cd_ldcnt;
- if (no_mdrv > 1 || pcdi->ld_slave != -1) {
- is_mirr = TRUE;
- strcpy(hrec, "RAID-1");
- } else if (pcdi->ld_dtype == 0) {
- strcpy(hrec, "Disk");
- } else if (pcdi->ld_dtype == 1) {
- strcpy(hrec, "RAID-0");
- } else if (pcdi->ld_dtype == 2) {
- strcpy(hrec, "Chain");
- } else {
- strcpy(hrec, "???");
- }
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tType: \t%s\n",
- pcdi->ld_blkcnt/(1024*1024/pcdi->ld_blksize),
- hrec);
- } else {
- seq_printf(m,
- " Slave Number: \t%-2d \tStatus: \t%s\n",
- drv_no & 0x7fff, hrec);
- }
- drv_no = pcdi->ld_slave;
- } while (drv_no != -1);
-
- if (is_mirr)
- seq_printf(m,
- " Missing Drv.: \t%-2d \tInvalid Drv.: \t%d\n",
- no_mdrv - j - k, k);
-
- if (!ha->hdr[i].is_arraydrv)
- strcpy(hrec, "--");
- else
- sprintf(hrec, "%d", ha->hdr[i].master_no);
- seq_printf(m,
- " To Array Drv.:\t%s\n", hrec);
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
-
- /* 4. about array drives */
- seq_puts(m, "\nArray Drives:");
- flag = FALSE;
-
- for (i = 0; i < MAX_LDRIVES; ++i) {
- if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master))
- continue;
- /* 4.a array drive info */
- TRACE2(("array_info() drive no %d\n",i));
- pai = (gdth_arrayinf_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_arrayinf_str);
- gdtcmd->u.ioctl.subfunc = ARRAY_INFO | LA_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel = i;
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- if (pai->ai_state == 0)
- strcpy(hrec, "idle");
- else if (pai->ai_state == 2)
- strcpy(hrec, "build");
- else if (pai->ai_state == 4)
- strcpy(hrec, "ready");
- else if (pai->ai_state == 6)
- strcpy(hrec, "fail");
- else if (pai->ai_state == 8 || pai->ai_state == 10)
- strcpy(hrec, "rebuild");
- else
- strcpy(hrec, "error");
- if (pai->ai_ext_state & 0x10)
- strcat(hrec, "/expand");
- else if (pai->ai_ext_state & 0x1)
- strcat(hrec, "/patch");
- seq_printf(m,
- "\n Number: \t%-2d \tStatus: \t%s\n",
- i,hrec);
- flag = TRUE;
-
- if (pai->ai_type == 0)
- strcpy(hrec, "RAID-0");
- else if (pai->ai_type == 4)
- strcpy(hrec, "RAID-4");
- else if (pai->ai_type == 5)
- strcpy(hrec, "RAID-5");
- else
- strcpy(hrec, "RAID-10");
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tType: \t%s\n",
- pai->ai_size/(1024*1024/pai->ai_secsize),
- hrec);
- }
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
-
- /* 5. about host drives */
- seq_puts(m, "\nHost Drives:");
- flag = FALSE;
-
- for (i = 0; i < MAX_LDRIVES; ++i) {
- if (!ha->hdr[i].is_logdrv ||
- (ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master))
- continue;
- /* 5.a get host drive list */
- TRACE2(("host_get() drv_no %d\n",i));
- phg = (gdth_hget_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_hget_str);
- gdtcmd->u.ioctl.subfunc = HOST_GET | LA_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel = i;
- phg->entries = MAX_HDRIVES;
- phg->offset = GDTOFFSOF(gdth_hget_str, entry[0]);
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- ha->hdr[i].ldr_no = i;
- ha->hdr[i].rw_attribs = 0;
- ha->hdr[i].start_sec = 0;
- } else {
- for (j = 0; j < phg->entries; ++j) {
- k = phg->entry[j].host_drive;
- if (k >= MAX_LDRIVES)
- continue;
- ha->hdr[k].ldr_no = phg->entry[j].log_drive;
- ha->hdr[k].rw_attribs = phg->entry[j].rw_attribs;
- ha->hdr[k].start_sec = phg->entry[j].start_sec;
- }
- }
- }
- dma_free_coherent(&ha->pdev->dev, size, buf, paddr);
-
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (!(ha->hdr[i].present))
- continue;
-
- seq_printf(m,
- "\n Number: \t%-2d \tArr/Log. Drive:\t%d\n",
- i, ha->hdr[i].ldr_no);
- flag = TRUE;
-
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
- (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
- }
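- /*
- * Arithmetic note (assuming 512-byte sectors): 1 MB == 2048 * 512
- * bytes, which is why the capacity above is printed as size/2048.
- */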
-
- if (!flag)
- seq_puts(m, "\n --\n");
- }
-
- /* controller events */
- seq_puts(m, "\nController Events:\n");
-
- for (id = -1;;) {
- id = gdth_read_event(ha, id, estr);
- if (estr->event_source == 0)
- break;
- if (estr->event_data.eu.driver.ionode == ha->hanum &&
- estr->event_source == ES_ASYNC) {
- gdth_log_event(&estr->event_data, hrec);
-
- /*
- * Elapsed seconds subtraction with unsigned operands is
- * safe from wrap around in year 2106. Executes as:
- * operand a + (2's complement operand b) + 1
- */
-
- sec = (int)((u32)ktime_get_real_seconds() - estr->first_stamp);
- if (sec < 0) sec = 0;
- seq_printf(m," date- %02d:%02d:%02d\t%s\n",
- sec/3600, sec%3600/60, sec%60, hrec);
- }
- if (id == -1)
- break;
- }
-stop_output:
- rc = 0;
-free_fail:
- kfree(gdtcmd);
- kfree(estr);
- return rc;
-}
-
-static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
-{
- unsigned long flags;
- int i;
- struct scsi_cmnd *scp;
- struct gdth_cmndinfo *cmndinfo;
- u8 b, t;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (i = 0; i < GDTH_MAXCMDS; ++i) {
- scp = ha->cmd_tab[i].cmnd;
- cmndinfo = gdth_cmnd_priv(scp);
-
- b = scp->device->channel;
- t = scp->device->id;
- if (!SPECIAL_SCP(scp) && t == (u8)id &&
- b == (u8)busnum) {
- cmndinfo->wait_for_completion = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- while (!cmndinfo->wait_for_completion)
- barrier();
- spin_lock_irqsave(&ha->smp_lock, flags);
- }
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
deleted file mode 100644
index 4cc5377cb92e..000000000000
--- a/drivers/scsi/gdth_proc.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GDTH_PROC_H
-#define _GDTH_PROC_H
-
-/* gdth_proc.h
- * $Id: gdth_proc.h,v 1.16 2004/01/14 13:09:01 achim Exp $
- */
-
-int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info);
-
-static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
- int length, gdth_ha_str *ha);
-
-static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-
-#endif
-
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 11df0eca0293..7d56a236a011 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -9,11 +9,14 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "gvp11.h"
@@ -23,8 +26,12 @@
struct gvp11_hostdata {
struct WD33C93_hostdata wh;
struct gvp11_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff))
+
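
As a worked example of TO_DMA_MASK() (values illustrative): for a transfer restriction mask of 0xff000000, i.e. only the low 24 address bits are DMA-capable, TO_DMA_MASK(0xff000000) == ~0x00000000ff000000ULL == 0xffffffff00ffffffULL, whose low 32 bits describe the addressable range later passed to dma_set_mask_and_coherent().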
static irqreturn_t gvp11_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -50,18 +57,35 @@ void gvp11_setup(char *str, int *ints)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ dma_addr_t addr;
int bank_mask;
static int scsi_alloc_out_of_range = 0;
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
+
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
wh->dma_bounce_buffer =
@@ -83,10 +107,32 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev,
+ wh->dma_bounce_buffer,
+ wh->dma_bounce_len,
+ DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ }
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
/* fall back to Chip RAM if address out of range */
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
kfree(wh->dma_bounce_buffer);
@@ -104,15 +150,19 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+ /* chip RAM can be mapped to phys. address directly */
+ addr = virt_to_phys(wh->dma_bounce_buffer);
+ /* no need to flush/invalidate cache */
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
+ /* finally, we have a usable mapping (otherwise we fell back to PIO) */
+ scsi_pointer->dma_handle = addr;
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
- }
}
/* setup dma direction */
@@ -125,13 +175,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care of it */
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
if (bank_mask)
@@ -147,6 +191,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
@@ -156,11 +201,16 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* remove write bit from CONTROL bits */
regs->CNTR = GVP11_DMAC_INT_ENABLE;
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
kfree(wh->dma_bounce_buffer);
@@ -186,6 +236,7 @@ static struct scsi_host_template gvp11_scsi_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int check_wd33c93(struct gvp11_scsiregs *regs)
@@ -281,6 +332,13 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
default_dma_xfer_mask = ent->driver_data;
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(default_dma_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(default_dma_xfer_mask));
+ return -ENODEV;
+ }
+
/*
* Rumors state that some GVP ram boards use the same product
* code as the SCSI controllers. Therefore if the board-size
@@ -321,9 +379,16 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
- if (gvp11_xfer_mask)
+ if (gvp11_xfer_mask) {
hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
- else
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(gvp11_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(gvp11_xfer_mask));
+ error = -ENODEV;
+ goto fail_check_or_alloc;
+ }
+ } else
hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
hdata->wh.no_sync = 0xff;
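
A minimal, hedged sketch of the streaming-DMA pattern this conversion adopts (helper names illustrative, not the driver's actual functions): map before the transfer, check for mapping failure, and unmap on completion.

#include <linux/dma-mapping.h>

static int map_for_dma(struct device *dev, void *buf, size_t len,
                       enum dma_data_direction dir, dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, *handle))
		return -EIO;	/* caller falls back to PIO, as dma_setup() does */
	return 0;
}

static void unmap_after_dma(struct device *dev, dma_addr_t handle,
                            size_t len, enum dma_data_direction dir)
{
	/* paired teardown, as dma_stop() now performs */
	dma_unmap_single(dev, handle, len, dir);
}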
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 90a17452a50d..4ba3a8eadb77 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -6,6 +6,7 @@ config SCSI_HISI_SAS
select SCSI_SAS_LIBSAS
select BLK_DEV_INTEGRITY
depends on ATA
+ select SATA_HOST
help
This driver supports HiSilicon's SAS HBA, including support based
on platform device
@@ -14,5 +15,12 @@ config SCSI_HISI_SAS_PCI
tristate "HiSilicon SAS on PCI bus"
depends on SCSI_HISI_SAS
depends on PCI
+ depends on ACPI
help
This driver supports HiSilicon's SAS HBA based on PCI device
+
+config SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE
+ bool "HiSilicon SAS debugging default enable"
+ depends on SCSI_HISI_SAS
+ help
+ Say Y here to enable debugfs support for SCSI_HISI_SAS by default.
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2bdd64648ef0..9aebf4a26b13 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,10 +8,13 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
@@ -19,6 +22,7 @@
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/timer.h>
@@ -30,8 +34,10 @@
#define HISI_SAS_QUEUE_SLOTS 4096
#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
-#define HISI_SAS_RESET_BIT 0
+#define HISI_SAS_RESETTING_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_PM_BIT 2
+#define HISI_SAS_HW_FAULT_BIT 3
#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
#define HISI_SAS_RESERVED_IPTT 96
#define HISI_SAS_UNRESERVED_IPTT \
@@ -39,6 +45,7 @@
#define HISI_SAS_IOST_ITCT_CACHE_NUM 64
#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
+#define HISI_SAS_FIFO_DATA_DW_SIZE 32
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -84,8 +91,8 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
-#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
-#define CLEAR_ITCT_TIMEOUT 20
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ)
+#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
struct hisi_hba;
@@ -146,9 +153,20 @@ enum hisi_sas_bit_err_type {
enum hisi_sas_phy_event {
HISI_PHYE_PHY_UP = 0U,
HISI_PHYE_LINK_RESET,
+ HISI_PHYE_PHY_UP_PM,
HISI_PHYES_NUM,
};
+struct hisi_sas_debugfs_fifo {
+ u32 signal_sel;
+ u32 dump_msk;
+ u32 dump_mode;
+ u32 trigger;
+ u32 trigger_msk;
+ u32 trigger_mode;
+ u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE];
+};
+
struct hisi_sas_phy {
struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
@@ -169,7 +187,11 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ int wait_phyup_cnt;
atomic_t down_cnt;
+
+ /* Trace FIFO */
+ struct hisi_sas_debugfs_fifo fifo;
};
struct hisi_sas_port {
@@ -207,13 +229,6 @@ struct hisi_sas_device {
spinlock_t lock; /* For protecting slots */
};
-struct hisi_sas_tmf_task {
- int force_phy;
- int phy_id;
- u8 tmf;
- u16 tag_of_task_to_be_managed;
-};
-
struct hisi_sas_slot {
struct list_head entry;
struct list_head delivery;
@@ -232,31 +247,13 @@ struct hisi_sas_slot {
dma_addr_t cmd_hdr_dma;
struct timer_list internal_abort_timer;
bool is_internal;
- struct hisi_sas_tmf_task *tmf;
+ struct sas_tmf_task *tmf;
/* Do not reorder/change members after here */
void *buf;
dma_addr_t buf_dma;
u16 idx;
};
-#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
-
-struct hisi_sas_debugfs_reg_lu {
- char *name;
- int off;
-};
-
-struct hisi_sas_debugfs_reg {
- const struct hisi_sas_debugfs_reg_lu *lu;
- int count;
- int base_off;
- union {
- u32 (*read_global_reg)(struct hisi_hba *hisi_hba, u32 off);
- u32 (*read_port_reg)(struct hisi_hba *hisi_hba, int port,
- u32 off);
- };
-};
-
struct hisi_sas_iost_itct_cache {
u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ];
};
@@ -273,8 +270,42 @@ enum hisi_sas_debugfs_cache_type {
HISI_SAS_IOST_CACHE,
};
+enum hisi_sas_debugfs_bist_ffe_cfg {
+ FFE_SAS_1_5_GBPS,
+ FFE_SAS_3_0_GBPS,
+ FFE_SAS_6_0_GBPS,
+ FFE_SAS_12_0_GBPS,
+ FFE_RESV,
+ FFE_SATA_1_5_GBPS,
+ FFE_SATA_3_0_GBPS,
+ FFE_SATA_6_0_GBPS,
+ FFE_CFG_MAX
+};
+
+enum hisi_sas_debugfs_bist_fixed_code {
+ FIXED_CODE,
+ FIXED_CODE_1,
+ FIXED_CODE_MAX
+};
+
+enum {
+ HISI_SAS_BIST_CODE_MODE_PRBS7,
+ HISI_SAS_BIST_CODE_MODE_PRBS23,
+ HISI_SAS_BIST_CODE_MODE_PRBS31,
+ HISI_SAS_BIST_CODE_MODE_JTPAT,
+ HISI_SAS_BIST_CODE_MODE_CJTPAT,
+ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
+ HISI_SAS_BIST_CODE_MODE_TRAIN,
+ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
+ HISI_SAS_BIST_CODE_MODE_HFTP,
+ HISI_SAS_BIST_CODE_MODE_MFTP,
+ HISI_SAS_BIST_CODE_MODE_LFTP,
+ HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
+};
+
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
+ int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -289,8 +320,7 @@ struct hisi_sas_hw {
void (*prep_stp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*prep_abort)(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort);
+ struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -311,17 +341,9 @@ struct hisi_sas_hw {
u8 reg_index, u8 reg_count, u8 *write_data);
void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
int delay_ms, int timeout_ms);
- void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
- void (*snapshot_restore)(struct hisi_hba *hisi_hba);
- int (*set_bist)(struct hisi_hba *hisi_hba, bool enable);
- void (*read_iost_itct_cache)(struct hisi_hba *hisi_hba,
- enum hisi_sas_debugfs_cache_type type,
- u32 *cache);
+ void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
int complete_hdr_size;
struct scsi_host_template *sht;
-
- const struct hisi_sas_debugfs_reg *debugfs_reg_array[DEBUGFS_REGS_NUM];
- const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
@@ -381,6 +403,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
+ int *irq_map; /* v2 hw */
+
int n_phy;
spinlock_t lock;
struct semaphore sem;
@@ -431,7 +455,6 @@ struct hisi_hba {
u32 intr_coal_count; /* Interrupt count to coalesce */
int cq_nvecs;
- unsigned int *reply_map;
/* bist */
enum sas_linkrate debugfs_bist_linkrate;
@@ -440,6 +463,8 @@ struct hisi_hba {
int debugfs_bist_mode;
u32 debugfs_bist_cnt;
int debugfs_bist_enable;
+ u32 debugfs_bist_ffe[HISI_SAS_MAX_PHYS][FFE_CFG_MAX];
+ u32 debugfs_bist_fixed_code[FIXED_CODE_MAX];
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
@@ -457,6 +482,7 @@ struct hisi_hba {
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
+ struct dentry *debugfs_fifo_dentry;
};
/* Generic HW DMA host memory structures */
@@ -615,12 +641,15 @@ extern int hisi_sas_probe(struct platform_device *pdev,
extern int hisi_sas_remove(struct platform_device *pdev);
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
+extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
int enable);
-extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
+extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
+ gfp_t gfp_flags);
+extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -635,7 +664,4 @@ extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
-extern void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba);
-extern void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba);
-extern void hisi_sas_debugfs_work_handler(struct work_struct *work);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9a6deb21fe4d..699b07abb6b0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,12 +10,6 @@
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
-static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
- u8 *lun, struct hisi_sas_tmf_task *tmf);
-static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
@@ -23,6 +17,10 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
+struct hisi_sas_internal_abort_data {
+ bool rst_ha_timeout; /* reset the HA for timeout */
+};
+
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
switch (fis->command) {
@@ -158,7 +156,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- clear_bit(slot_idx, bitmap);
+ __clear_bit(slot_idx, bitmap);
}
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
@@ -175,7 +173,7 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- set_bit(slot_idx, bitmap);
+ __set_bit(slot_idx, bitmap);
}
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
@@ -185,7 +183,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
void *bitmap = hisi_hba->slot_index_tags;
if (scsi_cmnd)
- return scsi_cmnd->request->tag;
+ return scsi_cmd_to_rq(scsi_cmnd)->tag;
spin_lock(&hisi_hba->lock);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
@@ -206,14 +204,6 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
return index;
}
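
The tag bitmap is now written only with hisi_hba->lock held (or, for SCSI commands, the tag comes straight from the block layer), so the atomic set_bit()/clear_bit() helpers above can be relaxed to their non-atomic __set_bit()/__clear_bit() variants. A minimal sketch of the pattern, with hba/bitmap/nbits/start as placeholder names rather than driver fields:

	spin_lock(&hba->lock);
	idx = find_next_zero_bit(bitmap, nbits, start);
	if (idx < nbits)
		__set_bit(idx, bitmap);	/* non-atomic: the spinlock is the only serialization needed */
	spin_unlock(&hba->lock);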
-static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->slot_index_count; ++i)
- hisi_sas_slot_index_clear(hisi_hba, i);
-}
-
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@@ -229,17 +219,23 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- struct sas_ssp_task *ssp_task = &task->ssp_task;
- struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+ if (slot->n_elem) {
+ if (task->task_proto & SAS_PROTOCOL_SSP)
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ else
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ }
+ if (slot->n_elem_dif) {
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
- if (slot->n_elem_dif)
dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
scsi_prot_sg_count(scsi_cmnd),
task->data_dir);
+ }
}
}
@@ -272,36 +268,29 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
}
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
- hisi_hba->hw->prep_abort(hisi_hba, slot,
- device_id, abort_flag, tag_to_abort);
+ hisi_hba->hw->prep_abort(hisi_hba, slot);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
- struct sas_task *task, int n_elem,
- int n_elem_req)
+ struct sas_task *task, int n_elem)
{
struct device *dev = hisi_hba->dev;
- if (!sas_protocol_ata(task->task_proto)) {
+ if (!sas_protocol_ata(task->task_proto) && n_elem) {
if (task->num_scatter) {
- if (n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(dev, task->scatter, task->num_scatter,
+ task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- if (n_elem_req)
- dma_unmap_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
}
}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
- struct sas_task *task, int *n_elem,
- int *n_elem_req)
+ struct sas_task *task, int *n_elem)
{
struct device *dev = hisi_hba->dev;
int rc;
@@ -319,9 +308,9 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (!*n_elem_req) {
+ *n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!*n_elem) {
rc = -ENOMEM;
goto prep_out;
}
@@ -334,7 +323,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
}
if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
*n_elem);
rc = -EINVAL;
goto err_out_dma_unmap;
@@ -343,8 +332,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
- hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
- *n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
return rc;
}
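
Note that dma_map_sg() reports failure by returning 0, never a negative errno, which is why both mapping branches above convert an empty result into -ENOMEM. Folding n_elem_req into n_elem is safe because the two mappings are mutually exclusive: an SSP task maps its scatterlist, while an SMP task maps only the single request frame. A condensed sketch of the convention, with sg/count/dir as placeholders:

	nents = dma_map_sg(dev, sg, count, dir);
	if (!nents)		/* 0 means the mapping failed */
		return -ENOMEM;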
@@ -402,89 +390,17 @@ err_out_dif_dma_unmap:
return rc;
}
-static int hisi_sas_task_prep(struct sas_task *task,
- struct hisi_sas_dq **dq_pointer,
- bool is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static
+void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_dq *dq,
+ struct hisi_sas_device *sas_dev)
{
- struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
- int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
- struct hisi_sas_dq *dq;
- unsigned long flags;
+ struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ int dlvry_queue_slot, dlvry_queue;
+ struct sas_task *task = slot->task;
int wr_q_index;
- if (DEV_IS_GONE(sas_dev)) {
- if (sas_dev)
- dev_info(dev, "task prep: device %d not ready\n",
- sas_dev->device_id);
- else
- dev_info(dev, "task prep: device %016llx not ready\n",
- SAS_ADDR(device->sas_addr));
-
- return -ECOMM;
- }
-
- if (hisi_hba->reply_map) {
- int cpu = raw_smp_processor_id();
- unsigned int dq_index = hisi_hba->reply_map[cpu];
-
- *dq_pointer = dq = &hisi_hba->dq[dq_index];
- } else {
- *dq_pointer = dq = sas_dev->dq;
- }
-
- port = to_hisi_sas_port(sas_port);
- if (port && !port->port_attached) {
- dev_info(dev, "task prep: %s port%d not attach device\n",
- (dev_is_sata(device)) ?
- "SATA/STP" : "SAS",
- device->port->id);
-
- return -ECOMM;
- }
-
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
- if (rc < 0)
- goto prep_out;
-
- if (!sas_protocol_ata(task->task_proto)) {
- rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
- if (rc < 0)
- goto err_out_dma_unmap;
- }
-
- if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
- else {
- struct scsi_cmnd *scsi_cmnd = NULL;
-
- if (task->uldd_task) {
- struct ata_queued_cmd *qc;
-
- if (dev_is_sata(device)) {
- qc = task->uldd_task;
- scsi_cmnd = qc->scsicmd;
- } else {
- scsi_cmnd = task->uldd_task;
- }
- }
- rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
- }
- if (rc < 0)
- goto err_out_dif_dma_unmap;
-
- slot_idx = rc;
- slot = &hisi_hba->slot_info[slot_idx];
-
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
@@ -498,16 +414,11 @@ static int hisi_sas_task_prep(struct sas_task *task,
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->n_elem_dif = n_elem_dif;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->task = task;
- slot->port = port;
- slot->tmf = tmf;
- slot->is_internal = is_tmf;
+
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -524,45 +435,39 @@ static int hisi_sas_task_prep(struct sas_task *task,
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_STP_ALL:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
- default:
- dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
- task->task_proto);
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ hisi_sas_task_prep_abort(hisi_hba, slot);
break;
+ default:
+ return;
}
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- ++(*pass);
+ /* Make slot memories observable before marking as ready */
+ smp_wmb();
WRITE_ONCE(slot->ready, 1);
- return 0;
-
-err_out_dif_dma_unmap:
- if (!sas_protocol_ata(task->task_proto))
- hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
-err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
-prep_out:
- dev_err(dev, "task prep: failed[%d]!\n", rc);
- return rc;
+ spin_lock(&dq->lock);
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock(&dq->lock);
}
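
The smp_wmb() publishes all slot fields before slot->ready is raised; whichever context polls the flag needs a pairing read barrier before touching the slot. The consumer half below is a sketch of that pairing, not the driver's actual completion code:

	/* producer, as in hisi_sas_task_deliver() above */
	smp_wmb();			/* slot writes visible before the flag */
	WRITE_ONCE(slot->ready, 1);

	/* consumer (sketch) */
	if (READ_ONCE(slot->ready)) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		/* slot->cmd_hdr etc. are now safe to read */
	}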
-static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
- bool is_tmf, struct hisi_sas_tmf_task *tmf)
+static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- u32 rc;
- u32 pass = 0;
- struct hisi_hba *hisi_hba;
- struct device *dev;
+ int n_elem = 0, n_elem_dif = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ bool internal_abort = sas_is_internal_abort(task);
+ struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq = NULL;
+ struct hisi_sas_port *port;
+ struct hisi_hba *hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct device *dev;
+ int rc;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
@@ -573,7 +478,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
* libsas will use dev->port, should
* not call task_done for sata
*/
- if (device->dev_type != SAS_SATA_DEV)
+ if (device->dev_type != SAS_SATA_DEV && !internal_abort)
task->task_done(task);
return -ECOMM;
}
@@ -581,45 +486,139 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- /*
- * For IOs from upper layer, it may already disable preempt
- * in the IO path, if disable preempt again in down(),
- * function schedule() will report schedule_bug(), so check
- * preemptible() before goto down().
- */
- if (!preemptible())
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ case SAS_PROTOCOL_SMP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_STP_ALL:
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
+ if (!gfpflags_allow_blocking(gfp_flags))
+ return -EINVAL;
+
+ down(&hisi_hba->sem);
+ up(&hisi_hba->sem);
+ }
+
+ if (DEV_IS_GONE(sas_dev)) {
+ if (sas_dev)
+ dev_info(dev, "task prep: device %d not ready\n",
+ sas_dev->device_id);
+ else
+ dev_info(dev, "task prep: device %016llx not ready\n",
+ SAS_ADDR(device->sas_addr));
+
+ return -ECOMM;
+ }
+
+ port = to_hisi_sas_port(sas_port);
+ if (!port->port_attached) {
+ dev_info(dev, "task prep: %s port%d not attached to device\n",
+ dev_is_sata(device) ? "SATA/STP" : "SAS",
+ device->port->id);
+
+ return -ECOMM;
+ }
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (scmd) {
+ unsigned int dq_index;
+ u32 blk_tag;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
+ dq = &hisi_hba->dq[dq_index];
+ } else {
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ int queue = qmap->mq_map[raw_smp_processor_id()];
+
+ dq = &hisi_hba->dq[queue];
+ }
+ break;
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ if (!hisi_hba->hw->prep_abort)
+ return TMF_RESP_FUNC_FAILED;
+
+ if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
+ return -EIO;
+
+ hisi_hba = dev_to_hisi_hba(device);
+
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
- down(&hisi_hba->sem);
- up(&hisi_hba->sem);
+ port = to_hisi_sas_port(sas_port);
+ dq = &hisi_hba->dq[task->abort_task.qid];
+ break;
+ default:
+ dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
+ task->task_proto);
+ return -EINVAL;
}
- /* protect task_prep and start_delivery sequence */
- rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
- if (rc)
- dev_err(dev, "task exec: failed[%d]!\n", rc);
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
+ if (rc < 0)
+ goto prep_out;
- if (likely(pass)) {
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
+ if (!sas_protocol_ata(task->task_proto)) {
+ rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
+ if (rc < 0)
+ goto err_out_dma_unmap;
}
+ if (!internal_abort && hisi_hba->hw->slot_index_alloc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
+
+ if (rc < 0)
+ goto err_out_dif_dma_unmap;
+
+ slot = &hisi_hba->slot_info[rc];
+ slot->n_elem = n_elem;
+ slot->n_elem_dif = n_elem_dif;
+ slot->task = task;
+ slot->port = port;
+
+ slot->tmf = task->tmf;
+ slot->is_internal = !!task->tmf || internal_abort;
+
+ /* protect task_prep and start_delivery sequence */
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);
+
+ return 0;
+
+err_out_dif_dma_unmap:
+ if (!sas_protocol_ata(task->task_proto))
+ hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
+err_out_dma_unmap:
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem);
+prep_out:
+ dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
}
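
blk_mq_unique_tag() packs the hardware-queue index and the per-queue tag into a single 32-bit value, which lets the driver pick the delivery queue that matches the hctx owning the command (and hence its managed interrupt). How the value decomposes, as a sketch:

	u32 utag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
	u16 hwq  = blk_mq_unique_tag_to_hwq(utag);	/* upper 16 bits: hctx index */
	u16 tag  = blk_mq_unique_tag_to_tag(utag);	/* lower 16 bits: per-queue tag */

	dq = &hisi_hba->dq[hwq];

For internal commands with no scsi_cmnd, the fallback above maps the current CPU through the tag set's HCTX_TYPE_DEFAULT map instead.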
-static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
+ gfp_t gfp_flags)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha;
if (!phy->phy_attached)
return;
- sas_ha = &hisi_hba->sha;
- sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
struct sas_phy *sphy = sas_phy->phy;
@@ -647,7 +646,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
}
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
- sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
@@ -683,25 +682,39 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
return sas_dev;
}
+static void hisi_sas_tmf_aborted(struct sas_task *task)
+{
+ struct hisi_sas_slot *slot = task->lldd_task;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * sync irq to avoid freeing the task
+ * before it is used in I/O completion
+ */
+ synchronize_irq(cq->irq_no);
+ slot->task = NULL;
+ }
+}
+
#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
int rc = TMF_RESP_FUNC_COMPLETE;
struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
int retry = HISI_SAS_DISK_RECOVER_CNT;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = hisi_hba->dev;
- struct sas_phy *local_phy;
switch (device->dev_type) {
case SAS_END_DEVICE:
int_to_scsilun(0, &lun);
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
while (retry-- > 0) {
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
- &tmf_task);
+ rc = sas_clear_task_set(device, lun.scsi_lun);
if (rc == TMF_RESP_FUNC_COMPLETE) {
hisi_sas_release_task(hisi_hba, device);
break;
@@ -713,30 +726,18 @@ static int hisi_sas_init_device(struct domain_device *device)
case SAS_SATA_PM_PORT:
case SAS_SATA_PENDING:
/*
- * send HARD RESET to clear previous affiliation of
- * STP target port
+ * If an expander is swapped while a SATA disk is attached then
+ * we should issue a hard reset to clear the previous affiliation
+ * of the STP target port; see SPL (chapter 6.19.4).
+ *
+ * However we don't need to issue a hard reset here for these
+ * reasons:
+ * a. When probing the device, libsas/libata already issues a
+ * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+ * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
+ * to issue a hard reset by checking the dev status (== INIT).
+ * b. When resetting the controller, this is simply unnecessary.
*/
- local_phy = sas_get_local_phy(device);
- if (!scsi_is_sas_phy_local(local_phy) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
- unsigned long deadline = ata_deadline(jiffies, 20000);
- struct sata_device *sata_dev = &device->sata_dev;
- struct ata_host *ata_host = sata_dev->ata_host;
- struct ata_port_operations *ops = ata_host->ops;
- struct ata_port *ap = sata_dev->ap;
- struct ata_link *link;
- unsigned int classes;
-
- ata_for_each_link(link, ap, EDGE)
- rc = ops->hardreset(link, &classes,
- deadline);
- }
- sas_put_local_phy(local_phy);
- if (rc) {
- dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
- return rc;
- }
-
while (retry-- > 0) {
rc = hisi_sas_softreset_ata_disk(device);
if (!rc)
@@ -750,6 +751,24 @@ static int hisi_sas_init_device(struct domain_device *device)
return rc;
}
+int hisi_sas_slave_alloc(struct scsi_device *sdev)
+{
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ struct hisi_sas_device *sas_dev = ddev->lldd_dev;
+ int rc;
+
+ rc = sas_slave_alloc(sdev);
+ if (rc)
+ return rc;
+
+ rc = hisi_sas_init_device(ddev);
+ if (rc)
+ return rc;
+ sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
+
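
With device initialisation moved out of the dev_found path (see the hunk removing it below), each hw variant presumably hooks the new helper into its scsi_host_template; a sketch, with an illustrative template name:

	static struct scsi_host_template sht_v3_hw = {
		/* ... */
		.slave_alloc		= hisi_sas_slave_alloc,
		.slave_configure	= hisi_sas_slave_configure,
		/* ... */
	};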
static int hisi_sas_dev_found(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
@@ -796,10 +815,6 @@ static int hisi_sas_dev_found(struct domain_device *device)
dev_info(dev, "dev[%d:%x] found\n",
sas_dev->device_id, sas_dev->dev_type);
- rc = hisi_sas_init_device(device);
- if (rc)
- goto err_out;
- sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
err_out:
@@ -843,17 +858,24 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
-static void hisi_sas_phyup_work(struct work_struct *work)
+static void hisi_sas_phyup_work_common(struct work_struct *work,
+ enum hisi_sas_phy_event event)
{
struct hisi_sas_phy *phy =
- container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
+ container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
+ phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
- hisi_sas_bytes_dmaed(hisi_hba, phy_no);
+ hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
+}
+
+static void hisi_sas_phyup_work(struct work_struct *work)
+{
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}
static void hisi_sas_linkreset_work(struct work_struct *work)
@@ -865,9 +887,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work)
hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
+static void hisi_sas_phyup_pm_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
+ pm_runtime_put_sync(dev);
+}
+
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+ [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};
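
Each entry maps a hisi_sas_phy_event to the pre-initialised work item in phy->works[]; the existing dispatcher consumes the table roughly like this sketch:

	bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				       enum hisi_sas_phy_event event)
	{
		struct hisi_hba *hisi_hba = phy->hisi_hba;

		if (WARN_ON(event >= HISI_PHYES_NUM))
			return false;
		return queue_work(hisi_hba->wq, &phy->works[event]);
	}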
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
@@ -893,17 +927,38 @@ static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
+#define HISI_SAS_WAIT_PHYUP_RETRIES 10
+
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ unsigned long flags;
+
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->phy_attached) {
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return;
+ }
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
- phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
- add_timer(&phy->timer);
+ if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
+ phy->wait_phyup_cnt++;
+ phy->timer.expires = jiffies +
+ HISI_SAS_WAIT_PHYUP_TIMEOUT;
+ add_timer(&phy->timer);
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return;
+ }
+
+ dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+ phy_no, phy->wait_phyup_cnt);
+ phy->wait_phyup_cnt = 0;
}
+ spin_unlock_irqrestore(&phy->lock, flags);
}
+
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
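
The expiry arithmetic also changes from HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ to the bare macro, which assumes the macro is redefined elsewhere in the patch to already be in jiffies. The bounded-retry shape condenses to the following sketch (mod_timer() used here for brevity in place of the expires/add_timer pair):

	spin_lock_irqsave(&phy->lock, flags);
	if (!timer_pending(&phy->timer) &&
	    phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
		phy->wait_phyup_cnt++;
		mod_timer(&phy->timer, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT);
	}
	spin_unlock_irqrestore(&phy->lock, flags);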
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
@@ -995,8 +1050,7 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1041,6 +1095,29 @@ static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
hisi_hba->hw->dereg_device(hisi_hba, device);
}
+static int
+hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
+ bool rst_ha_timeout)
+{
+ struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
+ struct domain_device *device = sas_dev->sas_device;
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+ int i, rc;
+
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ const struct cpumask *mask = cq->irq_mask;
+
+ if (mask && !cpumask_intersects(cpu_online_mask, mask))
+ continue;
+ rc = sas_execute_internal_abort_dev(device, i, &data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static void hisi_sas_dev_gone(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1052,9 +1129,8 @@ static void hisi_sas_dev_gone(struct domain_device *device)
sas_dev->device_id, sas_dev->dev_type);
down(&hisi_hba->sem);
- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort_dev(sas_dev, true);
hisi_sas_dereg_device(hisi_hba, device);
@@ -1072,11 +1148,6 @@ static void hisi_sas_dev_gone(struct domain_device *device)
up(&hisi_hba->sem);
}
-static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
-}
-
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
@@ -1115,9 +1186,18 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
+ struct hisi_sas_phy *phy = container_of(sas_phy,
+ struct hisi_sas_phy, sas_phy);
struct sas_ha_struct *sas_ha = sas_phy->ha;
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ DECLARE_COMPLETION_ONSTACK(completion);
int phy_no = sas_phy->id;
+ u8 sts = phy->phy_attached;
+ int ret = 0;
+
+ down(&hisi_hba->sem);
+ phy->reset_completion = &completion;
switch (func) {
case PHY_FUNC_HARD_RESET:
@@ -1132,163 +1212,37 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_DISABLE:
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- break;
+ goto out;
case PHY_FUNC_SET_LINK_RATE:
- return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ break;
+
case PHY_FUNC_GET_EVENTS:
if (hisi_hba->hw->get_events) {
hisi_hba->hw->get_events(hisi_hba, phy_no);
- break;
+ goto out;
}
- /* fallthru */
+ fallthrough;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
- return 0;
-}
-
-static void hisi_sas_task_done(struct sas_task *task)
-{
- del_timer(&task->slow_task->timer);
- complete(&task->slow_task->completion);
-}
-static void hisi_sas_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
- bool is_completed = true;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- is_completed = false;
+ if (sts && !wait_for_completion_timeout(&completion,
+ HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
+ dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
+ phy_no, func);
+ if (phy->in_reset)
+ ret = -ETIMEDOUT;
}
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- if (!is_completed)
- complete(&task->slow_task->completion);
-}
-
-#define TASK_TIMEOUT 20
-#define TASK_RETRY 3
-#define INTERNAL_ABORT_TIMEOUT 6
-static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
- void *parameter, u32 para_len,
- struct hisi_sas_tmf_task *tmf)
-{
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
- struct device *dev = hisi_hba->dev;
- struct sas_task *task;
- int res, retry;
-
- for (retry = 0; retry < TASK_RETRY; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = device;
- task->task_proto = device->tproto;
-
- if (dev_is_sata(device)) {
- task->ata_task.device_control_reg_update = 1;
- memcpy(&task->ata_task.fis, parameter, para_len);
- } else {
- memcpy(&task->ssp_task, parameter, para_len);
- }
- task->task_done = hisi_sas_task_done;
-
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
- add_timer(&task->slow_task->timer);
-
- res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- dev_err(dev, "abort tmf: executing internal task failed: %d\n",
- res);
- goto ex_err;
- }
-
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- dev_err(dev, "abort tmf: TMF task timeout and not done\n");
- if (slot) {
- struct hisi_sas_cq *cq =
- &hisi_hba->cq[slot->dlvry_queue];
- /*
- * sync irq to avoid free'ing task
- * before using task in IO completion
- */
- synchronize_irq(cq->irq_no);
- slot->task = NULL;
- }
-
- goto ex_err;
- } else
- dev_err(dev, "abort tmf: TMF task timeout\n");
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_SUCC) {
- res = TMF_RESP_FUNC_SUCC;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun
- */
- dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
- SAS_ADDR(device->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- res = task->task_status.residual;
- break;
- }
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- dev_warn(dev, "abort tmf: blocked task error\n");
- res = -EMSGSIZE;
- break;
- }
+out:
+ phy->reset_completion = NULL;
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_OPEN_REJECT) {
- dev_warn(dev, "abort tmf: open reject failed\n");
- res = -EIO;
- } else {
- dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
- SAS_ADDR(device->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- }
- sas_free_task(task);
- task = NULL;
- }
-ex_err:
- if (retry == TASK_RETRY)
- dev_warn(dev, "abort tmf: executing internal task failed!\n");
- sas_free_task(task);
- return res;
+ up(&hisi_hba->sem);
+ return ret;
}
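
The on-stack completion is signalled from the phy-up path via phy->reset_completion; the interrupt-side counterpart lives in the per-version hw files and looks roughly like this sketch:

	/* phy-up handler side (sketch) */
	spin_lock(&phy->lock);
	phy->phy_attached = 1;
	if (phy->reset_completion)
		complete(phy->reset_completion);
	spin_unlock(&phy->lock);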
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
@@ -1313,13 +1267,12 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
int rc = TMF_RESP_FUNC_FAILED;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- int s = sizeof(struct host_to_dev_fis);
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ rc = sas_execute_ata_cmd(device, fis, -1);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
@@ -1329,13 +1282,14 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, NULL);
+ rc = sas_execute_ata_cmd(device, fis, -1);
if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_err(dev, "ata disk de-reset failed\n");
+ dev_err(dev, "ata disk %016llx de-reset failed\n",
+ SAS_ADDR(device->sas_addr));
}
} else {
- dev_err(dev, "ata disk reset failed\n");
+ dev_err(dev, "ata disk %016llx reset failed\n",
+ SAS_ADDR(device->sas_addr));
}
if (rc == TMF_RESP_FUNC_COMPLETE)
@@ -1344,20 +1298,6 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
return rc;
}
-static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
- u8 *lun, struct hisi_sas_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
-
- if (!(device->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy(ssp_task.LUN, lun, 8);
-
- return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
- sizeof(ssp_task), tmf);
-}
-
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
@@ -1378,11 +1318,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
sas_port = device->port;
port = to_hisi_sas_port(sas_port);
+ spin_lock(&sas_port->phy_list_lock);
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
if (state & BIT(sas_phy->id)) {
phy = sas_phy->lldd_phy;
break;
}
+ spin_unlock(&sas_port->phy_list_lock);
if (phy) {
port->id = phy->port_id;
@@ -1420,14 +1362,20 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
_sas_port = sas_port;
if (dev_is_expander(dev->dev_type))
- sas_ha->notify_port_event(sas_phy,
- PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD,
+ GFP_KERNEL);
}
} else {
- hisi_sas_phy_down(hisi_hba, phy_no, 0);
+ hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
}
-
}
+ /*
+ * Ensure any bcast events are processed prior to the async nexus
+ * reset calls issued from hisi_sas_clear_nexus_ha() ->
+ * hisi_sas_async_I_T_nexus_reset()
+ */
+ sas_drain_work(sas_ha);
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
@@ -1451,31 +1399,25 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct asd_sas_port *sas_port,
struct domain_device *device)
{
- struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
struct ata_port *ap = device->sata_dev.ap;
struct device *dev = hisi_hba->dev;
- int s = sizeof(struct host_to_dev_fis);
int rc = TMF_RESP_FUNC_FAILED;
- struct asd_sas_phy *sas_phy;
struct ata_link *link;
u8 fis[20] = {0};
- u32 state;
+ int i;
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
- if (!(state & BIT(sas_phy->id)))
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ if (!(sas_port->phy_mask & BIT(i)))
continue;
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
- tmf_task.phy_id = sas_phy->id;
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
- &tmf_task);
+ rc = sas_execute_ata_cmd(device, fis, i);
if (rc != TMF_RESP_FUNC_COMPLETE) {
dev_err(dev, "phy%d ata reset failed rc=%d\n",
- sas_phy->id, rc);
+ i, rc);
break;
}
}
@@ -1494,8 +1436,7 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
continue;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0)
dev_err(dev, "STP reject: abort dev failed %d\n", rc);
}
@@ -1526,14 +1467,12 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- down(&hisi_hba->sem);
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
- if (timer_pending(&hisi_hba->timer))
- del_timer_sync(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
@@ -1542,7 +1481,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- u32 state;
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
@@ -1553,29 +1491,36 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
- up(&hisi_hba->sem);
scsi_unblock_requests(shost);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, state);
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
-static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
- struct Scsi_Host *shost = hisi_hba->shost;
- int rc;
-
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
if (!hisi_hba->hw->soft_reset)
return -1;
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ down(&hisi_hba->sem);
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ up(&hisi_hba->sem);
return -1;
+ }
+
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
+
+ return 0;
+}
+
+static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ int rc;
dev_info(dev, "controller resetting...\n");
hisi_sas_controller_reset_prepare(hisi_hba);
@@ -1586,9 +1531,10 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
+ clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_done(hisi_hba);
dev_info(dev, "controller reset complete\n");
@@ -1598,8 +1544,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
static int hisi_sas_abort_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
+ struct hisi_sas_internal_abort_data internal_abort_data = { false };
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba;
@@ -1634,20 +1579,13 @@ static int hisi_sas_abort_task(struct sas_task *task)
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
u16 tag = slot->idx;
int rc2;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
- &tmf_task);
-
- rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ rc = sas_abort_task(task, tag);
+ rc2 = sas_execute_internal_abort_single(device, tag,
+ slot->dlvry_queue, &internal_abort_data);
if (rc2 < 0) {
dev_err(dev, "abort task: internal abort (%d)\n", rc2);
return TMF_RESP_FUNC_FAILED;
@@ -1667,9 +1605,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV,
- 0);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
@@ -1683,8 +1619,9 @@ static int hisi_sas_abort_task(struct sas_task *task)
u32 tag = slot->idx;
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ rc = sas_execute_internal_abort_single(device,
+ tag, slot->dlvry_queue,
+ &internal_abort_data);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
@@ -1704,46 +1641,31 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- struct hisi_sas_tmf_task tmf_task;
int rc;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
-
+ rc = sas_abort_task_set(device, lun);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
return rc;
}
-static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
-{
- struct hisi_sas_tmf_task tmf_task;
- int rc;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
-
- return rc;
-}
-
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
struct sas_phy *local_phy = sas_get_local_phy(device);
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
- DECLARE_COMPLETION_ONSTACK(phyreset);
int rc, reset_type;
if (!local_phy->enabled) {
@@ -1756,8 +1678,11 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy->lock, flags);
phy->in_reset = 1;
- phy->reset_completion = &phyreset;
+ spin_unlock_irqrestore(&phy->lock, flags);
}
reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
@@ -1771,24 +1696,27 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
- int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
- phy->reset_completion = NULL;
phy->in_reset = 0;
spin_unlock_irqrestore(&phy->lock, flags);
/* report PHY down if timed out */
- if (!ret)
- hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
- } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
- /*
- * If in init state, we rely on caller to wait for link to be
- * ready; otherwise, except phy reset is fail, delay.
- */
- if (!rc)
- msleep(2000);
+ if (rc == -ETIMEDOUT)
+ hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
+ return rc;
+ }
+
+ if (rc)
+ return rc;
+
+ /* Remote phy */
+ if (dev_is_sata(device)) {
+ rc = sas_ata_wait_after_reset(device,
+ HISI_SAS_WAIT_PHYUP_TIMEOUT);
+ } else {
+ msleep(2000);
}
return rc;
@@ -1796,26 +1724,45 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int rc;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);
- if (dev_is_sata(device)) {
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
+ struct sas_phy *local_phy;
+
rc = hisi_sas_softreset_ata_disk(device);
- if (rc == TMF_RESP_FUNC_FAILED)
- return TMF_RESP_FUNC_FAILED;
+ switch (rc) {
+ case -ECOMM:
+ rc = -ENODEV;
+ break;
+ case TMF_RESP_FUNC_FAILED:
+ case -EMSGSIZE:
+ case -EIO:
+ local_phy = sas_get_local_phy(device);
+ rc = sas_phy_enable(local_phy, 0);
+ if (!rc) {
+ local_phy->enabled = 0;
+ dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset failure (%d)\n",
+ SAS_ADDR(device->sas_addr), rc);
+ rc = -ENODEV;
+ }
+ sas_put_local_phy(local_phy);
+ break;
+ default:
+ break;
+ }
}
- rc = hisi_sas_debug_I_T_nexus_reset(device);
-
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
@@ -1830,8 +1777,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
int rc = TMF_RESP_FUNC_FAILED;
/* Clear internal IO and then lu reset */
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
@@ -1849,9 +1795,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
hisi_sas_release_task(hisi_hba, device);
sas_put_local_phy(phy);
} else {
- struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
-
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ rc = sas_lu_reset(device, lun);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
}
@@ -1862,17 +1806,31 @@ out:
return rc;
}
+static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
+{
+ struct domain_device *device = data;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ int rc;
+
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_info(hisi_hba->dev, "I_T_nexus reset failed for dev:%016llx rc=%d\n",
+ SAS_ADDR(device->sas_addr), rc);
+}
+
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
- struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- int rc, i;
+ ASYNC_DOMAIN_EXCLUSIVE(async);
+ int i, ret;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (!r.done)
- return TMF_RESP_FUNC_FAILED;
+ if (!r.done) {
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
@@ -1882,36 +1840,27 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
dev_is_expander(device->dev_type))
continue;
- rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
- sas_dev->device_id, rc);
+ async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
+ device, &async);
}
+ async_synchronize_full_domain(&async);
hisi_sas_release_tasks(hisi_hba);
- return TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_COMPLETE;
+out:
+ return ret;
}
static int hisi_sas_query_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
- struct domain_device *device = task->dev;
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = hisi_sas_debug_issue_ssp_tmf(device,
- lun.scsi_lun,
- &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
@@ -1927,223 +1876,48 @@ static int hisi_sas_query_task(struct sas_task *task)
return rc;
}
-static int
-hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
- struct sas_task *task, int abort_flag,
- int task_tag, struct hisi_sas_dq *dq)
+static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
+ void *data)
{
struct domain_device *device = task->dev;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = hisi_hba->dev;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
- struct asd_sas_port *sas_port = device->port;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
- int wr_q_index;
-
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
- return -EINVAL;
-
- if (!device->port)
- return -1;
-
- port = to_hisi_sas_port(sas_port);
-
- /* simply get a slot and send abort command */
- rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
- if (rc < 0)
- goto err_out;
-
- slot_idx = rc;
- slot = &hisi_hba->slot_info[slot_idx];
-
- spin_lock(&dq->lock);
- wr_q_index = dq->wr_point;
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
- list_add_tail(&slot->delivery, &dq->list);
- spin_unlock(&dq->lock);
- spin_lock(&sas_dev->lock);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock(&sas_dev->lock);
-
- dlvry_queue = dq->id;
- dlvry_queue_slot = wr_q_index;
-
- slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->dlvry_queue = dlvry_queue;
- slot->dlvry_queue_slot = dlvry_queue_slot;
- cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
- slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->task = task;
- slot->port = port;
- slot->is_internal = true;
- task->lldd_task = slot;
-
- memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
- memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
- memset(hisi_sas_status_buf_addr_mem(slot), 0,
- sizeof(struct hisi_sas_err_record));
-
- hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
- abort_flag, task_tag);
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- WRITE_ONCE(slot->ready, 1);
- /* send abort command to the chip */
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
-
- return 0;
-
-err_out:
- dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
-
- return rc;
-}
-
-/**
- * _hisi_sas_internal_task_abort -- execute an internal
- * abort command for single IO command or a device
- * @hisi_hba: host controller struct
- * @device: domain device
- * @abort_flag: mode of operation, device or single IO
- * @tag: tag of IO to be aborted (only relevant to single
- * IO mode)
- * @dq: delivery queue for this internal abort command
- */
-static int
-_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device, int abort_flag,
- int tag, struct hisi_sas_dq *dq)
-{
- struct sas_task *task;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = hisi_hba->dev;
- int res;
-
- /*
- * The interface is not realized means this HW don't support internal
- * abort, or don't need to do internal abort. Then here, we return
- * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
- * the internal abort has been executed and returned CQ.
- */
- if (!hisi_hba->hw->prep_abort)
- return TMF_RESP_FUNC_FAILED;
-
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = device;
- task->task_proto = device->tproto;
- task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
- add_timer(&task->slow_task->timer);
-
- res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
- task, abort_flag, tag, dq);
- if (res) {
- del_timer(&task->slow_task->timer);
- dev_err(dev, "internal task abort: executing internal task failed: %d\n",
- res);
- goto exit;
- }
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
-
- /* Internal abort timed out */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- if (slot) {
- struct hisi_sas_cq *cq =
- &hisi_hba->cq[slot->dlvry_queue];
- /*
- * sync irq to avoid free'ing task
- * before using task in IO completion
- */
- synchronize_irq(cq->irq_no);
- slot->task = NULL;
- }
- dev_err(dev, "internal task abort: timeout and not done.\n");
-
- res = -EIO;
- goto exit;
- } else
- dev_err(dev, "internal task abort: timeout.\n");
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
- res = TMF_RESP_FUNC_COMPLETE;
- goto exit;
- }
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_sas_internal_abort_data *timeout = data;
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_SUCC) {
- res = TMF_RESP_FUNC_SUCC;
- goto exit;
- }
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-exit:
- dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
- SAS_ADDR(device->sas_addr), task,
- task->task_status.resp, /* 0 is complete, -1 is undelivered */
- task->task_status.stat);
- sas_free_task(task);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ pr_err("Internal abort: timeout %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ } else {
+ struct hisi_sas_slot *slot = task->lldd_task;
- return res;
-}
+ set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
-static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag)
-{
- struct hisi_sas_slot *slot;
- struct device *dev = hisi_hba->dev;
- struct hisi_sas_dq *dq;
- int i, rc;
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * sync irq to avoid freeing the task
+ * before it is used in I/O completion
+ */
+ synchronize_irq(cq->irq_no);
+ slot->task = NULL;
+ }
- switch (abort_flag) {
- case HISI_SAS_INT_ABT_CMD:
- slot = &hisi_hba->slot_info[tag];
- dq = &hisi_hba->dq[slot->dlvry_queue];
- return _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag, dq);
- case HISI_SAS_INT_ABT_DEV:
- for (i = 0; i < hisi_hba->cq_nvecs; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- const struct cpumask *mask = cq->irq_mask;
-
- if (mask && !cpumask_intersects(cpu_online_mask, mask))
- continue;
- dq = &hisi_hba->dq[i];
- rc = _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag,
- dq);
- if (rc)
- return rc;
+ if (timeout->rst_ha_timeout) {
+ pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
+ SAS_ADDR(device->sas_addr));
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ } else {
+ pr_err("Internal abort: timeout and not done %016llx.\n",
+ SAS_ADDR(device->sas_addr));
}
- break;
- default:
- dev_err(dev, "Unrecognised internal abort flag (%d)\n",
- abort_flag);
- return -EINVAL;
+
+ return true;
}
- return 0;
+ return false;
}
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
@@ -2181,27 +1955,27 @@ static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
spin_unlock_irqrestore(&phy->lock, flags);
}
-void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
+void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
+ gfp_t gfp_flags)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
if (rdy) {
/* Phy down but ready */
- hisi_sas_bytes_dmaed(hisi_hba, phy_no);
+ hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
hisi_sas_port_notify_formed(sas_phy);
} else {
struct hisi_sas_port *port = phy->port;
- if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
}
/* Phy down and not ready */
- sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
sas_phy_disconnected(sas_phy);
if (port) {
@@ -2219,6 +1993,22 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
+{
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct sas_ha_struct *sha = &hisi_hba->sha;
+
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ return;
+
+ if (test_bit(SAS_HA_FROZEN, &sha->state))
+ return;
+
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
+
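
The helper centralises the RESETTING/FROZEN filtering that each hw version's broadcast interrupt handler previously open-coded, so a caller now reduces to roughly the following (handler name illustrative; the hw-specific status ack is elided):

	static irqreturn_t phy_bcast_irq(int irq, void *p)
	{
		struct hisi_sas_phy *phy = p;

		/* ack/clear the hw bcast interrupt status here */
		hisi_sas_phy_bcast(phy);
		return IRQ_HANDLED;
	}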
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
int i;
@@ -2254,13 +2044,14 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_control_phy = hisi_sas_control_phy,
.lldd_abort_task = hisi_sas_abort_task,
.lldd_abort_task_set = hisi_sas_abort_task_set,
- .lldd_clear_aca = hisi_sas_clear_aca,
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
.lldd_write_gpio = hisi_sas_write_gpio,
+ .lldd_tmf_aborted = hisi_sas_tmf_aborted,
+ .lldd_abort_timeout = hisi_sas_internal_abort_timeout,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2410,9 +2201,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->breakpoint)
goto err_out;
- hisi_hba->slot_index_count = max_command_entries;
- s = hisi_hba->slot_index_count / BITS_PER_BYTE;
- hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
+ s = hisi_hba->slot_index_count = max_command_entries;
+ hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
goto err_out;
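
devm_bitmap_zalloc() takes a size in bits and returns a zeroed, device-managed bitmap, so it replaces both the hand-rolled byte arithmetic (which rounded down whenever the count was not a multiple of BITS_PER_BYTE) and the explicit hisi_sas_slot_index_init() clearing loop removed below. Side by side, with tags/nbits as placeholders:

	/* before: size in bytes, computed by hand */
	tags = devm_kzalloc(dev, nbits / BITS_PER_BYTE, GFP_KERNEL);
	/* after: size in bits, zeroed, correctly rounded */
	tags = devm_bitmap_zalloc(dev, nbits, GFP_KERNEL);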
@@ -2430,7 +2220,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->sata_breakpoint)
goto err_out;
- hisi_sas_slot_index_init(hisi_hba);
hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
@@ -2465,6 +2254,9 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, rst_work);
+ if (hisi_sas_controller_prereset(hisi_hba))
+ return;
+
hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
@@ -2474,8 +2266,12 @@ void hisi_sas_sync_rst_work_handler(struct work_struct *work)
struct hisi_sas_rst *rst =
container_of(work, struct hisi_sas_rst, work);
+ if (hisi_sas_controller_prereset(rst->hisi_hba))
+ goto rst_complete;
+
if (!hisi_sas_controller_reset(rst->hisi_hba))
rst->done = true;
+rst_complete:
complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
@@ -2574,9 +2370,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (error)
- error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
@@ -2605,6 +2398,13 @@ err_out:
return NULL;
}
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ if (hisi_hba->hw->interrupt_preinit)
+ return hisi_hba->hw->interrupt_preinit(hisi_hba);
+ return 0;
+}
+
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -2662,6 +2462,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ rc = hisi_sas_interrupt_preinit(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
@@ -2672,1301 +2476,30 @@ int hisi_sas_probe(struct platform_device *pdev,
rc = hisi_hba->hw->hw_init(hisi_hba);
if (rc)
- goto err_out_register_ha;
+ goto err_out_hw_init;
scsi_scan_host(shost);
return 0;
+err_out_hw_init:
+ sas_unregister_ha(sha);
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
- hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
-struct dentry *hisi_sas_debugfs_dir;
-
-static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
-{
- int queue_entry_size = hisi_hba->hw->complete_hdr_size;
- int dump_index = hisi_hba->debugfs_dump_index;
- int i;
-
- for (i = 0; i < hisi_hba->queue_count; i++)
- memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
- hisi_hba->complete_hdr[i],
- HISI_SAS_QUEUE_SLOTS * queue_entry_size);
-}
-
-static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
-{
- int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
- int dump_index = hisi_hba->debugfs_dump_index;
- int i;
-
- for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
- int j;
-
- debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
- cmd_hdr = hisi_hba->cmd_hdr[i];
-
- for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
- memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
- queue_entry_size);
- }
-}
-
-static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- const struct hisi_sas_debugfs_reg *port =
- hisi_hba->hw->debugfs_reg_port;
- int i, phy_cnt;
- u32 offset;
- u32 *databuf;
-
- for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
- databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
- for (i = 0; i < port->count; i++, databuf++) {
- offset = port->base_off + 4 * i;
- *databuf = port->read_port_reg(hisi_hba, phy_cnt,
- offset);
- }
- }
-}
-
-static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *global =
- hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- int i;
-
- for (i = 0; i < global->count; i++, databuf++)
- *databuf = global->read_global_reg(hisi_hba, 4 * i);
-}
-
-static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *axi =
- hw->debugfs_reg_array[DEBUGFS_AXI];
- int i;
-
- for (i = 0; i < axi->count; i++, databuf++)
- *databuf = axi->read_global_reg(hisi_hba,
- 4 * i + axi->base_off);
-}
-
-static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *ras =
- hw->debugfs_reg_array[DEBUGFS_RAS];
- int i;
-
- for (i = 0; i < ras->count; i++, databuf++)
- *databuf = ras->read_global_reg(hisi_hba,
- 4 * i + ras->base_off);
-}
-
-static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
- void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
- struct hisi_sas_itct *itct;
- int i;
-
- hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
- cachebuf);
-
- itct = hisi_hba->itct;
-
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
- memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
- databuf += sizeof(struct hisi_sas_itct);
- }
-}
-
-static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- int max_command_entries = HISI_SAS_MAX_COMMANDS;
- void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
- void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
- struct hisi_sas_iost *iost;
- int i;
-
- hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
- cachebuf);
-
- iost = hisi_hba->iost;
-
- for (i = 0; i < max_command_entries; i++, iost++) {
- memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
- databuf += sizeof(struct hisi_sas_iost);
- }
-}
-
-static const char *
-hisi_sas_debugfs_to_reg_name(int off, int base_off,
- const struct hisi_sas_debugfs_reg_lu *lu)
-{
- for (; lu->name; lu++) {
- if (off == lu->off - base_off)
- return lu->name;
- }
-
- return NULL;
-}
-
-static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
- struct seq_file *s)
-{
- const struct hisi_sas_debugfs_reg *reg = ptr;
- int i;
-
- for (i = 0; i < reg->count; i++) {
- int off = i * 4;
- const char *name;
-
- name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
- reg->lu);
-
- if (name)
- seq_printf(s, "0x%08x 0x%08x %s\n", off,
- regs_val[i], name);
- else
- seq_printf(s, "0x%08x 0x%08x\n", off,
- regs_val[i]);
- }
-}
-
-static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_regs *global = s->private;
- struct hisi_hba *hisi_hba = global->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
-
- hisi_sas_debugfs_print_reg(global->data,
- reg_global, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_global_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_global_fops = {
- .open = hisi_sas_debugfs_global_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_regs *axi = s->private;
- struct hisi_hba *hisi_hba = axi->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
-
- hisi_sas_debugfs_print_reg(axi->data,
- reg_axi, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_axi_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_axi_fops = {
- .open = hisi_sas_debugfs_axi_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_regs *ras = s->private;
- struct hisi_hba *hisi_hba = ras->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
-
- hisi_sas_debugfs_print_reg(ras->data,
- reg_ras, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_ras_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_ras_fops = {
- .open = hisi_sas_debugfs_ras_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_port *port = s->private;
- struct hisi_sas_phy *phy = port->phy;
- struct hisi_hba *hisi_hba = phy->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
-
- hisi_sas_debugfs_print_reg(port->data, reg_port, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_port_fops = {
- .open = hisi_sas_debugfs_port_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void hisi_sas_show_row_64(struct seq_file *s, int index,
- int sz, __le64 *ptr)
-{
- int i;
-
- /* completion header size not fixed per HW version */
- seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 8; i++, ptr++) {
- seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
- if (!(i % 2))
- seq_puts(s, "\n\t");
- }
-
- seq_puts(s, "\n");
-}
-
-static void hisi_sas_show_row_32(struct seq_file *s, int index,
- int sz, __le32 *ptr)
-{
- int i;
-
- /* completion header size not fixed per HW version */
- seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 4; i++, ptr++) {
- seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
- if (!(i % 4))
- seq_puts(s, "\n\t");
- }
- seq_puts(s, "\n");
-}
-
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
- struct hisi_sas_debugfs_cq *debugfs_cq)
-{
- struct hisi_sas_cq *cq = debugfs_cq->cq;
- struct hisi_hba *hisi_hba = cq->hisi_hba;
- __le32 *complete_hdr = debugfs_cq->complete_hdr +
- (hisi_hba->hw->complete_hdr_size * slot);
-
- hisi_sas_show_row_32(s, slot,
- hisi_hba->hw->complete_hdr_size,
- complete_hdr);
-}
-
-static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
- int slot;
-
- for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_cq_show_slot(s, slot, debugfs_cq);
- }
- return 0;
-}
-
-static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_cq_fops = {
- .open = hisi_sas_debugfs_cq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
-{
- struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
- void *cmd_queue = debugfs_dq->hdr;
- __le32 *cmd_hdr = cmd_queue +
- sizeof(struct hisi_sas_cmd_hdr) * slot;
-
- hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
-}
-
-static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
-{
- int slot;
-
- for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_dq_show_slot(s, slot, s->private);
- }
- return 0;
-}
-
-static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_dq_fops = {
- .open = hisi_sas_debugfs_dq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
- struct hisi_sas_iost *iost = debugfs_iost->iost;
- int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
-
- for (i = 0; i < max_command_entries; i++, iost++) {
- __le64 *data = &iost->qw0;
-
- hisi_sas_show_row_64(s, i, sizeof(*iost), data);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_iost_fops = {
- .open = hisi_sas_debugfs_iost_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
- struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
- int i, tab_idx;
- __le64 *iost;
-
- for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
- /*
- * Data struct of IOST cache:
- * Data[1]: BIT0~15: Table index
- * Bit16: Valid mask
- * Data[2]~[9]: IOST table
- */
- tab_idx = (iost_cache->data[1] & 0xffff);
- iost = (__le64 *)iost_cache;
-
- hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_iost_cache_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
- .open = hisi_sas_debugfs_iost_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
-{
- int i;
- struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
- struct hisi_sas_itct *itct = debugfs_itct->itct;
-
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
- __le64 *data = &itct->qw0;
-
- hisi_sas_show_row_64(s, i, sizeof(*itct), data);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_itct_fops = {
- .open = hisi_sas_debugfs_itct_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
- struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
- int i, tab_idx;
- __le64 *itct;
-
- for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
- /*
- * Data struct of ITCT cache:
- * Data[1]: BIT0~15: Table index
- * Bit16: Valid mask
- * Data[2]~[9]: ITCT table
- */
- tab_idx = itct_cache->data[1] & 0xffff;
- itct = (__le64 *)itct_cache;
-
- hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_itct_cache_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
- .open = hisi_sas_debugfs_itct_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
-{
- u64 *debugfs_timestamp;
- int dump_index = hisi_hba->debugfs_dump_index;
- struct dentry *dump_dentry;
- struct dentry *dentry;
- char name[256];
- int p;
- int c;
- int d;
-
- snprintf(name, 256, "%d", dump_index);
-
- dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
-
- debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
-
- debugfs_create_u64("timestamp", 0400, dump_dentry,
- debugfs_timestamp);
-
- debugfs_create_file("global", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
- &hisi_sas_debugfs_global_fops);
-
- /* Create port dir and files */
- dentry = debugfs_create_dir("port", dump_dentry);
- for (p = 0; p < hisi_hba->n_phy; p++) {
- snprintf(name, 256, "%d", p);
-
- debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_port_reg[dump_index][p],
- &hisi_sas_debugfs_port_fops);
- }
-
- /* Create CQ dir and files */
- dentry = debugfs_create_dir("cq", dump_dentry);
- for (c = 0; c < hisi_hba->queue_count; c++) {
- snprintf(name, 256, "%d", c);
-
- debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_cq[dump_index][c],
- &hisi_sas_debugfs_cq_fops);
- }
-
- /* Create DQ dir and files */
- dentry = debugfs_create_dir("dq", dump_dentry);
- for (d = 0; d < hisi_hba->queue_count; d++) {
- snprintf(name, 256, "%d", d);
-
- debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_dq[dump_index][d],
- &hisi_sas_debugfs_dq_fops);
- }
-
- debugfs_create_file("iost", 0400, dump_dentry,
- &hisi_hba->debugfs_iost[dump_index],
- &hisi_sas_debugfs_iost_fops);
-
- debugfs_create_file("iost_cache", 0400, dump_dentry,
- &hisi_hba->debugfs_iost_cache[dump_index],
- &hisi_sas_debugfs_iost_cache_fops);
-
- debugfs_create_file("itct", 0400, dump_dentry,
- &hisi_hba->debugfs_itct[dump_index],
- &hisi_sas_debugfs_itct_fops);
-
- debugfs_create_file("itct_cache", 0400, dump_dentry,
- &hisi_hba->debugfs_itct_cache[dump_index],
- &hisi_sas_debugfs_itct_cache_fops);
-
- debugfs_create_file("axi", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
- &hisi_sas_debugfs_axi_fops);
-
- debugfs_create_file("ras", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
- &hisi_sas_debugfs_ras_fops);
-
- return;
-}
-
-static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
-{
- hisi_hba->hw->snapshot_prepare(hisi_hba);
-
- hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);
-
- hisi_sas_debugfs_create_files(hisi_hba);
-
- hisi_hba->hw->snapshot_restore(hisi_hba);
-}
-
-static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hisi_hba *hisi_hba = file->f_inode->i_private;
- char buf[8];
-
- if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
- return -EFAULT;
-
- if (count > 8)
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- if (buf[0] != '1')
- return -EFAULT;
-
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
- return count;
-}
-
-static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
- .write = &hisi_sas_debugfs_trigger_dump_write,
- .owner = THIS_MODULE,
-};
-
-enum {
- HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
- HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
- HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
-};
-
-enum {
- HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
- HISI_SAS_BIST_CODE_MODE_PRBS23,
- HISI_SAS_BIST_CODE_MODE_PRBS31,
- HISI_SAS_BIST_CODE_MODE_JTPAT,
- HISI_SAS_BIST_CODE_MODE_CJTPAT,
- HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
- HISI_SAS_BIST_CODE_MODE_TRAIN,
- HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
- HISI_SAS_BIST_CODE_MODE_HFTP,
- HISI_SAS_BIST_CODE_MODE_MFTP,
- HISI_SAS_BIST_CODE_MODE_LFTP,
- HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
-};
-
-static const struct {
- int value;
- char *name;
-} hisi_sas_debugfs_loop_linkrate[] = {
- { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
- { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
- { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
- { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
-};
-
-static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
- int match = (hisi_hba->debugfs_bist_linkrate ==
- hisi_sas_debugfs_loop_linkrate[i].value);
-
- seq_printf(s, "%s%s%s ", match ? "[" : "",
- hisi_sas_debugfs_loop_linkrate[i].name,
- match ? "]" : "");
- }
- seq_puts(s, "\n");
-
- return 0;
-}
-
-static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
- bool found = false;
- int i;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- if (count >= sizeof(kbuf))
- return -EOVERFLOW;
-
- if (copy_from_user(kbuf, buf, count))
- return -EINVAL;
-
- pkbuf = strstrip(kbuf);
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
- if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
- pkbuf, 16)) {
- hisi_hba->debugfs_bist_linkrate =
- hisi_sas_debugfs_loop_linkrate[i].value;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
- .open = hisi_sas_debugfs_bist_linkrate_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_linkrate_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct {
- int value;
- char *name;
-} hisi_sas_debugfs_loop_code_mode[] = {
- { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
- { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
- { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
- { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
- { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
- { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
- { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
- { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
- { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
- { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
- { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
- { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
-};
-
-static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
- int match = (hisi_hba->debugfs_bist_code_mode ==
- hisi_sas_debugfs_loop_code_mode[i].value);
-
- seq_printf(s, "%s%s%s ", match ? "[" : "",
- hisi_sas_debugfs_loop_code_mode[i].name,
- match ? "]" : "");
- }
- seq_puts(s, "\n");
-
- return 0;
-}
-
-static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
- const char __user *buf,
- size_t count,
- loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
- bool found = false;
- int i;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(kbuf, buf, count))
- return -EOVERFLOW;
-
- pkbuf = strstrip(kbuf);
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
- if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
- pkbuf, 16)) {
- hisi_hba->debugfs_bist_code_mode =
- hisi_sas_debugfs_loop_code_mode[i].value;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
- .open = hisi_sas_debugfs_bist_code_mode_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_code_mode_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- unsigned int phy_no;
- int val;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- val = kstrtouint_from_user(buf, count, 0, &phy_no);
- if (val)
- return val;
-
- if (phy_no >= hisi_hba->n_phy)
- return -EINVAL;
-
- hisi_hba->debugfs_bist_phy_no = phy_no;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
-
- seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_phy_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
- .open = hisi_sas_debugfs_bist_phy_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_phy_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct {
- int value;
- char *name;
-} hisi_sas_debugfs_loop_modes[] = {
- { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
- { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
- { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
-};
-
-static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
- int match = (hisi_hba->debugfs_bist_mode ==
- hisi_sas_debugfs_loop_modes[i].value);
-
- seq_printf(s, "%s%s%s ", match ? "[" : "",
- hisi_sas_debugfs_loop_modes[i].name,
- match ? "]" : "");
- }
- seq_puts(s, "\n");
-
- return 0;
-}
-
-static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
- bool found = false;
- int i;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(kbuf, buf, count))
- return -EOVERFLOW;
-
- pkbuf = strstrip(kbuf);
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
- if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
- hisi_hba->debugfs_bist_mode =
- hisi_sas_debugfs_loop_modes[i].value;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_mode_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
- .open = hisi_sas_debugfs_bist_mode_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_mode_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- unsigned int enable;
- int val;
-
- val = kstrtouint_from_user(buf, count, 0, &enable);
- if (val)
- return val;
-
- if (enable > 1)
- return -EINVAL;
-
- if (enable == hisi_hba->debugfs_bist_enable)
- return count;
-
- if (!hisi_hba->hw->set_bist)
- return -EPERM;
-
- val = hisi_hba->hw->set_bist(hisi_hba, enable);
- if (val < 0)
- return val;
-
- hisi_hba->debugfs_bist_enable = enable;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
-
- seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_enable_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
- .open = hisi_sas_debugfs_bist_enable_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_enable_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *s = filp->private_data;
- struct hisi_sas_phy *phy = s->private;
- unsigned int set_val;
- int res;
-
- res = kstrtouint_from_user(buf, count, 0, &set_val);
- if (res)
- return res;
-
- if (set_val > 0)
- return -EINVAL;
-
- atomic_set(&phy->down_cnt, 0);
-
- return count;
-}
-
-static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_phy *phy = s->private;
-
- seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
-
- return 0;
-}
-
-static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
- .open = hisi_sas_debugfs_phy_down_cnt_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_phy_down_cnt_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-void hisi_sas_debugfs_work_handler(struct work_struct *work)
-{
- struct hisi_hba *hisi_hba =
- container_of(work, struct hisi_hba, debugfs_work);
- int debugfs_dump_index = hisi_hba->debugfs_dump_index;
- struct device *dev = hisi_hba->dev;
- u64 timestamp = local_clock();
-
- if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
- dev_warn(dev, "dump count exceeded!\n");
- return;
- }
-
- do_div(timestamp, NSEC_PER_MSEC);
- hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
-
- hisi_sas_debugfs_snapshot_regs(hisi_hba);
- hisi_hba->debugfs_dump_index++;
-}
-EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
-
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
-{
- struct device *dev = hisi_hba->dev;
- int i;
-
- devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
- devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
- devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
- devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
-
- for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
-
- for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev,
- hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
-
- for (i = 0; i < DEBUGFS_REGS_NUM; i++)
- devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
-
- for (i = 0; i < hisi_hba->n_phy; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
-}
-
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
-{
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- struct device *dev = hisi_hba->dev;
- int p, c, d, r, i;
- size_t sz;
-
- for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
- struct hisi_sas_debugfs_regs *regs =
- &hisi_hba->debugfs_regs[dump_index][r];
-
- sz = hw->debugfs_reg_array[r]->count * 4;
- regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!regs->data)
- goto fail;
- regs->hisi_hba = hisi_hba;
- }
-
- sz = hw->debugfs_reg_port->count * 4;
- for (p = 0; p < hisi_hba->n_phy; p++) {
- struct hisi_sas_debugfs_port *port =
- &hisi_hba->debugfs_port_reg[dump_index][p];
-
- port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!port->data)
- goto fail;
- port->phy = &hisi_hba->phy[p];
- }
-
- sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- for (c = 0; c < hisi_hba->queue_count; c++) {
- struct hisi_sas_debugfs_cq *cq =
- &hisi_hba->debugfs_cq[dump_index][c];
-
- cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!cq->complete_hdr)
- goto fail;
- cq->cq = &hisi_hba->cq[c];
- }
-
- sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- for (d = 0; d < hisi_hba->queue_count; d++) {
- struct hisi_sas_debugfs_dq *dq =
- &hisi_hba->debugfs_dq[dump_index][d];
-
- dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!dq->hdr)
- goto fail;
- dq->dq = &hisi_hba->dq[d];
- }
-
- sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
-
- hisi_hba->debugfs_iost[dump_index].iost =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost[dump_index].iost)
- goto fail;
-
- sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
- sizeof(struct hisi_sas_iost_itct_cache);
-
- hisi_hba->debugfs_iost_cache[dump_index].cache =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
- goto fail;
-
- sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
- sizeof(struct hisi_sas_iost_itct_cache);
-
- hisi_hba->debugfs_itct_cache[dump_index].cache =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
- goto fail;
-
- /* New memory allocations must be located before itct */
- sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
-
- hisi_hba->debugfs_itct[dump_index].itct =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct[dump_index].itct)
- goto fail;
-
- return 0;
-fail:
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
- hisi_sas_debugfs_release(hisi_hba, i);
- return -ENOMEM;
-}
-
-static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
-{
- struct dentry *dir = debugfs_create_dir("phy_down_cnt",
- hisi_hba->debugfs_dir);
- char name[16];
- int phy_no;
-
- for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
- snprintf(name, 16, "%d", phy_no);
- debugfs_create_file(name, 0600, dir,
- &hisi_hba->phy[phy_no],
- &hisi_sas_debugfs_phy_down_cnt_ops);
- }
-}
-
-static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
-{
- hisi_hba->debugfs_bist_dentry =
- debugfs_create_dir("bist", hisi_hba->debugfs_dir);
- debugfs_create_file("link_rate", 0600,
- hisi_hba->debugfs_bist_dentry, hisi_hba,
- &hisi_sas_debugfs_bist_linkrate_ops);
-
- debugfs_create_file("code_mode", 0600,
- hisi_hba->debugfs_bist_dentry, hisi_hba,
- &hisi_sas_debugfs_bist_code_mode_ops);
-
- debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
- hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
-
- debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
- &hisi_hba->debugfs_bist_cnt);
-
- debugfs_create_file("loopback_mode", 0600,
- hisi_hba->debugfs_bist_dentry,
- hisi_hba, &hisi_sas_debugfs_bist_mode_ops);
-
- debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
- hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
-
- hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
-}
-
-void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
-{
- struct device *dev = hisi_hba->dev;
- int i;
-
- hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
- hisi_sas_debugfs_dir);
- debugfs_create_file("trigger_dump", 0200,
- hisi_hba->debugfs_dir,
- hisi_hba,
- &hisi_sas_debugfs_trigger_dump_fops);
-
- /* create bist structures */
- hisi_sas_debugfs_bist_init(hisi_hba);
-
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
-
- hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
-
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
- if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
-
-void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
-{
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
-}
-EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
-
int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
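
Replacing the timer_pending()/del_timer() pair with del_timer_sync() closes a race on the remove path: del_timer() only deactivates a pending timer, while the _sync variant also waits for a handler already running on another CPU, so the structure the timer uses cannot be freed underneath it. A sketch of the idiom (function name hypothetical):

	static void sketch_teardown(struct hisi_hba *hisi_hba)
	{
		/* Also waits out a concurrently running timer handler,
		 * so freeing hisi_hba after this point is safe; the old
		 * timer_pending() check was racy for exactly that case. */
		del_timer_sync(&hisi_hba->timer);
	}
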
@@ -3977,16 +2510,28 @@ int hisi_sas_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
+#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
+#define DEBUGFS_ENABLE_DEFAULT "enabled"
+bool hisi_sas_debugfs_enable = true;
+u32 hisi_sas_debugfs_dump_count = 50;
+#else
+#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
+u32 hisi_sas_debugfs_dump_count = 1;
+#endif
+
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
-MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
+MODULE_PARM_DESC(hisi_sas_debugfs_enable,
+ "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");
-u32 hisi_sas_debugfs_dump_count = 1;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
+struct dentry *hisi_sas_debugfs_dir;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);
+
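
Because both parameters are 0444 they are read-only once the module is loaded, so a kernel built without CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE has to opt in at load time; a hypothetical invocation (assuming the main module is hisi_sas_main, per the hisi_sas Makefile):

	modprobe hisi_sas_main debugfs_enable=1 debugfs_dump_count=8
	cat /sys/module/hisi_sas_main/parameters/debugfs_enable
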
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index fa25766502a2..d643c5a49aa9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -752,7 +752,7 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
rc = reset_hw_v1_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -958,7 +958,7 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd, fburst = 0;
u32 dw1, dw2;
@@ -1152,21 +1152,21 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
default:
{
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
}
}
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- dev_err(dev, "slot err: SATA/STP not supported");
+ dev_err(dev, "slot err: SATA/STP not supported\n");
}
break;
default:
@@ -1175,15 +1175,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
-static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot)
+static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
- enum exec_status sts;
struct hisi_sas_complete_v1_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
@@ -1194,15 +1193,14 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1219,35 +1217,35 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO);
if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq IPTT err",
+ dev_err(dev, "slot complete: [%d:%d] has dq IPTT err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq type err",
+ dev_err(dev, "slot complete: [%d:%d] has dq type err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq force phy err",
+ dev_err(dev, "slot complete: [%d:%d] has dq force phy err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq phy id err",
+ dev_err(dev, "slot complete: [%d:%d] has dq phy id err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq abort flag err",
+ dev_err(dev, "slot complete: [%d:%d] has dq abort flag err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err",
+ dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err",
+ dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq order frame len err",
+ dev_err(dev, "slot complete: [%d:%d] has dq order frame len err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
ts->stat = SAS_OPEN_REJECT;
@@ -1259,8 +1257,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
slot_err_v1_hw(hisi_hba, task, slot);
- if (unlikely(slot->abort))
- return ts->stat;
+ if (unlikely(slot->abort)) {
+ sas_task_abort(task);
+ return;
+ }
goto out;
}
@@ -1280,10 +1280,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -1293,11 +1291,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- dev_err(dev, "slot complete: SATA/STP not supported");
+ dev_err(dev, "slot complete: SATA/STP not supported\n");
break;
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -1309,12 +1307,9 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
out:
hisi_sas_slot_task_free(hisi_hba, task, slot);
- sts = ts->stat;
if (task->task_done)
task->task_done(task);
-
- return sts;
}
/* Interrupts */
@@ -1329,7 +1324,6 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
irqreturn_t res = IRQ_HANDLED;
- unsigned long flags;
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) {
@@ -1382,15 +1376,9 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
-
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
- complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
-
end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
CHL_INT2_SL_PHY_ENA_MSK);
@@ -1410,7 +1398,6 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
int phy_no = sas_phy->id;
u32 irq_value;
@@ -1419,14 +1406,13 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) {
- dev_err(dev, "bcast: irq_value = %x not set enable bit",
+ dev_err(dev, "bcast: irq_value = %x not set enable bit\n",
irq_value);
res = IRQ_NONE;
goto end;
}
- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ hisi_sas_phy_bcast(phy);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1455,7 +1441,8 @@ static irqreturn_t int_abnormal_v1_hw(int irq, void *p)
u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
hisi_sas_phy_down(hisi_hba, phy_no,
- (phy_state & 1 << phy_no) ? 1 : 0);
+ (phy_state & 1 << phy_no) ? 1 : 0,
+ GFP_ATOMIC);
}
if (irq_value & CHL_INT0_ID_TIMEOUT_MSK)
@@ -1647,18 +1634,15 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = i * HISI_SAS_PHY_INT_NR;
for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (!irq) {
- dev_err(dev, "irq init: fail map phy interrupt %d\n",
- idx);
- return -ENOENT;
- }
+ if (irq < 0)
+ return irq;
rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
DRV_NAME " phy", phy);
if (rc) {
dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
}
@@ -1666,36 +1650,30 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (!irq) {
- dev_err(dev, "irq init: could not map cq interrupt %d\n",
- idx);
- return -ENOENT;
- }
+ if (irq < 0)
+ return irq;
rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,
DRV_NAME " cq", &hisi_hba->cq[i]);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (!irq) {
- dev_err(dev, "irq init: could not map fatal interrupt %d\n",
- idx);
- return -ENOENT;
- }
+ if (irq < 0)
+ return irq;
rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
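
The switch to "if (irq < 0) return irq;" is more than style: platform_get_irq() returns a negative errno on failure (it does not return 0 on current kernels), and propagating it preserves -EPROBE_DEFER instead of collapsing every failure to -ENOENT. A sketch of the resulting idiom, with hypothetical names:

	static irqreturn_t sketch_handler(int irq, void *p)
	{
		return IRQ_HANDLED;
	}

	static int sketch_request_one(struct platform_device *pdev,
				      struct device *dev, void *data, int idx)
	{
		int irq = platform_get_irq(pdev, idx);

		if (irq < 0)
			return irq;	/* keeps -EPROBE_DEFER etc. intact */

		return devm_request_irq(dev, irq, sketch_handler, 0,
					DRV_NAME " irq", data);
	}
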
@@ -1750,15 +1728,19 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}
-static struct device_attribute *host_attrs_v1_hw[] = {
- &dev_attr_phy_event_threshold,
+static struct attribute *host_v1_hw_attrs[] = {
+ &dev_attr_phy_event_threshold.attr,
NULL
};
+ATTRIBUTE_GROUPS(host_v1_hw);
+
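
The ATTRIBUTE_GROUPS() helper generates the attribute group and the NULL-terminated pointer array that the new .shost_groups field below consumes; roughly, it expands to something like the following (a sketch, not the exact <linux/sysfs.h> definition):

	static const struct attribute_group host_v1_hw_group = {
		.attrs = host_v1_hw_attrs,
	};

	static const struct attribute_group *host_v1_hw_groups[] = {
		&host_v1_hw_group,
		NULL,
	};
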
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
@@ -1770,12 +1752,13 @@ static struct scsi_host_template sht_v1_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = host_attrs_v1_hw,
+ .shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e05faf315dcd..cded42f4ca44 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -805,8 +805,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
return -SAS_QUEUE_FULL;
}
/*
- * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
- */
+ * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
+ */
if (sata_dev ^ (start & 1))
break;
start++;
@@ -994,7 +994,7 @@ static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
if (!wait_for_completion_timeout(sas_dev->completion,
- CLEAR_ITCT_TIMEOUT * HZ)) {
+ HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
dev_warn(dev, "failed to clear ITCT\n");
return -ETIMEDOUT;
}
@@ -1202,7 +1202,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe);
hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
@@ -1382,7 +1382,7 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
rc = reset_hw_v2_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -1742,7 +1742,7 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
@@ -2168,7 +2168,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SATA:
@@ -2318,8 +2318,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
}
-static int
-slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2327,7 +2327,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v2_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
@@ -2337,7 +2336,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2345,8 +2344,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
@@ -2369,18 +2367,18 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
case STAT_IO_COMPLETE:
/* internal abort command complete */
ts->stat = TMF_RESP_FUNC_SUCC;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NO_DEVICE:
ts->stat = TMF_RESP_FUNC_COMPLETE;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NOT_VALID:
/* abort single io, controller can't find
* the io that needs aborting
*/
ts->stat = TMF_RESP_FUNC_FAILED;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
default:
break;
@@ -2405,8 +2403,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[0], error_info[1],
error_info[2], error_info[3]);
- if (unlikely(slot->abort))
- return ts->stat;
+ if (unlikely(slot->abort)) {
+ sas_task_abort(task);
+ return;
+ }
goto out;
}
@@ -2426,10 +2426,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2440,12 +2438,12 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
hisi_sas_sata_done(task, slot);
break;
}
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -2456,12 +2454,11 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2473,15 +2470,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
@@ -2494,7 +2489,8 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_ata_task *ata_task = &task->ata_task;
+ struct sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw0, dw1 = 0, dw2 = 0;
@@ -2507,9 +2503,9 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
else
dw0 |= 4 << CMD_HDR_CMD_OFF;
- if (tmf && tmf->force_phy) {
+ if (tmf && ata_task->force_phy) {
dw0 |= CMD_HDR_FORCE_PHY_MSK;
- dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
+ dw0 |= (1 << ata_task->force_phy_id) << CMD_HDR_PHY_ID_OFF;
}
hdr->dw0 = cpu_to_le32(dw0);
@@ -2605,14 +2601,15 @@ static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
}
static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
struct timer_list *timer = &slot->internal_abort_timer;
+ struct hisi_sas_device *sas_dev = dev->lldd_dev;
/* setup the quirk timer */
timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0);
@@ -2624,13 +2621,13 @@ static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
(port->id << CMD_HDR_PORT_OFF) |
(dev_is_sata(dev) <<
CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
- (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
+ (abort->type << CMD_HDR_ABORT_FLAG_OFF));
/* dw1 */
- hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
+ hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEV_ID_OFF);
/* dw7 */
- hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
}
@@ -2643,7 +2640,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
- unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@@ -2698,14 +2694,9 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
set_link_timer_quirk(hisi_hba);
}
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
- complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
-
end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
@@ -2736,7 +2727,8 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
- hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0,
+ GFP_ATOMIC);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
@@ -2819,15 +2811,12 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -3205,7 +3194,6 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
irqreturn_t res = IRQ_HANDLED;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- unsigned long flags;
int phy_no, offset;
del_timer(&phy->timer);
@@ -3281,12 +3269,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
+ if (phy->reset_completion)
complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
@@ -3304,7 +3288,29 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
-/**
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = CQ0_IRQ_INDEX,
+ .post_vectors = 16,
+ };
+ int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+ nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+ &hisi_hba->irq_map);
+ if (nvec < 0)
+ return nvec;
+
+ shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+ return 0;
+}
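
The preinit hook above carves the platform interrupt space into three regions before managed-affinity spreading: the first 96 vectors (phy/sata/fatal lines, below CQ0_IRQ_INDEX) and the last 16 are reserved, so devm_platform_get_irqs_affinity() spreads only the completion-queue vectors in between across CPUs. A minimal sketch of the same budget arithmetic, assuming the constants above:

/* Sketch only: how cq_nvecs falls out of the reserved-vector budget.
 * With the 128-interrupt v2 layout this yields 128 - (96 + 16) = 16 CQs.
 */
static int v2_cq_vector_count(int nvec)
{
	int resv = CQ0_IRQ_INDEX + 16;	/* pre_vectors + post_vectors */

	if (nvec < resv + 1)		/* minvec: at least one CQ vector */
		return -ENOSPC;

	return nvec - resv;
}
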
+
+/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
*/
@@ -3312,14 +3318,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc = 0, irq_map[128];
+ int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
- for (i = 0; i < 128; i++)
- irq_map[i] = platform_get_irq(pdev, i);
-
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
- irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+ irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -3333,7 +3336,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- irq = irq_map[phy_no + 72];
+ irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
@@ -3345,7 +3348,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
- irq = irq_map[fatal_no + 81];
+ irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -3356,24 +3359,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
- for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+ for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- cq->irq_no = irq_map[queue_no + 96];
+ cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
- irq, rc);
+ cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
+ cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
-
- hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
err_out:
return rc;
}
@@ -3526,15 +3527,36 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
}
-static struct device_attribute *host_attrs_v2_hw[] = {
- &dev_attr_phy_event_threshold,
+static struct attribute *host_v2_hw_attrs[] = {
+ &dev_attr_phy_event_threshold.attr,
NULL
};
+ATTRIBUTE_GROUPS(host_v2_hw);
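
The shost_attrs to shost_groups conversion replaces the old array of struct device_attribute pointers with sysfs attribute groups. ATTRIBUTE_GROUPS(host_v2_hw) roughly expands to the following (a sketch of the sysfs.h macro, not driver code):

static const struct attribute_group host_v2_hw_group = {
	.attrs = host_v2_hw_attrs,
};

static const struct attribute_group *host_v2_hw_groups[] = {
	&host_v2_hw_group,
	NULL,
};

The resulting host_v2_hw_groups pointer is what the template assigns to .shost_groups below.
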

+
+static void map_queues_v2_hw(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+ if (!mask)
+ continue;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+}
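
map_queues_v2_hw walks each hardware queue's IRQ affinity mask and records the queue index for every CPU in that mask; CPUs whose vector exposes no affinity mask are simply skipped. A hypothetical stricter variant would finish with a round-robin pass so every CPU ends up mapped (a sketch, not part of the patch):

/* Hypothetical fallback: give every CPU a queue even when an affinity
 * mask is unavailable. Assumes the blk_mq_queue_map fields used above.
 */
static void map_queues_fallback(struct blk_mq_queue_map *qmap)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = qmap->queue_offset +
				    cpu % qmap->nr_queues;
}
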
+
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
@@ -3546,17 +3568,21 @@ static struct scsi_host_template sht_v2_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = host_attrs_v2_hw,
+ .shost_groups = host_v2_hw_groups,
.host_reset = hisi_sas_host_reset,
+ .map_queues = map_queues_v2_hw,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
+ .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
@@ -3586,18 +3612,6 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
static int hisi_sas_v2_probe(struct platform_device *pdev)
{
- /*
- * Check if we should defer the probe before we probe the
- * upper layer, as it's hard to defer later on.
- */
- int ret = platform_get_irq(pdev, 0);
-
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "cannot obtain irq\n");
- return ret;
- }
-
return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a2debe0c8185..d56b4bfd2767 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017 Hisilicon Limited.
*/
+#include <linux/sched/clock.h>
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"
@@ -191,8 +192,10 @@
#define PHY_CFG_PHY_RST_OFF 3
#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
-#define CFG_PROG_PHY_LINK_RATE_OFF 8
-#define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_PHY_LINK_RATE_OFF 0
+#define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8
+#define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
@@ -295,11 +298,25 @@
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
#define COARSETUNE_TIME (PORT_BASE + 0x304)
+#define TXDEEMPH_G1 (PORT_BASE + 0x350)
#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
#define ERR_CNT_CODE_ERR (PORT_BASE + 0x394)
#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
+#define DFX_FIFO_CTRL (PORT_BASE + 0x3a0)
+#define DFX_FIFO_CTRL_TRIGGER_MODE_OFF 0
+#define DFX_FIFO_CTRL_TRIGGER_MODE_MSK (0x7 << DFX_FIFO_CTRL_TRIGGER_MODE_OFF)
+#define DFX_FIFO_CTRL_DUMP_MODE_OFF 3
+#define DFX_FIFO_CTRL_DUMP_MODE_MSK (0x7 << DFX_FIFO_CTRL_DUMP_MODE_OFF)
+#define DFX_FIFO_CTRL_SIGNAL_SEL_OFF 6
+#define DFX_FIFO_CTRL_SIGNAL_SEL_MSK (0xF << DFX_FIFO_CTRL_SIGNAL_SEL_OFF)
+#define DFX_FIFO_CTRL_DUMP_DISABLE_OFF 10
+#define DFX_FIFO_CTRL_DUMP_DISABLE_MSK (0x1 << DFX_FIFO_CTRL_DUMP_DISABLE_OFF)
+#define DFX_FIFO_TRIGGER (PORT_BASE + 0x3a4)
+#define DFX_FIFO_TRIGGER_MSK (PORT_BASE + 0x3a8)
+#define DFX_FIFO_DUMP_MSK (PORT_BASE + 0x3aC)
+#define DFX_FIFO_RD_DATA (PORT_BASE + 0x3b0)
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
@@ -389,6 +406,8 @@
#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF 10
#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_RSPNS_GOOD_OFF 11
+#define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF)
#define CMPLT_HDR_ERX_OFF 12
#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF 13
@@ -462,6 +481,9 @@ struct hisi_sas_err_record_v3 {
#define RX_DATA_LEN_UNDERFLOW_OFF 6
#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+#define RX_FIS_STATUS_ERR_OFF 0
+#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
+
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32
@@ -502,6 +524,8 @@ struct hisi_sas_err_record_v3 {
#define CHNL_INT_STS_INT2_MSK BIT(3)
#define CHNL_WIDTH 4
+#define BAR_NO_V3_HW 5
+
enum {
DSM_FUNC_ERR_HANDLE_MSI = 0,
};
@@ -511,13 +535,11 @@ MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
-static bool auto_affine_msi_experimental;
-module_param(auto_affine_msi_experimental, bool, 0444);
-MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
- "default is off");
+static void debugfs_work_handler_v3_hw(struct work_struct *work);
+static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@@ -565,7 +587,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
- int i;
+ int i, j;
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
@@ -593,25 +615,24 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
for (i = 0; i < hisi_hba->n_phy; i++) {
+ enum sas_linkrate max;
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- u32 prog_phy_link_rate = 0x800;
+ u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
+ PROG_PHY_LINK_RATE);
+ prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
- SAS_LINK_RATE_1_5_GBPS)) {
- prog_phy_link_rate = 0x855;
- } else {
- enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
-
- prog_phy_link_rate =
- hisi_sas_get_prog_phy_linkrate_mask(max) |
- 0x800;
- }
+ SAS_LINK_RATE_1_5_GBPS))
+ max = SAS_LINK_RATE_12_0_GBPS;
+ else
+ max = sas_phy->phy->maximum_linkrate;
+ prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
@@ -636,6 +657,13 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
+
+ /* get default FFE configuration for BIST */
+ for (j = 0; j < FFE_CFG_MAX; j++) {
+ u32 val = hisi_sas_phy_read32(hisi_hba, i,
+ TXDEEMPH_G1 + (j * 0x4));
+ hisi_hba->debugfs_bist_ffe[i][j] = val;
+ }
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -823,7 +851,7 @@ static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
if (!wait_for_completion_timeout(sas_dev->completion,
- CLEAR_ITCT_TIMEOUT * HZ)) {
+ HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
dev_warn(dev, "failed to clear ITCT\n");
return -ETIMEDOUT;
}
@@ -894,13 +922,14 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ struct acpi_device *acpi_dev;
union acpi_object *obj;
guid_t guid;
int rc;
rc = reset_hw_v3_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -912,14 +941,21 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return -EINVAL;
}
- /* Switch over to MSI handling , from PCI AER default */
+ /*
+ * This DSM handles some hardware-related configurations:
+ * 1. Switch over to MSI error handling in kernel
+ * 2. BIOS *may* reset some register values through this method
+ */
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
DSM_FUNC_ERR_HANDLE_MSI, NULL);
if (!obj)
- dev_warn(dev, "Switch over to MSI handling failed\n");
+ dev_warn(dev, "cannot find DSM method, ignoring\n");
else
ACPI_FREE(obj);
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_device_power_manageable(acpi_dev))
+ dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
return 0;
}
@@ -1125,7 +1161,7 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
{
unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
unsigned int interval = scsi_prot_interval(scsi_cmnd);
- u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
+ u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd));
switch (prot_op) {
case SCSI_PROT_READ_INSERT:
@@ -1188,7 +1224,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
unsigned char prot_op;
u8 *buf_cmd;
@@ -1337,7 +1373,6 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
}
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
@@ -1420,30 +1455,29 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
}
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
+ struct hisi_sas_device *sas_dev = dev->lldd_dev;
+ bool sata = dev_is_sata(dev);
/* dw0 */
- hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
+ hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /* abort */
(port->id << CMD_HDR_PORT_OFF) |
- (dev_is_sata(dev)
- << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
- (abort_flag
- << CMD_HDR_ABORT_FLAG_OFF));
+ (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+ (abort->type << CMD_HDR_ABORT_FLAG_OFF));
/* dw1 */
- hdr->dw1 = cpu_to_le32(device_id
+ hdr->dw1 = cpu_to_le32(sas_dev->device_id
<< CMD_HDR_DEV_ID_OFF);
/* dw7 */
- hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
-
}
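
prep_abort_v3_hw now reads everything it needs from the sas_internal_abort_task embedded in the sas_task, instead of taking device_id/abort_flag/tag_to_abort as parameters. The dword layout it produces, restated as a standalone sketch (assuming the CMD_HDR_* offset macros defined earlier in this file):

/* Sketch: the internal-abort command-header encoding used above. */
static void pack_abort_hdr_sketch(struct hisi_sas_cmd_hdr *hdr, bool sata,
				  u32 port_id, u32 abort_type,
				  u32 device_id, u32 tag, u32 slot_idx)
{
	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) |	/* 5 = abort */
			       (port_id << CMD_HDR_PORT_OFF) |
			       (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
			       (abort_type << CMD_HDR_ABORT_FLAG_OFF));
	hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
	hdr->dw7 = cpu_to_le32(tag << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot_idx);
}
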
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -1454,9 +1488,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
- unsigned long flags;
- del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1533,16 +1565,27 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
}
phy->port_id = port_id;
- phy->phy_attached = 1;
- hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+
+ /*
+ * Call pm_runtime_get_noresume() which pairs with
+ * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
+ * For failure call pm_runtime_put() as we are in a hardirq context.
+ */
+ pm_runtime_get_noresume(dev);
+ res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+ if (!res)
+ pm_runtime_put(dev);
+
res = IRQ_HANDLED;
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->reset_completion) {
- phy->in_reset = 0;
- complete(phy->reset_completion);
- }
- spin_unlock_irqrestore(&phy->lock, flags);
+
+ spin_lock(&phy->lock);
+ /* Delete timer and set phy_attached atomically */
+ del_timer(&phy->timer);
+ phy->phy_attached = 1;
+ spin_unlock(&phy->lock);
end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
@@ -1563,7 +1606,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
- hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0,
+ GFP_ATOMIC);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
@@ -1582,15 +1626,12 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -1691,8 +1732,11 @@ static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
int i;
irq_value &= ~irq_msk;
- if (!irq_value)
+ if (!irq_value) {
+ dev_warn(dev, "phy%d channel int 1 received with status bits cleared\n",
+ phy_no);
return;
+ }
for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
const struct hisi_sas_hw_error *error = &port_axi_error[i];
@@ -1753,8 +1797,11 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
BIT(CHL_INT2_RX_INVLD_DW_OFF);
irq_value &= ~irq_msk;
- if (!irq_value)
+ if (!irq_value) {
+ dev_warn(dev, "phy%d channel int 2 received with status bits cleared\n",
+ phy_no);
return;
+ }
if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
dev_warn(dev, "phy%d identify timeout\n", phy_no);
@@ -2101,7 +2148,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static void
+static bool
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@@ -2114,11 +2161,21 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ /*
+ * If the returned response frame is incorrect because of data underflow
+ * but the I/O information has been written to host memory, examine
+ * the response IU.
+ */
+ if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+ (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
+ return false;
+
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2132,7 +2189,10 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ ts->stat = SAS_PROTO_RESPONSE;
+ } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2145,15 +2205,16 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_sata_done(task, slot);
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
break;
}
+ return true;
}
-static int
-slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2161,7 +2222,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v3_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v3_hdr *complete_hdr =
@@ -2171,7 +2231,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0, dw1, dw3;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2179,8 +2239,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
@@ -2225,16 +2284,20 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
- slot_err_v3_hw(hisi_hba, task, slot);
- if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task, sas_dev->device_id,
- dw0, dw1, complete_hdr->act, dw3,
- error_info[0], error_info[1],
- error_info[2], error_info[3]);
- if (unlikely(slot->abort))
- return ts->stat;
- goto out;
+ if (slot_err_v3_hw(hisi_hba, task, slot)) {
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task, sas_dev->device_id,
+ SAS_ADDR(device->sas_addr),
+ dw0, dw1, complete_hdr->act, dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+ if (unlikely(slot->abort)) {
+ sas_task_abort(task);
+ return;
+ }
+ goto out;
+ }
}
switch (task->task_proto) {
@@ -2250,10 +2313,8 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2263,11 +2324,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
hisi_sas_sata_done(task, slot);
break;
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -2278,12 +2339,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2295,15 +2355,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n ",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
@@ -2360,76 +2418,52 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
return IRQ_WAKE_THREAD;
}
-static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
+static void hisi_sas_v3_free_vectors(void *data)
{
- const struct cpumask *mask;
- int queue, cpu;
+ struct pci_dev *pdev = data;
- for (queue = 0; queue < nvecs; queue++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
+ pci_free_irq_vectors(pdev);
+}
- mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
- BASE_VECTORS_V3_HW);
- if (!mask)
- goto fallback;
- cq->irq_mask = mask;
- for_each_cpu(cpu, mask)
- hisi_hba->reply_map[cpu] = queue;
- }
- return;
+static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int vectors;
+ int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
+ struct irq_affinity desc = {
+ .pre_vectors = BASE_VECTORS_V3_HW,
+ };
+
+ min_msi = MIN_AFFINE_VECTORS_V3_HW;
+ vectors = pci_alloc_irq_vectors_affinity(pdev,
+ min_msi, max_msi,
+ PCI_IRQ_MSI |
+ PCI_IRQ_AFFINITY,
+ &desc);
+ if (vectors < 0)
+ return -ENOENT;
-fallback:
- for_each_possible_cpu(cpu)
- hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
- /* Don't clean all CQ masks */
+
+ hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ shost->nr_hw_queues = hisi_hba->cq_nvecs;
+
+ devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
+ return 0;
}
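
interrupt_preinit_v3_hw ties the lifetime of the MSI vectors to the device via devm_add_action(), so pci_free_irq_vectors() runs automatically on unbind and the old free_irq_vectors error path disappears from interrupt_init_v3_hw(). Note the devm_add_action() return value is ignored above; a stricter variant (a sketch, not what the patch does) would use devm_add_action_or_reset(), which invokes the action immediately if registration fails:

/* Sketch: fail preinit cleanly if the devm action cannot be registered;
 * on registration failure the action runs at once, so the vectors are
 * already freed when the error is propagated.
 */
static int add_free_vectors_action(struct pci_dev *pdev)
{
	return devm_add_action_or_reset(&pdev->dev,
					hisi_sas_v3_free_vectors, pdev);
}
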
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
struct pci_dev *pdev = hisi_hba->pci_dev;
- int vectors, rc, i;
- int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
-
- if (auto_affine_msi_experimental) {
- struct irq_affinity desc = {
- .pre_vectors = BASE_VECTORS_V3_HW,
- };
-
- dev_info(dev, "Enable MSI auto-affinity\n");
-
- min_msi = MIN_AFFINE_VECTORS_V3_HW;
-
- hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
- sizeof(unsigned int),
- GFP_KERNEL);
- if (!hisi_hba->reply_map)
- return -ENOMEM;
- vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
- min_msi, max_msi,
- PCI_IRQ_MSI |
- PCI_IRQ_AFFINITY,
- &desc);
- if (vectors < 0)
- return -ENOENT;
- setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
- } else {
- min_msi = max_msi;
- vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
- max_msi, PCI_IRQ_MSI);
- if (vectors < 0)
- return vectors;
- }
-
- hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ int rc, i;
rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
int_phy_up_down_bcast_v3_hw, 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
@@ -2437,8 +2471,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
DRV_NAME " channel", hisi_hba);
if (rc) {
dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
@@ -2446,8 +2479,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
DRV_NAME " fatal", hisi_hba);
if (rc) {
dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
if (hisi_sas_intr_conv)
@@ -2468,16 +2500,16 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
i, rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
+ }
+ cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW);
+ if (!cq->irq_mask) {
+ dev_err(dev, "could not get cq%d irq affinity!\n", i);
+ return -ENOENT;
}
}
return 0;
-
-free_irq_vectors:
- pci_free_irq_vectors(pdev);
- return rc;
}
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
@@ -2499,8 +2531,10 @@ static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
enum sas_linkrate max = r->maximum_linkrate;
- u32 prog_phy_link_rate = 0x800;
+ u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ PROG_PHY_LINK_RATE);
+ prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
@@ -2514,10 +2548,11 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
synchronize_irq(pci_irq_vector(pdev, 1));
synchronize_irq(pci_irq_vector(pdev, 2));
synchronize_irq(pci_irq_vector(pdev, 11));
- for (i = 0; i < hisi_hba->queue_count; i++) {
+ for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
+
+ for (i = 0; i < hisi_hba->cq_nvecs; i++)
synchronize_irq(pci_irq_vector(pdev, i + 16));
- }
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
@@ -2742,14 +2777,53 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
-static struct device_attribute *host_attrs_v3_hw[] = {
- &dev_attr_phy_event_threshold,
- &dev_attr_intr_conv_v3_hw,
- &dev_attr_intr_coal_ticks_v3_hw,
- &dev_attr_intr_coal_count_v3_hw,
+static int slave_configure_v3_hw(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ int ret = hisi_sas_slave_configure(sdev);
+ struct device *dev = hisi_hba->dev;
+
+ if (ret)
+ return ret;
+
+ if (sdev->type == TYPE_ENCLOSURE)
+ return 0;
+
+ if (!device_link_add(&sdev->sdev_gendev, dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) {
+ if (pm_runtime_enabled(dev)) {
+ dev_info(dev, "add device link failed, disable runtime PM for the host\n");
+ pm_runtime_disable(dev);
+ }
+ }
+
+ return 0;
+}
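
The device link created in slave_configure_v3_hw makes each SCSI device a runtime-PM consumer of the HBA (DL_FLAG_PM_RUNTIME), with the supplier forced active at link creation (DL_FLAG_RPM_ACTIVE), so the controller cannot runtime-suspend while an attached device is in use. device_link_add() returns NULL on failure, in which case the driver disables runtime PM on the host entirely rather than risk suspending it under an active device; enclosure devices are skipped.
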
+
+static struct attribute *host_v3_hw_attrs[] = {
+ &dev_attr_phy_event_threshold.attr,
+ &dev_attr_intr_conv_v3_hw.attr,
+ &dev_attr_intr_coal_ticks_v3_hw.attr,
+ &dev_attr_intr_coal_count_v3_hw.attr,
NULL
};
+ATTRIBUTE_GROUPS(host_v3_hw);
+
+#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
+
+struct hisi_sas_debugfs_reg_lu {
+ char *name;
+ int off;
+};
+
+struct hisi_sas_debugfs_reg {
+ const struct hisi_sas_debugfs_reg_lu *lu;
+ int count;
+ int base_off;
+};
+
static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
HISI_SAS_DEBUGFS_REG(PHY_CFG),
HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
@@ -2805,7 +2879,6 @@ static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
.lu = debugfs_port_reg_lu,
.count = 0x100,
.base_off = PORT_BASE,
- .read_port_reg = hisi_sas_phy_read32,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
@@ -2878,7 +2951,6 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
.lu = debugfs_global_reg_lu,
.count = 0x800,
- .read_global_reg = hisi_sas_read32,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
@@ -2893,10 +2965,10 @@ static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
.lu = debugfs_axi_reg_lu,
.count = 0x61,
.base_off = AXI_MASTER_CFG_BASE,
- .read_global_reg = hisi_sas_read32,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
@@ -2910,7 +2982,6 @@ static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
.lu = debugfs_ras_reg_lu,
.count = 0x10,
.base_off = RAS_BASE,
- .read_global_reg = hisi_sas_read32,
};
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
@@ -2938,6 +3009,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
{
u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
HISI_SAS_IOST_ITCT_CACHE_NUM;
+ struct device *dev = hisi_hba->dev;
u32 *buf = cache;
u32 i, val;
@@ -2950,7 +3022,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
}
if (val != 0xffffffff) {
- pr_err("Issue occur when reading IOST/ITCT cache!\n");
+ dev_err(dev, "Issue occurred while reading IOST/ITCT cache!\n");
return;
}
@@ -2964,42 +3036,48 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
u32 reg_val;
- int phy_id = hisi_hba->debugfs_bist_phy_no;
+ int phy_no = hisi_hba->debugfs_bist_phy_no;
+ int i;
/* disable PHY */
- hisi_sas_phy_enable(hisi_hba, phy_id, 0);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
+
+ /* update FFE */
+ for (i = 0; i < FFE_CFG_MAX; i++)
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
+ hisi_hba->debugfs_bist_ffe[phy_no][i]);
/* disable ALOS */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
- hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
}
static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
{
u32 reg_val;
- int phy_id = hisi_hba->debugfs_bist_phy_no;
+ int phy_no = hisi_hba->debugfs_bist_phy_no;
/* disable loopback */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL);
reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
CFG_BIST_TEST_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val);
/* enable ALOS */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
- hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
/* restore the linkrate */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
/* init OOB link rate as 1.5 Gbits */
- reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
- reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
- hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
+ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
+ reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
/* enable PHY */
- hisi_sas_phy_enable(hisi_hba, phy_id, 1);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
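
The restore path reflects the reworked PROG_PHY_LINK_RATE layout introduced at the top of this patch: bits 7:0 (CFG_PROG_PHY_LINK_RATE_MSK) now carry the programmed link-rate mask, while bits 11:8 (CFG_PROG_OOB_PHY_LINK_RATE_MSK) select the OOB rate. BIST teardown therefore resets only the OOB field (0x8, i.e. 1.5 Gbit) and leaves the rate mask alone, and init_reg_v3_hw()/phy_set_linkrate_v3_hw() switch to read-modify-write of the register instead of starting from the hard-coded 0x800.
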
#define SAS_PHY_BIST_CODE_INIT 0x1
@@ -3008,74 +3086,100 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
u32 reg_val, mode_tmp;
u32 linkrate = hisi_hba->debugfs_bist_linkrate;
- u32 phy_id = hisi_hba->debugfs_bist_phy_no;
+ u32 phy_no = hisi_hba->debugfs_bist_phy_no;
+ u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no];
u32 code_mode = hisi_hba->debugfs_bist_code_mode;
u32 path_mode = hisi_hba->debugfs_bist_mode;
+ u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
struct device *dev = hisi_hba->dev;
- dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
- linkrate, phy_id, code_mode, path_mode);
+ dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
+ phy_no, linkrate, code_mode, path_mode,
+ ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
+ ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
+ ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
+ ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
+ fix_code[FIXED_CODE_1]);
mode_tmp = path_mode ? 2 : 1;
if (enable) {
/* some preparations before bist test */
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
/* set linkrate of bit test*/
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
PROG_PHY_LINK_RATE);
- reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
- reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- PROG_PHY_LINK_RATE, reg_val);
+ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
+ reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ reg_val);
/* set code mode of bit test */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
SAS_PHY_BIST_CTRL);
- reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
- CFG_LOOP_TEST_MODE_MSK |
- CFG_RX_BIST_EN_MSK |
- CFG_TX_BIST_EN_MSK |
- CFG_BIST_TEST_MSK);
+ reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
+ CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
+ CFG_BIST_TEST_MSK);
reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
(mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
CFG_BIST_TEST_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
+ reg_val);
/* set the bist init value */
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CODE,
- SAS_PHY_BIST_CODE_INIT);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CODE1,
- SAS_PHY_BIST_CODE1_INIT);
+ if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
+ reg_val = hisi_hba->debugfs_bist_fixed_code[0];
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE, reg_val);
+
+ reg_val = hisi_hba->debugfs_bist_fixed_code[1];
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE1, reg_val);
+ } else {
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE,
+ SAS_PHY_BIST_CODE_INIT);
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE1,
+ SAS_PHY_BIST_CODE1_INIT);
+ }
mdelay(100);
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
+ reg_val);
/* clear error bit */
mdelay(100);
- hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
+ hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
} else {
/* disable bist test and recover it */
hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
- phy_id, SAS_BIST_ERR_CNT);
+ phy_no, SAS_BIST_ERR_CNT);
hisi_sas_bist_test_restore_v3_hw(hisi_hba);
}
return 0;
}
+static void hisi_sas_map_queues(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
+}
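
For v3, a PCI function, the cpu-to-queue mapping is delegated to blk_mq_pci_map_queues(), which reads each vector's affinity mask from the PCI core; the BASE_VECTORS_V3_HW offset skips the non-CQ vectors at the front, mirroring the pre_vectors reservation made in interrupt_preinit_v3_hw().
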
+
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
- .slave_configure = hisi_sas_slave_configure,
+ .slave_configure = slave_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
+ .map_queues = hisi_sas_map_queues,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
@@ -3084,18 +3188,19 @@ static struct scsi_host_template sht_v3_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = host_attrs_v3_hw,
+ .shost_groups = host_v3_hw_groups,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
- .hw_init = hisi_sas_v3_init,
.setup_itct = setup_itct_v3_hw,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
@@ -3118,14 +3223,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.get_events = phy_get_events_v3_hw,
.write_gpio = write_gpio_v3_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
- .debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
- .debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
- .debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
- .debugfs_reg_port = &debugfs_port_reg,
- .snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
- .snapshot_restore = debugfs_snapshot_restore_v3_hw,
- .read_iost_itct_cache = read_iost_itct_cache_v3_hw,
- .set_bist = debugfs_set_bist_v3_hw,
+ .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw,
};
static struct Scsi_Host *
@@ -3143,7 +3241,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
hisi_hba = shost_priv(shost);
INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
- INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
+ INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
@@ -3171,6 +3269,1483 @@ err_out:
return NULL;
}
+static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
+ hisi_hba->complete_hdr[i],
+ HISI_SAS_QUEUE_SLOTS * queue_entry_size);
+}
+
+static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int dump_index = hisi_hba->debugfs_dump_index;
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ int j;
+
+ debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
+ cmd_hdr = hisi_hba->cmd_hdr[i];
+
+ for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
+ memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
+ queue_entry_size);
+ }
+}
+
+static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ const struct hisi_sas_debugfs_reg *port = &debugfs_port_reg;
+ int i, phy_cnt;
+ u32 offset;
+ u32 *databuf;
+
+ for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
+ databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
+ for (i = 0; i < port->count; i++, databuf++) {
+ offset = port->base_off + 4 * i;
+ *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt,
+ offset);
+ }
+ }
+}
+
+static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
+ int i;
+
+ for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+}
+
+static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
+ const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg;
+ int i;
+
+ for (i = 0; i < axi->count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off);
+}
+
+static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
+ const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg;
+ int i;
+
+ for (i = 0; i < ras->count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off);
+}
+
+static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
+ struct hisi_sas_itct *itct;
+ int i;
+
+ read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_ITCT_CACHE, cachebuf);
+
+ itct = hisi_hba->itct;
+
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
+ databuf += sizeof(struct hisi_sas_itct);
+ }
+}
+
+static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ int max_command_entries = HISI_SAS_MAX_COMMANDS;
+ void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
+ struct hisi_sas_iost *iost;
+ int i;
+
+ read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf);
+
+ iost = hisi_hba->iost;
+
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
+ databuf += sizeof(struct hisi_sas_iost);
+ }
+}
+
+static const char *
+debugfs_to_reg_name_v3_hw(int off, int base_off,
+ const struct hisi_sas_debugfs_reg_lu *lu)
+{
+ for (; lu->name; lu++) {
+ if (off == lu->off - base_off)
+ return lu->name;
+ }
+
+ return NULL;
+}
+
+static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
+ const struct hisi_sas_debugfs_reg *reg)
+{
+ int i;
+
+ for (i = 0; i < reg->count; i++) {
+ int off = i * 4;
+ const char *name;
+
+ name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
+ reg->lu);
+
+ if (name)
+ seq_printf(s, "0x%08x 0x%08x %s\n", off,
+ regs_val[i], name);
+ else
+ seq_printf(s, "0x%08x 0x%08x\n", off,
+ regs_val[i]);
+ }
+}
+
+static int debugfs_global_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_regs *global = s->private;
+
+ debugfs_print_reg_v3_hw(global->data, s,
+ &debugfs_global_reg);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw);
+
+static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_regs *axi = s->private;
+
+ debugfs_print_reg_v3_hw(axi->data, s,
+ &debugfs_axi_reg);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw);
+
+static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_regs *ras = s->private;
+
+ debugfs_print_reg_v3_hw(ras->data, s,
+ &debugfs_ras_reg);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw);
+
+static int debugfs_port_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_port *port = s->private;
+ const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg;
+
+ debugfs_print_reg_v3_hw(port->data, s, reg_port);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw);
+
+static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
+ int sz, __le64 *ptr)
+{
+ int i;
+
+ /* completion header size not fixed per HW version */
+ seq_printf(s, "index %04d:\n\t", index);
+ for (i = 1; i <= sz / 8; i++, ptr++) {
+ seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
+ if (!(i % 2))
+ seq_puts(s, "\n\t");
+ }
+
+ seq_puts(s, "\n");
+}
+
+static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
+ int sz, __le32 *ptr)
+{
+ int i;
+
+ /* completion header size not fixed per HW version */
+ seq_printf(s, "index %04d:\n\t", index);
+ for (i = 1; i <= sz / 4; i++, ptr++) {
+ seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
+ if (!(i % 4))
+ seq_puts(s, "\n\t");
+ }
+ seq_puts(s, "\n");
+}
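
For orientation, a 64-byte completion header printed by debugfs_show_row_32_v3_hw() comes out as sixteen dwords, four per line (values below are illustrative only):

index 0003:
	 0x00000103 0x00000000 0x00000000 0x00000000
	 0x00000000 0x00000000 0x00000000 0x00000000
	 ...
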
+
+static void debugfs_cq_show_slot_v3_hw(struct seq_file *s, int slot,
+ struct hisi_sas_debugfs_cq *debugfs_cq)
+{
+ struct hisi_sas_cq *cq = debugfs_cq->cq;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ __le32 *complete_hdr = debugfs_cq->complete_hdr +
+ (hisi_hba->hw->complete_hdr_size * slot);
+
+ debugfs_show_row_32_v3_hw(s, slot,
+ hisi_hba->hw->complete_hdr_size,
+ complete_hdr);
+}
+
+static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
+ int slot;
+
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw);
+
+static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot,
+ void *dq_ptr)
+{
+ struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+ void *cmd_queue = debugfs_dq->hdr;
+ __le32 *cmd_hdr = cmd_queue +
+ sizeof(struct hisi_sas_cmd_hdr) * slot;
+
+ debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr),
+ cmd_hdr);
+}
+
+static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p)
+{
+ int slot;
+
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ debugfs_dq_show_slot_v3_hw(s, slot, s->private);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw);
+
+static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
+ int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
+
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
+
+ debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw);
+
+static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache =
+ debugfs_iost_cache->cache;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ int i, tab_idx;
+ __le64 *iost;
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
+ /*
+ * Data struct of IOST cache:
+ * Data[1]: BIT0~15: Table index
+ * Bit16: Valid mask
+ * Data[2]~[9]: IOST table
+ */
+ tab_idx = (iost_cache->data[1] & 0xffff);
+ iost = (__le64 *)iost_cache;
+
+ debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw);
+
+static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
+
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
+
+ debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw);
+
+static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache =
+ debugfs_itct_cache->cache;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ int i, tab_idx;
+ __le64 *itct;
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
+ /*
+ * Data struct of ITCT cache:
+ * Data[1]: BIT0~15: Table index
+ * Bit16: Valid mask
+ * Data[2]~[9]: ITCT table
+ */
+ tab_idx = itct_cache->data[1] & 0xffff;
+ itct = (__le64 *)itct_cache;
+
+ debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);
+
+static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+{
+ u64 *debugfs_timestamp;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ struct dentry *dump_dentry;
+ struct dentry *dentry;
+ char name[256];
+ int p;
+ int c;
+ int d;
+
+ snprintf(name, 256, "%d", dump_index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
+
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &debugfs_global_v3_hw_fops);
+
+ /* Create port dir and files */
+ dentry = debugfs_create_dir("port", dump_dentry);
+ for (p = 0; p < hisi_hba->n_phy; p++) {
+ snprintf(name, 256, "%d", p);
+
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_port_reg[dump_index][p],
+ &debugfs_port_v3_hw_fops);
+ }
+
+ /* Create CQ dir and files */
+ dentry = debugfs_create_dir("cq", dump_dentry);
+ for (c = 0; c < hisi_hba->queue_count; c++) {
+ snprintf(name, 256, "%d", c);
+
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_cq[dump_index][c],
+ &debugfs_cq_v3_hw_fops);
+ }
+
+ /* Create DQ dir and files */
+ dentry = debugfs_create_dir("dq", dump_dentry);
+ for (d = 0; d < hisi_hba->queue_count; d++) {
+ snprintf(name, 256, "%d", d);
+
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_dq[dump_index][d],
+ &debugfs_dq_v3_hw_fops);
+ }
+
+ debugfs_create_file("iost", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost[dump_index],
+ &debugfs_iost_v3_hw_fops);
+
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost_cache[dump_index],
+ &debugfs_iost_cache_v3_hw_fops);
+
+ debugfs_create_file("itct", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct[dump_index],
+ &debugfs_itct_v3_hw_fops);
+
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct_cache[dump_index],
+ &debugfs_itct_cache_v3_hw_fops);
+
+ debugfs_create_file("axi", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
+ &debugfs_axi_v3_hw_fops);
+
+ debugfs_create_file("ras", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
+ &debugfs_ras_v3_hw_fops);
+}
+
+static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
+
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
+ return;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
+
+ debugfs_snapshot_prepare_v3_hw(hisi_hba);
+
+ debugfs_snapshot_global_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_port_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
+
+ debugfs_create_files_v3_hw(hisi_hba);
+
+ debugfs_snapshot_restore_v3_hw(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
+}
+
+static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hisi_hba *hisi_hba = file->f_inode->i_private;
+ char buf[8];
+
+ if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
+ return -EFAULT;
+
+ if (count > 8)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (buf[0] != '1')
+ return -EINVAL;
+
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
+ return count;
+}
+
+static const struct file_operations debugfs_trigger_dump_v3_hw_fops = {
+ .write = &debugfs_trigger_dump_v3_hw_write,
+ .owner = THIS_MODULE,
+};
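
To capture a snapshot at runtime, write the character '1' to the trigger_dump file (the exact debugfs path depends on the directory created during driver init, typically under /sys/kernel/debug/hisi_sas/): echo 1 > trigger_dump. The write queues hisi_hba->debugfs_work, whose handler takes the snapshot (see debugfs_snapshot_regs_v3_hw() above); any other input, an over-long write, or an exhausted dump budget is rejected.
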
+
+enum {
+ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
+ HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
+ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
+};
+
+static const struct {
+ int value;
+ char *name;
+} debugfs_loop_linkrate_v3_hw[] = {
+ { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
+ { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
+ { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+};
+
+static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
+ int match = (hisi_hba->debugfs_bist_linkrate ==
+ debugfs_loop_linkrate_v3_hw[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ debugfs_loop_linkrate_v3_hw[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
+ if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
+ pkbuf, 16)) {
+ hisi_hba->debugfs_bist_linkrate =
+ debugfs_loop_linkrate_v3_hw[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_linkrate_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = {
+ .open = debugfs_bist_linkrate_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_linkrate_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
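+
+/*
+ * Reading link_rate prints every supported rate with the current selection
+ * bracketed, e.g. "[1.5 Gbit] 3.0 Gbit 6.0 Gbit 12.0 Gbit"; writing one of
+ * the listed names selects it, and writes are rejected with -EPERM while
+ * BIST is enabled. The same show/store pattern is reused for code_mode and
+ * loopback_mode below.
+ */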
+
+static const struct {
+ int value;
+ char *name;
+} debugfs_loop_code_mode_v3_hw[] = {
+ { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
+ { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
+ { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
+ { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
+ { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
+ { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
+ { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
+ { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
+ { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
+ { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
+ { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
+ { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
+};
+
+static int debugfs_bist_code_mode_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
+ int match = (hisi_hba->debugfs_bist_code_mode ==
+ debugfs_loop_code_mode_v3_hw[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ debugfs_loop_code_mode_v3_hw[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
+ if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name,
+ pkbuf, 16)) {
+ hisi_hba->debugfs_bist_code_mode =
+ debugfs_loop_code_mode_v3_hw[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int debugfs_bist_code_mode_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_code_mode_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_code_mode_v3_hw_fops = {
+ .open = debugfs_bist_code_mode_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_code_mode_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int phy_no;
+ int val;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ val = kstrtouint_from_user(buf, count, 0, &phy_no);
+ if (val)
+ return val;
+
+ if (phy_no >= hisi_hba->n_phy)
+ return -EINVAL;
+
+ hisi_hba->debugfs_bist_phy_no = phy_no;
+
+ return count;
+}
+
+static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
+
+ return 0;
+}
+
+static int debugfs_bist_phy_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_phy_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_phy_v3_hw_fops = {
+ .open = debugfs_bist_phy_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_phy_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int cnt;
+ int val;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ val = kstrtouint_from_user(buf, count, 0, &cnt);
+ if (val)
+ return val;
+
+ if (cnt)
+ return -EINVAL;
+
+ hisi_hba->debugfs_bist_cnt = 0;
+ return count;
+}
+
+static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt);
+
+ return 0;
+}
+
+static int debugfs_bist_cnt_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_cnt_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_cnt_v3_hw_fops = {
+ .open = debugfs_bist_cnt_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_cnt_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
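+
+/*
+ * Example usage (a sketch): the BIST error count is read-only except that
+ * writing 0 clears it; any non-zero write is rejected with -EINVAL.
+ *
+ *   cat /sys/kernel/debug/hisi_sas/<dev>/bist/cnt
+ *   echo 0 > /sys/kernel/debug/hisi_sas/<dev>/bist/cnt
+ */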
+
+static const struct {
+ int value;
+ char *name;
+} debugfs_loop_modes_v3_hw[] = {
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
+};
+
+static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
+ int match = (hisi_hba->debugfs_bist_mode ==
+ debugfs_loop_modes_v3_hw[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ debugfs_loop_modes_v3_hw[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
+ if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) {
+ hisi_hba->debugfs_bist_mode =
+ debugfs_loop_modes_v3_hw[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int debugfs_bist_mode_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_mode_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_mode_v3_hw_fops = {
+ .open = debugfs_bist_mode_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_mode_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int enable;
+ int val;
+
+ val = kstrtouint_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ if (enable > 1)
+ return -EINVAL;
+
+ if (enable == hisi_hba->debugfs_bist_enable)
+ return count;
+
+ val = debugfs_set_bist_v3_hw(hisi_hba, enable);
+ if (val < 0)
+ return val;
+
+ hisi_hba->debugfs_bist_enable = enable;
+
+ return count;
+}
+
+static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
+
+ return 0;
+}
+
+static int debugfs_bist_enable_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_enable_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_enable_v3_hw_fops = {
+ .open = debugfs_bist_enable_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_enable_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
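+
+/*
+ * Example BIST session (a sketch; file names as created in
+ * debugfs_bist_init_v3_hw() below, debugfs root assumed as above):
+ *
+ *   cd /sys/kernel/debug/hisi_sas/<dev>/bist
+ *   echo 0 > phy_id                # select the PHY under test
+ *   echo serdes > loopback_mode
+ *   echo PRBS7 > code_mode
+ *   echo "1.5 Gbit" > link_rate
+ *   echo 1 > enable                # start; parameters are locked while on
+ *   echo 0 > enable                # stop
+ *   cat cnt                        # read the accumulated error count
+ */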
+
+static const struct {
+ char *name;
+} debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = {
+ { "SAS_1_5_GBPS" },
+ { "SAS_3_0_GBPS" },
+ { "SAS_6_0_GBPS" },
+ { "SAS_12_0_GBPS" },
+ { "FFE_RESV" },
+ { "SATA_1_5_GBPS" },
+ { "SATA_3_0_GBPS" },
+ { "SATA_6_0_GBPS" },
+};
+
+static ssize_t debugfs_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ u32 *val = m->private;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, val);
+ if (res)
+ return res;
+
+ return count;
+}
+
+static int debugfs_v3_hw_show(struct seq_file *s, void *p)
+{
+ u32 *val = s->private;
+
+ seq_printf(s, "0x%x\n", *val);
+
+ return 0;
+}
+
+static int debugfs_v3_hw_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, debugfs_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_v3_hw_fops = {
+ .open = debugfs_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
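+
+/*
+ * Generic u32 attribute: reads print the value in hex and writes accept any
+ * integer that kstrtouint() can parse. This single fops instance backs the
+ * BIST fixed_code and ffe files as well as all of the per-PHY trace FIFO
+ * configuration files created below.
+ */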
+
+static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct hisi_sas_phy *phy = s->private;
+ unsigned int set_val;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, &set_val);
+ if (res)
+ return res;
+
+ if (set_val > 0)
+ return -EINVAL;
+
+ atomic_set(&phy->down_cnt, 0);
+
+ return count;
+}
+
+static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+ return 0;
+}
+
+static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_phy_down_cnt_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = {
+ .open = debugfs_phy_down_cnt_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_phy_down_cnt_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
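+
+/*
+ * Example usage (a sketch): each PHY gets a file named after its number
+ * under phy_down_cnt/. Reading reports the accumulated phy-down events;
+ * only a write of 0, which clears the counter, is accepted.
+ *
+ *   cat /sys/kernel/debug/hisi_sas/<dev>/phy_down_cnt/0
+ *   echo 0 > /sys/kernel/debug/hisi_sas/<dev>/phy_down_cnt/0
+ */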
+
+enum fifo_dump_mode_v3_hw {
+ FIFO_DUMP_FOREVER = (1U << 0),
+ FIFO_DUMP_AFTER_TRIGGER = (1U << 1),
+ FIFO_DUMP_UNTIL_TRIGGER = (1U << 2),
+};
+
+enum fifo_trigger_mode_v3_hw {
+ FIFO_TRIGGER_EDGE = (1U << 0),
+ FIFO_TRIGGER_SAME_LEVEL = (1U << 1),
+ FIFO_TRIGGER_DIFF_LEVEL = (1U << 2),
+};
+
+static int debugfs_is_fifo_config_valid_v3_hw(struct hisi_sas_phy *phy)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (phy->fifo.signal_sel > 0xf) {
+ dev_info(hisi_hba->dev, "Invalid signal select: %u\n",
+ phy->fifo.signal_sel);
+ return -EINVAL;
+ }
+
+ switch (phy->fifo.dump_mode) {
+ case FIFO_DUMP_FOREVER:
+ case FIFO_DUMP_AFTER_TRIGGER:
+ case FIFO_DUMP_UNTIL_TRIGGER:
+ break;
+ default:
+ dev_info(hisi_hba->dev, "Invalid dump mode: %u\n",
+ phy->fifo.dump_mode);
+ return -EINVAL;
+ }
+
+ /* No need to check trigger_mode when dumping forever */
+ if (phy->fifo.dump_mode == FIFO_DUMP_FOREVER)
+ return 0;
+
+ switch (phy->fifo.trigger_mode) {
+ case FIFO_TRIGGER_EDGE:
+ case FIFO_TRIGGER_SAME_LEVEL:
+ case FIFO_TRIGGER_DIFF_LEVEL:
+ break;
+ default:
+ dev_info(hisi_hba->dev, "Invalid trigger mode: %u\n",
+ phy->fifo.trigger_mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int debugfs_update_fifo_config_v3_hw(struct hisi_sas_phy *phy)
+{
+ u32 trigger_mode = phy->fifo.trigger_mode;
+ u32 signal_sel = phy->fifo.signal_sel;
+ u32 dump_mode = phy->fifo.dump_mode;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ int phy_no = phy->sas_phy.id;
+ u32 reg_val;
+ int res;
+
+ /* Check the validity of the trace FIFO configuration */
+ res = debugfs_is_fifo_config_valid_v3_hw(phy);
+ if (res)
+ return res;
+
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ /* Disable the trace FIFO before updating the configuration */
+ reg_val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+
+ /* Update trace FIFO configuration */
+ reg_val &= ~(DFX_FIFO_CTRL_DUMP_MODE_MSK |
+ DFX_FIFO_CTRL_SIGNAL_SEL_MSK |
+ DFX_FIFO_CTRL_TRIGGER_MODE_MSK);
+
+ reg_val |= ((trigger_mode << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) |
+ (dump_mode << DFX_FIFO_CTRL_DUMP_MODE_OFF) |
+ (signal_sel << DFX_FIFO_CTRL_SIGNAL_SEL_OFF));
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK,
+ phy->fifo.dump_msk);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER,
+ phy->fifo.trigger);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK,
+ phy->fifo.trigger_msk);
+
+ /* Re-enable the trace FIFO after updating the configuration */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ reg_val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val);
+
+ return 0;
+}
+
+static ssize_t debugfs_fifo_update_cfg_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hisi_sas_phy *phy = filp->private_data;
+ bool update;
+ int val;
+
+ val = kstrtobool_from_user(buf, count, &update);
+ if (val)
+ return val;
+
+ if (!update)
+ return -EINVAL;
+
+ val = debugfs_update_fifo_config_v3_hw(phy);
+ if (val)
+ return val;
+
+ return count;
+}
+
+static const struct file_operations debugfs_fifo_update_cfg_v3_hw_fops = {
+ .open = simple_open,
+ .write = debugfs_fifo_update_cfg_v3_hw_write,
+ .owner = THIS_MODULE,
+};
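+
+/*
+ * Example trace FIFO session (a sketch; per-PHY directories as created in
+ * debugfs_fifo_init_v3_hw() below):
+ *
+ *   cd /sys/kernel/debug/hisi_sas/<dev>/fifo/0
+ *   echo 0x3 > signal_sel          # must be <= 0xf
+ *   echo 0x1 > dump_mode           # FIFO_DUMP_FOREVER
+ *   echo 1 > update_config         # validate and program the hardware
+ *   cat fifo_data
+ *
+ * trigger_mode is only checked for the triggered dump modes.
+ */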
+
+static void debugfs_read_fifo_data_v3_hw(struct hisi_sas_phy *phy)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ u32 *buf = phy->fifo.rd_data;
+ int phy_no = phy->sas_phy.id;
+ u32 val;
+ int i;
+
+ memset(buf, 0, sizeof(phy->fifo.rd_data));
+
+ /* Disable the trace FIFO before reading the data */
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val);
+
+ for (i = 0; i < HISI_SAS_FIFO_DATA_DW_SIZE; i++) {
+ val = hisi_sas_phy_read32(hisi_hba, phy_no,
+ DFX_FIFO_RD_DATA);
+ buf[i] = val;
+ }
+
+ /* Re-enable the trace FIFO after reading the data */
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val);
+}
+
+static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ debugfs_read_fifo_data_v3_hw(phy);
+
+ debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
+ phy->fifo.rd_data);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_fifo_data_v3_hw);
+
+static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int phy_no;
+
+ hisi_hba->debugfs_fifo_dentry =
+ debugfs_create_dir("fifo", hisi_hba->debugfs_dir);
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct dentry *port_dentry;
+ char name[256];
+ u32 val;
+
+ /* get default configuration for trace FIFO */
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= DFX_FIFO_CTRL_DUMP_MODE_MSK;
+ val >>= DFX_FIFO_CTRL_DUMP_MODE_OFF;
+ phy->fifo.dump_mode = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= DFX_FIFO_CTRL_TRIGGER_MODE_MSK;
+ val >>= DFX_FIFO_CTRL_TRIGGER_MODE_OFF;
+ phy->fifo.trigger_mode = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= DFX_FIFO_CTRL_SIGNAL_SEL_MSK;
+ val >>= DFX_FIFO_CTRL_SIGNAL_SEL_OFF;
+ phy->fifo.signal_sel = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK);
+ phy->fifo.dump_msk = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER);
+ phy->fifo.trigger = val;
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK);
+ phy->fifo.trigger_msk = val;
+
+ snprintf(name, 256, "%d", phy_no);
+ port_dentry = debugfs_create_dir(name,
+ hisi_hba->debugfs_fifo_dentry);
+
+ debugfs_create_file("update_config", 0200, port_dentry, phy,
+ &debugfs_fifo_update_cfg_v3_hw_fops);
+
+ debugfs_create_file("signal_sel", 0600, port_dentry,
+ &phy->fifo.signal_sel,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("dump_msk", 0600, port_dentry,
+ &phy->fifo.dump_msk,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("dump_mode", 0600, port_dentry,
+ &phy->fifo.dump_mode,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("trigger_mode", 0600, port_dentry,
+ &phy->fifo.trigger_mode,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("trigger", 0600, port_dentry,
+ &phy->fifo.trigger,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("trigger_msk", 0600, port_dentry,
+ &phy->fifo.trigger_msk,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("fifo_data", 0400, port_dentry, phy,
+ &debugfs_fifo_data_v3_hw_fops);
+ }
+}
+
+static void debugfs_work_handler_v3_hw(struct work_struct *work)
+{
+ struct hisi_hba *hisi_hba =
+ container_of(work, struct hisi_hba, debugfs_work);
+
+ debugfs_snapshot_regs_v3_hw(hisi_hba);
+}
+
+static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+{
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
+
+ for (i = 0; i < DEBUGFS_REGS_NUM; i++)
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
+}
+
+static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
+ [DEBUGFS_GLOBAL] = &debugfs_global_reg,
+ [DEBUGFS_AXI] = &debugfs_axi_reg,
+ [DEBUGFS_RAS] = &debugfs_ras_reg,
+};
+
+static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+{
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ struct device *dev = hisi_hba->dev;
+ int p, c, d, r, i;
+ size_t sz;
+
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+ struct hisi_sas_debugfs_regs *regs =
+ &hisi_hba->debugfs_regs[dump_index][r];
+
+ sz = debugfs_reg_array_v3_hw[r]->count * 4;
+ regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!regs->data)
+ goto fail;
+ regs->hisi_hba = hisi_hba;
+ }
+
+ sz = debugfs_port_reg.count * 4;
+ for (p = 0; p < hisi_hba->n_phy; p++) {
+ struct hisi_sas_debugfs_port *port =
+ &hisi_hba->debugfs_port_reg[dump_index][p];
+
+ port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!port->data)
+ goto fail;
+ port->phy = &hisi_hba->phy[p];
+ }
+
+ sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
+ for (c = 0; c < hisi_hba->queue_count; c++) {
+ struct hisi_sas_debugfs_cq *cq =
+ &hisi_hba->debugfs_cq[dump_index][c];
+
+ cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!cq->complete_hdr)
+ goto fail;
+ cq->cq = &hisi_hba->cq[c];
+ }
+
+ sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
+ for (d = 0; d < hisi_hba->queue_count; d++) {
+ struct hisi_sas_debugfs_dq *dq =
+ &hisi_hba->debugfs_dq[dump_index][d];
+
+ dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!dq->hdr)
+ goto fail;
+ dq->dq = &hisi_hba->dq[d];
+ }
+
+ sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
+
+ hisi_hba->debugfs_iost[dump_index].iost =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost[dump_index].iost)
+ goto fail;
+
+ sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+ sizeof(struct hisi_sas_iost_itct_cache);
+
+ hisi_hba->debugfs_iost_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
+ goto fail;
+
+ sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+ sizeof(struct hisi_sas_iost_itct_cache);
+
+ hisi_hba->debugfs_itct_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
+ goto fail;
+
+ /* New memory allocations must be placed before the itct allocation */
+ sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
+
+ hisi_hba->debugfs_itct[dump_index].itct =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct[dump_index].itct)
+ goto fail;
+
+ return 0;
+fail:
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ debugfs_release_v3_hw(hisi_hba, i);
+ return -ENOMEM;
+}
+
+static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+ hisi_hba->debugfs_dir);
+ char name[16];
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ snprintf(name, 16, "%d", phy_no);
+ debugfs_create_file(name, 0600, dir,
+ &hisi_hba->phy[phy_no],
+ &debugfs_phy_down_cnt_v3_hw_fops);
+ }
+}
+
+static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct dentry *ports_dentry;
+ int phy_no;
+
+ hisi_hba->debugfs_bist_dentry =
+ debugfs_create_dir("bist", hisi_hba->debugfs_dir);
+ debugfs_create_file("link_rate", 0600,
+ hisi_hba->debugfs_bist_dentry, hisi_hba,
+ &debugfs_bist_linkrate_v3_hw_fops);
+
+ debugfs_create_file("code_mode", 0600,
+ hisi_hba->debugfs_bist_dentry, hisi_hba,
+ &debugfs_bist_code_mode_v3_hw_fops);
+
+ debugfs_create_file("fixed_code", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[0],
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("fixed_code_1", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[1],
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_phy_v3_hw_fops);
+
+ debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_cnt_v3_hw_fops);
+
+ debugfs_create_file("loopback_mode", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_mode_v3_hw_fops);
+
+ debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_enable_v3_hw_fops);
+
+ ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct dentry *port_dentry;
+ struct dentry *ffe_dentry;
+ char name[256];
+ int i;
+
+ snprintf(name, 256, "%d", phy_no);
+ port_dentry = debugfs_create_dir(name, ports_dentry);
+ ffe_dentry = debugfs_create_dir("ffe", port_dentry);
+ for (i = 0; i < FFE_CFG_MAX; i++) {
+ if (i == FFE_RESV)
+ continue;
+ debugfs_create_file(debugfs_ffe_name_v3_hw[i].name,
+ 0600, ffe_dentry,
+ &hisi_hba->debugfs_bist_ffe[phy_no][i],
+ &debugfs_v3_hw_fops);
+ }
+ }
+
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
+static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
+ hisi_sas_debugfs_dir);
+ debugfs_create_file("trigger_dump", 0200,
+ hisi_hba->debugfs_dir,
+ hisi_hba,
+ &debugfs_trigger_dump_v3_hw_fops);
+
+ /* create bist structures */
+ debugfs_bist_init_v3_hw(hisi_hba);
+
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
+ debugfs_fifo_init_v3_hw(hisi_hba);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
+ }
+}
+
+static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+{
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+}
+
static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -3182,40 +4757,38 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct sas_ha_struct *sha;
int rc, phy_nr, port_nr, i;
- rc = pci_enable_device(pdev);
+ rc = pcim_enable_device(pdev);
if (rc)
goto err_out;
pci_set_master(pdev);
- rc = pci_request_regions(pdev, DRV_NAME);
+ rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME);
if (rc)
- goto err_out_disable_device;
+ goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
- goto err_out_regions;
+ goto err_out;
}
shost = hisi_sas_shost_alloc_pci(pdev);
if (!shost) {
rc = -ENOMEM;
- goto err_out_regions;
+ goto err_out;
}
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
dev_set_drvdata(dev, sha);
- hisi_hba->regs = pcim_iomap(pdev, 5, 0);
+ hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW];
if (!hisi_hba->regs) {
dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
- goto err_out_ha;
+ goto err_out_free_host;
}
phy_nr = port_nr = hisi_hba->n_phy;
@@ -3224,7 +4797,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
if (!arr_phy || !arr_port) {
rc = -ENOMEM;
- goto err_out_ha;
+ goto err_out_free_host;
}
sha->sas_phy = arr_phy;
@@ -3261,33 +4834,50 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (hisi_sas_debugfs_enable)
- hisi_sas_debugfs_init(hisi_hba);
+ debugfs_init_v3_hw(hisi_hba);
+
+ rc = interrupt_preinit_v3_hw(hisi_hba);
+ if (rc)
+ goto err_out_undo_debugfs;
rc = scsi_add_host(shost, dev);
if (rc)
- goto err_out_ha;
+ goto err_out_undo_debugfs;
rc = sas_register_ha(sha);
if (rc)
- goto err_out_register_ha;
+ goto err_out_remove_host;
- rc = hisi_hba->hw->hw_init(hisi_hba);
+ rc = hisi_sas_v3_init(hisi_hba);
if (rc)
- goto err_out_register_ha;
+ goto err_out_unregister_ha;
scsi_scan_host(shost);
+ pm_runtime_set_autosuspend_delay(dev, 5000);
+ pm_runtime_use_autosuspend(dev);
+ /*
+ * If ATA disks are connected to the SAS controller, libata
+ * additionally creates an ata_port, which affects the child_count
+ * of hisi_hba->dev. Even when all the disks are suspended, the
+ * ata_port still exists and the child_count of hisi_hba->dev is
+ * not 0. So use pm_suspend_ignore_children() to ignore its effect
+ * on hisi_hba->dev.
+ */
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
-err_out_register_ha:
+err_out_unregister_ha:
+ sas_unregister_ha(sha);
+err_out_remove_host:
scsi_remove_host(shost);
-err_out_ha:
- hisi_sas_debugfs_exit(hisi_hba);
+err_out_undo_debugfs:
+ debugfs_exit_v3_hw(hisi_hba);
+err_out_free_host:
+ hisi_sas_free(hisi_hba);
scsi_host_put(shost);
-err_out_regions:
- pci_release_regions(pdev);
-err_out_disable_device:
- pci_disable_device(pdev);
err_out:
return rc;
}
@@ -3297,16 +4887,15 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
int i;
- free_irq(pci_irq_vector(pdev, 1), hisi_hba);
- free_irq(pci_irq_vector(pdev, 2), hisi_hba);
- free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
- free_irq(pci_irq_vector(pdev, nr), cq);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
- pci_free_irq_vectors(pdev);
}
static void hisi_sas_v3_remove(struct pci_dev *pdev)
@@ -3316,17 +4905,16 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ pm_runtime_get_noresume(dev);
+ del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
+ flush_workqueue(hisi_hba->wq);
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
- hisi_sas_debugfs_exit(hisi_hba);
+ debugfs_exit_v3_hw(hisi_hba);
scsi_host_put(shost);
}
@@ -3338,7 +4926,8 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ down(&hisi_hba->sem);
+ set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
rc = disable_host_v3_hw(hisi_hba);
@@ -3370,13 +4959,13 @@ enum {
hip08,
};
-static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+static int _suspend_v3_hw(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- pci_power_t device_state;
int rc;
if (!pdev->pm_cap) {
@@ -3384,9 +4973,11 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
return -ENODEV;
}
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
+ dev_warn(dev, "entering suspend state\n");
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
@@ -3395,28 +4986,24 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
if (rc) {
dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
return rc;
}
hisi_sas_init_mem(hisi_hba);
- device_state = pci_choose_state(pdev, state);
- dev_warn(dev, "entering operating state [D%d]\n",
- device_state);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, device_state);
-
hisi_sas_release_tasks(hisi_hba);
sas_suspend_ha(sha);
+
+ dev_warn(dev, "end of suspending controller\n");
return 0;
}
-static int hisi_sas_v3_resume(struct pci_dev *pdev)
+static int _resume_v3_hw(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = hisi_hba->shost;
@@ -3426,16 +5013,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
dev_warn(dev, "resuming from operating state [D%d]\n",
device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(dev, "enable device failed during resume (%d)\n", rc);
- return rc;
- }
- pci_set_master(pdev);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -3443,16 +5021,53 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
rc = hw_init_v3_hw(hisi_hba);
if (rc) {
scsi_remove_host(shost);
- pci_disable_device(pdev);
return rc;
}
- hisi_hba->hw->phys_init(hisi_hba);
- sas_resume_ha(sha);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ phys_init_v3_hw(hisi_hba);
+
+ /*
+ * If a directly-attached disk is removed during suspend, a deadlock
+ * may occur, as the PHYE_RESUME_TIMEOUT processing will require the
+ * hisi_hba->device to be active, which can only happen when resume
+ * completes. So don't wait for the HA event workqueue to drain upon
+ * resume.
+ */
+ sas_resume_ha_no_sync(sha);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+
+ dev_warn(dev, "end of resuming controller\n");
return 0;
}
+static int __maybe_unused suspend_v3_hw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ int rc;
+
+ set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ rc = _suspend_v3_hw(device);
+ if (rc)
+ clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ return rc;
+}
+
+static int __maybe_unused resume_v3_hw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ int rc = _resume_v3_hw(device);
+
+ clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ return rc;
+}
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
@@ -3464,14 +5079,18 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.reset_done = hisi_sas_reset_done_v3_hw,
};
+static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops,
+ suspend_v3_hw,
+ resume_v3_hw,
+ NULL);
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
- .suspend = hisi_sas_v3_suspend,
- .resume = hisi_sas_v3_resume,
.err_handler = &hisi_sas_err_handler,
+ .driver.pm = &hisi_sas_v3_pm_ops,
};
module_pci_driver(sas_v3_pci_driver);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 1d669e47b692..9857dba09c95 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev)
static struct class shost_class = {
.name = "scsi_host",
.dev_release = scsi_host_cls_release,
+ .dev_groups = scsi_shost_groups,
};
/**
@@ -181,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ /*
+ * New SCSI devices cannot be attached anymore because of the SCSI host
+ * state so drop the tag set refcnt. Wait until the tag set refcnt drops
+ * to zero because .exit_cmd_priv implementations may need the host
+ * pointer.
+ */
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ wait_for_completion(&shost->tagset_freed);
+
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -220,11 +230,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
- error = scsi_init_sense_cache(shost);
- if (error)
- goto fail;
+ /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+ shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
+ shost->can_queue);
- error = scsi_mq_setup_tags(shost);
+ error = scsi_init_sense_cache(shost);
if (error)
goto fail;
@@ -235,6 +245,18 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
+ if (dma_dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
+ error = scsi_mq_setup_tags(shost);
+ if (error)
+ goto fail;
+
+ kref_init(&shost->tagset_refcnt);
+ init_completion(&shost->tagset_freed);
+
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -254,12 +276,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_enable_async_suspend(&shost->shost_dev);
+ get_device(&shost->shost_gendev);
error = device_add(&shost->shost_dev);
if (error)
goto out_del_gendev;
- get_device(&shost->shost_gendev);
-
if (shost->transportt->host_size) {
shost->shost_data = kzalloc(shost->transportt->host_size,
GFP_KERNEL);
@@ -272,37 +293,43 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (shost->transportt->create_work_queue) {
snprintf(shost->work_q_name, sizeof(shost->work_q_name),
"scsi_wq_%d", shost->host_no);
- shost->work_q = create_singlethread_workqueue(
- shost->work_q_name);
+ shost->work_q = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 1, shost->work_q_name);
+
if (!shost->work_q) {
error = -EINVAL;
- goto out_free_shost_data;
+ goto out_del_dev;
}
}
error = scsi_sysfs_add_host(shost);
if (error)
- goto out_destroy_host;
+ goto out_del_dev;
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
- out_destroy_host:
- if (shost->work_q)
- destroy_workqueue(shost->work_q);
- out_free_shost_data:
- kfree(shost->shost_data);
+ /*
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
+ */
out_del_dev:
device_del(&shost->shost_dev);
out_del_gendev:
+ /*
+ * Host state is SHOST_RUNNING so we have to explicitly release
+ * ->shost_dev.
+ */
+ put_device(&shost->shost_dev);
device_del(&shost->shost_gendev);
out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail:
return error;
}
@@ -315,7 +342,7 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
- /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
if (shost->tmf_work_q)
@@ -336,14 +363,11 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
-
kfree(shost->shost_data);
- ida_simple_remove(&host_index_ida, shost->host_no);
+ ida_free(&host_index_ida, shost->host_no);
- if (parent)
+ if (shost->shost_state != SHOST_CREATED)
put_device(parent);
kfree(shost);
}
@@ -369,13 +393,9 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
- gfp_t gfp_mask = GFP_KERNEL;
int index;
- if (sht->unchecked_isa_dma && privsize)
- gfp_mask |= __GFP_DMA;
-
- shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
+ shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
return NULL;
@@ -384,14 +404,17 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->shost_state = SHOST_CREATED;
INIT_LIST_HEAD(&shost->__devices);
INIT_LIST_HEAD(&shost->__targets);
+ INIT_LIST_HEAD(&shost->eh_abort_list);
INIT_LIST_HEAD(&shost->eh_cmd_q);
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto fail_kfree;
+ index = ida_alloc(&host_index_ida, GFP_KERNEL);
+ if (index < 0) {
+ kfree(shost);
+ return NULL;
+ }
shost->host_no = index;
shost->dma_channel = 0xff;
@@ -417,8 +440,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->sg_tablesize = sht->sg_tablesize;
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
- shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->no_write_same = sht->no_write_same;
+ shost->host_tagset = sht->host_tagset;
if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
shost->eh_deadline = -1;
@@ -470,12 +493,13 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
shost->shost_gendev.type = &scsi_host_type;
+ scsi_enable_async_suspend(&shost->shost_gendev);
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
- shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
+ shost->shost_dev.groups = sht->shost_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
@@ -483,26 +507,28 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_index_remove;
+ shost->ehandler = NULL;
+ goto fail;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
1, shost->host_no);
if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost,
"failed to create tmf workq\n");
- goto fail_kthread;
+ goto fail;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
+ fail:
+ /*
+ * Host state is still SHOST_CREATED and that is enough to release
+ * ->shost_gendev. scsi_host_dev_release() will free
+ * dev_name(&shost->shost_dev).
+ */
+ put_device(&shost->shost_gendev);
- fail_kthread:
- kthread_stop(shost->ehandler);
- fail_index_remove:
- ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
- kfree(shost);
return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
@@ -555,8 +581,7 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
-static bool scsi_host_check_in_flight(struct request *rq, void *data,
- bool reserved)
+static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
int *count = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -650,3 +675,69 @@ void scsi_flush_work(struct Scsi_Host *shost)
flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
+
+static bool complete_all_cmds_iter(struct request *rq, void *data)
+{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ enum scsi_host_status status = *(enum scsi_host_status *)data;
+
+ scsi_dma_unmap(scmd);
+ scmd->result = 0;
+ set_host_byte(scmd, status);
+ scsi_done(scmd);
+ return true;
+}
+
+/**
+ * scsi_host_complete_all_commands - Terminate all running commands
+ * @shost: Scsi Host on which commands should be terminated
+ * @status: Status to be set for the terminated commands
+ *
+ * There is no protection against modification of the number
+ * of outstanding commands. It is the responsibility of the
+ * caller to ensure that concurrent I/O submission and/or
+ * completion is stopped when calling this function.
+ */
+void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ enum scsi_host_status status)
+{
+ blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
+ &status);
+}
+EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
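+
+/*
+ * A minimal sketch of a caller: an LLD that has already stopped command
+ * submission and completion (my_lld_fail_all_io is hypothetical) can fail
+ * everything still in flight in one call:
+ *
+ *   static void my_lld_fail_all_io(struct Scsi_Host *shost)
+ *   {
+ *           scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
+ *   }
+ */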
+
+struct scsi_host_busy_iter_data {
+ bool (*fn)(struct scsi_cmnd *, void *);
+ void *priv;
+};
+
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
+{
+ struct scsi_host_busy_iter_data *iter_data = priv;
+ struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
+
+ return iter_data->fn(sc, iter_data->priv);
+}
+
+/**
+ * scsi_host_busy_iter - Iterate over all busy commands
+ * @shost: Pointer to Scsi_Host.
+ * @fn: Function to call on each busy command
+ * @priv: Data pointer passed to @fn
+ *
+ * If locking against concurrent command completions is required,
+ * it has to be provided by the caller.
+ */
+void scsi_host_busy_iter(struct Scsi_Host *shost,
+ bool (*fn)(struct scsi_cmnd *, void *),
+ void *priv)
+{
+ struct scsi_host_busy_iter_data iter_data = {
+ .fn = fn,
+ .priv = priv,
+ };
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
+ &iter_data);
+}
+EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
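+
+/*
+ * A minimal sketch of a caller: counting the busy commands on a host with
+ * scsi_host_busy_iter() (my_lld_count_busy is hypothetical). Returning
+ * true from the callback continues the iteration:
+ *
+ *   static bool my_lld_count_busy(struct scsi_cmnd *scmd, void *data)
+ *   {
+ *           unsigned int *cnt = data;
+ *
+ *           (*cnt)++;
+ *           return true;
+ *   }
+ *
+ *   unsigned int cnt = 0;
+ *
+ *   scsi_host_busy_iter(shost, my_lld_count_busy, &cnt);
+ */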
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1a4ddfacb458..f8e832b1bc46 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
@@ -59,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-170"
+#define HPSA_DRIVER_VERSION "3.4.20-200"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -79,7 +80,6 @@
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
HPSA_DRIVER_VERSION);
-MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");
@@ -254,6 +254,10 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
+static int hpsa_passthru_ioctl(struct ctlr_info *h,
+ IOCTL_Command_struct *iocommand);
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
+ BIG_IOCTL_Command_struct *ioc);
#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
@@ -504,6 +508,12 @@ static ssize_t host_store_rescan(struct device *dev,
return count;
}
+static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
+{
+ device->offload_enabled = 0;
+ device->offload_to_be_enabled = 0;
+}
+
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -926,30 +936,34 @@ static DEVICE_ATTR(ctlr_num, S_IRUGO,
static DEVICE_ATTR(legacy_board, S_IRUGO,
host_show_legacy_board, NULL);
-static struct device_attribute *hpsa_sdev_attrs[] = {
- &dev_attr_raid_level,
- &dev_attr_lunid,
- &dev_attr_unique_id,
- &dev_attr_hp_ssd_smart_path_enabled,
- &dev_attr_path_info,
- &dev_attr_sas_address,
+static struct attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level.attr,
+ &dev_attr_lunid.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_hp_ssd_smart_path_enabled.attr,
+ &dev_attr_path_info.attr,
+ &dev_attr_sas_address.attr,
NULL,
};
-static struct device_attribute *hpsa_shost_attrs[] = {
- &dev_attr_rescan,
- &dev_attr_firmware_revision,
- &dev_attr_commands_outstanding,
- &dev_attr_transport_mode,
- &dev_attr_resettable,
- &dev_attr_hp_ssd_smart_path_status,
- &dev_attr_raid_offload_debug,
- &dev_attr_lockup_detected,
- &dev_attr_ctlr_num,
- &dev_attr_legacy_board,
+ATTRIBUTE_GROUPS(hpsa_sdev);
+
+static struct attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan.attr,
+ &dev_attr_firmware_revision.attr,
+ &dev_attr_commands_outstanding.attr,
+ &dev_attr_transport_mode.attr,
+ &dev_attr_resettable.attr,
+ &dev_attr_hp_ssd_smart_path_status.attr,
+ &dev_attr_raid_offload_debug.attr,
+ &dev_attr_lockup_detected.attr,
+ &dev_attr_ctlr_num.attr,
+ &dev_attr_legacy_board.attr,
NULL,
};
+ATTRIBUTE_GROUPS(hpsa_shost);
+
#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
HPSA_MAX_CONCURRENT_PASSTHRUS)
@@ -970,8 +984,8 @@ static struct scsi_host_template hpsa_driver_template = {
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
- .sdev_attrs = hpsa_sdev_attrs,
- .shost_attrs = hpsa_shost_attrs,
+ .sdev_groups = hpsa_sdev_groups,
+ .shost_groups = hpsa_shost_groups,
.max_sectors = 2048,
.no_write_same = 1,
};
@@ -1140,7 +1154,10 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
{
dial_down_lockup_detection_during_fw_flash(h, c);
atomic_inc(&h->commands_outstanding);
- if (c->device)
+ /*
+ * Don't bump the device's outstanding count again for a
+ * driver-initiated retry (retry_pending is set).
+ */
+ if (c->device && !c->retry_pending)
atomic_inc(&c->device->commands_outstanding);
reply_queue = h->reply_map[raw_smp_processor_id()];
@@ -1738,8 +1755,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
__func__,
h->scsi_host->host_no, logical_drive->bus,
logical_drive->target, logical_drive->lun);
- logical_drive->offload_enabled = 0;
- logical_drive->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(logical_drive);
logical_drive->queue_depth = 8;
}
}
@@ -2125,6 +2141,7 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
}
/* configure scsi device based on internal per-device structure */
+#define CTLR_TIMEOUT (120 * HZ)
static int hpsa_slave_configure(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd;
@@ -2135,17 +2152,21 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
if (sd) {
sd->was_removed = 0;
+ queue_depth = sd->queue_depth != 0 ?
+ sd->queue_depth : sdev->host->can_queue;
if (sd->external) {
queue_depth = EXTERNAL_QD;
sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
blk_queue_rq_timeout(sdev->request_queue,
HPSA_EH_PTRAID_TIMEOUT);
- } else {
- queue_depth = sd->queue_depth != 0 ?
- sd->queue_depth : sdev->host->can_queue;
}
- } else
+ if (is_hba_lunid(sd->scsi3addr)) {
+ sdev->eh_timeout = CTLR_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
+ }
+ } else {
queue_depth = sdev->host->can_queue;
+ }
scsi_change_queue_depth(sdev, queue_depth);
@@ -2381,7 +2402,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
break;
case IOACCEL2_STATUS_SR_UNDERRUN:
cmd->result = (DID_OK << 16); /* host byte */
- cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
ioaccel2_resid = get_unaligned_le32(
&c2->error_data.resid_cnt[0]);
scsi_set_resid(cmd, ioaccel2_resid);
@@ -2466,8 +2486,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd)
{
hpsa_cmd_resolve_and_free(h, c);
- if (cmd && cmd->scsi_done)
- cmd->scsi_done(cmd);
+ if (cmd)
+ scsi_done(cmd);
}
static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
@@ -2499,8 +2519,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
IOACCEL2_SERV_RESPONSE_FAILURE) {
if (c2->error_data.status ==
IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
- dev->offload_enabled = 0;
- dev->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(dev);
}
if (dev->in_reset) {
@@ -2583,8 +2602,7 @@ static void complete_scsi_command(struct CommandList *cp)
(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
- cmd->result = (DID_OK << 16); /* host byte */
- cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ cmd->result = (DID_OK << 16); /* host byte */
/* SCSI command has already been cleaned up in SML */
if (dev->was_removed) {
@@ -3435,9 +3453,14 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
struct ErrorInfo *ei = NULL;
struct bmic_sense_storage_box_params *bssbp = NULL;
struct bmic_identify_physical_device *id_phys = NULL;
- struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
+ struct ext_report_lun_entry *rle;
u16 bmic_device_index = 0;
+ if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
+ return;
+
+ rle = &rlep->LUN[rle_index];
+
encl_dev->eli =
hpsa_get_enclosure_logical_identifier(h, scsi3addr);
@@ -3670,10 +3693,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_to_be_enabled =
+ bool offload_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
- if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_to_be_enabled = 0;
+ /*
+ * Check to see if offload can be enabled.
+ */
+ if (offload_enabled) {
+ rc = hpsa_get_raid_map(h, scsi3addr, this_device);
+ if (rc) /* could not load raid_map */
+ goto out;
+ this_device->offload_to_be_enabled = 1;
+ }
}
out:
@@ -3855,8 +3885,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
u8 sense_key, asc, ascq;
int sense_len;
int rc, ldstat = 0;
- u16 cmd_status;
- u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
@@ -3876,8 +3904,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
else
sense_len = c->err_info->SenseLen;
decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
- cmd_status = c->err_info->CommandStatus;
- scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
/* Determine the reason for not ready state */
@@ -3996,8 +4022,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
- this_device->offload_enabled = 0;
- this_device->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(this_device);
this_device->hba_ioaccel_enabled = 0;
this_device->volume_offline = 0;
this_device->queue_depth = h->nr_cmds;
@@ -4160,6 +4185,9 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
int rc;
struct ext_report_lun_entry *rle;
+ if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
+ return;
+
rle = &rlep->LUN[rle_index];
dev->ioaccel_handle = rle->ioaccel_handle;
@@ -4184,7 +4212,12 @@ static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
struct ReportExtendedLUNdata *rlep, int rle_index,
struct bmic_identify_physical_device *id_phys)
{
- struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
+ struct ext_report_lun_entry *rle;
+
+ if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
+ return;
+
+ rle = &rlep->LUN[rle_index];
if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
this_device->hba_ioaccel_enabled = 1;
@@ -4318,10 +4351,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
- int i, n_ext_target_devs, ndevs_to_allocate;
+ int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
- DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
@@ -4335,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
- memset(lunzerobits, 0, sizeof(lunzerobits));
h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
@@ -4383,7 +4414,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
- n_ext_target_devs = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
int rc = 0;
@@ -4406,7 +4436,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
/*
* Skip over some devices such as a spare.
*/
- if (!tmpdevice->external && physical_device) {
+ if (phys_dev_index >= 0 && !tmpdevice->external &&
+ physical_device) {
skip_device = hpsa_skip_device(h, lunaddrbytes,
&physdev_list->LUN[phys_dev_index]);
if (skip_device)
@@ -4546,7 +4577,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
struct scsi_cmnd *cmd)
{
struct scatterlist *sg;
- int use_sg, i, sg_limit, chained, last_sg;
+ int use_sg, i, sg_limit, chained;
struct SGDescriptor *curr_sg;
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
@@ -4568,7 +4599,6 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
curr_sg = cp->SG;
chained = use_sg > h->max_cmd_sg_entries;
sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
- last_sg = scsi_sg_count(cmd) - 1;
scsi_for_each_sg(cmd, sg, sg_limit, i) {
hpsa_set_sg_descriptor(curr_sg, sg);
curr_sg++;
@@ -4664,7 +4694,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
case WRITE_6:
case WRITE_12:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_6:
case READ_12:
if (*cdb_len == 6) {
@@ -5114,7 +5144,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
switch (cmd->cmnd[0]) {
case WRITE_6:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_6:
first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
(cmd->cmnd[2] << 8) |
@@ -5125,7 +5155,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_10:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_10:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
@@ -5138,7 +5168,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_12:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_12:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
@@ -5153,7 +5183,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_16:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_16:
first_block =
(((u64) cmd->cmnd[2]) << 56) |
@@ -5230,8 +5260,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
/* Handles load balance across RAID 1 members.
* (2-drive R1 and R10 with even # of drives.)
* Appropriate for SSDs, not optimal for HDDs
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
+ if (le16_to_cpu(map->layout_map_count) != 2) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
if (dev->offload_to_mirror)
map_index += le16_to_cpu(map->data_disks_per_row);
dev->offload_to_mirror = !dev->offload_to_mirror;
@@ -5239,8 +5273,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
case HPSA_RAID_ADM:
/* Handles N-way mirrors (R1-ADM)
* and R10 with # of drives divisible by 3.)
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
+ if (le16_to_cpu(map->layout_map_count) != 3) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
offload_to_mirror = dev->offload_to_mirror;
raid_map_helper(map, offload_to_mirror,
@@ -5265,7 +5303,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
r5or6_blocks_per_row =
le16_to_cpu(map->strip_size) *
le16_to_cpu(map->data_disks_per_row);
- BUG_ON(r5or6_blocks_per_row == 0);
+ if (r5or6_blocks_per_row == 0) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
stripesize = r5or6_blocks_per_row *
le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
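These hunks share one theme: where the old code would `BUG_ON()` an inconsistent RAID map (wrong `layout_map_count`, zero blocks-per-row), the new code degrades gracefully, calling `hpsa_turn_off_ioaccel_for_device()` and returning `IO_ACCEL_INELIGIBLE` so the command is retried down the normal RAID path. A compact sketch of that validate-and-degrade pattern, with invented names standing in for the driver's own symbols:

enum { ACCEL_INELIGIBLE = 1 };

struct device_state { int offload_enabled; };

static int submit_accelerated(struct device_state *dev, int layout_count)
{
        if (layout_count != 2) {          /* raid map no longer matches */
                dev->offload_enabled = 0; /* turn ioaccel off...        */
                return ACCEL_INELIGIBLE;  /* ...and retry via slow path */
        }
        /* ... build and post the accelerated command ... */
        return 0;
}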
@@ -5530,7 +5571,8 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
- struct CommandList *c, struct scsi_cmnd *cmd)
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ bool retry)
{
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
int rc = IO_ACCEL_INELIGIBLE;
@@ -5547,18 +5589,22 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
cmd->host_scribble = (unsigned char *) c;
if (dev->offload_enabled) {
- hpsa_cmd_init(h, c->cmdindex, c);
+ hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
c->device = dev;
+ if (retry) /* Resubmit but do not increment device->commands_outstanding. */
+ c->retry_pending = true;
rc = hpsa_scsi_ioaccel_raid_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
} else if (dev->hba_ioaccel_enabled) {
- hpsa_cmd_init(h, c->cmdindex, c);
+ hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
c->device = dev;
+ if (retry) /* Resubmit but do not increment device->commands_outstanding. */
+ c->retry_pending = true;
rc = hpsa_scsi_ioaccel_direct_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5591,7 +5637,8 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
if (c2->error_data.serv_response ==
IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
- rc = hpsa_ioaccel_submit(h, c, cmd);
+ /* Resubmit with the retry_pending flag set. */
+ rc = hpsa_ioaccel_submit(h, c, cmd, true);
if (rc == 0)
return;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5607,6 +5654,15 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
}
}
hpsa_cmd_partial_init(c->h, c->cmdindex, c);
+ /*
+ * We have not come in through queue_command, so we can set
+ * the retry_pending flag to true: this is a driver-initiated
+ * retry attempt, not a retry initiated by the SCSI midlayer
+ * (SML).
+ * Note: hpsa_ciss_submit does not zero out the command fields
+ * the way ioaccel submit does.
+ */
+ c->retry_pending = true;
if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
/*
* If we get here, it means dma mapping failed. Try
@@ -5617,7 +5673,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
* if it encountered a dma mapping failure.
*/
cmd->result = DID_IMM_RETRY << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
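The `retry` parameter threaded through `hpsa_ioaccel_submit()` and the resubmit worker exists to set `retry_pending` on driver-initiated retries so that, per the comments above, `device->commands_outstanding` is not bumped a second time for the same `scsi_cmnd`; commands fresh from `queue_command` start with the flag clear. A toy sketch of the flag's life cycle (names illustrative):

struct cmd { int retry_pending; /* ... */ };

static void new_command(struct cmd *c)      /* queue_command path */
{
        c->retry_pending = 0;               /* first submission   */
}

static void resubmit_worker(struct cmd *c)  /* driver-initiated   */
{
        c->retry_pending = 1;               /* don't re-count it  */
        /* ...resubmit down ioaccel or the normal RAID path...    */
}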
@@ -5632,24 +5688,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
- BUG_ON(cmd->request->tag < 0);
+ BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
if (dev->removed) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
if (unlikely(lockup_detected(h))) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -5669,11 +5725,16 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/*
* Call alternate submit routine for I/O accelerated commands.
* Retries always go down the normal I/O path.
+ * Note: If cmd->retries is non-zero, then this is a SML
+ * initiated retry and not a driver initiated retry.
+ * This command has been obtained from cmd_tagged_alloc
+ * and is therefore a brand-new command.
*/
if (likely(cmd->retries == 0 &&
- !blk_rq_is_passthrough(cmd->request) &&
+ !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
h->acciopath_status)) {
- rc = hpsa_ioaccel_submit(h, c, cmd);
+ /* Submit with the retry_pending flag unset. */
+ rc = hpsa_ioaccel_submit(h, c, cmd, false);
if (rc == 0)
return 0;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5835,7 +5896,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h)
*/
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
- int idx = scmd->request->tag;
+ int idx = scsi_cmd_to_rq(scmd)->tag;
if (idx < 0)
return idx;
@@ -6068,6 +6129,7 @@ return_reset_status:
* at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
* block request tag as an index into a table of entries. cmd_tagged_free() is
* the complement, although cmd_free() may be called instead.
+ * This function is only called for new requests from queue_command.
*/
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
struct scsi_cmnd *scmd)
@@ -6102,8 +6164,14 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
}
atomic_inc(&c->refcount);
-
hpsa_cmd_partial_init(h, idx, c);
+
+ /*
+ * This is a new command obtained from queue_command so
+ * there have not been any driver initiated retry attempts.
+ */
+ c->retry_pending = false;
+
return c;
}
@@ -6165,12 +6233,18 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
offset = (i + 1) % HPSA_NRESERVED_CMDS;
continue;
}
- set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ set_bit(i, h->cmd_pool_bits);
break; /* it's ours now. */
}
hpsa_cmd_partial_init(h, i, c);
c->device = NULL;
+
+ /*
+ * cmd_alloc is for "internal" commands and they are never
+ * retried.
+ */
+ c->retry_pending = false;
+
return c;
}
@@ -6186,8 +6260,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
}
}
@@ -6196,75 +6269,63 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
void __user *arg)
{
- IOCTL32_Command_struct __user *arg32 =
- (IOCTL32_Command_struct __user *) arg;
+ struct ctlr_info *h = sdev_to_hba(dev);
+ IOCTL32_Command_struct __user *arg32 = arg;
IOCTL_Command_struct arg64;
- IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
int err;
u32 cp;
- memset(&arg64, 0, sizeof(arg64));
- err = 0;
- err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
- sizeof(arg64.LUN_info));
- err |= copy_from_user(&arg64.Request, &arg32->Request,
- sizeof(arg64.Request));
- err |= copy_from_user(&arg64.error_info, &arg32->error_info,
- sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
+ if (!arg)
+ return -EINVAL;
- if (err)
+ memset(&arg64, 0, sizeof(arg64));
+ if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
+ return -EFAULT;
+ if (get_user(cp, &arg32->buf))
return -EFAULT;
+ arg64.buf = compat_ptr(cp);
- err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
+ if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
+ return -EAGAIN;
+ err = hpsa_passthru_ioctl(h, &arg64);
+ atomic_inc(&h->passthru_cmds_avail);
if (err)
return err;
- err |= copy_in_user(&arg32->error_info, &p->error_info,
- sizeof(arg32->error_info));
- if (err)
+ if (copy_to_user(&arg32->error_info, &arg64.error_info,
+ sizeof(arg32->error_info)))
return -EFAULT;
- return err;
+ return 0;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
unsigned int cmd, void __user *arg)
{
- BIG_IOCTL32_Command_struct __user *arg32 =
- (BIG_IOCTL32_Command_struct __user *) arg;
+ struct ctlr_info *h = sdev_to_hba(dev);
+ BIG_IOCTL32_Command_struct __user *arg32 = arg;
BIG_IOCTL_Command_struct arg64;
- BIG_IOCTL_Command_struct __user *p =
- compat_alloc_user_space(sizeof(arg64));
int err;
u32 cp;
+ if (!arg)
+ return -EINVAL;
memset(&arg64, 0, sizeof(arg64));
- err = 0;
- err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
- sizeof(arg64.LUN_info));
- err |= copy_from_user(&arg64.Request, &arg32->Request,
- sizeof(arg64.Request));
- err |= copy_from_user(&arg64.error_info, &arg32->error_info,
- sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(arg64.malloc_size, &arg32->malloc_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
-
- if (err)
+ if (copy_from_user(&arg64, arg32,
+ offsetof(BIG_IOCTL32_Command_struct, buf)))
return -EFAULT;
+ if (get_user(cp, &arg32->buf))
+ return -EFAULT;
+ arg64.buf = compat_ptr(cp);
- err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
+ if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
+ return -EAGAIN;
+ err = hpsa_big_passthru_ioctl(h, &arg64);
+ atomic_inc(&h->passthru_cmds_avail);
if (err)
return err;
- err |= copy_in_user(&arg32->error_info, &p->error_info,
- sizeof(arg32->error_info));
- if (err)
+ if (copy_to_user(&arg32->error_info, &arg64.error_info,
+ sizeof(arg32->error_info)))
return -EFAULT;
- return err;
+ return 0;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
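Both compat handlers above now use the same conversion idiom: a single `copy_from_user()` of the prefix up to `offsetof(..., buf)`, which works because the 32-bit and native layouts agree up to the buffer pointer, followed by `get_user()` plus `compat_ptr()` for the one field whose width differs. A self-contained userspace analogue, with invented struct layouts and `memcpy()` standing in for the user-copy primitives:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct cmd32 { uint32_t lun; uint32_t buf_size; uint32_t buf; }; /* u32 ptr */
struct cmd64 { uint32_t lun; uint32_t buf_size; void *buf; };    /* native  */

static void convert(struct cmd64 *dst, const struct cmd32 *src)
{
        memset(dst, 0, sizeof(*dst));
        /* shared prefix: every field before the pointer member */
        memcpy(dst, src, offsetof(struct cmd64, buf));
        /* the one field whose width differs gets widened explicitly */
        dst->buf = (void *)(uintptr_t)src->buf;
}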
@@ -6337,37 +6398,33 @@ static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
return 0;
}
-static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+static int hpsa_passthru_ioctl(struct ctlr_info *h,
+ IOCTL_Command_struct *iocommand)
{
- IOCTL_Command_struct iocommand;
struct CommandList *c;
char *buff = NULL;
u64 temp64;
int rc = 0;
- if (!argp)
- return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
- return -EFAULT;
- if ((iocommand.buf_size < 1) &&
- (iocommand.Request.Type.Direction != XFER_NONE)) {
+ if ((iocommand->buf_size < 1) &&
+ (iocommand->Request.Type.Direction != XFER_NONE)) {
return -EINVAL;
}
- if (iocommand.buf_size > 0) {
- buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+ if (iocommand->buf_size > 0) {
+ buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
if (buff == NULL)
return -ENOMEM;
- if (iocommand.Request.Type.Direction & XFER_WRITE) {
+ if (iocommand->Request.Type.Direction & XFER_WRITE) {
/* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf,
- iocommand.buf_size)) {
+ if (copy_from_user(buff, iocommand->buf,
+ iocommand->buf_size)) {
rc = -EFAULT;
goto out_kfree;
}
} else {
- memset(buff, 0, iocommand.buf_size);
+ memset(buff, 0, iocommand->buf_size);
}
}
c = cmd_alloc(h);
@@ -6377,23 +6434,23 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->scsi_cmd = SCSI_CMD_BUSY;
/* Fill in Command Header */
c->Header.ReplyQueue = 0; /* unused in simple mode */
- if (iocommand.buf_size > 0) { /* buffer to fill */
+ if (iocommand->buf_size > 0) { /* buffer to fill */
c->Header.SGList = 1;
c->Header.SGTotal = cpu_to_le16(1);
} else { /* no buffers to fill */
c->Header.SGList = 0;
c->Header.SGTotal = cpu_to_le16(0);
}
- memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
+ memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
/* Fill in Request block */
- memcpy(&c->Request, &iocommand.Request,
+ memcpy(&c->Request, &iocommand->Request,
sizeof(c->Request));
/* Fill in the scatter gather information */
- if (iocommand.buf_size > 0) {
+ if (iocommand->buf_size > 0) {
temp64 = dma_map_single(&h->pdev->dev, buff,
- iocommand.buf_size, DMA_BIDIRECTIONAL);
+ iocommand->buf_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
c->SG[0].Addr = cpu_to_le64(0);
c->SG[0].Len = cpu_to_le32(0);
@@ -6401,12 +6458,12 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
goto out;
}
c->SG[0].Addr = cpu_to_le64(temp64);
- c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
+ c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
}
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
- if (iocommand.buf_size > 0)
+ if (iocommand->buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (rc) {
@@ -6415,16 +6472,12 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
/* Copy the error information out */
- memcpy(&iocommand.error_info, c->err_info,
- sizeof(iocommand.error_info));
- if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
- rc = -EFAULT;
- goto out;
- }
- if ((iocommand.Request.Type.Direction & XFER_READ) &&
- iocommand.buf_size > 0) {
+ memcpy(&iocommand->error_info, c->err_info,
+ sizeof(iocommand->error_info));
+ if ((iocommand->Request.Type.Direction & XFER_READ) &&
+ iocommand->buf_size > 0) {
/* Copy the data out of the buffer we created */
- if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+ if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
rc = -EFAULT;
goto out;
}
@@ -6436,9 +6489,9 @@ out_kfree:
return rc;
}
-static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
+ BIG_IOCTL_Command_struct *ioc)
{
- BIG_IOCTL_Command_struct *ioc;
struct CommandList *c;
unsigned char **buff = NULL;
int *buff_size = NULL;
@@ -6449,29 +6502,17 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
u32 sz;
BYTE __user *data_ptr;
- if (!argp)
- return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = vmemdup_user(argp, sizeof(*ioc));
- if (IS_ERR(ioc)) {
- status = PTR_ERR(ioc);
- goto cleanup1;
- }
+
if ((ioc->buf_size < 1) &&
- (ioc->Request.Type.Direction != XFER_NONE)) {
- status = -EINVAL;
- goto cleanup1;
- }
+ (ioc->Request.Type.Direction != XFER_NONE))
+ return -EINVAL;
/* Check kmalloc limits using all SGs */
- if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
- status = -EINVAL;
- goto cleanup1;
- }
- if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
- status = -EINVAL;
- goto cleanup1;
- }
+ if (ioc->malloc_size > MAX_KMALLOC_SIZE)
+ return -EINVAL;
+ if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
+ return -EINVAL;
buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
if (!buff) {
status = -ENOMEM;
@@ -6544,10 +6585,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
- if (copy_to_user(argp, ioc, sizeof(*ioc))) {
- status = -EFAULT;
- goto cleanup0;
- }
if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
int i;
@@ -6573,7 +6610,6 @@ cleanup1:
kfree(buff);
}
kfree(buff_size);
- kvfree(ioc);
return status;
}
@@ -6589,14 +6625,11 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
* ioctl
*/
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
- void __user *arg)
+ void __user *argp)
{
- struct ctlr_info *h;
- void __user *argp = (void __user *)arg;
+ struct ctlr_info *h = sdev_to_hba(dev);
int rc;
- h = sdev_to_hba(dev);
-
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
@@ -6607,18 +6640,35 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
return hpsa_getpciinfo_ioctl(h, argp);
case CCISS_GETDRIVVER:
return hpsa_getdrivver_ioctl(h, argp);
- case CCISS_PASSTHRU:
+ case CCISS_PASSTHRU: {
+ IOCTL_Command_struct iocommand;
+
+ if (!argp)
+ return -EINVAL;
+ if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
+ return -EFAULT;
if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
return -EAGAIN;
- rc = hpsa_passthru_ioctl(h, argp);
+ rc = hpsa_passthru_ioctl(h, &iocommand);
atomic_inc(&h->passthru_cmds_avail);
+ if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
+ rc = -EFAULT;
return rc;
- case CCISS_BIG_PASSTHRU:
+ }
+ case CCISS_BIG_PASSTHRU: {
+ BIG_IOCTL_Command_struct ioc;
+ if (!argp)
+ return -EINVAL;
+ if (copy_from_user(&ioc, argp, sizeof(ioc)))
+ return -EFAULT;
if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
return -EAGAIN;
- rc = hpsa_big_passthru_ioctl(h, argp);
+ rc = hpsa_big_passthru_ioctl(h, &ioc);
atomic_inc(&h->passthru_cmds_avail);
+ if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
+ rc = -EFAULT;
return rc;
+ }
default:
return -ENOTTY;
}
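With the user-copy steps hoisted into `hpsa_ioctl()`, the passthru helpers operate on kernel copies only, results are copied back only on success, and `passthru_cmds_avail` throttles concurrency: `atomic_dec_if_positive()` decrements only if the result stays non-negative and returns the new value, so a negative return means no slot was taken and the caller backs off with `-EAGAIN`. A userspace sketch of that throttle using C11 atomics (the CAS loop stands in for the kernel primitive):

#include <stdatomic.h>

static atomic_int slots = 3;    /* cf. h->passthru_cmds_avail */

static int take_slot(void)
{
        int v = atomic_load(&slots);

        while (v > 0) {
                /* CAS loop standing in for atomic_dec_if_positive() */
                if (atomic_compare_exchange_weak(&slots, &v, v - 1))
                        return 0;       /* slot taken */
        }
        return -1;                      /* caller returns -EAGAIN */
}

static void put_slot(void)
{
        atomic_fetch_add(&slots, 1);    /* cf. atomic_inc() on exit */
}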
@@ -7420,7 +7470,6 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
dev_warn(&pdev->dev,
"base address is invalid\n");
return -1;
- break;
}
}
if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
@@ -7979,7 +8028,7 @@ out_disable:
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
- kfree(h->cmd_pool_bits);
+ bitmap_free(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
dma_free_coherent(&h->pdev->dev,
@@ -8001,9 +8050,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
- h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
- sizeof(unsigned long),
- GFP_KERNEL);
+ h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL);
h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
&h->cmd_pool_dhandle, GFP_KERNEL);
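`bitmap_zalloc()`/`bitmap_free()` replace the open-coded `kcalloc()` of longs, and `set_bit()`/`clear_bit()` in the earlier hunks now take the flat bit index because those helpers already perform the word/bit split that the removed `i / BITS_PER_LONG` arithmetic duplicated. A userspace analogue of the split (note the kernel's `set_bit()`/`clear_bit()` are additionally atomic; this sketch is not):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

static unsigned long *bitmap_zalloc_user(unsigned int nbits)
{
        size_t words = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

        return calloc(words, sizeof(unsigned long));
}

static void set_bit_user(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit_user(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}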
@@ -8285,7 +8332,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
*
* Called from monitor controller worker (hpsa_event_monitor_worker)
*
- * A Volume (or Volumes that comprise an Array set may be undergoing a
+ * A Volume (or Volumes that comprise an Array set) may be undergoing a
* transformation, so we will be turning off ioaccel for all volumes that
* make up the Array.
*/
@@ -8308,6 +8355,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
* Run through current device list used during I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
+ int offload_to_be_enabled = 0;
+ int offload_config = 0;
+
device = h->dev[i];
if (!device)
@@ -8325,25 +8375,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
continue;
ioaccel_status = buf[IOACCEL_STATUS_BYTE];
- device->offload_config =
+
+ /*
+ * Check if offload is still configured on
+ */
+ offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
- if (device->offload_config)
- device->offload_to_be_enabled =
+ /*
+ * If offload is configured on, check to see if ioaccel
+ * needs to be enabled.
+ */
+ if (offload_config)
+ offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
/*
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+ */
+ if (offload_to_be_enabled)
+ continue;
+
+ /*
* Immediately turn off ioaccel for any volume the
* controller tells us to. Some of the reasons could be:
* transformation - change to the LVs of an Array.
* degraded volume - component failure
- *
- * If ioaccel is to be re-enabled, re-enable later during the
- * scan operation so the driver can get a fresh raidmap
- * before turning ioaccel back on.
- *
*/
- if (!device->offload_to_be_enabled)
- device->offload_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(device);
}
kfree(buf);
@@ -8601,7 +8661,7 @@ static struct ctlr_info *hpda_alloc_ctlr_info(void)
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int dac, rc;
+ int rc;
struct ctlr_info *h;
int try_soft_reset = 0;
unsigned long flags;
@@ -8677,13 +8737,9 @@ reinit_after_soft_reset:
/* configure PCI DMA stuff */
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc == 0) {
- dac = 1;
- } else {
+ if (rc != 0) {
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc == 0) {
- dac = 0;
- } else {
+ if (rc != 0) {
dev_err(&pdev->dev, "no suitable DMA available\n");
goto clean3; /* shost, pci, lu, aer/h */
}
@@ -8820,7 +8876,7 @@ reinit_after_soft_reset:
/* hook into SCSI subsystem */
rc = hpsa_scsi_add_host(h);
if (rc)
- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8835,6 +8891,8 @@ reinit_after_soft_reset:
HPSA_EVENT_MONITOR_INTERVAL);
return 0;
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ kfree(h->lastlogicals);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_performant_mode(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
@@ -9055,25 +9113,27 @@ static void hpsa_remove_one(struct pci_dev *pdev)
hpda_free_ctlr_info(h); /* init_one 1 */
}
-static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
- __attribute__((unused)) pm_message_t state)
+static int __maybe_unused hpsa_suspend(
+ __attribute__((unused)) struct device *dev)
{
return -ENOSYS;
}
-static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+static int __maybe_unused hpsa_resume(
+ __attribute__((unused)) struct device *dev)
{
return -ENOSYS;
}
+static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
+
static struct pci_driver hpsa_pci_driver = {
.name = HPSA,
.probe = hpsa_init_one,
.remove = hpsa_remove_one,
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
- .suspend = hpsa_suspend,
- .resume = hpsa_resume,
+ .driver.pm = &hpsa_pm_ops,
};
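This converts the driver from the legacy PCI power-management hooks (which took a `pci_dev` and a `pm_message_t`) to generic PM: the callbacks take a `struct device`, `SIMPLE_DEV_PM_OPS()` packages one suspend/resume pair into a `struct dev_pm_ops` wired through `driver.pm`, and `__maybe_unused` silences the defined-but-unused warning when `CONFIG_PM_SLEEP` is off. Condensed shape of the conversion, as a kernel-style sketch (it builds only in-tree, and all names are illustrative):

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
        return 0;       /* hpsa itself still returns -ENOSYS here */
}

static int __maybe_unused demo_resume(struct device *dev)
{
        return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
        .name      = "demo",
        .driver.pm = &demo_pm_ops,      /* replaces .suspend/.resume */
};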
/* Fill in bucket_map[], given nsgs (the max number of
@@ -9262,10 +9322,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
} else if (trans_support & CFGTBL_Trans_io_accel2) {
u64 cfg_offset, cfg_base_addr_index;
u32 bft2_offset, cfg_base_addr;
- int rc;
- rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
+ hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
@@ -9295,10 +9354,10 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
if (h->ioaccel_cmd_pool) {
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- h->ioaccel_cmd_pool,
- h->ioaccel_cmd_pool_dhandle);
+ dma_free_coherent(&h->pdev->dev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool,
+ h->ioaccel_cmd_pool_dhandle);
h->ioaccel_cmd_pool = NULL;
h->ioaccel_cmd_pool_dhandle = 0;
}
@@ -9348,10 +9407,10 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
hpsa_free_ioaccel2_sg_chain_blocks(h);
if (h->ioaccel2_cmd_pool) {
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- h->ioaccel2_cmd_pool,
- h->ioaccel2_cmd_pool_dhandle);
+ dma_free_coherent(&h->pdev->dev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool,
+ h->ioaccel2_cmd_pool_dhandle);
h->ioaccel2_cmd_pool = NULL;
h->ioaccel2_cmd_pool_dhandle = 0;
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index f8c88fc7b80a..99b0750850b2 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
@@ -57,7 +58,7 @@ struct hpsa_sas_phy {
bool added_to_port;
};
-#define EXTERNAL_QD 7
+#define EXTERNAL_QD 128
struct hpsa_scsi_dev_t {
unsigned int devtype;
int bus, target, lun; /* as presented to the OS */
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 7825cbfea4dc..ba6a3aa8d954 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
@@ -19,6 +20,11 @@
#ifndef HPSA_CMD_H
#define HPSA_CMD_H
+#include <linux/compiler.h>
+
+#include <linux/build_bug.h> /* static_assert */
+#include <linux/stddef.h> /* offsetof */
+
/* general boundary definitions */
#define SENSEINFOBYTES 32 /* may vary between hbas */
#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
@@ -199,12 +205,10 @@ union u64bit {
MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
/* SCSI-3 Commands */
-#pragma pack(1)
-
#define HPSA_INQUIRY 0x12
struct InquiryData {
u8 data_byte[36];
-};
+} __packed;
#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
@@ -220,7 +224,7 @@ struct raid_map_disk_data {
u8 xor_mult[2]; /**< XOR multipliers for this position,
* valid for data disks only */
u8 reserved[2];
-};
+} __packed;
struct raid_map_data {
__le32 structure_size; /* Size of entire structure in bytes */
@@ -246,14 +250,14 @@ struct raid_map_data {
__le16 dekindex; /* Data encryption key index. */
u8 reserved[16];
struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
-};
+} __packed;
struct ReportLUNdata {
u8 LUNListLength[4];
u8 extended_response_flag;
u8 reserved[3];
u8 LUN[HPSA_MAX_LUN][8];
-};
+} __packed;
struct ext_report_lun_entry {
u8 lunid[8];
@@ -268,20 +272,20 @@ struct ext_report_lun_entry {
u8 lun_count; /* multi-lun device, how many luns */
u8 redundant_paths;
u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
-};
+} __packed;
struct ReportExtendedLUNdata {
u8 LUNListLength[4];
u8 extended_response_flag;
u8 reserved[3];
struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
-};
+} __packed;
struct SenseSubsystem_info {
u8 reserved[36];
u8 portname[8];
u8 reserved1[1108];
-};
+} __packed;
/* BMIC commands */
#define BMIC_READ 0x26
@@ -316,7 +320,7 @@ union SCSI3Addr {
u8 Targ:6;
u8 Mode:2; /* b10 */
} LogUnit;
-};
+} __packed;
struct PhysDevAddr {
u32 TargetId:24;
@@ -324,20 +328,20 @@ struct PhysDevAddr {
u32 Mode:2;
/* 2 level target device addr */
union SCSI3Addr Target[2];
-};
+} __packed;
struct LogDevAddr {
u32 VolId:30;
u32 Mode:2;
u8 reserved[4];
-};
+} __packed;
union LUNAddr {
u8 LunAddrBytes[8];
union SCSI3Addr SCSI3Lun[4];
struct PhysDevAddr PhysDev;
struct LogDevAddr LogDev;
-};
+} __packed;
struct CommandListHeader {
u8 ReplyQueue;
@@ -345,7 +349,7 @@ struct CommandListHeader {
__le16 SGTotal;
__le64 tag;
union LUNAddr LUN;
-};
+} __packed;
struct RequestBlock {
u8 CDBLen;
@@ -364,18 +368,18 @@ struct RequestBlock {
#define GET_DIR(tad) (((tad) >> 6) & 0x03)
u16 Timeout;
u8 CDB[16];
-};
+} __packed;
struct ErrDescriptor {
__le64 Addr;
__le32 Len;
-};
+} __packed;
struct SGDescriptor {
__le64 Addr;
__le32 Len;
__le32 Ext;
-};
+} __packed;
union MoreErrInfo {
struct {
@@ -389,7 +393,8 @@ union MoreErrInfo {
u8 offense_num; /* byte # of offense 0-base */
u32 offense_value;
} Invalid_Cmd;
-};
+} __packed;
+
struct ErrorInfo {
u8 ScsiStatus;
u8 SenseLen;
@@ -397,7 +402,7 @@ struct ErrorInfo {
u32 ResidualCnt;
union MoreErrInfo MoreErrInfo;
u8 SenseInfo[SENSEINFOBYTES];
-};
+} __packed;
/* Command types */
#define CMD_IOCTL_PEND 0x01
#define CMD_SCSI 0x03
@@ -447,11 +452,20 @@ struct CommandList {
*/
struct hpsa_scsi_dev_t *phys_disk;
- int abort_pending;
+ bool retry_pending;
struct hpsa_scsi_dev_t *device;
atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
} __aligned(COMMANDLIST_ALIGNMENT);
+/*
+ * Make sure our embedded atomic variable is aligned. Otherwise we break atomic
+ * operations on architectures that don't support unaligned atomics like IA64.
+ *
+ * The assert guards against reintroduction of an unwanted __packed
+ * attribute on struct CommandList.
+ */
+static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0);
+
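Unlike the on-the-wire structs below, `struct CommandList` must stay un-packed so its embedded `atomic_t` remains naturally aligned; the `static_assert()` (hence the new `<linux/build_bug.h>` and `<linux/stddef.h>` includes earlier in the file) turns a future misalignment into a build failure rather than a runtime fault. A standalone C11 analogue of the guard:

#include <assert.h>     /* static_assert (C11) */
#include <stdatomic.h>
#include <stddef.h>     /* offsetof */

struct cmd {
        char header[6];         /* 6 bytes on purpose: padding expected */
        atomic_int refcount;    /* must stay naturally aligned */
};

static_assert(offsetof(struct cmd, refcount) % _Alignof(atomic_int) == 0,
              "refcount must stay naturally aligned for atomic ops");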
/* Max S/G elements in I/O accelerator command */
#define IOACCEL1_MAXSGENTRIES 24
#define IOACCEL2_MAXSGENTRIES 28
@@ -488,7 +502,7 @@ struct io_accel1_cmd {
__le64 host_addr; /* 0x70 - 0x77 */
u8 CISS_LUN[8]; /* 0x78 - 0x7F */
struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
#define IOACCEL1_FUNCTION_SCSIIO 0x00
#define IOACCEL1_SGLOFFSET 32
@@ -518,7 +532,7 @@ struct ioaccel2_sg_element {
u8 chain_indicator;
#define IOACCEL2_CHAIN 0x80
#define IOACCEL2_LAST_SG 0x40
-};
+} __packed;
/*
* SCSI Response Format structure for IO Accelerator Mode 2
@@ -558,7 +572,7 @@ struct io_accel2_scsi_response {
u8 sense_data_len; /* sense/response data length */
u8 resid_cnt[4]; /* residual count */
u8 sense_data_buff[32]; /* sense/response data buffer */
-};
+} __packed;
/*
* Structure for I/O accelerator (mode 2 or m2) commands.
@@ -591,7 +605,7 @@ struct io_accel2_cmd {
__le32 tweak_upper; /* Encryption tweak, upper 4 bytes */
struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
struct io_accel2_scsi_response error_data;
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
/*
* defines for Mode 2 command struct
@@ -617,7 +631,7 @@ struct hpsa_tmf_struct {
__le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */
__le64 error_ptr; /* Error Pointer */
__le32 error_len; /* Error Length */
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
/* Configuration Table Structure */
struct HostWrite {
@@ -625,7 +639,7 @@ struct HostWrite {
__le32 command_pool_addr_hi;
__le32 CoalIntDelay;
__le32 CoalIntCount;
-};
+} __packed;
#define SIMPLE_MODE 0x02
#define PERFORMANT_MODE 0x04
@@ -674,7 +688,7 @@ struct CfgTable {
#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
__le32 clear_event_notify;
-};
+} __packed;
#define NUM_BLOCKFETCH_ENTRIES 8
struct TransTable_struct {
@@ -685,14 +699,14 @@ struct TransTable_struct {
__le32 RepQCtrAddrHigh32;
#define MAX_REPLY_QUEUES 64
struct vals32 RepQAddr[MAX_REPLY_QUEUES];
-};
+} __packed;
struct hpsa_pci_info {
unsigned char bus;
unsigned char dev_fn;
unsigned short domain;
u32 board_id;
-};
+} __packed;
struct bmic_identify_controller {
u8 configured_logical_drive_count; /* offset 0 */
@@ -701,7 +715,7 @@ struct bmic_identify_controller {
u8 pad2[136];
u8 controller_mode; /* offset 292 */
u8 pad3[32];
-};
+} __packed;
struct bmic_identify_physical_device {
@@ -844,7 +858,7 @@ struct bmic_identify_physical_device {
u8 max_link_rate[256];
u8 neg_phys_link_rate[256];
u8 box_conn_name[8];
-} __attribute((aligned(512)));
+} __packed __attribute((aligned(512)));
struct bmic_sense_subsystem_info {
u8 primary_slot_number;
@@ -857,7 +871,7 @@ struct bmic_sense_subsystem_info {
u8 secondary_array_serial_number[32];
u8 secondary_cache_serial_number[32];
u8 pad[332];
-};
+} __packed;
struct bmic_sense_storage_box_params {
u8 reserved[36];
@@ -869,7 +883,6 @@ struct bmic_sense_storage_box_params {
u8 reserver_3[84];
u8 phys_connector[2];
u8 reserved_4[296];
-};
+} __packed;
-#pragma pack()
#endif /* HPSA_CMD_H */
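The file-wide `#pragma pack(1)` is gone in favor of per-struct `__packed` annotations. The win is scope: packing is now explicit and local to each wire-format definition, nothing included inside the old pragma region gets silently repacked, and structs that must keep natural layout (such as `struct CommandList` above) simply omit the attribute. A small standalone illustration of the difference (`__packed` defined locally here so the sketch compiles outside the kernel):

#define __packed __attribute__((packed))

struct wire_hdr {               /* on-the-wire layout: packed */
        unsigned char  op;
        unsigned int   len;
} __packed;                     /* sizeof == 5, no padding */

struct host_cmd {               /* host-side struct: natural layout */
        unsigned char  op;
        unsigned int   len;     /* padded to its natural alignment */
};                              /* sizeof == 8 on typical ABIs */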
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 6a2561f26e38..7e8903718245 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -758,10 +758,9 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
scp->result = SAM_STAT_CHECK_CONDITION;
memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
goto skip_resid;
- break;
default:
- scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
+ scp->result = DID_ABORT << 16;
break;
}
@@ -770,7 +769,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
skip_resid:
dprintk("scsi_done(%p)\n", scp);
- scp->scsi_done(scp);
+ scsi_done(scp);
free_req(hba, &hba->reqs[tag]);
}
@@ -994,8 +993,7 @@ static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
return 0;
}
-static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
- void (*done)(struct scsi_cmnd *))
+static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
struct Scsi_Host *host = scp->device->host;
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
@@ -1003,9 +1001,6 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
int sg_count = 0;
struct hptiop_request *_req;
- BUG_ON(!done);
- scp->scsi_done = done;
-
_req = get_req(hba);
if (_req == NULL) {
dprintk("hptiop_queuecmd : no free req\n");
@@ -1049,10 +1044,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
req->channel = scp->device->channel;
req->target = scp->device->id;
req->lun = scp->device->lun;
- req->header.size = cpu_to_le32(
- sizeof(struct hpt_iop_request_scsi_command)
- - sizeof(struct hpt_iopsg)
- + sg_count * sizeof(struct hpt_iopsg));
+ req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));
memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
hba->ops->post_req(hba, _req);
@@ -1060,7 +1052,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
cmd_done:
dprintk("scsi_done(scp=%p)\n", scp);
- scp->scsi_done(scp);
+ scsi_done(scp);
return 0;
}
@@ -1151,12 +1143,14 @@ static struct device_attribute hptiop_attr_fw_version = {
.show = hptiop_show_fw_version,
};
-static struct device_attribute *hptiop_attrs[] = {
- &hptiop_attr_version,
- &hptiop_attr_fw_version,
+static struct attribute *hptiop_host_attrs[] = {
+ &hptiop_attr_version.attr,
+ &hptiop_attr_fw_version.attr,
NULL
};
+ATTRIBUTE_GROUPS(hptiop_host);
+
static int hptiop_slave_config(struct scsi_device *sdev)
{
if (sdev->type == TYPE_TAPE)
@@ -1173,10 +1167,11 @@ static struct scsi_host_template driver_template = {
.info = hptiop_info,
.emulated = 0,
.proc_name = driver_name,
- .shost_attrs = hptiop_attrs,
+ .shost_groups = hptiop_host_groups,
.slave_configure = hptiop_slave_config,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
+ .cmd_size = sizeof(struct hpt_cmd_priv),
};
static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
@@ -1399,8 +1394,8 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
host->max_cmd_len = 16;
- req_size = sizeof(struct hpt_iop_request_scsi_command)
- + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
+ req_size = struct_size((struct hpt_iop_request_scsi_command *)0,
+ sg_list, hba->max_sg_descriptors);
if ((req_size & 0x1f) != 0)
req_size = (req_size + 0x1f) & ~0x1f;
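With `sg_list` converted to a true flexible array member (see the hptiop.h hunks below), the size arithmetic collapses into `struct_size()` from `<linux/overflow.h>`, which computes `sizeof(*p)` plus `n` trailing elements with overflow saturation; the null-pointer cast is just a way to name the type when no object exists yet. A userspace analogue without the overflow checking (the macro here takes a type rather than a pointer, unlike the kernel's):

#include <stdlib.h>

struct scsi_req {
        unsigned int dataxfer_length;
        unsigned char cdb[16];
        struct { unsigned long long addr; unsigned int size; } sg_list[];
};

#define STRUCT_SIZE(type, member, n) \
        (sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

static struct scsi_req *alloc_req(size_t nsg)
{
        /* one allocation covers the header plus nsg descriptors */
        return malloc(STRUCT_SIZE(struct scsi_req, sg_list, nsg));
}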
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 35184c2008af..394ef6aa469e 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -228,7 +228,7 @@ struct hpt_iop_request_scsi_command {
u8 pad1;
u8 cdb[16];
__le32 dataxfer_length;
- struct hpt_iopsg sg_list[1];
+ struct hpt_iopsg sg_list[];
};
struct hpt_iop_request_ioctl_command {
@@ -237,7 +237,7 @@ struct hpt_iop_request_ioctl_command {
__le32 inbuf_size;
__le32 outbuf_size;
__le32 bytes_returned;
- u8 buf[1];
+ u8 buf[];
/* out data should be put at buf[(inbuf_size+3)&~3] */
};
@@ -251,13 +251,13 @@ struct hptiop_request {
int index;
};
-struct hpt_scsi_pointer {
+struct hpt_cmd_priv {
int mapped;
int sgcnt;
dma_addr_t dma_handle;
};
-#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+#define HPT_SCP(scp) ((struct hpt_cmd_priv *)scsi_cmd_priv(scp))
enum hptiop_family {
UNKNOWN_BASED_IOP,
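The `SCp` scratch area in `scsi_cmnd` is being retired, so the driver declares `.cmd_size = sizeof(struct hpt_cmd_priv)` in its host template and the SCSI midlayer co-allocates that many bytes directly behind every command; `scsi_cmd_priv()` simply returns the region past the `scsi_cmnd`. A userspace sketch of the co-allocation idea (struct contents are illustrative):

#include <stdlib.h>

struct scsi_cmnd_sketch { int result; /* ... */ };
struct cmd_priv_sketch { int mapped; int sgcnt; };

static void *cmd_priv(struct scsi_cmnd_sketch *scp)
{
        return scp + 1;         /* private area follows the command */
}

static struct scsi_cmnd_sketch *alloc_cmd(size_t cmd_size)
{
        /* midlayer does this once per tag, sized by template->cmd_size */
        return calloc(1, sizeof(struct scsi_cmnd_sketch) + cmd_size);
}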
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index df897df5cafe..1a0c0b7289d2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -13,6 +13,7 @@
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -21,6 +22,7 @@
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
+#include <asm/rtas.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -40,6 +42,12 @@ static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
+static unsigned int mq_enabled = IBMVFC_MQ;
+static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
+static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
+static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
+static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
+
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
@@ -49,6 +57,22 @@ MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+module_param_named(mq, mq_enabled, uint, S_IRUGO);
+MODULE_PARM_DESC(mq, "Enable multiqueue support. "
+ "[Default=" __stringify(IBMVFC_MQ) "]");
+module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
+ "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
+module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
+ "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
+module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
+MODULE_PARM_DESC(mig_channels_only, "Prevent migration to a non-channelized system. "
+ "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
+module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to a system with fewer channels. "
+ "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
+
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
"[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
@@ -133,9 +157,53 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
+static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
+ unsigned long length, unsigned long *cookie,
+ unsigned long *irq)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
+ *cookie = retbuf[0];
+ *irq = retbuf[1];
+
+ return rc;
+}
+
+static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
+{
+ u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
+
+ return (host_caps & cap_flags) ? 1 : 0;
+}
+
+static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
+ struct ibmvfc_cmd *vfc_cmd)
+{
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ return &vfc_cmd->v2.iu;
+ else
+ return &vfc_cmd->v1.iu;
+}
+
+static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
+ struct ibmvfc_cmd *vfc_cmd)
+{
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ return &vfc_cmd->v2.rsp;
+ else
+ return &vfc_cmd->v1.rsp;
+}
+
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_trc_start - Log a start trace entry
@@ -147,9 +215,11 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
struct ibmvfc_trace_entry *entry;
+ int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
- entry = &vhost->trace[vhost->trace_index++];
+ entry = &vhost->trace[index];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
@@ -157,11 +227,11 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
- entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->op_code = iu->cdb[0];
entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
- entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
- entry->tmf_flags = vfc_cmd->iu.tmf_flags;
- entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
+ entry->lun = scsilun_to_int(&iu->lun);
+ entry->tmf_flags = iu->tmf_flags;
+ entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = be32_to_cpu(mad->opcode);
@@ -181,8 +251,12 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
- struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
+ struct ibmvfc_trace_entry *entry;
+ int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
+ entry = &vhost->trace[index];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
@@ -190,15 +264,15 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
- entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->op_code = iu->cdb[0];
entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
- entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
- entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->lun = scsilun_to_int(&iu->lun);
+ entry->tmf_flags = iu->tmf_flags;
entry->u.end.status = be16_to_cpu(vfc_cmd->status);
entry->u.end.error = be16_to_cpu(vfc_cmd->error);
- entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
- entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
- entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+ entry->u.end.fcp_rsp_flags = rsp->flags;
+ entry->u.end.rsp_code = rsp->data.info.rsp_code;
+ entry->u.end.scsi_status = rsp->scsi_status;
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = be32_to_cpu(mad->opcode);
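The trace index changes from a bare `vhost->trace_index++` (racy once several queues log concurrently) to `atomic_inc_return()` masked with `IBMVFC_TRACE_INDEX_MASK`, a lock-free ring-buffer slot claim; the masking trick presumes the trace array length is a power of two. A self-contained C11 sketch of the same claim (size and entry layout invented):

#include <stdatomic.h>

#define TRACE_SIZE 2048u                /* illustrative; must be 2^n */
#define TRACE_MASK (TRACE_SIZE - 1)

struct trace_entry { unsigned long time; int op_code; };

static struct trace_entry trace[TRACE_SIZE];
static atomic_uint trace_index;

static struct trace_entry *claim_slot(void)
{
        /* each logger gets a distinct slot; old entries get overwritten */
        unsigned int idx = atomic_fetch_add(&trace_index, 1) & TRACE_MASK;

        return &trace[idx];
}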
@@ -253,15 +327,16 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
/**
* ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vhost: ibmvfc host struct
* @vfc_cmd: ibmvfc command struct
*
* Return value:
* SCSI result value to return for completed command
**/
-static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
int err;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
@@ -413,22 +488,59 @@ static const char *ibmvfc_get_fc_type(u16 status)
* @tgt: ibmvfc target struct
* @action: action to perform
*
+ * Returns:
+ * 0 if action changed / non-zero if not changed
**/
-static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
enum ibmvfc_target_action action)
{
+ int rc = -EINVAL;
+
switch (tgt->action) {
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
+ action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
+ action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
case IBMVFC_TGT_ACTION_DEL_RPORT:
- if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
tgt->action = action;
+ rc = 0;
+ }
+ break;
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
- if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
- tgt->add_rport = 0;
tgt->action = action;
+ rc = 0;
break;
}
+
+ if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ tgt->add_rport = 0;
+
+ return rc;
}
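`ibmvfc_set_tgt_action()` is now a guarded state machine: each current action whitelists its legal successors, and the new return value tells callers such as `ibmvfc_del_tgt()` below whether the transition actually happened, so the logout job step gets installed only once. The same shape in miniature (states invented):

enum state { ST_IDLE, ST_LOGOUT, ST_DELETE };

static int set_state(enum state *cur, enum state next)
{
        switch (*cur) {
        case ST_IDLE:                   /* anything goes from idle */
                *cur = next;
                return 0;
        case ST_LOGOUT:                 /* logout may only advance */
                if (next == ST_DELETE) {
                        *cur = next;
                        return 0;
                }
                return -1;              /* illegal transition: refused */
        case ST_DELETE:                 /* terminal state */
        default:
                return -1;
        }
}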
/**
@@ -493,8 +605,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
+ case IBMVFC_HOST_ACTION_REENABLE:
+ case IBMVFC_HOST_ACTION_RESET:
+ vhost->action = action;
+ break;
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_LOGO:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ case IBMVFC_HOST_ACTION_NONE:
+ default:
switch (vhost->action) {
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -504,15 +625,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
}
break;
- case IBMVFC_HOST_ACTION_LOGO:
- case IBMVFC_HOST_ACTION_QUERY_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
- case IBMVFC_HOST_ACTION_NONE:
- case IBMVFC_HOST_ACTION_RESET:
- case IBMVFC_HOST_ACTION_REENABLE:
- default:
- vhost->action = action;
- break;
}
}
@@ -525,7 +637,8 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
**/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
- if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+ if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
+ vhost->state == IBMVFC_ACTIVE) {
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
@@ -537,6 +650,19 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_del_tgt - Schedule cleanup and removal of the target
+ * @tgt: ibmvfc target struct
+ **/
+static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
+{
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
+ tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+ tgt->init_retries = 0;
+ }
+ wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
* ibmvfc_link_down - Handle a link down event from the adapter
* @vhost: ibmvfc host struct
* @state: ibmvfc host state to enter
@@ -550,7 +676,7 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
ENTER;
scsi_block_requests(vhost->host);
list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_set_host_state(vhost, state);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
@@ -579,11 +705,16 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
}
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
- memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+ memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
- list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (vhost->client_migrated)
+ tgt->need_login = 1;
+ else
+ ibmvfc_del_tgt(tgt);
+ }
+
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -606,6 +737,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
+static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
+ u64 word2, u64 word3, u64 word4)
+{
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+ return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
+ word1, word2, word3, word4);
+}
+
/**
* ibmvfc_send_crq_init - Send a CRQ init message
* @vhost: ibmvfc host struct
@@ -633,6 +773,115 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
+ * @size: pool size
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue,
+ unsigned int size)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+ ENTER;
+ if (!size)
+ return 0;
+
+ pool->size = size;
+ pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
+ if (!pool->events)
+ return -ENOMEM;
+
+ pool->iu_storage = dma_alloc_coherent(vhost->dev,
+ size * sizeof(*pool->iu_storage),
+ &pool->iu_token, 0);
+
+ if (!pool->iu_storage) {
+ kfree(pool->events);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&queue->sent);
+ INIT_LIST_HEAD(&queue->free);
+ spin_lock_init(&queue->l_lock);
+
+ for (i = 0; i < size; ++i) {
+ struct ibmvfc_event *evt = &pool->events[i];
+
+ /*
+ * evt->active states
+ * 1 = in flight
+ * 0 = being completed
+ * -1 = free/freed
+ */
+ atomic_set(&evt->active, -1);
+ atomic_set(&evt->free, 1);
+ evt->crq.valid = 0x80;
+ evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
+ evt->xfer_iu = pool->iu_storage + i;
+ evt->vhost = vhost;
+ evt->queue = queue;
+ evt->ext_list = NULL;
+ list_add_tail(&evt->queue_list, &queue->free);
+ }
+
+ LEAVE;
+ return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+ ENTER;
+ for (i = 0; i < pool->size; ++i) {
+ list_del(&pool->events[i].queue_list);
+ BUG_ON(atomic_read(&pool->events[i].free) != 1);
+ if (pool->events[i].ext_list)
+ dma_pool_free(vhost->sg_pool,
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+
+ kfree(pool->events);
+ dma_free_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ pool->iu_storage, pool->iu_token);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_free_queue - Deallocate queue
+ * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue struct
+ *
+ * Unmaps dma and deallocates page for messages
+ **/
+static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ struct device *dev = vhost->dev;
+
+ dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)queue->msgs.handle);
+ queue->msgs.handle = NULL;
+
+ ibmvfc_free_event_pool(vhost, queue);
+}
+
+/**
* ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
* @vhost: ibmvfc host struct
*
@@ -643,7 +892,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
long rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
@@ -656,8 +905,8 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
- dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)crq->msgs);
+
+ ibmvfc_free_queue(vhost, crq);
}
/**
@@ -671,6 +920,9 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ unsigned long flags;
+
+ ibmvfc_dereg_sub_crqs(vhost);
/* Re-enable the CRQ */
do {
@@ -682,6 +934,15 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
if (rc)
dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
+ vhost->do_enquiry = 1;
+ vhost->using_channels = 0;
+ spin_unlock(vhost->crq.q_lock);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ ibmvfc_reg_sub_crqs(vhost);
+
return rc;
}
@@ -697,7 +958,9 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
int rc = 0;
unsigned long flags;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
+
+ ibmvfc_dereg_sub_crqs(vhost);
/* Close the CRQ */
do {
@@ -707,11 +970,14 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
+ vhost->do_enquiry = 1;
+ vhost->using_channels = 0;
/* Clean out the queue */
- memset(crq->msgs, 0, PAGE_SIZE);
+ memset(crq->msgs.crq, 0, PAGE_SIZE);
crq->cur = 0;
/* And re-open it again */
@@ -723,8 +989,12 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
dev_warn(vhost->dev, "Partner adapter not ready\n");
else if (rc != 0)
dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+
+ spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_reg_sub_crqs(vhost);
+
return rc;
}
@@ -754,12 +1024,18 @@ static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
**/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
- struct ibmvfc_host *vhost = evt->vhost;
- struct ibmvfc_event_pool *pool = &vhost->pool;
+ struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
+ unsigned long flags;
BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1);
- list_add_tail(&evt->queue, &vhost->free);
+ BUG_ON(atomic_dec_and_test(&evt->active));
+
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->free);
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}
/**
@@ -775,16 +1051,31 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
if (cmnd) {
scsi_dma_unmap(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
- if (evt->eh_comp)
- complete(evt->eh_comp);
-
ibmvfc_free_event(evt);
}
/**
+ * ibmvfc_complete_purge - Complete failed command list
+ * @purge_list: list head of failed commands
+ *
+ * This function runs completions on commands that were failed as a
+ * result of a host reset or platform migration.
+ **/
+static void ibmvfc_complete_purge(struct list_head *purge_list)
+{
+ struct ibmvfc_event *evt, *pos;
+
+ list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
+/**
* ibmvfc_fail_request - Fail request with specified error code
* @evt: ibmvfc event struct
* @error_code: error code to fail request with
@@ -794,16 +1085,19 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
**/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
+ /*
+ * Anything we are failing should still be active. Otherwise, it
+ * implies we already got a response for the command and are doing
+ * something bad like double completing it.
+ */
+ BUG_ON(!atomic_dec_and_test(&evt->active));
if (evt->cmnd) {
evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done;
} else
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
- list_del(&evt->queue);
del_timer(&evt->timer);
- ibmvfc_trc_end(evt);
- evt->done(evt);
}
/**
@@ -817,10 +1111,30 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
struct ibmvfc_event *evt, *pos;
+ struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
+ unsigned long flags;
+ int hwqs = 0;
+ int i;
+
+ if (vhost->using_channels)
+ hwqs = vhost->scsi_scrqs.active_queues;
ibmvfc_dbg(vhost, "Purging all requests\n");
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+ spin_lock_irqsave(&vhost->crq.l_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
ibmvfc_fail_request(evt, error_code);
+ list_splice_init(&vhost->crq.sent, &vhost->purge);
+ spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+
+ for (i = 0; i < hwqs; i++) {
+ spin_lock_irqsave(queues[i].q_lock, flags);
+ spin_lock(&queues[i].l_lock);
+ list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
+ ibmvfc_fail_request(evt, error_code);
+ list_splice_init(&queues[i].sent, &vhost->purge);
+ spin_unlock(&queues[i].l_lock);
+ spin_unlock_irqrestore(queues[i].q_lock, flags);
+ }
}
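Purging now has to visit the main CRQ plus every active sub-queue. The pattern is fail-under-lock, complete-outside-lock: each queue's `sent` list is walked and spliced onto `vhost->purge` while `q_lock` and then `l_lock` are held, and `ibmvfc_complete_purge()` above later runs the `done()` callbacks with no locks taken. A miniature of the splice-then-complete idea (singly linked list instead of `list_head`; locking elided):

struct evt {
        struct evt *next;
        int result;
        void (*done)(struct evt *);
};

static struct evt *splice_failed(struct evt **sent, struct evt *purge,
                                 int error_code)
{
        while (*sent) {
                struct evt *e = *sent;

                *sent = e->next;        /* unlink under the queue lock */
                e->result = error_code; /* fail it; don't complete yet */
                e->next = purge;        /* move onto the purge list */
                purge = e;
        }
        return purge;
}

static void complete_purge(struct evt *purge)
{
        while (purge) {                 /* later, with no locks held */
                struct evt *e = purge;

                purge = e->next;
                e->done(e);
        }
}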
/**
@@ -1124,6 +1438,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
/**
* ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost: ibmvfc host struct
*
* Return value:
* none
@@ -1157,6 +1472,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+ struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct device_node *of_node = vhost->dev->of_node;
const char *location;
@@ -1174,9 +1490,14 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
- login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
+ login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
+
+ if (vhost->mq_enabled || vhost->using_channels)
+ login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
+
login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
- login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
+ login_info->async.len = cpu_to_be32(async_crq->size *
+ sizeof(*async_crq->msgs.async));
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
@@ -1187,88 +1508,40 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
}
/**
- * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
- * @vhost: ibmvfc host who owns the event pool
- *
- * Returns zero on success.
- **/
-static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
-{
- int i;
- struct ibmvfc_event_pool *pool = &vhost->pool;
-
- ENTER;
- pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
- pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
- if (!pool->events)
- return -ENOMEM;
-
- pool->iu_storage = dma_alloc_coherent(vhost->dev,
- pool->size * sizeof(*pool->iu_storage),
- &pool->iu_token, 0);
-
- if (!pool->iu_storage) {
- kfree(pool->events);
- return -ENOMEM;
- }
-
- for (i = 0; i < pool->size; ++i) {
- struct ibmvfc_event *evt = &pool->events[i];
- atomic_set(&evt->free, 1);
- evt->crq.valid = 0x80;
- evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
- evt->xfer_iu = pool->iu_storage + i;
- evt->vhost = vhost;
- evt->ext_list = NULL;
- list_add_tail(&evt->queue, &vhost->free);
- }
-
- LEAVE;
- return 0;
-}
-
-/**
- * ibmvfc_free_event_pool - Frees memory of the event pool of a host
- * @vhost: ibmvfc host who owns the event pool
+ * ibmvfc_get_event - Gets the next free event in pool
+ * @queue: ibmvfc queue struct
*
+ * Returns a free event from the pool.
**/
-static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
{
- int i;
- struct ibmvfc_event_pool *pool = &vhost->pool;
-
- ENTER;
- for (i = 0; i < pool->size; ++i) {
- list_del(&pool->events[i].queue);
- BUG_ON(atomic_read(&pool->events[i].free) != 1);
- if (pool->events[i].ext_list)
- dma_pool_free(vhost->sg_pool,
- pool->events[i].ext_list,
- pool->events[i].ext_list_token);
- }
+ struct ibmvfc_event *evt;
+ unsigned long flags;
- kfree(pool->events);
- dma_free_coherent(vhost->dev,
- pool->size * sizeof(*pool->iu_storage),
- pool->iu_storage, pool->iu_token);
- LEAVE;
+ spin_lock_irqsave(&queue->l_lock, flags);
+ BUG_ON(list_empty(&queue->free));
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue_list);
+ spin_unlock_irqrestore(&queue->l_lock, flags);
+ return evt;
}
/**
- * ibmvfc_get_event - Gets the next free event in pool
- * @vhost: ibmvfc host struct
+ * ibmvfc_locked_done - Calls evt completion with host_lock held
+ * @evt: ibmvfc evt to complete
*
- * Returns a free event from the pool.
+ * All non-SCSI command completion callbacks expect the host_lock to be
+ * held. This callback is used by ibmvfc_init_event to wrap a MAD event's
+ * completion with the host_lock.
**/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
- struct ibmvfc_event *evt;
+ unsigned long flags;
- BUG_ON(list_empty(&vhost->free));
- evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
- atomic_set(&evt->free, 0);
- list_del(&evt->queue);
- return evt;
+ spin_lock_irqsave(evt->vhost->host->host_lock, flags);
+ evt->_done(evt);
+ spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}
/**
@@ -1283,9 +1556,15 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
{
evt->cmnd = NULL;
evt->sync_iu = NULL;
- evt->crq.format = format;
- evt->done = done;
evt->eh_comp = NULL;
+ evt->crq.format = format;
+ if (format == IBMVFC_CMD_FORMAT)
+ evt->done = done;
+ else {
+ evt->_done = done;
+ evt->done = ibmvfc_locked_done;
+ }
+ evt->hwq = 0;
}
/**
@@ -1308,7 +1587,7 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
}
/**
- * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
+ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
* @scmd: struct scsi_cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
@@ -1325,6 +1604,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
int sg_mapped;
struct srp_direct_buf *data = &vfc_cmd->ioba;
struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
if (cls3_error)
vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
@@ -1341,10 +1621,10 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
- vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+ iu->add_cdb_len |= IBMVFC_WRDATA;
} else {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
- vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+ iu->add_cdb_len |= IBMVFC_RDDATA;
}
if (sg_mapped == 1) {
@@ -1376,7 +1656,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
/**
* ibmvfc_timeout - Internal command timeout handler
- * @evt: struct ibmvfc_event that timed out
+ * @t: struct ibmvfc_event that timed out
*
* Called when an internally generated command times out
**/
@@ -1400,6 +1680,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
struct ibmvfc_host *vhost, unsigned long timeout)
{
__be64 *crq_as_u64 = (__be64 *) &evt->crq;
+ unsigned long flags;
int rc;
/* Copy the IU into the transfer area */
@@ -1411,7 +1692,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
else
BUG();
- list_add_tail(&evt->queue, &vhost->sent);
timer_setup(&evt->timer, ibmvfc_timeout, 0);
if (timeout) {
@@ -1419,11 +1699,26 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
add_timer(&evt->timer);
}
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->sent);
+ atomic_set(&evt->active, 1);
+
mb();
- if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
- be64_to_cpu(crq_as_u64[1])))) {
- list_del(&evt->queue);
+ if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
+ rc = ibmvfc_send_sub_crq(vhost,
+ evt->queue->vios_cookie,
+ be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]),
+ 0, 0);
+ else
+ rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+
+ if (rc) {
+ atomic_set(&evt->active, 0);
+ list_del(&evt->queue_list);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
del_timer(&evt->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
@@ -1448,8 +1743,10 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
evt->done(evt);
- } else
+ } else {
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt);
+ }
return 0;
}
@@ -1463,7 +1760,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_host *vhost = evt->vhost;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
struct scsi_cmnd *cmnd = evt->cmnd;
const char *err = unknown_error;
int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
@@ -1497,15 +1794,18 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_target *tgt;
+ unsigned long flags;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
}
}
ibmvfc_reinit_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
@@ -1517,7 +1817,7 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
struct scsi_cmnd *cmnd = evt->cmnd;
u32 rsp_len = 0;
u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
@@ -1531,7 +1831,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
scsi_set_resid(cmnd, 0);
if (vfc_cmd->status) {
- cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+ cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
@@ -1554,12 +1854,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->result = (DID_ERROR << 16);
scsi_dma_unmap(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
- if (evt->eh_comp)
- complete(evt->eh_comp);
-
ibmvfc_free_event(evt);
}
@@ -1593,53 +1890,84 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
return result;
}
+static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
+ size_t offset;
+
+ memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ offset = offsetof(struct ibmvfc_cmd, v2.rsp);
+ vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
+ } else
+ offset = offsetof(struct ibmvfc_cmd, v1.rsp);
+ vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
+ vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
+ vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
+ vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
+ vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &iu->lun);
+
+ return vfc_cmd;
+}
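The v1/v2 offsetof() choice above pairs with the ibmvfc_get_fcp_iu()/ibmvfc_get_fcp_rsp() accessors used throughout this patch. Their definitions live in ibmvfc.h and are not part of this hunk; a plausible sketch of the shape implied here, not the actual header:

    /* Hypothetical sketch only; the real helper is defined in ibmvfc.h
     * and may differ in detail. */
    static inline struct ibmvfc_fcp_cmd_iu *
    ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
    {
        if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
            return &vfc_cmd->v2.iu;
        return &vfc_cmd->v1.iu;
    }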
+
/**
* ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @shost: scsi host struct
* @cmnd: struct scsi_cmnd to be executed
- * @done: Callback function to be called when cmnd is completed
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
- void (*done) (struct scsi_cmnd *))
+static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
- struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+ struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct ibmvfc_cmd *vfc_cmd;
+ struct ibmvfc_fcp_cmd_iu *iu;
struct ibmvfc_event *evt;
+ u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
+ u16 scsi_channel;
int rc;
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
cmnd->result = rc;
- done(cmnd);
+ scsi_done(cmnd);
return 0;
}
cmnd->result = (DID_OK << 16);
- evt = ibmvfc_get_event(vhost);
+ if (vhost->using_channels) {
+ scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
+ evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+ } else
+ evt = ibmvfc_get_event(&vhost->crq);
+
ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
evt->cmnd = cmnd;
- cmnd->scsi_done = done;
- vfc_cmd = &evt->iu.cmd;
- memset(vfc_cmd, 0, sizeof(*vfc_cmd));
- vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
- vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
- vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
- vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
- vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
- vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
- int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
- memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+ vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
+ iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+
+ iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
+ memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
if (cmnd->flags & SCMD_TAGGED) {
- vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
- vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+ vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
+ iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
+ vfc_cmd->correlation = cpu_to_be64((u64)evt);
+
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
@@ -1652,12 +1980,10 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
"Failed to map DMA buffer for command. rc=%d\n", rc);
cmnd->result = DID_ERROR << 16;
- done(cmnd);
+ scsi_done(cmnd);
return 0;
}
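With host_tagset enabled, the block layer encodes the hardware queue index in the upper 16 bits of the unique tag, and the driver folds that onto however many sub-CRQ channels the VIOS granted. A small worked example of the mapping (values illustrative):

    u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); /* (hwq << 16) | tag */
    u16 hwq  = blk_mq_unique_tag_to_hwq(tag_and_hwq);          /* say, 11 */
    u16 chan = hwq % vhost->scsi_scrqs.active_queues;          /* 11 % 8 == 3 */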
-static DEF_SCSI_QCMD(ibmvfc_queuecommand)
-
/**
* ibmvfc_sync_completion - Signal that a synchronous command has completed
* @evt: ibmvfc event struct
@@ -1711,7 +2037,7 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
}
vhost->aborting_passthru = 1;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
@@ -1769,7 +2095,7 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
if (unlikely((rc = ibmvfc_host_chkready(vhost))))
goto unlock_out;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
@@ -1830,7 +2156,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
(bsg_request->rqst_data.h_els.port_id[1] << 8) |
bsg_request->rqst_data.h_els.port_id[2];
- /* fall through */
+ fallthrough;
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
@@ -1839,7 +2165,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
bsg_request->rqst_data.h_ct.port_id[2];
- /* fall through */
+ fallthrough;
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -1887,7 +2213,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
goto out;
}
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
mad = &evt->iu.passthru;
@@ -1963,28 +2289,27 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt = NULL;
union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ struct ibmvfc_fcp_cmd_iu *iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
int rsp_rc = -EBUSY;
unsigned long flags;
int rsp_code = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
+ if (vhost->using_channels)
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
+ else
+ evt = ibmvfc_get_event(&vhost->crq);
+
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
- tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
- tmf->iu.tmf_flags = type;
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ iu->tmf_flags = type;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
@@ -2002,7 +2327,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
wait_for_completion(&evt->comp);
if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
@@ -2026,7 +2351,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
/**
* ibmvfc_match_rport - Match function for specified remote port
* @evt: ibmvfc event struct
- * @device: device to match (rport)
+ * @rport: device to match
*
* Returns:
* 1 if event matches rport / 0 if event does not match rport
@@ -2074,6 +2399,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
}
/**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt: ibmvfc event struct
+ *
+ * Returns:
+ * true if the event is on its queue's free list / false otherwise
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_event *loop_evt;
+
+ list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+ if (loop_evt == evt)
+ return true;
+
+ return false;
+}
+
+/**
* ibmvfc_wait_for_ops - Wait for ops to complete
* @vhost: ibmvfc host struct
* @device: device to match (starget or sdev)
@@ -2087,19 +2430,35 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
{
struct ibmvfc_event *evt;
DECLARE_COMPLETION_ONSTACK(comp);
- int wait;
+ int wait, i, q_index, q_size;
unsigned long flags;
signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+ struct ibmvfc_queue *queues;
ENTER;
+ if (vhost->mq_enabled && vhost->using_channels) {
+ queues = vhost->scsi_scrqs.scrqs;
+ q_size = vhost->scsi_scrqs.active_queues;
+ } else {
+ queues = &vhost->crq;
+ q_size = 1;
+ }
+
do {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = &comp;
- wait++;
+ for (q_index = 0; q_index < q_size; q_index++) {
+ spin_lock(&queues[q_index].l_lock);
+ for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+ evt = &queues[q_index].evt_pool.events[i];
+ if (!ibmvfc_event_is_free(evt)) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
}
+ spin_unlock(&queues[q_index].l_lock);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -2109,11 +2468,18 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
if (!timeout) {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = NULL;
- wait++;
+ for (q_index = 0; q_index < q_size; q_index++) {
+ spin_lock(&queues[q_index].l_lock);
+ for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+ evt = &queues[q_index].evt_pool.events[i];
+ if (!ibmvfc_event_is_free(evt)) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
}
+ spin_unlock(&queues[q_index].l_lock);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait)
@@ -2128,23 +2494,123 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
return SUCCESS;
}
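ibmvfc_wait_for_ops() is paired with the match helpers (ibmvfc_match_rport, ibmvfc_match_lun, ibmvfc_match_key) by the error-handling entry points. A simplified sketch of a device-reset handler using it, modeled on this driver's EH path (IBMVFC_LUN_RESET comes from ibmvfc.h):

    static int example_eh_device_reset(struct scsi_cmnd *cmd)
    {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
        int rc;

        /* issue the LUN reset, then wait for everything outstanding to
         * this LUN to complete back before declaring success */
        rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
        if (rc == SUCCESS)
            rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
        return rc;
    }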
-/**
- * ibmvfc_cancel_all - Cancel all outstanding commands to the device
- * @sdev: scsi device to cancel commands
- * @type: type of error recovery being performed
- *
- * This sends a cancel to the VIOS for the specified device. This does
- * NOT send any abort to the actual device. That must be done separately.
- *
- * Returns:
- * 0 on success / other on failure
- **/
-static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ struct scsi_device *sdev,
+ int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
+ struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
+
+ evt = ibmvfc_get_event(queue);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &tmf->lun);
+ if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
+ type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+ if (vhost->state == IBMVFC_ACTIVE)
+ tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
+ else
+ tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
+
+ init_completion(&evt->comp);
+
+ return evt;
+}
+
+static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct ibmvfc_event *evt, *found_evt, *temp;
+ struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
+ unsigned long flags;
+ int num_hwq, i;
+ int fail = 0;
+ LIST_HEAD(cancelq);
+ u16 status;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ num_hwq = vhost->scsi_scrqs.active_queues;
+ for (i = 0; i < num_hwq; i++) {
+ spin_lock(queues[i].q_lock);
+ spin_lock(&queues[i].l_lock);
+ found_evt = NULL;
+ list_for_each_entry(evt, &queues[i].sent, queue_list) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+ spin_unlock(&queues[i].l_lock);
+
+ if (found_evt && vhost->logged_in) {
+ evt = ibmvfc_init_tmf(&queues[i], sdev, type);
+ evt->sync_iu = &queues[i].cancel_rsp;
+ ibmvfc_send_event(evt, vhost, default_timeout);
+ list_add_tail(&evt->cancel, &cancelq);
+ }
+
+ spin_unlock(queues[i].q_lock);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (list_empty(&cancelq)) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+ return 0;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+ list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
+ wait_for_completion(&evt->comp);
+ status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
+ list_del(&evt->cancel);
+ ibmvfc_free_event(evt);
+
+ if (status != IBMVFC_MAD_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter most likely going through reset; return success so
+ * the caller will wait for the command being cancelled to get returned
+ */
+ break;
+ default:
+ fail = 1;
+ break;
+ }
+ }
+ }
+
+ if (fail)
+ return -EIO;
+
+ sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+ LEAVE;
+ return 0;
+}
+
+static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
int rsp_rc = -EBUSY;
@@ -2152,14 +2618,16 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
u16 status;
ENTER;
- spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(&vhost->crq.l_lock);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
+ spin_unlock(&vhost->crq.l_lock);
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2169,27 +2637,8 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
if (vhost->logged_in) {
- evt = ibmvfc_get_event(vhost);
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
-
- tmf = &evt->iu.tmf;
- memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = cpu_to_be32(1);
- tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
- tmf->common.length = cpu_to_be16(sizeof(*tmf));
- tmf->scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->lun);
- if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
- type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
- if (vhost->state == IBMVFC_ACTIVE)
- tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
- else
- tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
-
+ evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
evt->sync_iu = &rsp;
- init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
@@ -2229,6 +2678,27 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev: scsi device to cancel commands
+ * @type: type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+
+ if (vhost->mq_enabled && vhost->using_channels)
+ return ibmvfc_cancel_all_mq(sdev, type);
+ else
+ return ibmvfc_cancel_all_sq(sdev, type);
+}
+
+/**
* ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
* @key: cancel key to match
@@ -2278,19 +2748,22 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ struct ibmvfc_fcp_cmd_iu *iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
int rc, rsp_rc = -EBUSY;
unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
int rsp_code = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(&vhost->crq.l_lock);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
+ spin_unlock(&vhost->crq.l_lock);
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2300,23 +2773,19 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
}
if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
- tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
- tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
+ tmf->correlation = cpu_to_be64((u64)evt);
+
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
@@ -2361,7 +2830,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
}
if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
@@ -2538,7 +3007,9 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *dev_rport;
struct scsi_device *sdev;
- unsigned long rc;
+ struct ibmvfc_target *tgt;
+ unsigned long rc, flags;
+ unsigned int found;
ENTER;
shost_for_each_device(sdev, shost) {
@@ -2552,6 +3023,27 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ found = 0;
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == rport->port_id) {
+ found++;
+ break;
+ }
+ }
+
+ if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ /*
+ * If we get here, that means we previously attempted to send
+ * an implicit logout to the target but it failed, most likely
+ * due to I/O being pending, so we need to send it again
+ */
+ ibmvfc_del_tgt(tgt);
+ ibmvfc_reinit_host(vhost);
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
LEAVE;
}
@@ -2686,7 +3178,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
tgt->logo_rcvd = 1;
if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_reinit_host(vhost);
}
}
@@ -2711,9 +3203,11 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
* ibmvfc_handle_crq - Handles and frees received events in the CRQ
* @crq: Command/Response queue
* @vhost: ibmvfc host struct
+ * @evt_doneq: Event done queue
*
- **/
-static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+ **/
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+ struct list_head *evt_doneq)
{
long rc;
struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
@@ -2746,9 +3240,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
/* We need to re-setup the interpartition connection */
dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
+
+ scsi_block_requests(vhost->host);
ibmvfc_purge_requests(vhost, DID_REQUEUE);
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
+ wake_up(&vhost->work_wait_q);
} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
@@ -2772,22 +3269,21 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
* things we send. Make sure this response is to something we
* actually sent
*/
- if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+ if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
crq->ioba);
return;
}
- if (unlikely(atomic_read(&evt->free))) {
+ if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba);
return;
}
- del_timer(&evt->timer);
- list_del(&evt->queue);
- ibmvfc_trc_end(evt);
- evt->done(evt);
+ spin_lock(&evt->queue->l_lock);
+ list_move_tail(&evt->queue_list, evt_doneq);
+ spin_unlock(&evt->queue->l_lock);
}
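The correlation round-trip works because the event pointer itself was handed to firmware as the CRQ I/O address, so crq->ioba comes straight back as the token; ibmvfc_valid_event() (earlier in the file, not shown in this hunk) must bounds-check it against the pool before it is trusted. A hypothetical sketch of such a check:

    /* Hypothetical shape of ibmvfc_valid_event(); the real implementation
     * may differ. Accept only pointers landing on a slot in this pool. */
    static int valid_event_sketch(struct ibmvfc_event_pool *pool,
                                  struct ibmvfc_event *evt)
    {
        long index = evt - pool->events;

        return index >= 0 && index < pool->size;
    }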
/**
@@ -2805,14 +3301,18 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
int done = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (time >= (init_timeout * HZ)) {
+ if (!vhost->scan_timeout)
+ done = 1;
+ else if (time >= (vhost->scan_timeout * HZ)) {
dev_info(vhost->dev, "Scan taking longer than %d seconds, "
- "continuing initialization\n", init_timeout);
+ "continuing initialization\n", vhost->scan_timeout);
done = 1;
}
- if (vhost->scan_complete)
+ if (vhost->scan_complete) {
+ vhost->scan_timeout = init_timeout;
done = 1;
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return done;
}
@@ -2881,8 +3381,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
@@ -2891,7 +3393,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
* ibmvfc_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
- * @reason: calling context
*
* Return value:
* actual depth set
@@ -2949,7 +3450,7 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+ return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
}
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
@@ -2957,12 +3458,13 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+ return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
}
/**
* ibmvfc_show_log_level - Show the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -2985,7 +3487,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
/**
* ibmvfc_store_log_level - Change the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
+ * @count: buffer size
*
* Return value:
* number of bytes printed to buffer
@@ -3004,6 +3508,37 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
return strlen(buf);
}
+static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return len;
+}
+
+static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ unsigned int channels;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ channels = simple_strtoul(buf, NULL, 10);
+ vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
+ ibmvfc_hard_reset_host(vhost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return strlen(buf);
+}
+
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
@@ -3012,6 +3547,8 @@ static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
ibmvfc_show_log_level, ibmvfc_store_log_level);
+static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
+ ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
@@ -3030,7 +3567,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
@@ -3060,17 +3597,20 @@ static struct bin_attribute ibmvfc_trace_attr = {
};
#endif
-static struct device_attribute *ibmvfc_attrs[] = {
- &dev_attr_partition_name,
- &dev_attr_device_name,
- &dev_attr_port_loc_code,
- &dev_attr_drc_name,
- &dev_attr_npiv_version,
- &dev_attr_capabilities,
- &dev_attr_log_level,
+static struct attribute *ibmvfc_host_attrs[] = {
+ &dev_attr_partition_name.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_port_loc_code.attr,
+ &dev_attr_drc_name.attr,
+ &dev_attr_npiv_version.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_log_level.attr,
+ &dev_attr_nr_scsi_channels.attr,
NULL
};
+ATTRIBUTE_GROUPS(ibmvfc_host);
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual FC Adapter",
@@ -3091,8 +3631,9 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = IBMVFC_MAX_SECTORS,
- .shost_attrs = ibmvfc_attrs,
+ .shost_groups = ibmvfc_host_groups,
.track_queue_depth = 1,
+ .host_tagset = 1,
};
/**
@@ -3104,10 +3645,10 @@ static struct scsi_host_template driver_template = {
**/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+ struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct ibmvfc_async_crq *crq;
- crq = &async_crq->msgs[async_crq->cur];
+ crq = &async_crq->msgs.async[async_crq->cur];
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
@@ -3127,10 +3668,10 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
**/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
- struct ibmvfc_crq_queue *queue = &vhost->crq;
+ struct ibmvfc_queue *queue = &vhost->crq;
struct ibmvfc_crq *crq;
- crq = &queue->msgs[queue->cur];
+ crq = &queue->msgs.crq[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
@@ -3174,10 +3715,13 @@ static void ibmvfc_tasklet(void *data)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
+ struct ibmvfc_event *evt, *temp;
unsigned long flags;
int done = 0;
+ LIST_HEAD(evt_doneq);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
while (!done) {
/* Pull all the valid messages off the async CRQ */
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
@@ -3188,7 +3732,7 @@ static void ibmvfc_tasklet(void *data)
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- ibmvfc_handle_crq(crq, vhost);
+ ibmvfc_handle_crq(crq, vhost, &evt_doneq);
crq->valid = 0;
wmb();
}
@@ -3201,14 +3745,138 @@ static void ibmvfc_tasklet(void *data)
wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
- ibmvfc_handle_crq(crq, vhost);
+ ibmvfc_handle_crq(crq, vhost, &evt_doneq);
crq->valid = 0;
wmb();
} else
done = 1;
}
+ spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+ del_timer(&evt->timer);
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
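Both the reworked tasklet above and ibmvfc_drain_sub_crq() below follow the same collect-under-lock, complete-outside-lock discipline, which is what allows completion callbacks (e.g. ibmvfc_locked_done) to take the host_lock themselves. Distilled, with placeholder names:

    LIST_HEAD(doneq);

    spin_lock_irqsave(lock, flags);
    while ((evt = next_completed_event()) != NULL)   /* placeholder */
        list_move_tail(&evt->queue_list, &doneq);
    spin_unlock_irqrestore(lock, flags);

    list_for_each_entry_safe(evt, temp, &doneq, queue_list) {
        del_timer(&evt->timer);
        list_del(&evt->queue_list);
        evt->done(evt);   /* may itself acquire the host_lock */
    }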
+
+static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
+{
+ struct device *dev = scrq->vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ unsigned long rc;
+ int irq_action = H_ENABLE_VIO_INTERRUPT;
+
+ if (!enable)
+ irq_action = H_DISABLE_VIO_INTERRUPT;
+
+ rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
+ scrq->hw_irq, 0, 0);
+
+ if (rc)
+ dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
+ enable ? "enable" : "disable", scrq->hwq_id, rc);
+
+ return rc;
+}
+
+static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+ struct list_head *evt_doneq)
+{
+ struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
+
+ switch (crq->valid) {
+ case IBMVFC_CRQ_CMD_RSP:
+ break;
+ case IBMVFC_CRQ_XPORT_EVENT:
+ return;
+ default:
+ dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
+ return;
+ }
+
+ /* The only kind of payload CRQs we should get are responses to
+ * things we send. Make sure this response is to something we
+ * actually sent
+ */
+ if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
+ dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
+ crq->ioba);
+ return;
+ }
+
+ if (unlikely(atomic_dec_if_positive(&evt->active))) {
+ dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
+ crq->ioba);
+ return;
+ }
+
+ spin_lock(&evt->queue->l_lock);
+ list_move_tail(&evt->queue_list, evt_doneq);
+ spin_unlock(&evt->queue->l_lock);
+}
+
+static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
+{
+ struct ibmvfc_crq *crq;
+
+ crq = &scrq->msgs.scrq[scrq->cur].crq;
+ if (crq->valid & 0x80) {
+ if (++scrq->cur == scrq->size)
+ scrq->cur = 0;
+ rmb();
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
+{
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_event *evt, *temp;
+ unsigned long flags;
+ int done = 0;
+ LIST_HEAD(evt_doneq);
+
+ spin_lock_irqsave(scrq->q_lock, flags);
+ while (!done) {
+ while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+ ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+ crq->valid = 0;
+ wmb();
+ }
+
+ ibmvfc_toggle_scrq_irq(scrq, 1);
+ if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+ ibmvfc_toggle_scrq_irq(scrq, 0);
+ ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+ crq->valid = 0;
+ wmb();
+ } else
+ done = 1;
+ }
+ spin_unlock_irqrestore(scrq->q_lock, flags);
+
+ list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+ del_timer(&evt->timer);
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
+static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
+{
+ struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
+
+ ibmvfc_toggle_scrq_irq(scrq, 0);
+ ibmvfc_drain_sub_crq(scrq);
+
+ return IRQ_HANDLED;
}
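ibmvfc_interrupt_scsi() is presumably installed as the handler for each sub-CRQ interrupt during channel setup elsewhere in this patch; a hedged sketch of that registration (the irq mapping details may differ from the real setup code):

    scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
    rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, "ibmvfc", scrq);
    if (rc) {
        dev_err(dev, "Couldn't register sub-crq[%lu] irq\n", scrq->hwq_id);
        irq_dispose_mapping(scrq->irq);
    }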
/**
@@ -3220,8 +3888,8 @@ static void ibmvfc_tasklet(void *data)
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
- tgt->job_step = job_step;
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
+ tgt->job_step = job_step;
wake_up(&tgt->vhost->work_wait_q);
}
@@ -3237,7 +3905,7 @@ static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
wake_up(&tgt->vhost->work_wait_q);
return 0;
} else
@@ -3312,13 +3980,13 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt->add_rport = 1;
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else if (prli_rsp[index].retry)
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3335,7 +4003,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3363,13 +4031,18 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
prli = &evt->iu.prli;
memset(prli, 0, sizeof(*prli));
- prli->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ prli->common.version = cpu_to_be32(2);
+ prli->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ prli->common.version = cpu_to_be32(1);
+ }
prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
prli->common.length = cpu_to_be16(sizeof(*prli));
prli->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3434,7 +4107,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3465,14 +4138,19 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
tgt->logo_rcvd = 0;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
- plogi->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ plogi->common.version = cpu_to_be32(2);
+ plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ plogi->common.version = cpu_to_be32(1);
+ }
plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
plogi->common.length = cpu_to_be16(sizeof(*plogi));
plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3515,33 +4193,29 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
break;
}
- if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
- else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
- tgt->scsi_id != tgt->new_scsi_id)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}
/**
- * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
* @tgt: ibmvfc target struct
+ * @done: Routine to call when the event is responded to
*
+ * Returns:
+ * Allocated and initialized ibmvfc_event struct
**/
-static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
+ void (*done) (struct ibmvfc_event *))
{
struct ibmvfc_implicit_logout *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
- if (vhost->discovery_threads >= disc_threads)
- return;
-
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
- vhost->discovery_threads++;
- ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+ evt = ibmvfc_get_event(&vhost->crq);
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
memset(mad, 0, sizeof(*mad));
@@ -3549,6 +4223,25 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
mad->common.length = cpu_to_be16(sizeof(*mad));
mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ return evt;
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3560,6 +4253,158 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = be16_to_cpu(mad->common.status);
+
+ vhost->discovery_threads--;
+ ibmvfc_free_event(evt);
+
+ /*
+ * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
+ * driver, in which case we need to free up all the targets. If we are
+ * not unloading, we will still go through a hard reset to get out of
+ * offline state, so there is no need to track the old targets in that
+ * case.
+ */
+ if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
+
+ tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (!vhost->logged_in) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ return;
+ }
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_and_del_done);
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
+ * ibmvfc_tgt_move_login_done - Completion handler for Move Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
+ tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+ tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->scsi_id = tgt->new_scsi_id;
+ tgt->ids.port_id = tgt->scsi_id;
+ memcpy(&tgt->service_parms, &rsp->service_parms,
+ sizeof(tgt->service_parms));
+ memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+ sizeof(tgt->service_parms_change));
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+
+ tgt_log(tgt, level,
+ "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+ tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+ status);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_move_login - Initiate a move login for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_move_login *move;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ move = &evt->iu.move_login;
+ memset(move, 0, sizeof(*move));
+ move->common.version = cpu_to_be32(1);
+ move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
+ move->common.length = cpu_to_be16(sizeof(*move));
+
+ move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
+ move->wwpn = cpu_to_be64(tgt->wwpn);
+ move->node_name = cpu_to_be64(tgt->ids.node_name);
+
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
+}
+
+/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
@@ -3600,13 +4445,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3674,7 +4519,7 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
/**
* ibmvfc_adisc_timeout - Handle an ADISC timeout
- * @tgt: ibmvfc target struct
+ * @t: ibmvfc target struct
*
* If an ADISC times out, send a cancel. If the cancel times
* out, reset the CRQ. When the ADISC comes back as cancelled,
@@ -3701,13 +4546,18 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
vhost->abort_threads++;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
tmf->common.length = cpu_to_be16(sizeof(*tmf));
tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3746,7 +4596,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
@@ -3799,9 +4649,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Query Target succeeded\n");
- tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ ibmvfc_del_tgt(tgt);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
@@ -3815,11 +4664,11 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3850,7 +4699,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
evt->tgt = tgt;
ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
@@ -3873,37 +4722,87 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
/**
* ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
* @vhost: ibmvfc host struct
- * @scsi_id: SCSI ID to allocate target for
+ * @target: Holds the SCSI ID and WWPN of the target to allocate
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
+ struct ibmvfc_discover_targets_entry *target)
{
+ struct ibmvfc_target *stgt = NULL;
+ struct ibmvfc_target *wtgt = NULL;
struct ibmvfc_target *tgt;
unsigned long flags;
+ u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
+ u64 wwpn = be64_to_cpu(target->wwpn);
+ /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->wwpn == wwpn) {
+ wtgt = tgt;
+ break;
+ }
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->scsi_id == scsi_id) {
- if (tgt->need_login)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ stgt = tgt;
+ break;
+ }
+ }
+
+ if (wtgt && !stgt) {
+ /*
+ * A WWPN target has moved and we still are tracking the old
+ * SCSI ID. The only way we should be able to get here is if
+ * we attempted to send an implicit logout for the old SCSI ID
+ * and it failed for some reason, such as there being I/O
+ * pending to the target. In this case, we will have already
+ * deleted the rport from the FC transport so we do a move
+ * login, which works even with I/O pending, however, if
+ * there is still I/O pending, it will stay outstanding, so
+ * we only do this if fast fail is disabled for the rport,
+ * otherwise we let terminate_rport_io clean up the port
+ * before we login at the new location.
+ */
+ if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ if (wtgt->move_login) {
+ /*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer. We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+ wtgt->new_scsi_id = scsi_id;
+ wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ wtgt->init_retries = 0;
+ ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+ }
goto unlock_out;
+ } else {
+ tgt_err(wtgt, "Unexpected target state: %d, %p\n",
+ wtgt->action, wtgt->rport);
}
+ } else if (stgt) {
+ if (stgt->need_login)
+ ibmvfc_init_tgt(stgt, ibmvfc_tgt_implicit_logout);
+ goto unlock_out;
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
- tgt->new_scsi_id = scsi_id;
+ tgt->wwpn = wwpn;
tgt->vhost = vhost;
tgt->need_login = 1;
- tgt->cancel_key = vhost->task_set++;
timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ tgt->cancel_key = vhost->task_set++;
list_add_tail(&tgt->queue, &vhost->targets);
unlock_out:
@@ -3923,9 +4822,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
int i, rc;
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
- rc = ibmvfc_alloc_target(vhost,
- be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
- IBMVFC_DISC_TGT_SCSI_ID_MASK);
+ rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
return rc;
}
@@ -3974,7 +4871,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
{
struct ibmvfc_discover_targets *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.discover_targets;
@@ -3985,6 +4882,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+ mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -3993,6 +4891,148 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
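+
+/**
+ * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
+ * @evt: ibmvfc event struct
+ *
+ **/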
+static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int flags, active_queues, i;
+
+ ibmvfc_free_event(evt);
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
+ flags = be32_to_cpu(setup->flags);
+ vhost->do_enquiry = 0;
+ active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
+ scrqs->active_queues = active_queues;
+
+ if (flags & IBMVFC_CHANNELS_CANCELED) {
+ ibmvfc_dbg(vhost, "Channels Canceled\n");
+ vhost->using_channels = 0;
+ } else {
+ if (active_queues)
+ vhost->using_channels = 1;
+ for (i = 0; i < active_queues; i++)
+ scrqs->scrqs[i].vios_cookie =
+ be64_to_cpu(setup->channel_handles[i]);
+
+ ibmvfc_dbg(vhost, "Using %u channels\n",
+ vhost->scsi_scrqs.active_queues);
+ }
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Channel Setup failed\n");
+ fallthrough;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
+ mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ return;
+ }
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+}
+
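+/**
+ * ibmvfc_channel_setup - Send a Channel Setup MAD to the VIOS
+ * @vhost: ibmvfc host struct
+ *
+ * Requests the negotiated number of sub-CRQ channels, or cancels
+ * channelization if no channels are configured.
+ **/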
+static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_channel_setup_mad *mad;
+ struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ unsigned int num_channels =
+ min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
+ int i;
+
+ memset(setup_buf, 0, sizeof(*setup_buf));
+ if (num_channels == 0) {
+ setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+ } else {
+ setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
+ for (i = 0; i < num_channels; i++)
+ setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
+ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_setup;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
+ mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent channel setup\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+}
+
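+/**
+ * ibmvfc_channel_enquiry_done - Completion handler for Channel Enquiry MAD
+ * @evt: ibmvfc event struct
+ *
+ **/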
+static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
+ u32 mad_status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
+ vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
+ ibmvfc_free_event(evt);
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
+ fallthrough;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ ibmvfc_free_event(evt);
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
+ mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_free_event(evt);
+ return;
+ }
+
+ ibmvfc_channel_setup(vhost);
+}
+
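+/**
+ * ibmvfc_channel_enquiry - Query the VIOS for the number of supported channels
+ * @vhost: ibmvfc host struct
+ *
+ **/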
+static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_channel_enquiry *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+
+ ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_enquiry;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+
+ if (mig_channels_only)
+ mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
+ if (mig_no_less_channels)
+ mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Send channel enquiry\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
/**
* ibmvfc_npiv_login_done - Completion handler for NPIV Login
* @evt: ibmvfc event struct
@@ -4022,7 +5062,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
return;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_host_init(vhost);
- /* fall through */
+ fallthrough;
case IBMVFC_MAD_DRIVER_FAILED:
ibmvfc_free_event(evt);
return;
@@ -4074,8 +5114,14 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
vhost->host->max_sectors = npiv_max_sectors;
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
- wake_up(&vhost->work_wait_q);
+
+ if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
+ ibmvfc_channel_enquiry(vhost);
+ } else {
+ vhost->do_enquiry = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+ }
}
/**
@@ -4086,7 +5132,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login_mad *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_gather_partition_info(vhost);
ibmvfc_set_login_info(vhost);
@@ -4107,11 +5153,11 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Sent NPIV login\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-};
+}
/**
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
- * @vhost: ibmvfc host struct
+ * @evt: ibmvfc event struct
*
**/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
@@ -4123,7 +5169,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
- if (list_empty(&vhost->sent) &&
+ if (list_empty(&vhost->crq.sent) &&
vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
ibmvfc_init_host(vhost);
return;
@@ -4151,7 +5197,7 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
struct ibmvfc_npiv_logout_mad *mad;
struct ibmvfc_event *evt;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.npiv_logout;
@@ -4189,6 +5235,25 @@ static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_dev_logo_to_do - Is there target logout work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
+ tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* __ibmvfc_work_to_do - Is there task level work to do? (no locking)
* @vhost: ibmvfc host struct
*
@@ -4217,11 +5282,20 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ if (vhost->discovery_threads == disc_threads)
+ return 0;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ return 1;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 0;
+ return 1;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -4292,6 +5366,14 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+ tgt->rport = NULL;
+ tgt->init_retries = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ fc_remote_port_delete(rport);
+ return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
@@ -4326,6 +5408,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
struct ibmvfc_target *tgt;
unsigned long flags;
struct fc_rport *rport;
+ LIST_HEAD(purge);
int rc;
ibmvfc_log_ae(vhost, vhost->events_to_log);
@@ -4337,26 +5420,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
case IBMVFC_HOST_ACTION_RESET:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
rc = ibmvfc_reset_crq(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc == H_CLOSED)
+ if (!rc || rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
+ /*
+ * The only action we could have changed to would have
+ * been reenable, in which case, we skip the rest of
+ * this path and wait until we've done the re-enable
+ * before sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_REENABLE:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
rc = ibmvfc_reenable_crq_queue(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
+ /*
+ * The only action we could have changed to would have
+ * been reset, in which case, we skip the rest of this
+ * path and wait until we've done the reset before
+ * sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_LOGO:
@@ -4391,6 +5497,18 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (ibmvfc_dev_logo_to_do(vhost)) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
rport = tgt->rport;
@@ -4403,6 +5521,28 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ tgt->init_retries = 0;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+
+ /*
+ * If fast fail is enabled, we wait for it to fire and then clean up
+ * the old port, since we expect the fast fail timer to clean up the
+ * outstanding I/O faster than waiting for normal command timeouts.
+ * However, if fast fail is disabled, any I/O outstanding to the
+ * rport LUNs will stay outstanding indefinitely, since the EH handlers
+ * won't get invoked for I/O's timing out. If this is a NPIV failover
+ * scenario, the better alternative is to use the move login.
+ */
+ if (rport && rport->fast_io_fail_tmo == -1)
+ tgt->move_login = 1;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ return;
}
}
@@ -4491,6 +5631,71 @@ static int ibmvfc_work(void *data)
}
/**
+ * ibmvfc_alloc_queue - Allocate queue
+ * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue to allocate
+ * @fmt: queue format to allocate
+ *
+ * Returns:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue,
+ enum ibmvfc_msg_fmt fmt)
+{
+ struct device *dev = vhost->dev;
+ size_t fmt_size;
+ unsigned int pool_size = 0;
+
+ ENTER;
+ spin_lock_init(&queue->_lock);
+ queue->q_lock = &queue->_lock;
+
+ switch (fmt) {
+ case IBMVFC_CRQ_FMT:
+ fmt_size = sizeof(*queue->msgs.crq);
+ pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+ break;
+ case IBMVFC_ASYNC_FMT:
+ fmt_size = sizeof(*queue->msgs.async);
+ break;
+ case IBMVFC_SUB_CRQ_FMT:
+ fmt_size = sizeof(*queue->msgs.scrq);
+ /* We need one extra event for Cancel Commands */
+ pool_size = max_requests + 1;
+ break;
+ default:
+ dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
+ return -EINVAL;
+ }
+
+ if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
+ dev_err(dev, "Couldn't initialize event pool.\n");
+ return -ENOMEM;
+ }
+
+ queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!queue->msgs.handle)
+ return -ENOMEM;
+
+ queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, queue->msg_token)) {
+ free_page((unsigned long)queue->msgs.handle);
+ queue->msgs.handle = NULL;
+ return -ENOMEM;
+ }
+
+ queue->cur = 0;
+ queue->fmt = fmt;
+ queue->size = PAGE_SIZE / fmt_size;
+
+ queue->vhost = vhost;
+ return 0;
+}
+
+/**
* ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
* @vhost: ibmvfc host struct
*
@@ -4505,21 +5710,12 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
int rc, retrc = -ENOMEM;
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
ENTER;
- crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
-
- if (!crq->msgs)
+ if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
return -ENOMEM;
- crq->size = PAGE_SIZE / sizeof(*crq->msgs);
- crq->msg_token = dma_map_single(dev, crq->msgs,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, crq->msg_token))
- goto map_failed;
-
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE);
@@ -4548,7 +5744,6 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
goto req_irq_failed;
}
- crq->cur = 0;
LEAVE;
return retrc;
@@ -4558,12 +5753,185 @@ req_irq_failed:
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
- dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
-map_failed:
- free_page((unsigned long)crq->msgs);
+ ibmvfc_free_queue(vhost, crq);
return retrc;
}
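+
+/**
+ * ibmvfc_register_scsi_channel - Register a SCSI sub-CRQ and request its irq
+ * @vhost: ibmvfc host struct
+ * @index: index of the sub-CRQ channel to register
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/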
+static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ int index)
+{
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+ int rc = -ENOMEM;
+
+ ENTER;
+
+ rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
+ &scrq->cookie, &scrq->hw_irq);
+
+ /* H_CLOSED indicates successful register, but no CRQ partner */
+ if (rc && rc != H_CLOSED) {
+ dev_warn(dev, "Error registering sub-crq: %d\n", rc);
+ if (rc == H_PARAMETER)
+ dev_warn_once(dev, "Firmware may not support MQ\n");
+ goto reg_failed;
+ }
+
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (!scrq->irq) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
+ goto irq_failed;
+ }
+
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
+ vdev->unit_address, index);
+ rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
+ irq_dispose_mapping(scrq->irq);
+ goto irq_failed;
+ }
+
+ scrq->hwq_id = index;
+
+ LEAVE;
+ return 0;
+
+irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+ } while (rtas_busy_delay(rc));
+reg_failed:
+ LEAVE;
+ return rc;
+}
+
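+/**
+ * ibmvfc_deregister_scsi_channel - Free a sub-CRQ's irq and hypervisor registration
+ * @vhost: ibmvfc host struct
+ * @index: index of the sub-CRQ channel to deregister
+ *
+ **/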
+static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
+{
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+ long rc;
+
+ ENTER;
+
+ free_irq(scrq->irq, scrq);
+ irq_dispose_mapping(scrq->irq);
+ scrq->irq = 0;
+
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
+ scrq->cookie);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc)
+ dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
+
+ /* Clean out the queue */
+ memset(scrq->msgs.crq, 0, PAGE_SIZE);
+ scrq->cur = 0;
+
+ LEAVE;
+}
+
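+/**
+ * ibmvfc_reg_sub_crqs - Register all allocated sub-CRQ channels
+ * @vhost: ibmvfc host struct
+ *
+ * If any registration fails, all previously registered channels are
+ * torn down and the channel enquiry is skipped.
+ **/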
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i, j;
+
+ ENTER;
+ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ if (ibmvfc_register_scsi_channel(vhost, i)) {
+ for (j = i; j > 0; j--)
+ ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ vhost->do_enquiry = 0;
+ return;
+ }
+ }
+
+ LEAVE;
+}
+
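+/**
+ * ibmvfc_dereg_sub_crqs - Deregister all sub-CRQ channels
+ * @vhost: ibmvfc host struct
+ *
+ **/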
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i;
+
+ ENTER;
+ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++)
+ ibmvfc_deregister_scsi_channel(vhost, i);
+
+ LEAVE;
+}
+
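+/**
+ * ibmvfc_init_sub_crqs - Allocate and register the SCSI sub-CRQ channels
+ * @vhost: ibmvfc host struct
+ *
+ * On allocation failure, multiqueue support is disabled for this host.
+ **/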
+static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_queue *scrq;
+ int i, j;
+
+ ENTER;
+ if (!vhost->mq_enabled)
+ return;
+
+ vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
+ sizeof(*vhost->scsi_scrqs.scrqs),
+ GFP_KERNEL);
+ if (!vhost->scsi_scrqs.scrqs) {
+ vhost->do_enquiry = 0;
+ return;
+ }
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
+ for (j = i; j > 0; j--) {
+ scrq = &vhost->scsi_scrqs.scrqs[j - 1];
+ ibmvfc_free_queue(vhost, scrq);
+ }
+ kfree(vhost->scsi_scrqs.scrqs);
+ vhost->scsi_scrqs.scrqs = NULL;
+ vhost->scsi_scrqs.active_queues = 0;
+ vhost->do_enquiry = 0;
+ vhost->mq_enabled = 0;
+ return;
+ }
+ }
+
+ ibmvfc_reg_sub_crqs(vhost);
+
+ LEAVE;
+}
+
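+/**
+ * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-CRQ channels
+ * @vhost: ibmvfc host struct
+ *
+ **/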
+static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_queue *scrq;
+ int i;
+
+ ENTER;
+ if (!vhost->scsi_scrqs.scrqs)
+ return;
+
+ ibmvfc_dereg_sub_crqs(vhost);
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ ibmvfc_free_queue(vhost, scrq);
+ }
+
+ kfree(vhost->scsi_scrqs.scrqs);
+ vhost->scsi_scrqs.scrqs = NULL;
+ vhost->scsi_scrqs.active_queues = 0;
+ LEAVE;
+}
+
/**
* ibmvfc_free_mem - Free memory for vhost
* @vhost: ibmvfc host struct
@@ -4573,7 +5941,7 @@ map_failed:
**/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct ibmvfc_queue *async_q = &vhost->async_crq;
ENTER;
mempool_destroy(vhost->tgt_pool);
@@ -4582,10 +5950,10 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
vhost->disc_buf_dma);
dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
+ dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
+ vhost->channel_setup_buf, vhost->channel_setup_dma);
dma_pool_destroy(vhost->sg_pool);
- dma_unmap_single(vhost->dev, async_q->msg_token,
- async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
- free_page((unsigned long)async_q->msgs);
+ ibmvfc_free_queue(vhost, async_q);
LEAVE;
}
@@ -4598,26 +5966,15 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
**/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct ibmvfc_queue *async_q = &vhost->async_crq;
struct device *dev = vhost->dev;
ENTER;
- async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
- if (!async_q->msgs) {
- dev_err(dev, "Couldn't allocate async queue.\n");
+ if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
+ dev_err(dev, "Couldn't allocate/map async queue.\n");
goto nomem;
}
- async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
- async_q->msg_token = dma_map_single(dev, async_q->msgs,
- async_q->size * sizeof(*async_q->msgs),
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, async_q->msg_token)) {
- dev_err(dev, "Failed to map async queue\n");
- goto free_async_crq;
- }
-
vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
SG_ALL * sizeof(struct srp_direct_buf),
sizeof(struct srp_direct_buf), 0);
@@ -4635,7 +5992,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_sg_pool;
}
- vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+ vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
&vhost->disc_buf_dma, GFP_KERNEL);
@@ -4646,6 +6003,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+ atomic_set(&vhost->trace_index, -1);
if (!vhost->trace)
goto free_disc_buffer;
@@ -4658,9 +6016,20 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_trace;
}
+ vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
+ &vhost->channel_setup_dma,
+ GFP_KERNEL);
+
+ if (!vhost->channel_setup_buf) {
+ dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
+ goto free_tgt_pool;
+ }
+
LEAVE;
return 0;
+free_tgt_pool:
+ mempool_destroy(vhost->tgt_pool);
free_trace:
kfree(vhost->trace);
free_disc_buffer:
@@ -4672,10 +6041,7 @@ free_login_buffer:
free_sg_pool:
dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
- dma_unmap_single(dev, async_q->msg_token,
- async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
-free_async_crq:
- free_page((unsigned long)async_q->msgs);
+ ibmvfc_free_queue(vhost, async_q);
nomem:
LEAVE;
return -ENOMEM;
@@ -4747,6 +6113,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct Scsi_Host *shost;
struct device *dev = &vdev->dev;
int rc = -ENOMEM;
+ unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
ENTER;
shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
@@ -4762,17 +6129,24 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
shost->max_sectors = IBMVFC_MAX_SECTORS;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
+ shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
vhost = shost_priv(shost);
- INIT_LIST_HEAD(&vhost->sent);
- INIT_LIST_HEAD(&vhost->free);
INIT_LIST_HEAD(&vhost->targets);
+ INIT_LIST_HEAD(&vhost->purge);
sprintf(vhost->name, IBMVFC_NAME);
vhost->host = shost;
vhost->dev = dev;
vhost->partition_number = -1;
vhost->log_level = log_level;
vhost->task_set = 1;
+
+ vhost->mq_enabled = mq_enabled;
+ vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
+ vhost->using_channels = 0;
+ vhost->do_enquiry = 1;
+ vhost->scan_timeout = 0;
+
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
@@ -4788,6 +6162,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (IS_ERR(vhost->work_thread)) {
dev_err(dev, "Couldn't create kernel thread: %ld\n",
PTR_ERR(vhost->work_thread));
+ rc = PTR_ERR(vhost->work_thread);
goto free_host_mem;
}
@@ -4796,13 +6171,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto kill_kthread;
}
- if ((rc = ibmvfc_init_event_pool(vhost))) {
- dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
- goto release_crq;
- }
-
if ((rc = scsi_add_host(shost, dev)))
- goto release_event_pool;
+ goto release_crq;
fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
@@ -4812,6 +6182,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto remove_shost;
}
+ ibmvfc_init_sub_crqs(vhost);
+
if (shost_to_fc_host(shost)->rqst_q)
blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
@@ -4825,8 +6197,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
remove_shost:
scsi_remove_host(shost);
-release_event_pool:
- ibmvfc_free_event_pool(vhost);
release_crq:
ibmvfc_release_crq_queue(vhost);
kill_kthread:
@@ -4847,9 +6217,10 @@ out:
* Return value:
* 0
**/
-static int ibmvfc_remove(struct vio_dev *vdev)
+static void ibmvfc_remove(struct vio_dev *vdev)
{
struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+ LIST_HEAD(purge);
unsigned long flags;
ENTER;
@@ -4860,15 +6231,17 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_wait_while_resetting(vhost);
- ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_free_event_pool(vhost);
+ ibmvfc_complete_purge(&purge);
+ ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_release_crq_queue(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
@@ -4876,7 +6249,6 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_unlock(&ibmvfc_driver_lock);
scsi_host_put(vhost->host);
LEAVE;
- return 0;
}
/**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 7da89f4d26b2..c39a245f43d0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -41,6 +41,12 @@
#define IBMVFC_DEFAULT_LOG_LEVEL 2
#define IBMVFC_MAX_CDB_LEN 16
#define IBMVFC_CLS3_ERROR 0
+#define IBMVFC_MQ 1
+#define IBMVFC_SCSI_CHANNELS 8
+#define IBMVFC_MAX_SCSI_QUEUES 16
+#define IBMVFC_SCSI_HW_QUEUES 8
+#define IBMVFC_MIG_NO_SUB_TO_CRQ 0
+#define IBMVFC_MIG_NO_N_TO_M 0
/*
* Ensure we have resources for ERP and initialization:
@@ -54,6 +60,7 @@
#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
+#define IBMVFC_MAD_VERSION_NOT_SUPP 0xF2
#define IBMVFC_MAD_FAILED 0xF7
#define IBMVFC_MAD_DRIVER_FAILED 0xEE
#define IBMVFC_MAD_CRQ_ERROR 0xEF
@@ -120,10 +127,14 @@ enum ibmvfc_mad_types {
IBMVFC_PORT_LOGIN = 0x0004,
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
+ IBMVFC_MOVE_LOGIN = 0x0020,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
IBMVFC_NPIV_LOGOUT = 0x0800,
+ IBMVFC_CHANNEL_ENQUIRY = 0x1000,
+ IBMVFC_CHANNEL_SETUP = 0x2000,
+ IBMVFC_CONNECTION_INFO = 0x4000,
};
struct ibmvfc_mad_common {
@@ -133,16 +144,16 @@ struct ibmvfc_mad_common {
__be16 status;
__be16 length;
__be64 tag;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_npiv_login_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_npiv_logout_mad {
struct ibmvfc_mad_common common;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
#define IBMVFC_MAX_NAME 256
@@ -162,13 +173,17 @@ struct ibmvfc_npiv_login {
__be32 max_cmds;
__be64 capabilities;
#define IBMVFC_CAN_MIGRATE 0x01
+#define IBMVFC_CAN_USE_CHANNELS 0x02
+#define IBMVFC_CAN_HANDLE_FPIN 0x04
+#define IBMVFC_CAN_USE_MAD_VERSION 0x08
+#define IBMVFC_CAN_SEND_VF_WWPN 0x10
__be64 node_name;
struct srp_direct_buf async;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
__be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_common_svc_parms {
__be16 fcph_version;
@@ -177,7 +192,7 @@ struct ibmvfc_common_svc_parms {
__be16 bb_rcv_sz; /* upper nibble is BB_SC_N */
__be32 ratov;
__be32 edtov;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_service_parms {
struct ibmvfc_common_svc_parms common;
@@ -192,7 +207,8 @@ struct ibmvfc_service_parms {
__be32 ext_len;
__be32 reserved[30];
__be32 clk_sync_qos[2];
-}__attribute__((packed, aligned (4)));
+ __be32 reserved2;
+} __packed __aligned(4);
struct ibmvfc_npiv_login_resp {
__be32 version;
@@ -204,6 +220,9 @@ struct ibmvfc_npiv_login_resp {
__be64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
+#define IBMVFC_MAD_VERSION_CAP 0x20
+#define IBMVFC_HANDLE_VF_WWPN 0x40
+#define IBMVFC_CAN_SUPPORT_CHANNELS 0x80
__be32 max_cmds;
__be32 scsi_id_sz;
__be64 max_dma_len;
@@ -217,29 +236,32 @@ struct ibmvfc_npiv_login_resp {
u8 drc_name[IBMVFC_MAX_NAME];
struct ibmvfc_service_parms service_parms;
__be64 reserved2;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
union ibmvfc_npiv_login_data {
struct ibmvfc_npiv_login login;
struct ibmvfc_npiv_login_resp resp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
-struct ibmvfc_discover_targets_buf {
- __be32 scsi_id[1];
+struct ibmvfc_discover_targets_entry {
+ __be32 scsi_id;
+ __be32 pad;
+ __be64 wwpn;
#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
-};
+} __packed __aligned(8);
struct ibmvfc_discover_targets {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
__be32 flags;
+#define IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST 0x02
__be16 status;
__be16 error;
__be32 bufflen;
__be32 num_avail;
__be32 num_written;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_fc_reason {
IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
@@ -282,8 +304,29 @@ struct ibmvfc_port_login {
__be32 reserved2;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
+ __be64 target_wwpn;
__be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_move_login {
+ struct ibmvfc_mad_common common;
+ __be64 old_scsi_id;
+ __be64 new_scsi_id;
+ __be64 wwpn;
+ __be64 node_name;
+ __be32 flags;
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_OLD_FAILED 0x01
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_NEW_FAILED 0x02
+#define IBMVFC_MOVE_LOGIN_PORT_LOGIN_FAILED 0x04
+ __be32 reserved;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ __be32 reserved2;
+ __be16 service_class;
+ __be16 vios_flags;
+#define IBMVFC_MOVE_LOGIN_VF_NOT_SENT_ADAPTER 0x01
+ __be64 reserved3;
+} __packed __aligned(8);
struct ibmvfc_prli_svc_parms {
u8 type;
@@ -303,7 +346,7 @@ struct ibmvfc_prli_svc_parms {
#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_process_login {
struct ibmvfc_mad_common common;
@@ -313,8 +356,9 @@ struct ibmvfc_process_login {
__be16 status;
__be16 error; /* also fc_reason */
__be32 reserved2;
+ __be64 target_wwpn;
__be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_query_tgt {
struct ibmvfc_mad_common common;
@@ -325,13 +369,13 @@ struct ibmvfc_query_tgt {
__be16 fc_explain;
__be16 fc_type;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_implicit_logout {
struct ibmvfc_mad_common common;
__be64 old_scsi_id;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_tmf {
struct ibmvfc_mad_common common;
@@ -347,8 +391,10 @@ struct ibmvfc_tmf {
__be32 cancel_key;
__be32 my_cancel_key;
__be32 pad;
+ __be64 target_wwpn;
+ __be64 task_tag;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_fcp_rsp_info_codes {
RSP_NO_FAILURE = 0x00,
@@ -361,7 +407,7 @@ struct ibmvfc_fcp_rsp_info {
u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
-}__attribute__((packed, aligned (2)));
+} __packed __aligned(2);
enum ibmvfc_fcp_rsp_flags {
FCP_BIDI_RSP = 0x80,
@@ -377,7 +423,7 @@ enum ibmvfc_fcp_rsp_flags {
union ibmvfc_fcp_rsp_data {
struct ibmvfc_fcp_rsp_info info;
u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_fcp_rsp {
__be64 reserved;
@@ -388,7 +434,7 @@ struct ibmvfc_fcp_rsp {
__be32 fcp_sense_len;
__be32 fcp_rsp_len;
union ibmvfc_fcp_rsp_data data;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_cmd_flags {
IBMVFC_SCATTERLIST = 0x0001,
@@ -422,7 +468,7 @@ struct ibmvfc_fcp_cmd_iu {
#define IBMVFC_WRDATA 0x01
u8 cdb[IBMVFC_MAX_CDB_LEN];
__be32 xfer_len;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_cmd {
__be64 task_tag;
@@ -443,10 +489,20 @@ struct ibmvfc_cmd {
__be64 correlation;
__be64 tgt_scsi_id;
__be64 tag;
- __be64 reserved3[2];
- struct ibmvfc_fcp_cmd_iu iu;
- struct ibmvfc_fcp_rsp rsp;
-}__attribute__((packed, aligned (8)));
+ __be64 target_wwpn;
+ __be64 reserved3;
+ union {
+ struct {
+ struct ibmvfc_fcp_cmd_iu iu;
+ struct ibmvfc_fcp_rsp rsp;
+ } v1;
+ struct {
+ __be64 reserved4;
+ struct ibmvfc_fcp_cmd_iu iu;
+ struct ibmvfc_fcp_rsp rsp;
+ } v2;
+ };
+} __packed __aligned(8);
struct ibmvfc_passthru_fc_iu {
__be32 payload[7];
@@ -472,19 +528,66 @@ struct ibmvfc_passthru_iu {
__be64 correlation;
__be64 scsi_id;
__be64 tag;
+ __be64 target_wwpn;
__be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_passthru_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf cmd_ioba;
struct ibmvfc_passthru_iu iu;
struct ibmvfc_passthru_fc_iu fc_iu;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_channel_enquiry {
+ struct ibmvfc_mad_common common;
+ __be32 flags;
+#define IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT 0x01
+#define IBMVFC_SUPPORT_VARIABLE_SUBQ_MSG 0x02
+#define IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT 0x04
+ __be32 num_scsi_subq_channels;
+ __be32 num_nvmeof_subq_channels;
+ __be32 num_scsi_vas_channels;
+ __be32 num_nvmeof_vas_channels;
+} __packed __aligned(8);
+
+struct ibmvfc_channel_setup_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+} __packed __aligned(8);
+
+#define IBMVFC_MAX_CHANNELS 502
+
+struct ibmvfc_channel_setup {
+ __be32 flags;
+#define IBMVFC_CANCEL_CHANNELS 0x01
+#define IBMVFC_USE_BUFFER 0x02
+#define IBMVFC_CHANNELS_CANCELED 0x04
+ __be32 reserved;
+ __be32 num_scsi_subq_channels;
+ __be32 num_nvmeof_subq_channels;
+ __be32 num_scsi_vas_channels;
+ __be32 num_nvmeof_vas_channels;
+ struct srp_direct_buf buffer;
+ __be64 reserved2[5];
+ __be64 channel_handles[IBMVFC_MAX_CHANNELS];
+} __packed __aligned(8);
+
+struct ibmvfc_connection_info {
+ struct ibmvfc_mad_common common;
+ __be64 information_bits;
+#define IBMVFC_NO_FC_IO_CHANNEL 0x01
+#define IBMVFC_NO_PHYP_VAS 0x02
+#define IBMVFC_NO_PHYP_SUBQ 0x04
+#define IBMVFC_PHYP_DEPRECATED_SUBQ 0x08
+#define IBMVFC_PHYP_PRESERVED_SUBQ 0x10
+#define IBMVFC_PHYP_FULL_SUBQ 0x20
+ __be64 reserved[16];
+} __packed __aligned(8);
struct ibmvfc_trace_start_entry {
u32 xfer_len;
-}__attribute__((packed));
+} __packed;
struct ibmvfc_trace_end_entry {
u16 status;
@@ -493,7 +596,7 @@ struct ibmvfc_trace_end_entry {
u8 rsp_code;
u8 scsi_status;
u8 reserved;
-}__attribute__((packed));
+} __packed;
struct ibmvfc_trace_entry {
struct ibmvfc_event *evt;
@@ -510,7 +613,7 @@ struct ibmvfc_trace_entry {
struct ibmvfc_trace_start_entry start;
struct ibmvfc_trace_end_entry end;
} u;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_crq_formats {
IBMVFC_CMD_FORMAT = 0x01,
@@ -532,6 +635,7 @@ enum ibmvfc_async_event {
IBMVFC_AE_HALT = 0x0400,
IBMVFC_AE_RESUME = 0x0800,
IBMVFC_AE_ADAPTER_FAILED = 0x1000,
+ IBMVFC_AE_FPIN = 0x2000,
};
struct ibmvfc_async_desc {
@@ -545,13 +649,12 @@ struct ibmvfc_crq {
volatile u8 format;
u8 reserved[6];
volatile __be64 ioba;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
-struct ibmvfc_crq_queue {
- struct ibmvfc_crq *msgs;
- int size, cur;
- dma_addr_t msg_token;
-};
+struct ibmvfc_sub_crq {
+ struct ibmvfc_crq crq;
+ __be64 reserved[2];
+} __packed __aligned(8);
enum ibmvfc_ae_link_state {
IBMVFC_AE_LS_LINK_UP = 0x01,
@@ -560,23 +663,25 @@ enum ibmvfc_ae_link_state {
IBMVFC_AE_LS_LINK_DEAD = 0x08,
};
+enum ibmvfc_ae_fpin_status {
+ IBMVFC_AE_FPIN_LINK_CONGESTED = 0x1,
+ IBMVFC_AE_FPIN_PORT_CONGESTED = 0x2,
+ IBMVFC_AE_FPIN_PORT_CLEARED = 0x3,
+ IBMVFC_AE_FPIN_PORT_DEGRADED = 0x4,
+};
+
struct ibmvfc_async_crq {
volatile u8 valid;
u8 link_state;
- u8 pad[2];
+ u8 fpin_status;
+ u8 pad;
__be32 pad2;
volatile __be64 event;
volatile __be64 scsi_id;
volatile __be64 wwpn;
volatile __be64 node_name;
__be64 reserved;
-}__attribute__((packed, aligned (8)));
-
-struct ibmvfc_async_crq_queue {
- struct ibmvfc_async_crq *msgs;
- int size, cur;
- dma_addr_t msg_token;
-};
+} __packed __aligned(8);
union ibmvfc_iu {
struct ibmvfc_mad_common mad_common;
@@ -585,25 +690,34 @@ union ibmvfc_iu {
struct ibmvfc_discover_targets discover_targets;
struct ibmvfc_port_login plogi;
struct ibmvfc_process_login prli;
+ struct ibmvfc_move_login move_login;
struct ibmvfc_query_tgt query_tgt;
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
struct ibmvfc_passthru_mad passthru;
-}__attribute__((packed, aligned (8)));
+ struct ibmvfc_channel_enquiry channel_enquiry;
+ struct ibmvfc_channel_setup_mad channel_setup;
+ struct ibmvfc_connection_info connection_info;
+} __packed __aligned(8);
enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
+ IBMVFC_TGT_ACTION_LOGOUT_RPORT,
+ IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
IBMVFC_TGT_ACTION_DELETED_RPORT,
+ IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT,
+ IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT,
};
struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
u64 scsi_id;
+ u64 wwpn;
u64 new_scsi_id;
struct fc_rport *rport;
int target_id;
@@ -612,6 +726,7 @@ struct ibmvfc_target {
int add_rport;
int init_retries;
int logo_rcvd;
+ int move_login;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
@@ -623,13 +738,17 @@ struct ibmvfc_target {
/* a unit of work for the hosting partition */
struct ibmvfc_event {
- struct list_head queue;
+ struct list_head queue_list;
+ struct list_head cancel;
struct ibmvfc_host *vhost;
+ struct ibmvfc_queue *queue;
struct ibmvfc_target *tgt;
struct scsi_cmnd *cmnd;
atomic_t free;
+ atomic_t active;
union ibmvfc_iu *xfer_iu;
- void (*done) (struct ibmvfc_event *);
+ void (*done)(struct ibmvfc_event *evt);
+ void (*_done)(struct ibmvfc_event *evt);
struct ibmvfc_crq crq;
union ibmvfc_iu iu;
union ibmvfc_iu *sync_iu;
@@ -638,6 +757,7 @@ struct ibmvfc_event {
struct completion comp;
struct completion *eh_comp;
struct timer_list timer;
+ u16 hwq;
};
/* a pool of event structs for use */
@@ -648,6 +768,49 @@ struct ibmvfc_event_pool {
dma_addr_t iu_token;
};
+enum ibmvfc_msg_fmt {
+ IBMVFC_CRQ_FMT = 0,
+ IBMVFC_ASYNC_FMT,
+ IBMVFC_SUB_CRQ_FMT,
+};
+
+union ibmvfc_msgs {
+ void *handle;
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_async_crq *async;
+ struct ibmvfc_sub_crq *scrq;
+};
+
+struct ibmvfc_queue {
+ union ibmvfc_msgs msgs;
+ dma_addr_t msg_token;
+ enum ibmvfc_msg_fmt fmt;
+ int size, cur;
+ spinlock_t _lock;
+ spinlock_t *q_lock;
+
+ struct ibmvfc_host *vhost;
+ struct ibmvfc_event_pool evt_pool;
+ struct list_head sent;
+ struct list_head free;
+ spinlock_t l_lock;
+
+ union ibmvfc_iu cancel_rsp;
+
+ /* Sub-CRQ fields */
+ unsigned long cookie;
+ unsigned long vios_cookie;
+ unsigned long hw_irq;
+ unsigned long irq;
+ unsigned long hwq_id;
+ char name[32];
+};
+
+struct ibmvfc_scsi_channels {
+ struct ibmvfc_queue *scrqs;
+ unsigned int active_queues;
+};
+
enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_NONE = 0,
IBMVFC_HOST_ACTION_RESET,
@@ -682,26 +845,29 @@ struct ibmvfc_host {
enum ibmvfc_host_action action;
#define IBMVFC_NUM_TRACE_INDEX_BITS 8
#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_INDEX_MASK (IBMVFC_NUM_TRACE_ENTRIES - 1)
#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
struct ibmvfc_trace_entry *trace;
- u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
+ atomic_t trace_index;
int num_targets;
struct list_head targets;
- struct list_head sent;
- struct list_head free;
+ struct list_head purge;
struct device *dev;
- struct ibmvfc_event_pool pool;
struct dma_pool *sg_pool;
mempool_t *tgt_pool;
- struct ibmvfc_crq_queue crq;
- struct ibmvfc_async_crq_queue async_crq;
+ struct ibmvfc_queue crq;
+ struct ibmvfc_queue async_crq;
+ struct ibmvfc_scsi_channels scsi_scrqs;
struct ibmvfc_npiv_login login_info;
union ibmvfc_npiv_login_data *login_buf;
dma_addr_t login_buf_dma;
+ struct ibmvfc_channel_setup *channel_setup_buf;
+ dma_addr_t channel_setup_dma;
int disc_buf_sz;
int log_level;
- struct ibmvfc_discover_targets_buf *disc_buf;
+ struct ibmvfc_discover_targets_entry *disc_buf;
struct mutex passthru_mutex;
+ int max_vios_scsi_channels;
int task_set;
int init_retries;
int discovery_threads;
@@ -710,7 +876,12 @@ struct ibmvfc_host {
int reinit;
int delay_init;
int scan_complete;
+ int scan_timeout;
int logged_in;
+ int mq_enabled;
+ int using_channels;
+ int do_enquiry;
+ int client_scsi_channels;
int aborting_passthru;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7f66a7783209..63f32f843e75 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -130,9 +130,10 @@ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
}
/**
- * release_crq_queue: - Deallocates data and unregisters CRQ
- * @queue: crq_queue to initialize and register
- * @host_data: ibmvscsi_host_data of host
+ * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ * @max_requests: maximum requests (unused)
*
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
* the crq with the hypervisor.
@@ -276,10 +277,9 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
}
/**
- * reset_crq_queue: - resets a crq after a failure
+ * ibmvscsi_reset_crq_queue() - resets a crq after a failure
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
- *
*/
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
@@ -314,9 +314,10 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
}
/**
- * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
+ * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ * @max_requests: maximum requests (unused)
*
* Allocates a page for messages, maps it for dma, and registers
* the crq with the hypervisor.
@@ -404,10 +405,9 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
}
/**
- * reenable_crq_queue: - reenables a crq after
+ * ibmvscsi_reenable_crq_queue() - reenables a crq after a failure
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
- *
*/
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
@@ -415,6 +415,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ set_adapter_info(hostdata);
+
/* Re-enable the CRQ */
do {
if (rc)
@@ -437,7 +439,7 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
* @hostdata: ibmvscsi_host_data who owns the event pool
*
* Returns zero on success.
-*/
+ */
static int initialize_event_pool(struct event_pool *pool,
int size, struct ibmvscsi_host_data *hostdata)
{
@@ -452,7 +454,7 @@ static int initialize_event_pool(struct event_pool *pool,
pool->iu_storage =
dma_alloc_coherent(hostdata->dev,
pool->size * sizeof(*pool->iu_storage),
- &pool->iu_token, 0);
+ &pool->iu_token, GFP_KERNEL);
if (!pool->iu_storage) {
kfree(pool->events);
return -ENOMEM;
@@ -476,12 +478,12 @@ static int initialize_event_pool(struct event_pool *pool,
}
/**
- * release_event_pool: - Frees memory of an event pool of a host
+ * release_event_pool() - Frees memory of an event pool of a host
* @pool: event_pool to be released
* @hostdata: ibmvscsi_host_data who owns the event pool
*
* Returns zero on success.
-*/
+ */
static void release_event_pool(struct event_pool *pool,
struct ibmvscsi_host_data *hostdata)
{
@@ -524,11 +526,10 @@ static int valid_event_struct(struct event_pool *pool,
}
/**
- * ibmvscsi_free-event_struct: - Changes status of event to "free"
+ * free_event_struct() - Changes status of event to "free"
* @pool: event_pool that contains the event
* @evt: srp_event_struct to be modified
- *
-*/
+ */
static void free_event_struct(struct event_pool *pool,
struct srp_event_struct *evt)
{
@@ -545,7 +546,7 @@ static void free_event_struct(struct event_pool *pool,
}
/**
- * get_evt_struct: - Gets the next free event in pool
+ * get_event_struct() - Gets the next free event in pool
* @pool: event_pool that contains the events to be searched
*
* Returns the next event in "free" state, and NULL if none are free.
@@ -573,7 +574,7 @@ static struct srp_event_struct *get_event_struct(struct event_pool *pool)
/**
* init_event_struct: Initialize fields in an event struct that are always
* required.
- * @evt: The event
+ * @evt_struct: The event
* @done: Routine to call when the event is responded to
* @format: SRP or MAD format
* @timeout: timeout value set in the CRQ
@@ -595,7 +596,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
* Routines for receiving SCSI responses from the hosting partition
*/
-/**
+/*
* set_srp_direction: Set the fields in the srp related to data
* direction and number of buffers based on the direction in
* the scsi_cmnd and the number of buffers
@@ -630,9 +631,9 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
/**
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped
+ * @evt_struct: the event
* @dev: device for which the memory is mapped
- *
-*/
+ */
static void unmap_cmd_data(struct srp_cmd *cmd,
struct srp_event_struct *evt_struct,
struct device *dev)
@@ -667,8 +668,9 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
}
/**
- * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields
+ * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
* @cmd: struct scsi_cmnd with the scatterlist
+ * @evt_struct: struct srp_event_struct to map
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
@@ -715,8 +717,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/* get indirect table */
if (!evt_struct->ext_list) {
- evt_struct->ext_list = (struct srp_direct_buf *)
- dma_alloc_coherent(dev,
+ evt_struct->ext_list = dma_alloc_coherent(dev,
SG_ALL * sizeof(struct srp_direct_buf),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
@@ -743,6 +744,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/**
* map_data_for_srp_cmd: - Calls functions to map data for srp cmds
* @cmd: struct scsi_cmnd with the memory to be mapped
+ * @evt_struct: struct srp_event_struct to map
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: dma device for which to map dma memory
*
@@ -776,6 +778,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
/**
* purge_requests: Our virtual adapter just shut down. purge any sent requests
* @hostdata: the adapter
+ * @error_code: error code to return as the 'result'
*/
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
@@ -805,13 +808,29 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
}
/**
+ * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
+ * an adapter failure, reset, or SRP Login. Done under host lock to prevent
+ * race with SCSI command submission.
+ * @hostdata: adapter to adjust
+ * @limit: new request limit
+ */
+static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ atomic_set(&hostdata->request_limit, limit);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
* ibmvscsi_reset_host - Reset the connection to the server
* @hostdata: struct ibmvscsi_host_data to reset
*/
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
purge_requests(hostdata, DID_ERROR);
hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
@@ -820,7 +839,7 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
/**
* ibmvscsi_timeout - Internal command timeout handler
- * @evt_struct: struct srp_event_struct that timed out
+ * @t: struct srp_event_struct that timed out
*
* Called when an internally generated command times out
*/
@@ -986,7 +1005,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (cmnd) {
cmnd->result |= rsp->status;
- if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
+ if (scsi_status_is_check_condition(cmnd->result))
memcpy(cmnd->sense_buffer,
rsp->data,
be32_to_cpu(rsp->sense_data_len));
@@ -1016,13 +1035,13 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
}
/**
- * ibmvscsi_queue: - The queuecommand function of the scsi template
- * @cmd: struct scsi_cmnd to be executed
+ * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template
+ * @cmnd: struct scsi_cmnd to be executed
* @done: Callback function to be called when cmd is completed
*/
-static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
- void (*done) (struct scsi_cmnd *))
+static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
struct srp_indirect_buf *indirect;
@@ -1036,8 +1055,9 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
return SCSI_MLQUEUE_HOST_BUSY;
/* Set up the actual SRP IU */
+ BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN);
+ memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp));
srp_cmd = &evt_struct->iu.srp.cmd;
- memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
int_to_scsilun(lun, &srp_cmd->lun);
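Two independent cleanups meet in this hunk. The queuecommand_lck() signature loses its done argument because the midlayer now completes commands through the exported scsi_done(); the local done pointer keeps the rest of the function unchanged. And the memset gains a BUILD_BUG_ON so the compiler, rather than a hand-maintained byte count, proves the zeroed union member really spans SRP_MAX_IU_LEN. A hypothetical miniature of that second pattern:

union iu {
	struct srp_cmd cmd;
	u8 raw[SRP_MAX_IU_LEN];	/* wire-format size */
};

static void prep_iu(union iu *iu)
{
	/* fails to compile if the union ever diverges from the IU size */
	BUILD_BUG_ON(sizeof(*iu) != SRP_MAX_IU_LEN);
	memset(iu, 0, sizeof(*iu));	/* no magic length constant */
}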
@@ -1053,7 +1073,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
- cmnd->request->timeout/HZ);
+ scsi_cmd_to_rq(cmnd)->timeout / HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
@@ -1144,13 +1164,13 @@ static void login_rsp(struct srp_event_struct *evt_struct)
dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
evt_struct->xfer_iu->srp.login_rej.reason);
/* Login failed. */
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
return;
default:
dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
evt_struct->xfer_iu->srp.login_rsp.opcode);
/* Login failed. */
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
return;
}
@@ -1161,7 +1181,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
* This value is set rather than added to request_limit because
* request_limit could have been set to -1 by this client.
*/
- atomic_set(&hostdata->request_limit,
+ ibmvscsi_set_request_limit(hostdata,
be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
@@ -1193,13 +1213,13 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
* the login request we are just sending and login requests always
* get sent by the driver regardless of request_limit.
*/
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
dev_info(hostdata->dev, "sent SRP login\n");
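The reordering here is not cosmetic: ibmvscsi_set_request_limit() acquires host_lock itself, and kernel spinlocks are not recursive, so calling it after the spin_lock_irqsave() above would self-deadlock:

/* would deadlock:
 *	spin_lock_irqsave(hostdata->host->host_lock, flags);
 *	ibmvscsi_set_request_limit(hostdata, 0);  // takes host_lock again
 */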
@@ -1324,7 +1344,7 @@ static void fast_fail_rsp(struct srp_event_struct *evt_struct)
}
/**
- * init_host - Start host initialization
+ * enable_fast_fail() - Start host initialization
* @hostdata: ibmvscsi_host_data of host
*
* Returns zero if successful.
@@ -1438,16 +1458,15 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
-/**
- * init_adapter: Start virtual adapter initialization sequence
- *
+/*
+ * init_adapter() - Start virtual adapter initialization sequence
*/
static void init_adapter(struct ibmvscsi_host_data *hostdata)
{
send_mad_adapter_info(hostdata);
}
-/**
+/*
* sync_completion: Signal that a synchronous command has completed
* Note that after returning from this call, the evt_struct is freed.
* the caller waiting on this completion shouldn't touch the evt_struct
@@ -1462,8 +1481,8 @@ static void sync_completion(struct srp_event_struct *evt_struct)
complete(&evt_struct->comp);
}
-/**
- * ibmvscsi_abort: Abort a command...from scsi host template
+/*
+ * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
* send this over to the server and wait synchronously for the response
*/
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
@@ -1600,7 +1619,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
return SUCCESS;
}
-/**
+/*
* ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
* template send this over to the server and wait synchronously for the
* response
@@ -1779,7 +1798,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
return;
case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
dev_info(hostdata->dev, "Re-enabling adapter!\n");
@@ -1866,7 +1885,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
* ibmvscsi_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
- * @reason: calling context
*
* Return value:
* actual depth set
@@ -2047,18 +2065,20 @@ static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
return 0;
}
-static struct device_attribute *ibmvscsi_attrs[] = {
- &ibmvscsi_host_vhost_loc,
- &ibmvscsi_host_vhost_name,
- &ibmvscsi_host_srp_version,
- &ibmvscsi_host_partition_name,
- &ibmvscsi_host_partition_number,
- &ibmvscsi_host_mad_version,
- &ibmvscsi_host_os_type,
- &ibmvscsi_host_config,
+static struct attribute *ibmvscsi_host_attrs[] = {
+ &ibmvscsi_host_vhost_loc.attr,
+ &ibmvscsi_host_vhost_name.attr,
+ &ibmvscsi_host_srp_version.attr,
+ &ibmvscsi_host_partition_name.attr,
+ &ibmvscsi_host_partition_number.attr,
+ &ibmvscsi_host_mad_version.attr,
+ &ibmvscsi_host_os_type.attr,
+ &ibmvscsi_host_config.attr,
NULL
};
+ATTRIBUTE_GROUPS(ibmvscsi_host);
+
/* ------------------------------------------------------------
* SCSI driver registration
*/
@@ -2078,7 +2098,7 @@ static struct scsi_host_template driver_template = {
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .shost_attrs = ibmvscsi_attrs,
+ .shost_groups = ibmvscsi_host_groups,
};
/**
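This is the tail of the tree-wide shost_attrs -> shost_groups conversion. ATTRIBUTE_GROUPS(ibmvscsi_host) expands to roughly the following (per <linux/sysfs.h>), which is why the template can point at ibmvscsi_host_groups:

static const struct attribute_group ibmvscsi_host_group = {
	.attrs = ibmvscsi_host_attrs,	/* the NULL-terminated array above */
};

static const struct attribute_group *ibmvscsi_host_groups[] = {
	&ibmvscsi_host_group,
	NULL,
};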
@@ -2135,12 +2155,12 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
}
hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc) {
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
dev_err(hostdata->dev, "error after %s\n", action);
}
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
scsi_unblock_requests(hostdata->host);
}
@@ -2196,7 +2216,7 @@ static int ibmvscsi_work(void *data)
return 0;
}
-/**
+/*
* Called by bus code for each adapter
*/
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -2224,7 +2244,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
init_waitqueue_head(&hostdata->work_wait_q);
hostdata->host = host;
hostdata->dev = dev;
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
if (map_persist_bufs(hostdata)) {
@@ -2317,19 +2337,15 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
return -1;
}
-static int ibmvscsi_remove(struct vio_dev *vdev)
+static void ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
- unsigned long flags;
srp_remove_host(hostdata->host);
scsi_remove_host(hostdata->host);
purge_requests(hostdata, DID_ERROR);
-
- spin_lock_irqsave(hostdata->host->host_lock, flags);
release_event_pool(&hostdata->pool, hostdata);
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
@@ -2342,8 +2358,6 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
spin_unlock(&ibmvscsi_driver_lock);
scsi_host_put(hostdata->host);
-
- return 0;
}
/**
@@ -2362,7 +2376,7 @@ static int ibmvscsi_resume(struct device *dev)
return 0;
}
-/**
+/*
* ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
* support.
*/
@@ -2388,7 +2402,7 @@ static struct vio_driver ibmvscsi_driver = {
static struct srp_function_template ibmvscsi_transport_functions = {
};
-int __init ibmvscsi_module_init(void)
+static int __init ibmvscsi_module_init(void)
{
int ret;
@@ -2410,7 +2424,7 @@ int __init ibmvscsi_module_init(void)
return ret;
}
-void __exit ibmvscsi_module_exit(void)
+static void __exit ibmvscsi_module_exit(void)
{
vio_unregister_driver(&ibmvscsi_driver);
srp_release_transport(ibmvscsi_transport_template);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index d9e94e81da01..e8770310a64b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -35,7 +36,7 @@
#define IBMVSCSIS_VERSION "v0.2"
-#define INITIAL_SRP_LIMIT 800
+#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024
@@ -128,10 +129,10 @@ static bool connection_broken(struct scsi_info *vscsi)
* This function calls h_free_q then frees the interrupt bit etc.
* It must release the lock before doing so because of the time it can take
* for h_free_crq in PHYP
- * NOTE: the caller must make sure that state and or flags will prevent
- * interrupt handler from scheduling work.
- * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
- * we can't do it here, because we don't have the lock
+ * NOTE: * the caller must make sure that state and/or flags will prevent

+ * interrupt handler from scheduling work.
+ * * anyone calling this function may need to set the CRQ_CLOSED flag
+ * we can't do it here, because we don't have the lock
*
* EXECUTION ENVIRONMENT:
* Process level
@@ -443,7 +444,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
break;
/*
- * Can transition from this state to to unconfiguring
+ * Can transition from this state to unconfiguring
* or err disconnect.
*/
case ERR_DISCONNECT_RECONNECT:
@@ -1581,7 +1582,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
case H_PERMISSION:
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
- /* Fall through */
+ fallthrough;
default:
dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
rc);
@@ -1871,11 +1872,8 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
- u64 msg_hi = 0;
- /* note do not attempt to access the IU_data_ptr with this pointer
- * it is not valid
- */
- struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
+ struct viosrp_crq empty_crq = { };
+ struct viosrp_crq *crq = &empty_crq;
struct ibmvscsis_cmd *cmd, *nxt;
long rc = ADAPT_SUCCESS;
bool retry = false;
@@ -1939,7 +1937,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
crq->IU_length = cpu_to_be16(cmd->rsp.len);
rc = h_send_crq(vscsi->dma_dev->unit_address,
- be64_to_cpu(msg_hi),
+ be64_to_cpu(crq->high),
be64_to_cpu(cmd->rsp.tag));
dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
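The old code built the CRQ message by casting a u64 to struct viosrp_crq *, which only worked because just the first eight bytes were ever touched, and it punned types in exactly the way the deleted IU_data_ptr warning comment hinted at. A zero-initialized struct gives the same all-zero starting state with real, typed field access; an illustrative fragment:

struct viosrp_crq empty_crq = { };	/* every field zeroed */
struct viosrp_crq *crq = &empty_crq;

crq->valid = 0x80;			/* illustrative typed store */
/* h_send_crq() then takes be64_to_cpu(crq->high) instead of a punned
 * local u64, so the wire layout is spelled out by the struct itself. */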
@@ -2489,10 +2487,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
break;
case H_CLOSED:
vscsi->flags |= CLIENT_FAILED;
- /* Fall through */
+ fallthrough;
case H_DROPPED:
vscsi->flags |= RESPONSE_Q_DOWN;
- /* Fall through */
+ fallthrough;
case H_REMOTE_PARM:
dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
rc);
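The /* Fall through */ comments become the fallthrough pseudo-keyword (mapped to __attribute__((__fallthrough__)) where the compiler supports it), so -Wimplicit-fallthrough can tell deliberate case fall-through from a missing break:

switch (rc) {
case H_CLOSED:
	vscsi->flags |= CLIENT_FAILED;
	fallthrough;			/* deliberate, compiler-checked */
case H_DROPPED:
	vscsi->flags |= RESPONSE_Q_DOWN;
	break;
default:
	break;
}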
@@ -2670,7 +2668,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
u64 data_len = 0;
enum dma_data_direction dir;
int attr = 0;
- int rc = 0;
nexus = vscsi->tport.ibmv_nexus;
/*
@@ -2725,17 +2722,9 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
srp->lun.scsi_lun[0] &= 0x3f;
- rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
- cmd->sense_buf, scsilun_to_int(&srp->lun),
- data_len, attr, dir, 0);
- if (rc) {
- dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
- spin_lock_bh(&vscsi->intr_lock);
- list_del(&cmd->list);
- ibmvscsis_free_cmd_resources(vscsi, cmd);
- spin_unlock_bh(&vscsi->intr_lock);
- goto fail;
- }
+ target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+ cmd->sense_buf, scsilun_to_int(&srp->lun),
+ data_len, attr, dir, 0);
return;
fail:
@@ -3595,7 +3584,7 @@ free_adapter:
return rc;
}
-static int ibmvscsis_remove(struct vio_dev *vdev)
+static void ibmvscsis_remove(struct vio_dev *vdev)
{
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
@@ -3622,8 +3611,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
list_del(&vscsi->list);
spin_unlock_bh(&ibmvscsis_dev_lock);
kfree(vscsi);
-
- return 0;
}
static ssize_t system_id_show(struct device *dev,
@@ -3958,41 +3945,16 @@ static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
NULL,
};
-static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
- char *page)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct ibmvscsis_tport *tport = container_of(se_tpg,
- struct ibmvscsis_tport,
- se_tpg);
-
- return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
-}
-static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
+static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct ibmvscsis_tport *tport = container_of(se_tpg,
struct ibmvscsis_tport,
se_tpg);
struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- unsigned long tmp;
- int rc;
long lrc;
- rc = kstrtoul(page, 0, &tmp);
- if (rc < 0) {
- dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
- return -EINVAL;
- }
-
- if ((tmp != 0) && (tmp != 1)) {
- dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
- return -EINVAL;
- }
-
- if (tmp) {
+ if (enable) {
spin_lock_bh(&vscsi->intr_lock);
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
@@ -4008,17 +3970,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
- vscsi->state);
-
- return count;
+ return 0;
}
-CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
-
-static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
- &ibmvscsis_tpg_attr_enable,
- NULL,
-};
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
@@ -4048,10 +4001,10 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
.fabric_make_wwn = ibmvscsis_make_tport,
.fabric_drop_wwn = ibmvscsis_drop_tport,
.fabric_make_tpg = ibmvscsis_make_tpg,
+ .fabric_enable_tpg = ibmvscsis_enable_tpg,
.fabric_drop_tpg = ibmvscsis_drop_tpg,
.tfc_wwn_attrs = ibmvscsis_wwn_attrs,
- .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs,
};
static void ibmvscsis_dev_release(struct device *dev) {};
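With .fabric_enable_tpg in place, the target core owns the configfs "enable" attribute: it parses the boolean and calls the fabric back, which is why the driver's kstrtoul() plumbing and its tfc_tpg_base_attrs entry could be deleted above. The minimal shape of the hook, with hypothetical names:

static int my_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
	/* flip driver state for the tpg; 0 on success, -errno on error */
	return 0;
}

static const struct target_core_fabric_ops my_ops = {
	.fabric_enable_tpg	= my_enable_tpg,
	/* no hand-rolled "enable" attribute required any more */
};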
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 2519fb7aee51..7a499d621c25 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -66,7 +66,7 @@ static void got_it(imm_struct *dev)
{
dev->base = dev->dev->port->base;
if (dev->cur_cmd)
- dev->cur_cmd->SCp.phase = 1;
+ imm_scsi_pointer(dev->cur_cmd)->phase = 1;
else
wake_up(dev->waiting);
}
@@ -618,13 +618,14 @@ static inline int imm_send_command(struct scsi_cmnd *cmd)
* The driver appears to remain stable if we speed up the parallel port
* i/o in this function, but not elsewhere.
*/
-static int imm_completion(struct scsi_cmnd *cmd)
+static int imm_completion(struct scsi_cmnd *const cmd)
{
/* Return codes:
* -1 Error
* 0 Told to schedule
* 1 Finished data transfer
*/
+ struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd);
imm_struct *dev = imm_dev(cmd->device->host);
unsigned short ppb = dev->base;
unsigned long start_jiffies = jiffies;
@@ -660,44 +661,43 @@ static int imm_completion(struct scsi_cmnd *cmd)
* a) Drive status is screwy (!ready && !present)
* b) Drive is requesting/sending more data than expected
*/
- if (((r & 0x88) != 0x88) || (cmd->SCp.this_residual <= 0)) {
+ if ((r & 0x88) != 0x88 || scsi_pointer->this_residual <= 0) {
imm_fail(dev, DID_ERROR);
return -1; /* ERROR_RETURN */
}
/* determine if we should use burst I/O */
if (dev->rd == 0) {
- fast = (bulk
- && (cmd->SCp.this_residual >=
- IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 2;
- status = imm_out(dev, cmd->SCp.ptr, fast);
+ fast = bulk && scsi_pointer->this_residual >=
+ IMM_BURST_SIZE ? IMM_BURST_SIZE : 2;
+ status = imm_out(dev, scsi_pointer->ptr, fast);
} else {
- fast = (bulk
- && (cmd->SCp.this_residual >=
- IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 1;
- status = imm_in(dev, cmd->SCp.ptr, fast);
+ fast = bulk && scsi_pointer->this_residual >=
+ IMM_BURST_SIZE ? IMM_BURST_SIZE : 1;
+ status = imm_in(dev, scsi_pointer->ptr, fast);
}
- cmd->SCp.ptr += fast;
- cmd->SCp.this_residual -= fast;
+ scsi_pointer->ptr += fast;
+ scsi_pointer->this_residual -= fast;
if (!status) {
imm_fail(dev, DID_BUS_BUSY);
return -1; /* ERROR_RETURN */
}
- if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ if (scsi_pointer->buffer && !scsi_pointer->this_residual) {
/* if scatter/gather, advance to the next segment */
- if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- cmd->SCp.this_residual =
- cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (scsi_pointer->buffers_residual--) {
+ scsi_pointer->buffer =
+ sg_next(scsi_pointer->buffer);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
/*
* Make sure that we transfer even number of bytes
* otherwise it makes imm_byte_out() messy.
*/
- if (cmd->SCp.this_residual & 0x01)
- cmd->SCp.this_residual++;
+ if (scsi_pointer->this_residual & 0x01)
+ scsi_pointer->this_residual++;
}
}
/* Now check to see if the drive is ready to communicate */
@@ -762,20 +762,21 @@ static void imm_interrupt(struct work_struct *work)
}
#endif
- if (cmd->SCp.phase > 1)
+ if (imm_scsi_pointer(cmd)->phase > 1)
imm_disconnect(dev);
imm_pb_dismiss(dev);
spin_lock_irqsave(host->host_lock, flags);
dev->cur_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
-static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
+static int imm_engine(imm_struct *dev, struct scsi_cmnd *const cmd)
{
+ struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd);
unsigned short ppb = dev->base;
unsigned char l = 0, h = 0;
int retv, x;
@@ -786,7 +787,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (dev->failed)
return 0;
- switch (cmd->SCp.phase) {
+ switch (scsi_pointer->phase) {
case 0: /* Phase 0 - Waiting for parport */
if (time_after(jiffies, dev->jstart + HZ)) {
/*
@@ -800,16 +801,16 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
case 1: /* Phase 1 - Connected */
imm_connect(dev, CONNECT_EPP_MAYBE);
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!imm_select(dev, scmd_id(cmd))) {
imm_fail(dev, DID_NO_CONNECT);
return 0;
}
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -818,24 +819,24 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (!imm_send_command(cmd))
return 0;
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = 0;
- cmd->SCp.ptr = NULL;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->ptr = NULL;
}
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.phase++;
- if (cmd->SCp.this_residual & 0x01)
- cmd->SCp.this_residual++;
- /* fall through */
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->phase++;
+ if (scsi_pointer->this_residual & 0x01)
+ scsi_pointer->this_residual++;
+ fallthrough;
case 5: /* Phase 5 - Pre-Data transfer stage */
/* Spin lock for BUSY */
@@ -851,8 +852,8 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if ((dev->dp) && (dev->rd))
if (imm_negotiate(dev))
return 0;
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 6: /* Phase 6 - Data transfer stage */
/* Spin lock for BUSY */
@@ -867,8 +868,8 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
}
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 7: /* Phase 7 - Post data transfer stage */
if ((dev->dp) && (dev->rd)) {
@@ -879,8 +880,8 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
w_ctr(ppb, 0x4);
}
}
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 8: /* Phase 8 - Read status/message */
/* Check for data overrun */
@@ -903,7 +904,6 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
w_ctr(ppb, 0x4);
}
return 0; /* Finished */
- break;
default:
printk("imm: Invalid scsi phase\n");
@@ -911,8 +911,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
-static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int imm_queuecommand_lck(struct scsi_cmnd *cmd)
{
imm_struct *dev = imm_dev(cmd->device->host);
@@ -923,9 +922,8 @@ static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
dev->failed = 0;
dev->jstart = jiffies;
dev->cur_cmd = cmd;
- cmd->scsi_done = done;
cmd->result = DID_ERROR << 16; /* default return code */
- cmd->SCp.phase = 0; /* bus free */
+ imm_scsi_pointer(cmd)->phase = 0; /* bus free */
schedule_delayed_work(&dev->imm_tq, 0);
@@ -964,15 +962,13 @@ static int imm_abort(struct scsi_cmnd *cmd)
* have tied the SCSI_MESSAGE line high in the interface
*/
- switch (cmd->SCp.phase) {
+ switch (imm_scsi_pointer(cmd)->phase) {
case 0: /* Do not have access to parport */
case 1: /* Have not connected to interface */
dev->cur_cmd = NULL; /* Forget the problem */
return SUCCESS;
- break;
default: /* SCSI command sent, can not abort */
return FAILED;
- break;
}
}
@@ -992,7 +988,7 @@ static int imm_reset(struct scsi_cmnd *cmd)
{
imm_struct *dev = imm_dev(cmd->device->host);
- if (cmd->SCp.phase)
+ if (imm_scsi_pointer(cmd)->phase)
imm_disconnect(dev);
dev->cur_cmd = NULL; /* Forget the problem */
@@ -1114,6 +1110,7 @@ static struct scsi_host_template imm_template = {
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = imm_adjust_queue,
+ .cmd_size = sizeof(struct scsi_pointer),
};
/***************************************************************************
@@ -1286,19 +1283,6 @@ static struct parport_driver imm_driver = {
.detach = imm_detach,
.devmodel = true,
};
-
-static int __init imm_driver_init(void)
-{
- printk("imm: Version %s\n", IMM_VERSION);
- return parport_register_driver(&imm_driver);
-}
-
-static void __exit imm_driver_exit(void)
-{
- parport_unregister_driver(&imm_driver);
-}
-
-module_init(imm_driver_init);
-module_exit(imm_driver_exit);
+module_parport_driver(imm_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index 7f2bb35b1b87..411cf94af5b0 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -139,6 +139,11 @@ static char *IMM_MODE_STRING[] =
#define w_ctr(x,y) outb(y, (x)+2)
#endif
+static inline struct scsi_pointer *imm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static int imm_engine(imm_struct *, struct scsi_cmnd *);
#endif /* _IMM_H */
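imm is one of the drivers migrated off the embedded scsi_cmnd::SCp scratch area: the template's .cmd_size asks the midlayer to allocate driver-private bytes behind every command, and scsi_cmd_priv() (roughly cmd + 1) hands them back. A sketch of the pairing:

static struct scsi_host_template example_template = {
	/* ... */
	.cmd_size = sizeof(struct scsi_pointer),  /* per-command private */
};

static void example_use(struct scsi_cmnd *cmd)
{
	struct scsi_pointer *scp = scsi_cmd_priv(cmd);

	scp->phase = 0;		/* what cmd->SCp.phase used to be */
}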
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 1d39628ac947..375261d67619 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -546,7 +546,6 @@ static int initio_reset_scsi(struct initio_host * host, int seconds)
/**
* initio_init - set up an InitIO host adapter
* @host: InitIO host adapter
- * @num_scbs: Number of SCBS
* @bios_addr: BIOS address
*
* Set up the host adapter and devices according to the configuration
@@ -866,17 +865,16 @@ static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_b
struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
- struct scsi_ctrl_blk *tmp, *prev;
+ struct scsi_ctrl_blk *tmp;
u16 scbp_tarlun;
- prev = tmp = host->first_busy;
+ tmp = host->first_busy;
while (tmp != NULL) {
scbp_tarlun = (tmp->lun << 8) | (tmp->target);
if (scbp_tarlun == tarlun) { /* Unlink this SCB */
break;
}
- prev = tmp;
tmp = tmp->next;
}
#if DEBUG_QUEUE
@@ -1168,7 +1166,7 @@ static void tulip_scsi(struct initio_host * host)
return;
}
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
- if ((scb = host->active) != NULL)
+ if (host->active)
initio_next_state(host);
return;
}
@@ -1315,15 +1313,15 @@ static int initio_state_1(struct initio_host * host)
}
if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
active_tc->flags |= TCF_WDTR_DONE;
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo); /* Extended msg length */
- outb(3, host->addr + TUL_SFifo); /* Sync request */
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* Sync request */
outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
} else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
active_tc->flags |= TCF_SYNC_DONE;
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* extended msg length */
- outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
}
@@ -1409,16 +1407,16 @@ static int initio_state_3(struct initio_host * host)
case MSG_OUT: /* Message out phase */
if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
- outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
} else {
active_tc->flags |= TCF_SYNC_DONE;
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* ext. msg len */
- outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
@@ -1479,7 +1477,7 @@ static int initio_state_4(struct initio_host * host)
return -1;
return 6;
} else {
- outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
@@ -1616,7 +1614,7 @@ static int initio_state_6(struct initio_host * host)
break;
case MSG_OUT: /* Message out phase */
- outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
@@ -1789,9 +1787,9 @@ int initio_status_msg(struct initio_host * host)
if (host->phase == MSG_OUT) {
if (host->jsstatus0 & TSS_PAR_ERROR)
- outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo);
else
- outb(MSG_NOP, host->addr + TUL_SFifo);
+ outb(NOP, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -1802,7 +1800,7 @@ int initio_status_msg(struct initio_host * host)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
- outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -1815,7 +1813,8 @@ int initio_status_msg(struct initio_host * host)
return initio_wait_done_disc(host);
}
- if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
+ if (msg == LINKED_CMD_COMPLETE ||
+ msg == LINKED_FLG_CMD_COMPLETE) {
if ((scb->tastat & 0x18) == 0x10)
return initio_msgin_accept(host);
}
@@ -1887,7 +1886,7 @@ static int int_initio_scsi_rst(struct initio_host * host)
}
/**
- * int_initio_scsi_resel - Reselection occurred
+ * int_initio_resel - Reselection occurred
* @host: InitIO host adapter
*
* A SCSI reselection event has been signalled and the interrupt
@@ -1930,7 +1929,8 @@ int int_initio_resel(struct initio_host * host)
return -1;
msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
- if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
+ if (msg < SIMPLE_QUEUE_TAG || msg > ORDERED_QUEUE_TAG)
+ /* Is simple Tag */
goto no_tag;
if (initio_msgin_accept(host) == -1)
@@ -2010,7 +2010,7 @@ static int initio_msgout_abort_targ(struct initio_host * host)
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
- outb(MSG_ABORT, host->addr + TUL_SFifo);
+ outb(ABORT_TASK_SET, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
@@ -2033,7 +2033,7 @@ static int initio_msgout_abort_tag(struct initio_host * host)
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
- outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
+ outb(ABORT_TASK, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
@@ -2059,15 +2059,15 @@ static int initio_msgin(struct initio_host * host)
return -1;
switch (inb(host->addr + TUL_SFifo)) {
- case MSG_DISC: /* Disconnect msg */
+ case DISCONNECT: /* Disconnect msg */
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
- case MSG_SDP:
- case MSG_RESTORE:
- case MSG_NOP:
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ case NOP:
initio_msgin_accept(host);
break;
- case MSG_REJ: /* Clear ATN first */
+ case MESSAGE_REJECT: /* Clear ATN first */
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
host->addr + TUL_SSignal);
active_tc = host->active_tc;
@@ -2076,13 +2076,13 @@ static int initio_msgin(struct initio_host * host)
host->addr + TUL_SSignal);
initio_msgin_accept(host);
break;
- case MSG_EXTEND: /* extended msg */
+ case EXTENDED_MESSAGE: /* extended msg */
initio_msgin_extend(host);
break;
- case MSG_IGNOREWIDE:
+ case IGNORE_WIDE_RESIDUE:
initio_msgin_accept(host);
break;
- case MSG_COMP:
+ case COMMAND_COMPLETE:
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_done_disc(host);
@@ -2104,7 +2104,7 @@ static int initio_msgout_reject(struct initio_host * host)
return -1;
if (host->phase == MSG_OUT) {
- outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
+ outb(MESSAGE_REJECT, host->addr + TUL_SFifo); /* Msg reject */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -2113,7 +2113,7 @@ static int initio_msgout_reject(struct initio_host * host)
static int initio_msgout_ide(struct initio_host * host)
{
- outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
+ outb(INITIATOR_ERROR, host->addr + TUL_SFifo); /* Initiator Detected Error */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -2167,9 +2167,9 @@ static int initio_msgin_extend(struct initio_host * host)
initio_sync_done(host);
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo);
- outb(1, host->addr + TUL_SFifo);
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(host->msg[3], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
@@ -2199,9 +2199,9 @@ static int initio_msgin_extend(struct initio_host * host)
if (initio_msgin_accept(host) != MSG_OUT)
return host->phase;
/* WDTR msg out */
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo);
- outb(3, host->addr + TUL_SFifo);
+ outb(EXTENDED_WDTR, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
@@ -2391,7 +2391,7 @@ int initio_bus_device_reset(struct initio_host * host)
}
tmp = tmp->next;
}
- outb(MSG_DEVRST, host->addr + TUL_SFifo);
+ outb(TARGET_RESET, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
@@ -2553,7 +2553,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
SENSE_SIZE, DMA_FROM_DEVICE);
cblk->senseptr = (u32)dma_addr;
cblk->senselen = SENSE_SIZE;
- cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
+ initio_priv(cmnd)->sense_dma_addr = dma_addr;
cblk->cdblen = cmnd->cmd_len;
/* Clear the returned status */
@@ -2577,7 +2577,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
cblk->bufptr = (u32)dma_addr;
- cmnd->SCp.dma_handle = dma_addr;
+ initio_priv(cmnd)->sglist_dma_addr = dma_addr;
cblk->sglen = nseg;
@@ -2600,23 +2600,18 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
}
/**
- * i91u_queuecommand - Queue a new command if possible
+ * i91u_queuecommand_lck - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
- * @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
-
-static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
struct scsi_ctrl_blk *cmnd;
- cmd->scsi_done = done;
-
cmnd = initio_alloc_scb(host);
if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2649,9 +2644,9 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
}
/**
- * i91u_biospararm - return the "logical geometry
+ * i91u_biosparam - return the "logical geometry"
* @sdev: SCSI device
- * @dev; Matching block device
+ * @dev: Matching block device
* @capacity: Sector size of drive
* @info_array: Return space for BIOS geometry
*
@@ -2709,16 +2704,17 @@ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
- if (cmnd->SCp.ptr) {
+ if (initio_priv(cmnd)->sense_dma_addr) {
dma_unmap_single(&pci_dev->dev,
- (dma_addr_t)((unsigned long)cmnd->SCp.ptr),
+ initio_priv(cmnd)->sense_dma_addr,
SENSE_SIZE, DMA_FROM_DEVICE);
- cmnd->SCp.ptr = NULL;
+ initio_priv(cmnd)->sense_dma_addr = 0;
}
/* request buffer */
if (scsi_sg_count(cmnd)) {
- dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
+ dma_unmap_single(&pci_dev->dev,
+ initio_priv(cmnd)->sglist_dma_addr,
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
@@ -2726,10 +2722,8 @@ static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
}
}
-/**
+/*
* i91uSCBPost - SCSI callback
- * @host: Pointer to host adapter control block.
- * @cmnd: Pointer to SCSI control block.
*
* This is the callback routine invoked when the tulip finishes one
* SCSI command.
@@ -2790,7 +2784,7 @@ static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
cmnd->result = cblk->tastat | (cblk->hastat << 16);
i91u_unmap_scb(host->pci_dev, cmnd);
- cmnd->scsi_done(cmnd); /* Notify system DONE */
+ scsi_done(cmnd); /* Notify system DONE */
initio_release_scb(host, cblk); /* Release SCB for current channel */
}
@@ -2803,6 +2797,7 @@ static struct scsi_host_template initio_template = {
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
+ .cmd_size = sizeof(struct initio_cmd_priv),
};
static int initio_probe_one(struct pci_dev *pdev,
@@ -2854,7 +2849,8 @@ static int initio_probe_one(struct pci_dev *pdev,
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
- if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ scb = kzalloc(i, GFP_KERNEL);
+ if (scb)
break;
}
@@ -2962,20 +2958,8 @@ static struct pci_driver initio_pci_driver = {
.probe = initio_probe_one,
.remove = initio_remove_one,
};
-
-static int __init initio_init_driver(void)
-{
- return pci_register_driver(&initio_pci_driver);
-}
-
-static void __exit initio_exit_driver(void)
-{
- pci_unregister_driver(&initio_pci_driver);
-}
+module_pci_driver(initio_pci_driver);
MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("GPL");
-
-module_init(initio_init_driver);
-module_exit(initio_exit_driver);
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index 219b901bdc25..7c9741552654 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -433,31 +433,6 @@ struct scsi_ctrl_blk {
#define TARGET_BUSY 0x08
#define INI_QUEUE_FULL 0x28
-/* SCSI MESSAGE */
-#define MSG_COMP 0x00
-#define MSG_EXTEND 0x01
-#define MSG_SDP 0x02
-#define MSG_RESTORE 0x03
-#define MSG_DISC 0x04
-#define MSG_IDE 0x05
-#define MSG_ABORT 0x06
-#define MSG_REJ 0x07
-#define MSG_NOP 0x08
-#define MSG_PARITY 0x09
-#define MSG_LINK_COMP 0x0A
-#define MSG_LINK_FLAG 0x0B
-#define MSG_DEVRST 0x0C
-#define MSG_ABORT_TAG 0x0D
-
-/* Queue tag msg: Simple_quque_tag, Head_of_queue_tag, Ordered_queue_tag */
-#define MSG_STAG 0x20
-#define MSG_HTAG 0x21
-#define MSG_OTAG 0x22
-
-#define MSG_IGNOREWIDE 0x23
-
-#define MSG_IDENT 0x80
-
/***********************************************************************
Target Device Control Structure
**********************************************************************/
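The deleted table duplicated SPI message codes that the generic headers already carry; the hunks above substitute like for like. For reference, the mapping (values per <scsi/scsi_proto.h> in the kernels this series targets):

/* MSG_COMP       -> COMMAND_COMPLETE         (0x00)
 * MSG_EXTEND     -> EXTENDED_MESSAGE         (0x01)
 * MSG_SDP        -> SAVE_POINTERS            (0x02)
 * MSG_RESTORE    -> RESTORE_POINTERS         (0x03)
 * MSG_DISC       -> DISCONNECT               (0x04)
 * MSG_IDE        -> INITIATOR_ERROR          (0x05)
 * MSG_ABORT      -> ABORT_TASK_SET           (0x06)
 * MSG_REJ        -> MESSAGE_REJECT           (0x07)
 * MSG_NOP        -> NOP                      (0x08)
 * MSG_PARITY     -> MSG_PARITY_ERROR         (0x09)
 * MSG_LINK_COMP  -> LINKED_CMD_COMPLETE      (0x0a)
 * MSG_LINK_FLAG  -> LINKED_FLG_CMD_COMPLETE  (0x0b)
 * MSG_DEVRST     -> TARGET_RESET             (0x0c)
 * MSG_ABORT_TAG  -> ABORT_TASK               (0x0d)
 * MSG_STAG       -> SIMPLE_QUEUE_TAG         (0x20)
 * MSG_HTAG       -> HEAD_OF_QUEUE_TAG        (0x21)
 * MSG_OTAG       -> ORDERED_QUEUE_TAG        (0x22)
 * MSG_IGNOREWIDE -> IGNORE_WIDE_RESIDUE      (0x23)
 * plus the bare extended-message subtype bytes 1 and 3, which become
 * EXTENDED_SDTR and EXTENDED_WDTR in the hunks above.
 */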
@@ -665,3 +640,12 @@ typedef struct _NVRAM {
#define SCSI_RESET_HOST_RESET 0x200
#define SCSI_RESET_ACTION 0xff
+struct initio_cmd_priv {
+ dma_addr_t sense_dma_addr;
+ dma_addr_t sglist_dma_addr;
+};
+
+static inline struct initio_cmd_priv *initio_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cd8db1349871..9d01a3e3c26a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -670,6 +670,7 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
/**
* ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
* @ipr_cmd: ipr command struct
+ * @fast_done: fast done function call-back
*
* Return value:
* none
@@ -687,7 +688,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
/**
* __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
- * @ioa_cfg: ioa config struct
+ * @hrrq: hrr queue
*
* Return value:
* pointer to ipr command struct
@@ -737,7 +738,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
u32 clr_ints)
{
- volatile u32 int_reg;
int i;
/* Stop new interrupts */
@@ -757,7 +757,7 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
if (ioa_cfg->sis64)
writel(~0, ioa_cfg->regs.clr_interrupt_reg);
writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
@@ -866,7 +866,7 @@ static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -1164,7 +1164,7 @@ static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int pr
default:
res->ata_class = ATA_DEV_UNKNOWN;
break;
- };
+ }
}
/**
@@ -1287,7 +1287,7 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
/**
* __ipr_format_res_path - Format the resource path for printing.
* @res_path: resource path
- * @buf: buffer
+ * @buffer: buffer
* @len: length of buffer provided
*
* Return value:
@@ -1299,9 +1299,9 @@ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
char *p = buffer;
*p = '\0';
- p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
- for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
- p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
+ p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
+ for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
+ p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
return buffer;
}
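The snprintf() -> scnprintf() switch matters for the p += ... idiom: snprintf() returns the length the string would have had, so once the buffer fills, p walks past buffer + len and the next call's size operand buffer + len - p underflows to a huge size_t. scnprintf() returns the bytes actually stored, keeping p inside the buffer. (The following hunk also fixes the mirrored operand order, len - (p - buffer).) A self-contained illustration:

char buf[8];
char *p = buf;

p += snprintf(p, sizeof(buf), "0123456789");   /* returns 10 */
/* p == buf + 10: out of bounds; buf + 8 - p == -2 wraps as size_t */

p = buf;
p += scnprintf(p, sizeof(buf), "0123456789");  /* returns 7 */
/* p == buf + 7: still in bounds; further appends clamp safely */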
@@ -1310,7 +1310,7 @@ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
* ipr_format_res_path - Format the resource path for printing.
* @ioa_cfg: ioa config struct
* @res_path: resource path
- * @buf: buffer
+ * @buffer: buffer
* @len: length of buffer provided
*
* Return value:
@@ -1322,8 +1322,8 @@ static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
char *p = buffer;
*p = '\0';
- p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
- __ipr_format_res_path(res_path, p, len - (buffer - p));
+ p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ __ipr_format_res_path(res_path, p, len - (p - buffer));
return buffer;
}
@@ -1391,7 +1391,6 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
* ipr_clear_res_target - Clear the bit in the bit map representing the target
* for the resource.
* @res: resource entry struct
- * @cfgtew: config table entry wrapper struct
*
* Return value:
* none
@@ -2667,7 +2666,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
/**
* ipr_timeout - An internally generated op has timed out.
- * @ipr_cmd: ipr command struct
+ * @t: Timer context used to fetch ipr command struct
*
* This function blocks host requests and initiates an
* adapter reset.
@@ -2700,7 +2699,7 @@ static void ipr_timeout(struct timer_list *t)
/**
* ipr_oper_timeout - Adapter timed out transitioning to operational
- * @ipr_cmd: ipr command struct
+ * @t: Timer context used to fetch ipr command struct
*
* This function blocks host requests and initiates an
* adapter reset.
@@ -3457,7 +3456,7 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
@@ -3484,6 +3483,7 @@ static struct bin_attribute ipr_trace_attr = {
/**
* ipr_show_fw_version - Show the firmware version
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
*
* Return value:
@@ -3518,6 +3518,7 @@ static struct device_attribute ipr_fw_version_attr = {
/**
* ipr_show_log_level - Show the adapter's error logging level
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
*
* Return value:
@@ -3540,7 +3541,9 @@ static ssize_t ipr_show_log_level(struct device *dev,
/**
* ipr_store_log_level - Change the adapter's error logging level
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
+ * @count: buffer size
*
* Return value:
* number of bytes printed to buffer
@@ -3571,6 +3574,7 @@ static struct device_attribute ipr_log_level_attr = {
/**
* ipr_store_diagnostics - IOA Diagnostics interface
* @dev: device struct
+ * @attr: device attribute (unused)
* @buf: buffer
* @count: buffer size
*
@@ -3631,7 +3635,8 @@ static struct device_attribute ipr_diagnostics_attr = {
/**
* ipr_show_adapter_state - Show the adapter's state
- * @class_dev: device struct
+ * @dev: device struct
+ * @attr: device attribute (unused)
* @buf: buffer
*
* Return value:
@@ -3657,6 +3662,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
/**
* ipr_store_adapter_state - Change adapter state
* @dev: device struct
+ * @attr: device attribute (unused)
* @buf: buffer
* @count: buffer size
*
@@ -3708,6 +3714,7 @@ static struct device_attribute ipr_ioa_state_attr = {
/**
* ipr_store_reset_adapter - Reset the adapter
* @dev: device struct
+ * @attr: device attribute (unused)
* @buf: buffer
* @count: buffer size
*
@@ -3749,6 +3756,7 @@ static int ipr_iopoll(struct irq_poll *iop, int budget);
/**
* ipr_show_iopoll_weight - Show ipr polling mode
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
*
* Return value:
@@ -3772,7 +3780,9 @@ static ssize_t ipr_show_iopoll_weight(struct device *dev,
/**
* ipr_store_iopoll_weight - Change the adapter's polling mode
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
+ * @count: buffer size
*
* Return value:
* number of bytes printed to buffer
@@ -3871,7 +3881,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
/**
* ipr_free_ucode_buffer - Frees a microcode download buffer
- * @p_dnld: scatter/gather list pointer
+ * @sglist: scatter/gather list pointer
*
* Free a DMA'able ucode download buffer previously allocated with
* ipr_alloc_ucode_buffer
@@ -4059,7 +4069,8 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
/**
* ipr_store_update_fw - Update the firmware on the adapter
- * @class_dev: device struct
+ * @dev: device struct
+ * @attr: device attribute (unused)
* @buf: buffer
* @count: buffer size
*
@@ -4139,6 +4150,7 @@ static struct device_attribute ipr_update_fw_attr = {
/**
* ipr_show_fw_type - Show the adapter's firmware type.
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
*
* Return value:
@@ -4170,7 +4182,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_hostrcb *hostrcb;
@@ -4194,7 +4206,7 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_hostrcb *hostrcb;
@@ -4224,18 +4236,20 @@ static struct bin_attribute ipr_ioa_async_err_log = {
.write = ipr_next_async_err_log
};
-static struct device_attribute *ipr_ioa_attrs[] = {
- &ipr_fw_version_attr,
- &ipr_log_level_attr,
- &ipr_diagnostics_attr,
- &ipr_ioa_state_attr,
- &ipr_ioa_reset_attr,
- &ipr_update_fw_attr,
- &ipr_ioa_fw_type_attr,
- &ipr_iopoll_weight_attr,
+static struct attribute *ipr_ioa_attrs[] = {
+ &ipr_fw_version_attr.attr,
+ &ipr_log_level_attr.attr,
+ &ipr_diagnostics_attr.attr,
+ &ipr_ioa_state_attr.attr,
+ &ipr_ioa_reset_attr.attr,
+ &ipr_update_fw_attr.attr,
+ &ipr_ioa_fw_type_attr.attr,
+ &ipr_iopoll_weight_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ipr_ioa);
+
#ifdef CONFIG_SCSI_IPR_DUMP
/**
* ipr_read_dump - Dump the adapter
@@ -4253,7 +4267,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_dump *dump;
@@ -4442,7 +4456,7 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
int rc;
@@ -4480,7 +4494,6 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
* ipr_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
- * @reason: calling context
*
* Return value:
* actual depth set
@@ -4650,6 +4663,7 @@ static struct device_attribute ipr_resource_type_attr = {
/**
* ipr_show_raw_mode - Show the adapter's raw mode
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
*
* Return value:
@@ -4677,7 +4691,9 @@ static ssize_t ipr_show_raw_mode(struct device *dev,
/**
* ipr_store_raw_mode - Change the adapter's raw mode
* @dev: class device struct
+ * @attr: device attribute (unused)
* @buf: buffer
+ * @count: buffer size
*
* Return value:
* number of bytes printed to buffer
@@ -4718,15 +4734,17 @@ static struct device_attribute ipr_raw_mode_attr = {
.store = ipr_store_raw_mode
};
-static struct device_attribute *ipr_dev_attrs[] = {
- &ipr_adapter_handle_attr,
- &ipr_resource_path_attr,
- &ipr_device_id_attr,
- &ipr_resource_type_attr,
- &ipr_raw_mode_attr,
+static struct attribute *ipr_dev_attrs[] = {
+ &ipr_adapter_handle_attr.attr,
+ &ipr_resource_path_attr.attr,
+ &ipr_device_id_attr.attr,
+ &ipr_resource_type_attr.attr,
+ &ipr_raw_mode_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ipr_dev);
+
/**
* ipr_biosparam - Return the HSC mapping
* @sdev: scsi device struct
@@ -5060,7 +5078,7 @@ static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
/**
* ipr_cmnd_is_free - Check if a command is free or not
- * @ipr_cmd ipr command struct
+ * @ipr_cmd: ipr command struct
*
* Returns:
* true / false
@@ -5096,7 +5114,7 @@ static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
/**
* ipr_wait_for_ops - Wait for matching commands to complete
- * @ipr_cmd: ipr command struct
+ * @ioa_cfg: ioa config struct
* @device: device to match (sdev)
* @match: match function to use
*
@@ -5261,6 +5279,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
* ipr_sata_reset - Reset the SATA port
* @link: SATA link to reset
* @classes: class of the attached device
+ * @deadline: unused
*
* This function issues a SATA phy reset to the affected ATA link.
*
@@ -5306,7 +5325,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
}
/**
- * ipr_eh_dev_reset - Reset the device
+ * __ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct
*
* This function issues a device reset to the affected device.
@@ -5440,7 +5459,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
/**
* ipr_abort_timeout - An abort task has timed out
- * @ipr_cmd: ipr command struct
+ * @t: Timer context used to fetch ipr command struct
*
* This function handles when an abort task times out. If this
* happens we issue a bus reset since we have resources tied
@@ -5494,7 +5513,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_cmd_pkt *cmd_pkt;
- u32 ioasc, int_reg;
+ u32 ioasc;
int i, op_found = 0;
struct ipr_hrr_queue *hrrq;
@@ -5517,7 +5536,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
* by a still not detected EEH error. In such cases, reading a register will
* trigger the EEH recovery infrastructure.
*/
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ readl(ioa_cfg->regs.sense_interrupt_reg);
if (!ipr_is_gscsi(res))
return FAILED;
@@ -5568,8 +5587,9 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
}
/**
- * ipr_eh_abort - Abort a single op
- * @scsi_cmd: scsi command struct
+ * ipr_scan_finished - Report whether scan is done
+ * @shost: scsi host struct
+ * @elapsed_time: elapsed time
*
* Return value:
* 0 if scan in progress / 1 if scan is complete
@@ -5590,7 +5610,7 @@ static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time
}
/**
- * ipr_eh_host_reset - Reset the host adapter
+ * ipr_eh_abort - Reset the host adapter
* @scsi_cmd: scsi command struct
*
* Return value:
@@ -5696,6 +5716,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
* ipr_isr_eh - Interrupt service routine error handler
* @ioa_cfg: ioa config struct
* @msg: message to log
+ * @number: various meanings depending on the caller/message
*
* Return value:
* none
@@ -5762,7 +5783,6 @@ static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
static int ipr_iopoll(struct irq_poll *iop, int budget)
{
- struct ipr_ioa_cfg *ioa_cfg;
struct ipr_hrr_queue *hrrq;
struct ipr_cmnd *ipr_cmd, *temp;
unsigned long hrrq_flags;
@@ -5770,7 +5790,6 @@ static int ipr_iopoll(struct irq_poll *iop, int budget)
LIST_HEAD(doneq);
hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
- ioa_cfg = hrrq->ioa_cfg;
spin_lock_irqsave(hrrq->lock, hrrq_flags);
completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
@@ -6050,7 +6069,7 @@ static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -6268,8 +6287,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
/**
* ipr_gen_sense - Generate SCSI sense data from an IOASA
- * @ioasa: IOASA
- * @sense_buf: sense data buffer
+ * @ipr_cmd: ipr command struct
*
* Return value:
* none
@@ -6488,7 +6506,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -6517,7 +6535,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@@ -6671,7 +6689,7 @@ err_nodev:
spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
@@ -6701,8 +6719,8 @@ static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
}
/**
- * ipr_info - Get information about the card/driver
- * @scsi_host: scsi host struct
+ * ipr_ioa_info - Get information about the card/driver
+ * @host: scsi host struct
*
* Return value:
* pointer to buffer with description string
@@ -6731,6 +6749,7 @@ static struct scsi_host_template driver_template = {
.compat_ioctl = ipr_ioctl,
#endif
.queuecommand = ipr_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
@@ -6747,8 +6766,8 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = IPR_MAX_SGLIST,
.max_sectors = IPR_IOA_MAX_SECTORS,
.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
- .shost_attrs = ipr_ioa_attrs,
- .sdev_attrs = ipr_dev_attrs,
+ .shost_groups = ipr_ioa_groups,
+ .sdev_groups = ipr_dev_groups,
.proc_name = IPR_NAME,
};
@@ -7591,7 +7610,7 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
/**
* ipr_build_mode_sense - Builds a mode sense command
* @ipr_cmd: ipr command struct
- * @res: resource entry struct
+ * @res_handle: resource entry struct
* @parm: Byte 2 of mode sense command
* @dma_addr: DMA address of mode sense buffer
* @xfer_len: Size of DMA buffer
@@ -7938,6 +7957,7 @@ static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
/**
* ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
* action
+ * @ipr_cmd: ipr command struct
*
* Return value:
* none
@@ -7974,6 +7994,10 @@ static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
/**
* ipr_ioafp_inquiry - Send an Inquiry to the adapter.
* @ipr_cmd: ipr command struct
+ * @flags: flags to send
+ * @page: page to inquire
+ * @dma_addr: DMA address
+ * @xfer_len: transfer data length
*
* This utility function sends an inquiry to the adapter.
*
@@ -8264,7 +8288,7 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
/**
* ipr_reset_timer_done - Adapter reset timer function
- * @ipr_cmd: ipr command struct
+ * @t: Timer context used to fetch ipr command struct
*
* Description: This function is used in adapter reset processing
* for timing events. If the reset_cmd pointer in the IOA
@@ -8658,7 +8682,6 @@ static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- u32 int_reg;
ENTER;
ioa_cfg->pdev->state_saved = true;
@@ -8674,7 +8697,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
- int_reg = readl(ioa_cfg->regs.endian_swap_reg);
+ readl(ioa_cfg->regs.endian_swap_reg);
}
if (ioa_cfg->ioa_unit_checked) {
@@ -9468,7 +9491,6 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
ipr_pci_perm_failure(pdev);
return PCI_ERS_RESULT_DISCONNECT;
- break;
default:
break;
}
@@ -9482,7 +9504,6 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
* Description: This is the second phase of adapter initialization.
* This function takes care of initializing the adapter to the point
* where it can accept new commands.
-
* Return value:
* 0 on success / -EIO on failure
**/
@@ -9529,8 +9550,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
}
- if (ioa_cfg->ipr_cmd_pool)
- dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
+ dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
@@ -9597,7 +9617,7 @@ static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
/**
* ipr_free_all_resources - Free all allocated resources for an adapter.
- * @ipr_cmd: ipr command struct
+ * @ioa_cfg: ioa config struct
*
* This function frees all allocated resources for the
* specified adapter.
@@ -9775,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) {
- while (--i > 0)
+ while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@@ -10048,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
- while (--i >= 0)
+ while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
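
The two loop fixes above go in opposite directions because the unwind bound must mirror where the set-up loop started: ipr_alloc_mem() allocates from index 0, so its error path must free down to and including 0, while ipr_request_other_msi_irqs() starts at vector 1 (vector 0 is requested elsewhere), so its unwind must stop before 0. A sketch of the first case, with hypothetical names:

	#include <linux/slab.h>

	static int alloc_bufs(void **bufs, int n, size_t sz)
	{
		int i;

		for (i = 0; i < n; i++) {
			bufs[i] = kmalloc(sz, GFP_KERNEL);
			if (!bufs[i]) {
				while (--i >= 0)	/* release everything acquired so far */
					kfree(bufs[i]);
				return -ENOMEM;
			}
		}
		return 0;
	}
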
@@ -10059,7 +10079,8 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
/**
* ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
- * @pdev: PCI device struct
+ * @devp: PCI device struct
+ * @irq: IRQ number
*
* Description: Simply set the msi_received flag to 1 indicating that
* Message Signaled Interrupts are supported.
@@ -10071,7 +10092,6 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
unsigned long lock_flags = 0;
- irqreturn_t rc = IRQ_HANDLED;
dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10080,11 +10100,12 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
wake_up(&ioa_cfg->msi_wait_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return rc;
+ return IRQ_HANDLED;
}
/**
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
+ * @ioa_cfg: ioa config struct
* @pdev: PCI device struct
*
* Description: This routine sets up and initiates a test interrupt to determine
@@ -10097,7 +10118,6 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
int rc;
- volatile u32 int_reg;
unsigned long lock_flags = 0;
int irq = pci_irq_vector(pdev, 0);
@@ -10108,7 +10128,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
ioa_cfg->msi_received = 0;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
@@ -10119,7 +10139,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ readl(ioa_cfg->regs.sense_interrupt_reg);
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
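
The readl() calls kept above look like dead reads now that the int_reg locals are gone, but they are deliberate: a read from the same device flushes posted MMIO writes before execution continues, and the returned value is simply discarded. A sketch of the idiom, with a hypothetical register pointer:

	#include <linux/io.h>

	static void write_and_flush(void __iomem *reg, u32 val)
	{
		writel(val, reg);
		/*
		 * Read back to force the posted write out to the adapter; no
		 * local variable is needed because only the side effect matters.
		 */
		readl(reg);
	}
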
@@ -10530,6 +10550,8 @@ static void ipr_remove(struct pci_dev *pdev)
/**
* ipr_probe - Adapter hot plug add entry point
+ * @pdev: pci device struct
+ * @dev_id: pci device ID
*
* Return value:
* 0 on success / non-zero on failure
@@ -10786,6 +10808,7 @@ static struct pci_driver ipr_driver = {
/**
* ipr_halt_done - Shutdown prepare completion
+ * @ipr_cmd: ipr command struct
*
* Return value:
* none
@@ -10797,6 +10820,9 @@ static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
/**
* ipr_halt - Issue shutdown prepare to all adapters
+ * @nb: Notifier block
+ * @event: Notifier event
+ * @buf: Notifier data (unused)
*
* Return value:
* NOTIFY_OK on success / NOTIFY_DONE on failure
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b97aa9ac2ffe..69444d21fca1 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -428,6 +428,7 @@ struct ipr_config_table_entry64 {
__be64 lun;
__be64 lun_wwn[2];
#define IPR_MAX_RES_PATH_LENGTH 48
+#define IPR_RES_PATH_BYTES 8
__be64 res_path;
struct ipr_std_inq_data std_inq_data;
u8 reserved2[4];
@@ -451,12 +452,12 @@ struct ipr_config_table_hdr64 {
struct ipr_config_table {
struct ipr_config_table_hdr hdr;
- struct ipr_config_table_entry dev[0];
+ struct ipr_config_table_entry dev[];
}__attribute__((packed, aligned (4)));
struct ipr_config_table64 {
struct ipr_config_table_hdr64 hdr64;
- struct ipr_config_table_entry64 dev[0];
+ struct ipr_config_table_entry64 dev[];
}__attribute__((packed, aligned (8)));
struct ipr_config_table_entry_wrapper {
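
The dev[0] members above become C99 flexible array members, which gives the compiler and the kernel's bounds checks visibility into the true allocation size. Such structs are typically allocated with struct_size(); a sketch with illustrative types:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct cfg_table {
		u32 num_entries;
		struct cfg_entry { u64 handle; } dev[];	/* was: dev[0] */
	};

	static struct cfg_table *alloc_cfg_table(u32 n)
	{
		struct cfg_table *t;

		/* struct_size() = sizeof(*t) + n * sizeof(t->dev[0]), overflow-checked */
		t = kzalloc(struct_size(t, dev, n), GFP_KERNEL);
		if (t)
			t->num_entries = n;
		return t;
	}
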
@@ -792,7 +793,7 @@ struct ipr_mode_page28 {
struct ipr_mode_page_hdr hdr;
u8 num_entries;
u8 entry_length;
- struct ipr_dev_bus_entry bus[0];
+ struct ipr_dev_bus_entry bus[];
}__attribute__((packed));
struct ipr_mode_page24 {
@@ -1684,7 +1685,7 @@ struct ipr_dump_entry_header {
struct ipr_dump_location_entry {
struct ipr_dump_entry_header hdr;
u8 location[20];
-}__attribute__((packed));
+}__attribute__((packed, aligned (4)));
struct ipr_dump_trace_entry {
struct ipr_dump_entry_header hdr;
@@ -1708,7 +1709,7 @@ struct ipr_driver_dump {
struct ipr_dump_location_entry location_entry;
struct ipr_dump_ioa_type_entry ioa_type_entry;
struct ipr_dump_trace_entry trace_entry;
-}__attribute__((packed));
+}__attribute__((packed, aligned (4)));
struct ipr_ioa_dump {
struct ipr_dump_entry_header hdr;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index f25672982c5f..16419aeec02d 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -180,9 +180,13 @@
#include <linux/types.h>
#include <linux/dma-mapping.h>
-#include <scsi/sg.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sg.h>
#include "ips.h"
@@ -638,8 +642,7 @@ ips_setup_funclist(ips_ha_t * ha)
/* Remove a driver */
/* */
/****************************************************************************/
-static int
-ips_release(struct Scsi_Host *sh)
+static void ips_release(struct Scsi_Host *sh)
{
ips_scb_t *scb;
ips_ha_t *ha;
@@ -655,13 +658,12 @@ ips_release(struct Scsi_Host *sh)
printk(KERN_WARNING
"(%s) release, invalid Scsi_Host pointer.\n", ips_name);
BUG();
- return (FALSE);
}
ha = IPS_HA(sh);
if (!ha)
- return (FALSE);
+ return;
/* flush the cache on the controller */
scb = &ha->scbs[ha->max_cmds - 1];
@@ -699,8 +701,6 @@ ips_release(struct Scsi_Host *sh)
scsi_host_put(sh);
ips_released_controllers++;
-
- return (FALSE);
}
/****************************************************************************/
@@ -936,7 +936,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -946,10 +946,10 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
scsi_cmd->result = DID_ERROR;
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
- ha->active = FALSE;
+ ha->active = false;
return (FAILED);
}
@@ -965,7 +965,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -975,10 +975,10 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
scsi_cmd->result = DID_ERROR << 16;
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
- ha->active = FALSE;
+ ha->active = false;
return (FAILED);
}
@@ -994,7 +994,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_RESET << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -1035,8 +1035,9 @@ static int ips_eh_reset(struct scsi_cmnd *SC)
/* Linux obtains io_request_lock before calling this function */
/* */
/****************************************************************************/
-static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
+static int ips_queue_lck(struct scsi_cmnd *SC)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
ips_ha_t *ha;
ips_passthru_t *pt;
@@ -1045,10 +1046,10 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ha = (ips_ha_t *) SC->device->host->hostdata;
if (!ha)
- return (1);
+ goto out_error;
if (!ha->active)
- return (DID_ERROR);
+ goto out_error;
if (ips_is_passthru(SC)) {
if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
@@ -1064,8 +1065,6 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
return (0);
}
- SC->scsi_done = done;
-
DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
ips_name,
ha->host_num,
@@ -1099,7 +1098,7 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ha->ioctl_reset = 1; /* This reset request is from an IOCTL */
__ips_eh_reset(SC);
SC->result = DID_OK << 16;
- SC->scsi_done(SC);
+ scsi_done(SC);
return (0);
}
@@ -1124,6 +1123,11 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ips_next(ha, IPS_INTR_IORL);
return (0);
+out_error:
+ SC->result = DID_ERROR << 16;
+ done(SC);
+
+ return (0);
}
static DEF_SCSI_QCMD(ips_queue)
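
With the queuecommand prototype change, ips_queue_lck() no longer receives a completion callback: DEF_SCSI_QCMD() generates the host_lock-holding wrapper, and both success and failure paths complete through scsi_done(). A sketch of the resulting shape; the foo_ names and the trivial adapter check are hypothetical:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	static bool foo_adapter_alive(struct scsi_cmnd *scmd)
	{
		return true;		/* stand-in for a real adapter-state check */
	}

	static int foo_queue_lck(struct scsi_cmnd *scmd)
	{
		if (!foo_adapter_alive(scmd)) {
			/*
			 * A dead adapter still "queues" the command successfully:
			 * set the result, complete via scsi_done(), and return 0
			 * so the midlayer does not resubmit.
			 */
			scmd->result = DID_ERROR << 16;
			scsi_done(scmd);
			return 0;
		}
		/* ... hand scmd to the hardware here ... */
		return 0;
	}

	static DEF_SCSI_QCMD(foo_queue)	/* defines foo_queue(), calling foo_queue_lck() */
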
@@ -1287,7 +1291,7 @@ ips_intr_copperhead(ips_ha_t * ha)
return 0;
}
- while (TRUE) {
+ while (true) {
sp = &ha->sp;
intrstatus = (*ha->func.isintr) (ha);
@@ -1351,7 +1355,7 @@ ips_intr_morpheus(ips_ha_t * ha)
return 0;
}
- while (TRUE) {
+ while (true) {
sp = &ha->sp;
intrstatus = (*ha->func.isintr) (ha);
@@ -2239,7 +2243,7 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
major = 0;
minor = 0;
- strncpy(ha->bios_version, "       ?", 8);
+ memcpy(ha->bios_version, "       ?", 8);
if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
if (IPS_USE_MEMIO(ha)) {
@@ -2574,7 +2578,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -2582,7 +2586,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_OK << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -2707,7 +2711,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
if (scb->bus)
@@ -2718,7 +2722,7 @@ ips_next(ips_ha_t * ha, int intr)
break;
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd)
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
if (scb->bus)
ha->dcdb_active[scb->bus - 1] &=
@@ -3086,8 +3090,8 @@ ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ipsintr_blocking", 2);
ips_freescb(ha, scb);
- if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
- ha->waitflag = FALSE;
+ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) {
+ ha->waitflag = false;
return;
}
@@ -3201,7 +3205,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -3209,7 +3213,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@@ -3226,7 +3230,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
}
- scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@@ -3339,13 +3343,15 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
IPS_CMD_EXTENDED_DCDB_SG)) {
tapeDCDB =
(IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
- memcpy(scb->scsi_cmd->sense_buffer,
+ memcpy_and_pad(scb->scsi_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
tapeDCDB->sense_info,
- SCSI_SENSE_BUFFERSIZE);
+ sizeof(tapeDCDB->sense_info), 0);
} else {
- memcpy(scb->scsi_cmd->sense_buffer,
+ memcpy_and_pad(scb->scsi_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
scb->dcdb.sense_info,
- SCSI_SENSE_BUFFERSIZE);
+ sizeof(scb->dcdb.sense_info), 0);
}
device_error = 2; /* check condition */
}
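
The memcpy_and_pad() conversion above fixes an out-of-bounds read: the old code copied SCSI_SENSE_BUFFERSIZE bytes out of a sense_info array that is smaller than that. memcpy_and_pad() copies only what the source holds and zero-fills the rest of the destination. A sketch with illustrative sizes:

	#include <linux/string.h>
	#include <linux/types.h>

	#define DST_LEN 96
	#define SRC_LEN 40

	static void copy_sense(u8 *dst, const u8 *src)
	{
		/* Copies SRC_LEN bytes, then pads dst[SRC_LEN..DST_LEN-1] with 0. */
		memcpy_and_pad(dst, DST_LEN, src, SRC_LEN, 0);
	}
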
@@ -3385,7 +3391,7 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
METHOD_TRACE("ips_send_wait", 1);
if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */
- ha->waitflag = TRUE;
+ ha->waitflag = true;
ha->cmd_in_progress = scb->cdb[0];
}
scb->callback = ipsintr_blocking;
@@ -3462,10 +3468,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
if (scb->bus > 0) {
/* Controller commands can't be issued */
/* to real devices -- fail them */
- if ((ha->waitflag == TRUE) &&
- (ha->cmd_in_progress == scb->cdb[0])) {
- ha->waitflag = FALSE;
- }
+ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0])
+ ha->waitflag = false;
return (1);
}
@@ -3515,11 +3519,11 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
inquiry.Flags[1] =
IPS_SCSI_INQ_WBus16 |
IPS_SCSI_INQ_Sync;
- strncpy(inquiry.VendorId, "IBM     ",
+ memcpy(inquiry.VendorId, "IBM     ",
8);
- strncpy(inquiry.ProductId,
+ memcpy(inquiry.ProductId,
"SERVERAID ", 16);
- strncpy(inquiry.ProductRevisionLevel,
+ memcpy(inquiry.ProductRevisionLevel,
"1.00", 4);
ips_scmd_buf_write(scb->scsi_cmd,
@@ -3728,7 +3732,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
- TimeOut = scb->scsi_cmd->request->timeout;
+ TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
@@ -4036,9 +4040,9 @@ ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
inquiry.Flags[1] =
IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
- strncpy(inquiry.VendorId, "IBM     ", 8);
- strncpy(inquiry.ProductId, "SERVERAID       ", 16);
- strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
+ memcpy(inquiry.VendorId, "IBM     ", 8);
+ memcpy(inquiry.ProductId, "SERVERAID       ", 16);
+ memcpy(inquiry.ProductRevisionLevel, "1.00", 4);
ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
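
strncpy() was the wrong tool for these INQUIRY identification fields: they are fixed-width, space-padded, and never NUL-terminated, and the string literals are sized exactly to the fields, so memcpy() states the intent and avoids -Wstringop-truncation warnings. A sketch with an illustrative layout:

	#include <linux/string.h>

	struct inq_ids {
		char vendor[8];		/* space-padded, no NUL terminator */
		char product[16];
	};

	static void fill_ids(struct inq_ids *inq)
	{
		memcpy(inq->vendor,  "IBM     ", 8);		/* "IBM" + 5 spaces */
		memcpy(inq->product, "SERVERAID       ", 16);	/* 9 chars + 7 spaces */
	}
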
@@ -4613,7 +4617,7 @@ ips_poll_for_flush_complete(ips_ha_t * ha)
{
IPS_STATUS cstatus;
- while (TRUE) {
+ while (true) {
cstatus.value = (*ha->func.statupd) (ha);
if (cstatus.value == 0xffffffff) /* If No Interrupt to process */
@@ -4697,7 +4701,6 @@ ips_init_copperhead(ips_ha_t * ha)
uint8_t Isr;
uint8_t Cbsp;
uint8_t PostByte[IPS_MAX_POST_BYTES];
- uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
int i, j;
METHOD_TRACE("ips_init_copperhead", 1);
@@ -4742,7 +4745,7 @@ ips_init_copperhead(ips_ha_t * ha)
/* error occurred */
return (0);
- ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
+ inb(ha->io_addr + IPS_REG_ISPR);
outb(Isr, ha->io_addr + IPS_REG_HISR);
}
@@ -4791,7 +4794,6 @@ ips_init_copperhead_memio(ips_ha_t * ha)
uint8_t Isr = 0;
uint8_t Cbsp;
uint8_t PostByte[IPS_MAX_POST_BYTES];
- uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
int i, j;
METHOD_TRACE("ips_init_copperhead_memio", 1);
@@ -4836,7 +4838,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
/* error occurred */
return (0);
- ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
+ readb(ha->mem_ptr + IPS_REG_ISPR);
writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
}
@@ -5538,26 +5540,26 @@ ips_wait(ips_ha_t * ha, int time, int intr)
METHOD_TRACE("ips_wait", 1);
ret = IPS_FAILURE;
- done = FALSE;
+ done = false;
time *= IPS_ONE_SEC; /* convert seconds */
while ((time > 0) && (!done)) {
if (intr == IPS_INTR_ON) {
- if (ha->waitflag == FALSE) {
+ if (!ha->waitflag) {
ret = IPS_SUCCESS;
- done = TRUE;
+ done = true;
break;
}
} else if (intr == IPS_INTR_IORL) {
- if (ha->waitflag == FALSE) {
+ if (!ha->waitflag) {
/*
* controller generated an interrupt to
* acknowledge completion of the command
* and ips_intr() has serviced the interrupt.
*/
ret = IPS_SUCCESS;
- done = TRUE;
+ done = true;
break;
}
@@ -5592,7 +5594,7 @@ ips_write_driver_status(ips_ha_t * ha, int intr)
{
METHOD_TRACE("ips_write_driver_status", 1);
- if (!ips_readwrite_page5(ha, FALSE, intr)) {
+ if (!ips_readwrite_page5(ha, false, intr)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"unable to read NVRAM page 5.\n");
@@ -5622,15 +5624,15 @@ ips_write_driver_status(ips_ha_t * ha, int intr)
/* change values (as needed) */
ha->nvram->operating_system = IPS_OS_LINUX;
ha->nvram->adapter_type = ha->ad_type;
- strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
- strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
- strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
- strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
+ memcpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
+ memcpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
+ memcpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
+ memcpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
ha->nvram->versioning = 0; /* Indicate the Driver Does Not Support Versioning */
/* now update the page */
- if (!ips_readwrite_page5(ha, TRUE, intr)) {
+ if (!ips_readwrite_page5(ha, true, intr)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"unable to write NVRAM page 5.\n");
@@ -6835,8 +6837,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
uint32_t mem_addr;
uint32_t io_len;
uint32_t mem_len;
- uint8_t bus;
- uint8_t func;
int j;
int index;
dma_addr_t dma_address;
@@ -6856,10 +6856,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
if (index >= IPS_MAX_ADAPTERS)
return -1;
- /* stuff that we get in dev */
- bus = pci_dev->bus->number;
- func = pci_dev->devfn;
-
/* Init MEM/IO addresses to 0 */
mem_addr = 0;
io_addr = 0;
@@ -7102,23 +7098,3 @@ ips_init_phase2(int index)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
MODULE_VERSION(IPS_VER_STRING);
-
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 2
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -2
- * c-argdecl-indent: 2
- * c-label-offset: -2
- * c-continued-statement-offset: 2
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 6c0678fb9a67..65edf000e447 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -1211,23 +1211,3 @@ typedef struct {
IPS_COMPAT_TAMPA, \
IPS_COMPAT_KEYWEST \
}
-
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 2
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -2
- * c-argdecl-indent: 2
- * c-label-offset: -2
- * c-continued-statement-offset: 2
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 7b5deae68d33..35589b6af90d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -89,16 +89,14 @@
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
-/**
- *
- *
+/*
* The number of milliseconds to wait while a given phy is consuming power
* before allowing another set of phys to consume power. Ultimately, this will
* be specified by OEM parameter.
*/
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
-/**
+/*
* NORMALIZE_PUT_POINTER() -
*
* This macro will normalize the completion queue put pointer so its value can
@@ -108,7 +106,7 @@
((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
-/**
+/*
* NORMALIZE_EVENT_POINTER() -
*
* This macro will normalize the completion queue event entry so its value can
@@ -120,7 +118,7 @@
>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
)
-/**
+/*
* NORMALIZE_GET_POINTER() -
*
* This macro will normalize the completion queue get pointer so its value can
@@ -129,7 +127,7 @@
#define NORMALIZE_GET_POINTER(x) \
((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
-/**
+/*
* NORMALIZE_GET_POINTER_CYCLE_BIT() -
*
* This macro will normalize the completion queue cycle pointer so it matches
@@ -138,7 +136,7 @@
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
-/**
+/*
* COMPLETION_QUEUE_CYCLE_BIT() -
*
* This macro will return the cycle bit of the completion queue entry
@@ -415,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for io request object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -430,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for remote device object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -464,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
} else
dev_err(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received event 0x%x "
- "for remote device object 0x%0x that doesnt "
+ "for remote device object 0x%0x that doesn't "
"exist.\n",
__func__,
ihost,
@@ -637,7 +635,7 @@ irqreturn_t isci_error_isr(int vec, void *data)
/**
* isci_host_start_complete() - This function is called by the core library,
* through the ISCI Module, to indicate controller start status.
- * @isci_host: This parameter specifies the ISCI host object
+ * @ihost: This parameter specifies the ISCI host object
* @completion_status: This parameter specifies the completion status from the
* core library.
*
@@ -670,7 +668,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
* use any timeout value, but this method provides the suggested minimum
* start timeout value. The returned value is based upon empirical
* information determined as a result of interoperability testing.
- * @controller: the handle to the controller object for which to return the
+ * @ihost: the handle to the controller object for which to return the
* suggested start timeout.
*
* This method returns the number of milliseconds for the suggested start
@@ -893,7 +891,7 @@ bool is_controller_start_complete(struct isci_host *ihost)
/**
* sci_controller_start_next_phy - start phy
- * @scic: controller
+ * @ihost: controller
*
* If all the phys have been started, then attempt to transition the
* controller to the READY state and inform the user
@@ -1145,7 +1143,7 @@ void isci_host_completion_routine(unsigned long data)
* controller has been quiesced. This method will ensure that all IO
* requests are quiesced, phys are stopped, and all additional operation by
* the hardware is halted.
- * @controller: the handle to the controller object to stop.
+ * @ihost: the handle to the controller object to stop.
* @timeout: This parameter specifies the number of milliseconds in which the
* stop operation should complete.
*
@@ -1174,7 +1172,7 @@ static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
* considered destructive. In other words, all current operations are wiped
* out. No IO completions for outstanding devices occur. Outstanding IO
* requests are not aborted or completed at the actual remote device.
- * @controller: the handle to the controller object to reset.
+ * @ihost: the handle to the controller object to reset.
*
* Indicate if the controller reset method succeeded or failed in some way.
* SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
@@ -1331,7 +1329,7 @@ static inline void sci_controller_starting_state_exit(struct sci_base_state_mach
/**
* sci_controller_set_interrupt_coalescence() - This method allows the user to
* configure the interrupt coalescence.
- * @controller: This parameter represents the handle to the controller object
+ * @ihost: This parameter represents the handle to the controller object
* for which its interrupt coalesce register is overridden.
* @coalesce_number: Used to control the number of entries in the Completion
* Queue before an interrupt is generated. If the number of entries exceed
@@ -2479,12 +2477,13 @@ struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
}
/**
+ * sci_controller_allocate_remote_node_context()
* This method allocates a remote node index and reserves the remote node
* context space for use. This method can fail if there are no more remote
* node indices available.
- * @scic: This is the controller object which contains the set of
+ * @ihost: This is the controller object which contains the set of
* free remote node ids
- * @sci_dev: This is the device object which is requesting the a remote node
+ * @idev: This is the device object which is requesting a remote node
* id
* @node_id: This is the remote node id that is assigned to the device if one
* is available
@@ -2671,7 +2670,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
struct isci_request *ireq)
{
enum sci_status status;
- u16 index;
switch (ihost->sm.current_state_id) {
case SCIC_STOPPING:
@@ -2682,7 +2680,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- index = ISCI_TAG_TCI(ireq->io_tag);
clear_bit(IREQ_ACTIVE, &ireq->flags);
return SCI_SUCCESS;
default:
@@ -2711,11 +2708,11 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
/**
* sci_controller_start_task() - This method is called by the SCIC user to
* send/start a framework task management request.
- * @controller: the handle to the controller object for which to start the task
+ * @ihost: the handle to the controller object for which to start the task
* management request.
- * @remote_device: the handle to the remote device object for which to start
+ * @idev: the handle to the remote device object for which to start
* the task management request.
- * @task_request: the handle to the task request object to start.
+ * @ireq: the handle to the task request object to start.
*/
enum sci_status sci_controller_start_task(struct isci_host *ihost,
struct isci_remote_device *idev,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index b48aac8dfcb8..e294d5d961eb 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -142,17 +142,20 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-struct device_attribute *isci_host_attrs[] = {
- &dev_attr_isci_id,
+static struct attribute *isci_host_attrs[] = {
+ &dev_attr_isci_id.attr,
NULL
};
+ATTRIBUTE_GROUPS(isci_host);
+
static struct scsi_host_template isci_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.proc_name = DRV_NAME,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = isci_host_scan_finished,
@@ -166,12 +169,13 @@ static struct scsi_host_template isci_sht = {
.eh_abort_handler = sas_eh_abort_handler,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = isci_host_attrs,
+ .shost_groups = isci_host_groups,
.track_queue_depth = 1,
};
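
The shost_attrs to shost_groups conversion above is the generic attribute-group pattern: ATTRIBUTE_GROUPS() wraps the attribute array in a named group array that the midlayer registers wholesale. A condensed sketch with a hypothetical attribute:

	#include <linux/device.h>
	#include <linux/sysfs.h>
	#include <scsi/scsi_host.h>

	static ssize_t foo_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "0\n");	/* placeholder value */
	}
	static DEVICE_ATTR_RO(foo_id);

	static struct attribute *foo_host_attrs[] = {
		&dev_attr_foo_id.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo_host);		/* emits foo_host_groups[] */

	static struct scsi_host_template foo_sht = {
		.shost_groups = foo_host_groups,	/* was: .shost_attrs */
	};
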
@@ -189,7 +193,6 @@ static struct sas_domain_function_template isci_transport_ops = {
/* Task Management Functions. Must be called from process context. */
.lldd_abort_task = isci_task_abort_task,
.lldd_abort_task_set = isci_task_abort_task_set,
- .lldd_clear_aca = isci_task_clear_aca,
.lldd_clear_task_set = isci_task_clear_task_set,
.lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
.lldd_lu_reset = isci_task_lu_reset,
@@ -621,7 +624,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
pci_set_drvdata(pdev, pci_info);
- if (efi_enabled(EFI_RUNTIME_SERVICES))
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
orom = isci_get_efi_var(pdev);
if (!orom)
@@ -714,10 +717,6 @@ static int isci_suspend(struct device *dev)
isci_host_deinit(ihost);
}
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
@@ -725,19 +724,7 @@ static int isci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct isci_host *ihost;
- int rc, i;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- rc = pcim_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev,
- "enabling device failure after resume(%d)\n", rc);
- return rc;
- }
-
- pci_set_master(pdev);
+ int i;
for_each_isci_host(i, ihost, pdev) {
sas_prep_resume_ha(&ihost->sas_ha);
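
The PCI power-management calls deleted above became redundant once the driver switched to dev_pm_ops: the PCI core now saves and restores config space and sequences the D-state transitions around the driver callbacks. A sketch of the split in responsibilities, with hypothetical names and only the PM-relevant fields filled in:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)
	{
		/* Quiesce driver state only; no pci_save_state()/D3hot here. */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* Rebuild driver state only; the core restored config space. */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name = "foo",
		.driver.pm = &foo_pm_ops,
	};
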
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 680e30947671..4e6b1decbca7 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -500,19 +500,19 @@ struct sci_timer {
static inline
void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
{
- tmr->cancel = 0;
+ tmr->cancel = false;
timer_setup(&tmr->timer, fn, 0);
}
static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
{
- tmr->cancel = 0;
+ tmr->cancel = false;
mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
}
static inline void sci_del_timer(struct sci_timer *tmr)
{
- tmr->cancel = 1;
+ tmr->cancel = true;
del_timer(&tmr->timer);
}
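
The cancel flag above becomes a proper bool; around it, the timer_setup()/from_timer() pairing is what lets an expiry handler recover its containing object. A sketch of the same shape:

	#include <linux/timer.h>

	struct foo_timer {			/* mirrors sci_timer's layout */
		struct timer_list timer;
		bool cancel;
	};

	static void foo_timeout(struct timer_list *t)
	{
		struct foo_timer *ft = from_timer(ft, t, timer);

		if (ft->cancel)			/* bool, tested without == comparisons */
			return;
		/* ... handle the expiry ... */
	}

	static void foo_timer_init(struct foo_timer *ft)
	{
		ft->cancel = false;
		timer_setup(&ft->timer, foo_timeout, 0);
	}
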
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 7f9b3f20e5e4..aa8787343e83 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -339,10 +339,11 @@ done:
}
/**
- * This method returns the port currently containing this phy. If the phy is
- * currently contained by the dummy port, then the phy is considered to not
- * be part of a port.
- * @sci_phy: This parameter specifies the phy for which to retrieve the
+ * phy_get_non_dummy_port() - This method returns the port currently containing
+ * this phy. If the phy is currently contained by the dummy port, then the phy
+ * is considered to not be part of a port.
+ *
+ * @iphy: This parameter specifies the phy for which to retrieve the
* containing port.
*
* This method returns a handle to a port that contains the supplied phy.
@@ -360,12 +361,8 @@ struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
return iphy->owning_port;
}
-/**
- * This method will assign a port to the phy object.
- * @out]: iphy This parameter specifies the phy for which to assign a port
- * object.
- *
- *
+/*
+ * sci_phy_set_port() - This method will assign a port to the phy object.
*/
void sci_phy_set_port(
struct isci_phy *iphy,
@@ -398,11 +395,11 @@ enum sci_status sci_phy_initialize(struct isci_phy *iphy,
}
/**
- * This method assigns the direct attached device ID for this phy.
+ * sci_phy_setup_transport() - This method assigns the direct attached device ID for this phy.
*
- * @iphy The phy for which the direct attached device id is to
+ * @iphy: The phy for which the direct attached device id is to
* be assigned.
- * @device_id The direct attached device ID to assign to the phy.
+ * @device_id: The direct attached device ID to assign to the phy.
* This will either be the RNi for the device or an invalid RNi if there
* is no current device assigned to the phy.
*/
@@ -597,7 +594,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
/**
* sci_phy_complete_link_training - perform processing common to
* all protocols upon completion of link training.
- * @sci_phy: This parameter specifies the phy object for which link training
+ * @iphy: This parameter specifies the phy object for which link training
* has completed.
* @max_link_rate: This parameter specifies the maximum link rate to be
* associated with this phy.
@@ -669,7 +666,7 @@ static const char *phy_event_name(u32 event_code)
phy_state_name(state), phy_event_name(code), code)
-void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+static void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
{
u32 val;
@@ -753,7 +750,6 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
- break;
}
return SCI_SUCCESS;
case SCI_PHY_SUB_AWAIT_IAF_UF:
@@ -778,7 +774,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
case SCU_EVENT_LINK_FAILURE:
scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
- /* fall through */
+ fallthrough;
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
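
The fallthrough; statements above replace free-form "fall through" comments with the pseudo-keyword that -Wimplicit-fallthrough actually verifies. A minimal sketch with illustrative case values:

	#include <linux/compiler_attributes.h>

	static int classify(int event)
	{
		switch (event) {
		case 1:
			/* shared prologue for events 1 and 2 */
			fallthrough;	/* checked by the compiler, unlike a comment */
		case 2:
			return 0;
		default:
			return -1;
		}
	}
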
@@ -958,7 +954,6 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
- break;
}
return SCI_SUCCESS;
default:
@@ -1169,8 +1164,8 @@ static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine
}
/**
- *
- * @sci_phy: This is the struct isci_phy object to stop.
+ * scu_link_layer_stop_protocol_engine()
+ * @iphy: This is the struct isci_phy object to stop.
*
* This method will stop the struct isci_phy object. This does not reset the
* protocol engine it just suspends it and places it in a state where it will
@@ -1221,7 +1216,8 @@ static void scu_link_layer_start_oob(struct isci_phy *iphy)
}
/**
- *
+ * scu_link_layer_tx_hard_reset()
+ * @iphy: This is the struct isci_phy object to stop.
*
* This method will transmit a hard reset request on the specified phy. The SCU
* hardware requires that we reset the OOB state machine and set the hard reset
@@ -1422,7 +1418,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
/**
* isci_phy_control() - This function is one of the SAS Domain Template
* functions. This is a phy management function.
- * @phy: This parameter specifies the sphy being controlled.
+ * @sas_phy: This parameter specifies the sphy being controlled.
* @func: This parameter specifies the phy control function being invoked.
* @buf: This parameter is specific to the phy function being invoked.
*
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 45fecfa36a98..5aaf95b14b2e 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -447,7 +447,6 @@ void sci_phy_get_attached_sas_address(
struct isci_phy *iphy,
struct sci_sas_address *sas_address);
-struct sci_phy_proto;
void sci_phy_get_protocols(
struct isci_phy *iphy,
struct sci_phy_proto *protocols);
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 1df45f028ea7..1609aba1c9c1 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -62,7 +62,7 @@
#undef C
#define C(a) (#a)
-const char *port_state_name(enum sci_port_states state)
+static const char *port_state_name(enum sci_port_states state)
{
static const char * const strings[] = PORT_STATES;
@@ -115,9 +115,9 @@ static u32 sci_port_get_phys(struct isci_port *iport)
/**
* sci_port_get_properties() - This method simply returns the properties
* regarding the port, such as: physical index, protocols, sas address, etc.
- * @port: this parameter specifies the port for which to retrieve the physical
+ * @iport: this parameter specifies the port for which to retrieve the physical
* index.
- * @properties: This parameter specifies the properties structure into which to
+ * @prop: This parameter specifies the properties structure into which to
* copy the requested information.
*
* Indicate if the user specified a valid port. SCI_SUCCESS This value is
@@ -164,7 +164,8 @@ static void isci_port_bc_change_received(struct isci_host *ihost,
"%s: isci_phy = %p, sas_phy = %p\n",
__func__, iphy, &iphy->sas_phy);
- ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD, GFP_ATOMIC);
sci_port_bcn_enable(iport);
}
@@ -223,8 +224,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
/* Notify libsas that we have an address frame, if indeed
* we've found an SSP, SMP, or STP target */
if (success)
- isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
- PORTE_BYTES_DMAED);
+ sas_notify_port_event(&iphy->sas_phy,
+ PORTE_BYTES_DMAED, GFP_ATOMIC);
}
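
The conversions above replace the sas_ha notifier function pointers with the exported libsas helpers, which take an explicit gfp_t because these notifications fire under spinlocks in interrupt context. A sketch of a caller (the hook name is hypothetical; the libsas API is real):

	#include <scsi/libsas.h>

	static void foo_phy_dmaed(struct asd_sas_phy *sas_phy)
	{
		/*
		 * GFP_ATOMIC documents that this path cannot sleep; the old
		 * notify_port_event() callback had no way to express that.
		 */
		sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
	}
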
@@ -232,8 +233,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
* isci_port_link_down() - This function is called by the sci core when a link
* becomes inactive.
* @isci_host: This parameter specifies the isci host object.
- * @phy: This parameter specifies the isci phy with the active link.
- * @port: This parameter specifies the isci port with the active link.
+ * @isci_phy: This parameter specifies the isci phy with the active link.
+ * @isci_port: This parameter specifies the isci port with the active link.
*
*/
static void isci_port_link_down(struct isci_host *isci_host,
@@ -270,8 +271,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
* isci_port_deformed and isci_dev_gone functions.
*/
sas_phy_disconnected(&isci_phy->sas_phy);
- isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&isci_phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p - Done\n", __func__, isci_port);
@@ -307,7 +308,7 @@ static void port_state_machine_change(struct isci_port *iport,
/**
* isci_port_hard_reset_complete() - This function is called by the sci core
* when the hard reset complete notification has been received.
- * @port: This parameter specifies the sci port with the active link.
+ * @isci_port: This parameter specifies the sci port with the active link.
* @completion_status: This parameter specifies the core status for the reset
* process.
*
@@ -394,9 +395,10 @@ bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
}
/**
- *
- * @sci_port: This is the port object for which to determine if the phy mask
+ * sci_port_is_phy_mask_valid()
+ * @iport: This is the port object for which to determine if the phy mask
* can be supported.
+ * @phy_mask: Phy mask belonging to this port
*
* This method will return a true value if the port's phy mask can be supported
* by the SCU. The following is a list of valid PHY mask configurations for
@@ -532,7 +534,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
/**
* sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
*
- * @sci_port: logical port on which we need to create the remote node context
+ * @iport: logical port on which we need to create the remote node context
* @rni: remote node index for this remote node context.
*
* This routine will construct a dummy remote node context data structure
@@ -676,8 +678,8 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
/**
* sci_port_general_link_up_handler - phy can be assigned to port?
- * @sci_port: sci_port object for which has a phy that has gone link up.
- * @sci_phy: This is the struct isci_phy object that has gone link up.
+ * @iport: sci_port object which has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
* @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
*
* Determine if this phy can be assigned to this port . If the phy is
@@ -715,10 +717,11 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
/**
+ * sci_port_is_wide()
* This method returns false if the port only has a single phy object assigned.
* If there are no phys or more than one phy then the method will return
* true.
- * @sci_port: The port for which the wide port condition is to be checked.
+ * @iport: The port for which the wide port condition is to be checked.
*
* bool: true is returned if this is a wide ported port; false is returned if
* this is a narrow port.
@@ -738,12 +741,13 @@ static bool sci_port_is_wide(struct isci_port *iport)
}
/**
+ * sci_port_link_detected()
* This method is called by the PHY object when the link is detected. If the
* port wants the PHY to continue on to the link up state then the port
* layer must return true. If the port object returns false the phy object
* must halt its attempt to go link up.
- * @sci_port: The port associated with the phy object.
- * @sci_phy: The phy object that is trying to go link up.
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
*
* true if the phy object can continue to the link up condition. true is
* returned if this phy can continue to the ready state. false is returned if
@@ -816,10 +820,8 @@ done:
/* --------------------------------------------------------------------------- */
-/**
+/*
* This function updates the hardwares VIIT entry for this port.
- *
- *
*/
static void sci_port_update_viit_entry(struct isci_port *iport)
{
@@ -873,7 +875,7 @@ static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
/**
* sci_port_post_dummy_request() - post dummy/workaround request
- * @sci_port: port to post task
+ * @iport: port to post task
*
* Prevent the hardware scheduler from posting new requests to the front
* of the scheduler queue causing a starvation problem for currently
@@ -898,10 +900,11 @@ static void sci_port_post_dummy_request(struct isci_port *iport)
}
/**
- * This routine will abort the dummy request. This will alow the hardware to
+ * sci_port_abort_dummy_request()
+ * This routine will abort the dummy request. This will allow the hardware to
* power down parts of the silicon to save power.
*
- * @sci_port: The port on which the task must be aborted.
+ * @iport: The port on which the task must be aborted.
*
*/
static void sci_port_abort_dummy_request(struct isci_port *iport)
@@ -922,8 +925,8 @@ static void sci_port_abort_dummy_request(struct isci_port *iport)
}
/**
- *
- * @sci_port: This is the struct isci_port object to resume.
+ * sci_port_resume_port_task_scheduler()
+ * @iport: This is the struct isci_port object to resume.
*
* This method will resume the port task scheduler for this port object. none
*/
@@ -1013,8 +1016,8 @@ static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
}
/**
- *
- * @object: This is the object which is cast to a struct isci_port object.
+ * sci_port_ready_substate_operational_exit()
+ * @sm: This is the object which is cast to a struct isci_port object.
*
* This method will perform the actions required by the struct isci_port on
* exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
@@ -1185,9 +1188,9 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
}
/**
- * sci_port_add_phy() -
- * @sci_port: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * sci_port_add_phy()
+ * @iport: This parameter specifies the port in which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
*
* This method will add a PHY to the selected port. This method returns an
* enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
@@ -1256,9 +1259,9 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
}
/**
- * sci_port_remove_phy() -
- * @sci_port: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * sci_port_remove_phy()
+ * @iport: This parameter specifies the port in which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
*
* This method will remove the PHY from the selected PORT. This method returns
* an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index b1c197505579..c382a257b51b 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -73,7 +73,7 @@ enum SCIC_SDS_APC_ACTIVITY {
* ****************************************************************************** */
/**
- *
+ * sci_sas_address_compare()
* @address_one: A SAS Address to be compared.
* @address_two: A SAS Address to be compared.
*
@@ -102,9 +102,9 @@ static s32 sci_sas_address_compare(
}
/**
- *
- * @controller: The controller object used for the port search.
- * @phy: The phy object to match.
+ * sci_port_configuration_agent_find_port()
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
*
* This routine will find a matching port for the phy. This means that the
* port and phy both have the same broadcast sas address and same received sas
@@ -145,8 +145,8 @@ static struct isci_port *sci_port_configuration_agent_find_port(
}
/**
- *
- * @controller: This is the controller object that contains the port agent
+ * sci_port_configuration_agent_validate_ports()
+ * @ihost: This is the controller object that contains the port agent
* @port_agent: This is the port configuration agent for the controller.
*
* This routine will validate the port configuration is correct for the SCU
@@ -373,15 +373,16 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost,
}
/**
- *
- * @controller: This is the controller object that receives the link down
+ * sci_mpc_agent_link_down()
+ * @ihost: This is the controller object that receives the link down
* notification.
- * @port: This is the port object associated with the phy. If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
* associated port this is NULL. The port is an invalid
* handle only if the phy was never part of this port. This happens when
* the phy is not broadcasting the same SAS address as the other phys in the
* assigned port.
- * @phy: This is the phy object which has gone link down.
+ * @iphy: This is the phy object which has gone link down.
*
* This function handles the manual port configuration link down notifications.
* Since all ports and phys are associated at initialization time we just turn
@@ -590,11 +591,12 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
/**
* sci_apc_agent_link_up - handle apc link up events
- * @scic: This is the controller object that receives the link up
+ * @ihost: This is the controller object that receives the link up
* notification.
- * @sci_port: This is the port object associated with the phy. If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
* associated port this is NULL.
- * @sci_phy: This is the phy object which has gone link up.
+ * @iphy: This is the phy object which has gone link up.
*
* This method handles the automatic port configuration for link up
* notifications. Is it possible to get a link down notification from a phy
@@ -620,9 +622,10 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
}
/**
- *
- * @controller: This is the controller object that receives the link down
+ * sci_apc_agent_link_down()
+ * @ihost: This is the controller object that receives the link down
* notification.
+ * @port_agent: This is the port configuration agent for the controller.
* @iport: This is the port object associated with the phy. If there is no
* associated port this is NULL.
* @iphy: This is the phy object which has gone link down.
@@ -697,9 +700,7 @@ done:
* Public port configuration agent routines
* ****************************************************************************** */
-/**
- *
- *
+/*
* This method will construct the port configuration agent for operation. This
* call is universal for both manual port configuration and automatic port
* configuration modes.
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index cd1e4b4d95bb..866950a02965 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -288,8 +288,9 @@ enum sci_status isci_remote_device_terminate_requests(
* isci_remote_device_not_ready() - This function is called by the ihost when
* the remote device is not ready. We mark the isci device as ready (not
* "ready_for_io") and signal the waiting proccess.
-* @isci_host: This parameter specifies the isci host object.
-* @isci_device: This parameter specifies the remote device
+* @ihost: This parameter specifies the isci host object.
+* @idev: This parameter specifies the remote device
+* @reason: Reason to switch on
*
* sci_lock is held on entrance to this function.
*/
@@ -310,7 +311,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
/* Kill all outstanding requests for the device. */
sci_remote_device_terminate_requests(idev);
- /* Fall through - into the default case... */
+ fallthrough; /* into the default case */
default:
clear_bit(IDEV_IO_READY, &idev->flags);
break;
@@ -593,7 +594,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
break;
}
- /* fall through - and treat as unhandled... */
+ fallthrough; /* and treat as unhandled */
default:
dev_dbg(scirdev_to_dev(idev),
"%s: device: %p event code: %x: %s\n",
@@ -1000,7 +1001,7 @@ static void sci_remote_device_initial_state_enter(struct sci_base_state_machine
/**
* sci_remote_device_destruct() - free remote node context and destruct
- * @remote_device: This parameter specifies the remote device to be destructed.
+ * @idev: This parameter specifies the remote device to be destructed.
*
* Remote device objects are a limited resource. As such, they must be
* protected. Thus calls to construct and destruct are mutually exclusive and
@@ -1236,8 +1237,8 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
/**
* sci_remote_device_construct() - common construction
- * @sci_port: SAS/SATA port through which this device is accessed.
- * @sci_dev: remote device to construct
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
*
* This routine just performs benign initialization and does not
* allocate the remote_node_context which is left to
@@ -1256,7 +1257,7 @@ static void sci_remote_device_construct(struct isci_port *iport,
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}
-/**
+/*
* sci_remote_device_da_construct() - construct direct attached device.
*
* The information (e.g. IAF, Signature FIS, etc.) necessary to build
@@ -1294,7 +1295,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
return SCI_SUCCESS;
}
-/**
+/*
* sci_remote_device_ea_construct() - construct expander attached device
*
* Remote node context(s) is/are a global resource allocated by this
@@ -1384,7 +1385,7 @@ static bool isci_remote_device_test_resume_done(
return done;
}
-void isci_remote_device_wait_for_resume_from_abort(
+static void isci_remote_device_wait_for_resume_from_abort(
struct isci_host *ihost,
struct isci_remote_device *idev)
{
@@ -1439,7 +1440,7 @@ enum sci_status isci_remote_device_resume_from_abort(
* sci_remote_device_start() - This method will start the supplied remote
* device. This method enables normal IO requests to flow through to the
* remote device.
- * @remote_device: This parameter specifies the device to be started.
+ * @idev: This parameter specifies the device to be started.
* @timeout: This parameter specifies the number of milliseconds in which the
* start operation should complete.
*
@@ -1501,10 +1502,11 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
}
/**
+ * isci_remote_device_alloc()
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port connected to this device.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
*
* pointer to new isci_remote_device.
*/
@@ -1549,8 +1551,8 @@ void isci_remote_device_release(struct kref *kref)
/**
* isci_remote_device_stop() - This function is called internally to stop the
* remote device.
- * @isci_host: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
*
* The status of the ihost request to stop.
*/
@@ -1585,8 +1587,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
/**
* isci_remote_device_gone() - This function is called by libsas when a domain
* device is removed.
- * @domain_device: This parameter specifies the libsas domain device.
- *
+ * @dev: This parameter specifies the libsas domain device.
*/
void isci_remote_device_gone(struct domain_device *dev)
{
@@ -1606,7 +1607,7 @@ void isci_remote_device_gone(struct domain_device *dev)
* device is discovered. A remote device object is created and started. the
* function then sleeps until the sci core device started message is
* received.
- * @domain_device: This parameter specifies the libsas domain device.
+ * @dev: This parameter specifies the libsas domain device.
*
* status, zero indicates success.
*/
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 474a43460963..77ba0291134e 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -74,7 +74,7 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
#undef C
/**
- *
+ * sci_remote_node_context_is_ready()
* @sci_rnc: The state of the remote node context object to check.
*
* This method will return true if the remote node context is in a READY state
@@ -163,12 +163,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
rnc->ssp.oaf_source_zone_group = 0;
rnc->ssp.oaf_more_compatibility_features = 0;
}
-/**
- *
- * @sci_rnc:
- * @callback:
- * @callback_parameter:
- *
+/*
* This method will setup the remote node context object so it will transition
* to its ready state. If the remote node context is already setup to
* transition to its final state then this function does nothing. none
@@ -202,9 +197,7 @@ static void sci_remote_node_context_setup_to_destroy(
wake_up(&ihost->eventq);
}
-/**
- *
- *
+/*
* This method just calls the user callback function and then resets the
* callback.
*/
@@ -225,7 +218,7 @@ static void sci_remote_node_context_continue_state_transitions(struct sci_remote
case RNC_DEST_READY:
case RNC_DEST_SUSPENDED_RESUME:
rnc->destination_state = RNC_DEST_READY;
- /* Fall through... */
+ fallthrough;
case RNC_DEST_FINAL:
sci_remote_node_context_resume(rnc, rnc->user_callback,
rnc->user_cookie);
@@ -601,9 +594,9 @@ enum sci_status sci_remote_node_context_suspend(
__func__, sci_rnc);
return SCI_FAILURE_INVALID_STATE;
}
- /* Fall through - and handle like SCI_RNC_POSTING */
+ fallthrough; /* and handle like SCI_RNC_POSTING */
case SCI_RNC_RESUMING:
- /* Fall through - and handle like SCI_RNC_POSTING */
+ fallthrough; /* and handle like SCI_RNC_POSTING */
case SCI_RNC_POSTING:
/* Set the destination state to AWAIT - this signals the
* entry into the SCI_RNC_READY state that a suspension
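The fallthrough conversions above replace free-form /* Fall through */ comments with the fallthrough pseudo-keyword, which the compiler can verify. A minimal sketch of the resulting pattern, assuming a tree where <linux/compiler_attributes.h> defines fallthrough as __attribute__((__fallthrough__)):

	switch (state) {
	case SCI_RNC_RESUMING:
		fallthrough;	/* and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		post_rnc(rnc);	/* hypothetical helper, for illustration only */
		break;
	}

Unlike a comment, the attribute is checked by -Wimplicit-fallthrough builds, so a stale annotation cannot silently outlive the code it describes.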
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
index 301b3141945e..1bcaf528c1c9 100644
--- a/drivers/scsi/isci/remote_node_table.c
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -53,17 +53,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/**
+/*
* This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
* public, protected, and private methods.
- *
- *
*/
#include "remote_node_table.h"
#include "remote_node_context.h"
/**
- *
+ * sci_remote_node_table_get_group_index()
* @remote_node_table: This is the remote node index table from which the
* selection will be made.
* @group_table_index: This is the index to the group table from which to
@@ -98,10 +96,10 @@ static u32 sci_remote_node_table_get_group_index(
}
/**
- *
- * @out]: remote_node_table This the remote node table in which to clear the
+ * sci_remote_node_table_clear_group_index()
+ * @remote_node_table: This is the remote node table in which to clear the
* selector.
- * @set_index: This is the remote node selector in which the change will be
+ * @group_table_index: This is the remote node selector in which the change will be
* made.
* @group_index: This is the bit index in the table to be modified.
*
@@ -128,8 +126,8 @@ static void sci_remote_node_table_clear_group_index(
}
/**
- *
- * @out]: remote_node_table This the remote node table in which to set the
+ * sci_remote_node_table_set_group_index()
+ * @remote_node_table: This is the remote node table in which to set the
* selector.
* @group_table_index: This is the remote node selector in which the change
* will be made.
@@ -158,8 +156,8 @@ static void sci_remote_node_table_set_group_index(
}
/**
- *
- * @out]: remote_node_table This is the remote node table in which to modify
+ * sci_remote_node_table_set_node_index()
+ * @remote_node_table: This is the remote node table in which to modify
* the remote node availability.
* @remote_node_index: This is the remote node index that is being returned to
* the table.
@@ -191,8 +189,8 @@ static void sci_remote_node_table_set_node_index(
}
/**
- *
- * @out]: remote_node_table This is the remote node table from which to clear
+ * sci_remote_node_table_clear_node_index()
+ * @remote_node_table: This is the remote node table from which to clear
* the available remote node bit.
* @remote_node_index: This is the remote node index which is to be cleared
* from the table.
@@ -224,8 +222,8 @@ static void sci_remote_node_table_clear_node_index(
}
/**
- *
- * @out]: remote_node_table The remote node table from which the slot will be
+ * sci_remote_node_table_clear_group()
+ * @remote_node_table: The remote node table from which the slot will be
* cleared.
* @group_index: The index for the slot that is to be cleared.
*
@@ -252,9 +250,8 @@ static void sci_remote_node_table_clear_group(
remote_node_table->available_remote_nodes[dword_location] = dword_value;
}
-/**
- *
- * @remote_node_table:
+/*
+ * sci_remote_node_table_set_group()
*
 * This method sets an entire remote node group in the remote node table.
*/
@@ -280,7 +277,7 @@ static void sci_remote_node_table_set_group(
}
/**
- *
+ * sci_remote_node_table_get_group_value()
 * @remote_node_table: This is the remote node table for which the group
* value is to be returned.
* @group_index: This is the group index to use to find the group value.
@@ -307,8 +304,8 @@ static u8 sci_remote_node_table_get_group_value(
}
/**
- *
- * @out]: remote_node_table The remote that which is to be initialized.
+ * sci_remote_node_table_initialize()
+ * @remote_node_table: The remote node table which is to be initialized.
* @remote_node_entries: The number of entries to put in the table.
*
 * This method will initialize the remote node table for use.
@@ -365,10 +362,10 @@ void sci_remote_node_table_initialize(
}
/**
- *
- * @out]: remote_node_table The remote node table from which to allocate a
+ * sci_remote_node_table_allocate_single_remote_node()
+ * @remote_node_table: The remote node table from which to allocate a
* remote node.
- * @table_index: The group index that is to be used for the search.
+ * @group_table_index: The group index that is to be used for the search.
*
* This method will allocate a single RNi from the remote node table. The
* table index will determine from which remote node group table to search.
@@ -425,10 +422,10 @@ static u16 sci_remote_node_table_allocate_single_remote_node(
}
/**
- *
+ * sci_remote_node_table_allocate_triple_remote_node()
* @remote_node_table: This is the remote node table from which to allocate the
* remote node entries.
- * @group_table_index: THis is the group table index which must equal two (2)
+ * @group_table_index: This is the group table index which must equal two (2)
* for this operation.
*
* This method will allocate three consecutive remote node context entries. If
@@ -462,7 +459,7 @@ static u16 sci_remote_node_table_allocate_triple_remote_node(
}
/**
- *
+ * sci_remote_node_table_allocate_remote_node()
* @remote_node_table: This is the remote node table from which the remote node
* allocation is to take place.
 * @remote_node_count: This is the remote node count which is one of
@@ -505,9 +502,10 @@ u16 sci_remote_node_table_allocate_remote_node(
}
/**
- *
- * @remote_node_table:
- *
+ * sci_remote_node_table_release_single_remote_node()
+ * @remote_node_table: This is the remote node table from which the remote node
+ * release is to take place.
+ * @remote_node_index: This is the remote node index that is being released.
* This method will free a single remote node index back to the remote node
 * table. This routine will update the remote node groups.
*/
@@ -550,9 +548,10 @@ static void sci_remote_node_table_release_single_remote_node(
}
/**
- *
+ * sci_remote_node_table_release_triple_remote_node()
* @remote_node_table: This is the remote node table to which the remote node
* index is to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
*
* This method will release a group of three consecutive remote nodes back to
* the free remote nodes.
@@ -573,11 +572,12 @@ static void sci_remote_node_table_release_triple_remote_node(
}
/**
- *
+ * sci_remote_node_table_release_remote_node_index()
* @remote_node_table: The remote node table to which the remote node index is
* to be freed.
* @remote_node_count: This is the count of consecutive remote nodes that are
* to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
*
* This method will release the remote node index back into the remote node
* table free pool.
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
index 721ab982d2ac..0ddfdda2b248 100644
--- a/drivers/scsi/isci/remote_node_table.h
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -61,7 +61,7 @@
/**
*
*
- * Remote node sets are sets of remote node index in the remtoe node table The
+ * Remote node sets are sets of remote node index in the remote node table. The
* SCU hardware requires that STP remote node entries take three consecutive
* remote node index so the table is arranged in sets of three. The bits are
* used as 0111 0111 to make a byte and the bits define the set of three remote
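The 0111 0111 layout described above can be made concrete: each nibble tracks one set of three consecutive remote node indices, the top bit of each nibble is unused, and a fully free byte therefore reads 0x77. A hypothetical helper, for illustration only:

	#define RNC_SET_MASK	0x7	/* 0b0111: one set of three RNis */

	/* set_in_byte is 0 or 1; true when all three RNis in the set are free */
	static inline bool rnc_set_is_free(u8 byte, int set_in_byte)
	{
		return ((byte >> (set_in_byte * 4)) & RNC_SET_MASK) == RNC_SET_MASK;
	}

Allocating a single remote node clears one of the three low bits in its nibble; an STP device consumes the whole set of three at once.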
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 343d24c7e788..6370cdbfba08 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -207,11 +207,8 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
SCI_CONTROLLER_INVALID_IO_TAG;
}
-/**
+/*
 * This method will fill in the SCU Task Context for any type of SSP request.
- * @sci_req:
- * @task_context:
- *
*/
static void scu_ssp_request_construct_task_context(
struct isci_request *ireq,
@@ -344,7 +341,7 @@ static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
tc->reserved_E8_0 = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
- tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+ tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_gen = 0;
}
@@ -372,7 +369,7 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
tc->app_tag_gen = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
- tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+ tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_verify = 0;
@@ -410,10 +407,8 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
tc->ref_tag_seed_gen = 0;
}
-/**
+/*
 * This method will fill in the SCU Task Context for an SSP IO request.
- * @sci_req:
- *
*/
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
enum dma_data_direction dir,
@@ -456,17 +451,16 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
}
/**
- * This method will fill in the SCU Task Context for a SSP Task request. The
- * following important settings are utilized: -# priority ==
- * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
- * ahead of other task destined for the same Remote Node. -# task_type ==
- * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
- * (i.e. non-raw frame) is being utilized to perform task management. -#
- * control_frame == 1. This ensures that the proper endianess is set so
- * that the bytes are transmitted in the right order for a task frame.
- * @sci_req: This parameter specifies the task request object being
- * constructed.
- *
+ * scu_ssp_task_request_construct_task_context() - This method will fill in
+ * the SCU Task Context for a SSP Task request. The following important
+ * settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH. This
+ * ensures that the task request is issued ahead of other tasks destined
+ * for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD. This
+ * simply indicates that a normal request type (i.e. non-raw frame) is
+ * being utilized to perform task management. -# control_frame == 1. This
+ * ensures that the proper endianness is set so that the bytes are
+ * transmitted in the right order for a task frame.
+ * @ireq: This parameter specifies the task request object being constructed.
*/
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
@@ -484,9 +478,10 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
}
/**
+ * scu_sata_request_construct_task_context()
 * This method will fill in the SCU Task Context for any type of SATA
* request. This is called from the various SATA constructors.
- * @sci_req: The general IO request object which is to be used in
+ * @ireq: The general IO request object which is to be used in
* constructing the SCU task context.
* @task_context: The buffer pointer for the SCU task context which is being
* constructed.
@@ -593,9 +588,9 @@ static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
return SCI_SUCCESS;
}
-/**
- *
- * @sci_req: This parameter specifies the request to be constructed as an
+/*
+ * sci_stp_optimized_request_construct()
+ * @ireq: This parameter specifies the request to be constructed as an
* optimized request.
* @optimized_task_type: This parameter specifies whether the request is to be
 * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
@@ -778,11 +773,11 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
return status;
}
+#define SCU_TASK_CONTEXT_SRAM 0x200000
/**
* sci_req_tx_bytes - bytes transferred when reply underruns request
* @ireq: request that was terminated early
*/
-#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
struct isci_host *ihost = ireq->owning_controller;
@@ -894,7 +889,7 @@ sci_io_request_terminate(struct isci_request *ireq)
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- /* Fall through - and handle like ABORTING... */
+ fallthrough; /* and handle like ABORTING */
case SCI_REQ_ABORTING:
if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
set_bit(IREQ_PENDING_ABORT, &ireq->flags);
@@ -1052,7 +1047,8 @@ request_started_state_tc_event(struct isci_request *ireq,
resp_iu = &ireq->ssp.rsp;
datapres = resp_iu->datapres;
- if (datapres == 1 || datapres == 2) {
+ if (datapres == SAS_DATAPRES_RESPONSE_DATA ||
+ datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
@@ -1396,10 +1392,10 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
}
/**
- *
- * @stp_request: The request that is used for the SGL processing.
- * @data_buffer: The buffer of data to be copied.
- * @length: The length of the data transfer.
+ * sci_stp_request_pio_data_in_copy_data_buffer()
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
*
* Copy the data from the buffer for the length specified to the IO request SGL
 * specified data region.
@@ -1443,8 +1439,8 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
}
/**
- *
- * @sci_req: The PIO DATA IN request that is to receive the data.
+ * sci_stp_request_pio_data_in_copy_data()
+ * @stp_req: The PIO DATA IN request that is to receive the data.
* @data_buffer: The buffer to copy from.
*
 * Copy the data buffer to the io request data region.
@@ -1480,8 +1476,6 @@ static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
u32 completion_code)
{
- enum sci_status status = SCI_SUCCESS;
-
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
@@ -1500,7 +1494,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
break;
}
- return status;
+ return SCI_SUCCESS;
}
static enum sci_status
@@ -1737,8 +1731,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
resp_iu = &ireq->ssp.rsp;
- if (resp_iu->datapres == 0x01 ||
- resp_iu->datapres == 0x02) {
+ if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA ||
+ resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
} else {
@@ -2103,8 +2097,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{
- enum sci_status status = SCI_SUCCESS;
-
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
@@ -2148,14 +2140,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
break;
}
- return status;
+ return SCI_SUCCESS;
}
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
enum sci_base_request_states next)
{
- enum sci_status status = SCI_SUCCESS;
-
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
@@ -2174,7 +2164,7 @@ static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 compl
break;
}
- return status;
+ return SCI_SUCCESS;
}
static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
@@ -2192,7 +2182,7 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
u16 len = sci_req_tx_bytes(ireq);
- /* likely non-error data underrrun, workaround missing
+ /* likely non-error data underrun, workaround missing
* d2h frame from the controller
*/
if (d2h->fis_type != FIS_REGD2H) {
@@ -2458,7 +2448,7 @@ sci_io_request_tc_completion(struct isci_request *ireq,
* isci_request_process_response_iu() - This function sets the status and
* response iu, in the task struct, from the request object for the upper
* layer driver.
- * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
* @resp_iu: This parameter points to the response iu of the completed request.
* @dev: This parameter specifies the linux device struct.
*
@@ -2491,6 +2481,7 @@ static void isci_request_process_response_iu(
* isci_request_set_open_reject_status() - This function prepares the I/O
* completion for OPEN_REJECT conditions.
* @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
* @open_rej_reason: This parameter specifies the encoded reason for the
@@ -2515,7 +2506,9 @@ static void isci_request_set_open_reject_status(
/**
* isci_request_handle_controller_specific_errors() - This function decodes
* controller-specific I/O completion error conditions.
+ * @idev: Remote device
* @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
*
@@ -2574,7 +2567,7 @@ static void isci_request_handle_controller_specific_errors(
if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
- *status_ptr = SAM_STAT_TASK_ABORTED;
+ *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
}
@@ -2704,7 +2697,7 @@ static void isci_request_handle_controller_specific_errors(
default:
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
- *status_ptr = SAM_STAT_TASK_ABORTED;
+ *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
if (task->task_proto == SAS_PROTOCOL_SMP)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
@@ -2727,7 +2720,7 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
if (ac_err_mask(fis->status))
ts->stat = SAS_PROTO_RESPONSE;
else
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
ts->resp = SAS_TASK_COMPLETE;
}
@@ -2790,7 +2783,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
case SCI_IO_SUCCESS_IO_DONE_EARLY:
response = SAS_TASK_COMPLETE;
- status = SAM_STAT_GOOD;
+ status = SAS_SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
@@ -2860,7 +2853,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
/* Fail the I/O. */
response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
+ status = SAS_SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
break;
@@ -2942,8 +2935,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
/* Normal notification (task_done) */
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
}
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
@@ -3332,7 +3324,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq)
* @ihost: This parameter specifies the ISCI host object
* @request: This parameter points to the isci_request object allocated in the
* request construct function.
- * @sci_device: This parameter is the handle for the sci core's remote device
+ * @idev: This parameter is the handle for the sci core's remote device
* object that is the destination for this request.
*
 * SCI_SUCCESS on successful completion, or specific failure code.
@@ -3414,9 +3406,9 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
return ireq;
}
-static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
- struct sas_task *task,
- u16 tag)
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
{
struct isci_request *ireq;
@@ -3442,16 +3434,12 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
}
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag)
+ struct sas_task *task, struct isci_request *ireq)
{
- enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
- struct isci_request *ireq;
+ enum sci_status status;
unsigned long flags;
int ret = 0;
- /* do common allocation and init of request object. */
- ireq = isci_io_request_from_tag(ihost, task, tag);
-
status = isci_io_request_build(ihost, ireq, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
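With this change isci_io_request_from_tag() loses its static qualifier and request allocation moves out of isci_request_execute(); callers allocate first and pass the request in. The resulting sequence, sketched from the task.c hunk later in this patch:

	struct isci_request *ireq;

	/* do common allocation and init of the request object */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	/* build and send the request */
	status = isci_request_execute(ihost, idev, task, ireq);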
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index aff95317fcf4..20b141739e4d 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -291,7 +291,10 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
struct isci_tmf *isci_tmf,
u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag);
+ struct sas_task *task, struct isci_request *ireq);
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
index dc26b4aea99e..15d8f3631ab7 100644
--- a/drivers/scsi/isci/sas.h
+++ b/drivers/scsi/isci/sas.h
@@ -201,7 +201,7 @@ struct smp_req {
u8 func; /* byte 1 */
u8 alloc_resp_len; /* byte 2 */
u8 req_len; /* byte 3 */
- u8 req_data[0];
+ u8 req_data[];
} __packed;
/*
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 26fa1a4d1e6b..c514b20293b2 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -91,8 +91,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
/* Normal notification (task_done) */
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->lldd_task = NULL;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -160,20 +159,19 @@ int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
isci_task_refuse(ihost, task,
SAS_TASK_UNDELIVERED,
- SAM_STAT_TASK_ABORTED);
+ SAS_SAM_STAT_TASK_ABORTED);
} else {
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ struct isci_request *ireq;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* build and send the request. */
- status = isci_request_execute(ihost, idev, task, tag);
+ status = isci_request_execute(ihost, idev, task, ireq);
if (status != SCI_SUCCESS) {
- spin_lock_irqsave(&task->task_state_lock, flags);
- /* Did not really start this command. */
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
if (test_bit(IDEV_GONE, &idev->flags)) {
/* Indicate that the device
* is gone.
@@ -369,7 +367,7 @@ static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
tmf->io_tag = old_request->io_tag;
}
-/**
+/*
 * isci_task_send_lu_reset_sas() - This function is called by the SAS Domain
* Template functions.
* @lun: This parameter specifies the lun to be reset.
@@ -498,7 +496,6 @@ int isci_task_abort_task(struct sas_task *task)
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
- (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
old_request) {
idev = isci_get_device(task->dev->lldd_dev);
target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
@@ -532,8 +529,7 @@ int isci_task_abort_task(struct sas_task *task)
*/
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
ret = TMF_RESP_FUNC_COMPLETE;
@@ -581,8 +577,7 @@ int isci_task_abort_task(struct sas_task *task)
test_bit(IDEV_GONE, &idev->flags));
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -626,24 +621,6 @@ int isci_task_abort_task_set(
/**
- * isci_task_clear_aca() - This function is one of the SAS Domain Template
- * functions. This is one of the Task Management functoins called by libsas.
- * @d_device: This parameter specifies the domain device associated with this
- * request.
- * @lun: This parameter specifies the lun associated with this request.
- *
- * status, zero indicates success.
- */
-int isci_task_clear_aca(
- struct domain_device *d_device,
- u8 *lun)
-{
- return TMF_RESP_FUNC_FAILED;
-}
-
-
-
-/**
* isci_task_clear_task_set() - This function is one of the SAS Domain Template
 * functions. This is one of the Task Management functions called by libsas.
* @d_device: This parameter specifies the domain device associated with this
@@ -668,7 +645,6 @@ int isci_task_clear_task_set(
* returned, libsas turns this into a LUN reset; when FUNC_FAILED is
* returned, libsas will turn this into a target reset
* @task: This parameter specifies the sas task being queried.
- * @lun: This parameter specifies the lun associated with this request.
*
* status, zero indicates success.
*/
@@ -710,8 +686,8 @@ isci_task_request_complete(struct isci_host *ihost,
tmf->status = completion_status;
if (tmf->proto == SAS_PROTOCOL_SSP) {
- memcpy(&tmf->resp.resp_iu,
- &ireq->ssp.rsp,
+ memcpy(tmf->resp.rsp_buf,
+ ireq->ssp.rsp_buf,
SSP_RESP_IU_MAX_SIZE);
} else if (tmf->proto == SAS_PROTOCOL_SATA) {
memcpy(&tmf->resp.d2h_fis,
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 8f4531f22ac2..f96633fa6939 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -140,10 +140,6 @@ int isci_task_abort_task_set(
struct domain_device *d_device,
u8 *lun);
-int isci_task_clear_aca(
- struct domain_device *d_device,
- u8 *lun);
-
int isci_task_clear_task_set(
struct domain_device *d_device,
u8 *lun);
@@ -182,8 +178,4 @@ void *isci_task_ssp_request_get_response_data_address(
u32 isci_task_ssp_request_get_response_data_length(
struct isci_request *request);
-int isci_queuecommand(
- struct scsi_cmnd *scsi_cmd,
- void (*donefunc)(struct scsi_cmnd *));
-
#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index e4857b728033..a64abe38db2d 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
boot_kobj->kobj.kset = boot_kset->kset;
if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
NULL, name, index)) {
- kfree(boot_kobj);
+ kobject_put(&boot_kobj->kobj);
return NULL;
}
boot_kobj->data = data;
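Once kobject_init_and_add() has run, the kobject holds a reference even if the add step failed, so the error path must drop it with kobject_put(), which ends up calling the ktype's release() to free the containing object. Freeing with kfree() directly, as the old code did, bypasses release() and the reference accounting. The general pattern:

	if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
				 NULL, name, index)) {
		/* drop the init reference; ->release() frees boot_kobj */
		kobject_put(&boot_kobj->kobj);
		return NULL;
	}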
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b5dd1caae5e9..5fb1f364e815 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static bool iscsi_recv_from_iscsi_q;
+module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
+MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false, which performs reads from the network softirq context.");
+
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
S_IRUGO | S_IWUSR);
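Because the parameter is created with mode 0644 it can be set at load time, e.g.

	modprobe iscsi_tcp recv_from_iscsi_q=1

or flipped later through /sys/module/iscsi_tcp/parameters/recv_from_iscsi_q. Note that the conn_create hunk below samples the value into tcp_sw_conn->queue_recv when a connection is created, so existing connections keep whichever mode they started with.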
@@ -122,20 +126,13 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk)
+static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
- struct iscsi_conn *conn;
- struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
read_descriptor_t rd_desc;
- read_lock_bh(&sk->sk_callback_lock);
- conn = sk->sk_user_data;
- if (!conn) {
- read_unlock_bh(&sk->sk_callback_lock);
- return;
- }
- tcp_conn = conn->dd_data;
-
/*
* Use rd_desc to pass 'conn' to iscsi_tcp_recv.
* We set count to 1 because we want the network layer to
@@ -144,13 +141,48 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
- iscsi_sw_sk_state_check(sk);
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+
+ iscsi_sw_sk_state_check(sk);
+}
+
+static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
+ recvwork);
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ lock_sock(sk);
+ iscsi_sw_tcp_recv_data(conn);
+ release_sock(sk);
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_conn *conn;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ if (tcp_sw_conn->queue_recv)
+ iscsi_conn_queue_recv(conn);
+ else
+ iscsi_sw_tcp_recv_data(conn);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -180,7 +212,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
}
/**
- * iscsi_write_space - Called when more output buffer space is available
+ * iscsi_sw_tcp_write_space - Called when more output buffer space is available
* @sk: socket space is available for
**/
static void iscsi_sw_tcp_write_space(struct sock *sk)
@@ -205,7 +237,7 @@ static void iscsi_sw_tcp_write_space(struct sock *sk)
old_write_space(sk);
ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
@@ -274,7 +306,10 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
copy = segment->size - offset;
if (segment->total_copied + segment->size < segment->total_size)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+
+ if (tcp_sw_conn->queue_recv)
+ flags |= MSG_DONTWAIT;
/* Use sendpage if we can; else fall back to sendmsg */
if (!segment->data) {
@@ -353,7 +388,7 @@ error:
}
/**
- * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
* @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
@@ -557,6 +592,10 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
+ INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
+ tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
+
+ mutex_init(&tcp_sw_conn->sock_lock);
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
@@ -592,21 +631,33 @@ free_conn:
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct socket *sock = tcp_sw_conn->sock;
+ /*
+ * The iscsi transport class will make sure we are not called in
+ * parallel with start, stop, bind and destroys. However, this can be
+ * called twice if userspace does a stop then a destroy.
+ */
if (!sock)
return;
+ /*
+ * Make sure we start socket shutdown now in case userspace is up
+ * but delayed in releasing the socket.
+ */
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+
sock_hold(sock->sk);
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
- spin_lock_bh(&session->frwd_lock);
+ iscsi_suspend_rx(conn);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
sockfd_put(sock);
}
@@ -658,7 +709,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -678,10 +728,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -689,6 +739,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
sk_set_memalloc(sk);
+ sock_no_linger(sk);
iscsi_sw_tcp_conn_set_callbacks(conn);
tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
@@ -717,8 +768,15 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ if (!tcp_sw_conn->sock) {
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ return -ENOTCONN;
+ }
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ mutex_unlock(&tcp_sw_conn->sock_lock);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -733,9 +791,10 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
struct sockaddr_in6 addr;
+ struct socket *sock;
int rc;
switch(param) {
@@ -743,17 +802,36 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_LOCAL_PORT:
spin_lock_bh(&conn->session->frwd_lock);
- if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ if (!conn->session->leadconn) {
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
+ /*
+ * The conn has been setup and bound, so just grab a ref
+	 * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
+ spin_unlock_bh(&conn->session->frwd_lock);
+
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock) {
+ rc = -ENOTCONN;
+ goto sock_unlock;
+ }
+
if (param == ISCSI_PARAM_LOCAL_PORT)
- rc = kernel_getsockname(tcp_sw_conn->sock,
+ rc = kernel_getsockname(sock,
(struct sockaddr *)&addr);
else
- rc = kernel_getpeername(tcp_sw_conn->sock,
+ rc = kernel_getpeername(sock,
(struct sockaddr *)&addr);
- spin_unlock_bh(&conn->session->frwd_lock);
+sock_unlock:
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -775,6 +853,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
struct sockaddr_in6 addr;
+ struct socket *sock;
int rc;
switch (param) {
@@ -789,16 +868,22 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
return -ENOTCONN;
}
tcp_conn = conn->dd_data;
-
tcp_sw_conn = tcp_conn->dd_data;
- if (!tcp_sw_conn->sock) {
- spin_unlock_bh(&session->frwd_lock);
- return -ENOTCONN;
- }
-
- rc = kernel_getsockname(tcp_sw_conn->sock,
- (struct sockaddr *)&addr);
+ /*
+ * The conn has been setup and bound, so just grab a ref
+	 * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock)
+ rc = -ENOTCONN;
+ else
+ rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -839,6 +924,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
struct iscsi_session *session;
struct iscsi_sw_tcp_host *tcp_sw_host;
struct Scsi_Host *shost;
+ int rc;
if (ep) {
printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
@@ -856,6 +942,11 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
shost->max_channel = 0;
shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+ rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
+ if (rc < 0)
+ goto free_host;
+ shost->can_queue = rc;
+
if (iscsi_host_add(shost, NULL))
goto free_host;
@@ -870,7 +961,6 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
tcp_sw_host = iscsi_host_priv(shost);
tcp_sw_host->session = session;
- shost->can_queue = session->scsi_cmds_max;
if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session;
return cls_session;
@@ -878,7 +968,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
remove_session:
iscsi_session_teardown(cls_session);
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
@@ -895,7 +985,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -962,8 +1052,8 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
struct iscsi_conn *conn = session->leadconn;
if (conn->datadgst_en)
- sdev->request_queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ sdev->request_queue);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
@@ -973,7 +1063,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .can_queue = ISCSI_TOTAL_CMDS_MAX,
.sg_tablesize = 4096,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
@@ -983,10 +1073,10 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
.slave_configure = iscsi_sw_tcp_slave_configure,
- .target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_sw_tcp_transport = {
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 791453195099..68e14a344904 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,11 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ /* Taken when accessing the sock from the netlink/sysfs interface */
+ struct mutex sock_lock;
+
+ struct work_struct recvwork;
+ bool queue_recv;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
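The new sock_lock lets the sysfs/netlink getters use the socket without racing iscsi_sw_tcp_release_conn(), which clears ->sock under the same mutex. The access pattern used throughout the hunks above, sketched:

	struct socket *sock;

	mutex_lock(&tcp_sw_conn->sock_lock);
	sock = tcp_sw_conn->sock;
	if (!sock) {
		mutex_unlock(&tcp_sw_conn->sock_lock);
		return -ENOTCONN;
	}
	/* ->sock cannot be cleared while the mutex is held */
	rc = kernel_getpeername(sock, (struct sockaddr *)&addr);
	mutex_unlock(&tcp_sw_conn->sock_lock);

A mutex, unlike the frwd_lock spinlock used before, is also safe around network-layer calls that may sleep.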
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 7f683e42c798..60a88a95a8e2 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
if (!esp->command_block)
goto fail_unmap_regs;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = err = platform_get_irq(dev, 0);
+ if (err < 0)
+ goto fail_unmap_command_block;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
if (err < 0)
goto fail_unmap_command_block;
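platform_get_irq() returns either a valid IRQ number or a negative errno (including -EPROBE_DEFER), so the result must be checked before it reaches request_irq(); the old code registered whatever came back. The corrected idiom, in general form:

	int irq = platform_get_irq(dev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */
	err = request_irq(irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);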
@@ -201,21 +203,9 @@ static struct platform_driver esp_jazz_driver = {
.name = "jazz_esp",
},
};
-
-static int __init jazz_esp_init(void)
-{
- return platform_driver_register(&esp_jazz_driver);
-}
-
-static void __exit jazz_esp_exit(void)
-{
- platform_driver_unregister(&esp_jazz_driver);
-}
+module_platform_driver(esp_jazz_driver);
MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(jazz_esp_init);
-module_exit(jazz_esp_exit);
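module_platform_driver() generates the init/exit pair itself; simplified, the macro expands approximately to:

	static int __init esp_jazz_driver_init(void)
	{
		return platform_driver_register(&esp_jazz_driver);
	}
	module_init(esp_jazz_driver_init);

	static void __exit esp_jazz_driver_exit(void)
	{
		platform_driver_unregister(&esp_jazz_driver);
	}
	module_exit(esp_jazz_driver_exit);

which is behaviorally identical to the removed jazz_esp_init()/jazz_esp_exit() pair.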
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index c48a73a0f517..86fe19e0468d 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* PARISC LASI driver for the 53c700 chip
*
@@ -31,7 +30,6 @@
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
@@ -136,7 +134,7 @@ lasi700_probe(struct parisc_device *dev)
return -ENODEV;
}
-static int __exit
+static void __exit
lasi700_driver_remove(struct parisc_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
@@ -148,8 +146,6 @@ lasi700_driver_remove(struct parisc_device *dev)
free_irq(host->irq, host);
iounmap(hostdata->base);
kfree(hostdata);
-
- return 0;
}
static struct parisc_driver lasi700_driver __refdata = {
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 2b865c6423e2..942fc60f7c21 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -301,8 +301,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
struct fc_lport *lport = fc_disc_lport(disc);
unsigned long delay = 0;
- FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
- PTR_ERR(fp), disc->retry_count,
+ FC_DISC_DBG(disc, "Error %d, retries %d/%d\n",
+ PTR_ERR_OR_ZERO(fp), disc->retry_count,
FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
@@ -337,7 +337,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
/**
* fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
- * @lport: The discovery context
+ * @disc: The discovery context
*/
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
@@ -370,7 +370,7 @@ err:
/**
* fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
- * @lport: The local port the GPN_FT was received on
+ * @disc: The discovery context
* @buf: The GPN_FT response buffer
* @len: The size of response buffer
*
@@ -488,7 +488,7 @@ static void fc_disc_timeout(struct work_struct *work)
* fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
* @sp: The sequence that the GPN_FT response was received on
* @fp: The GPN_FT response frame
- * @lp_arg: The discovery context
+ * @disc_arg: The discovery context
*
* Locking Note: This function is called without disc mutex held, and
* should do all its processing with the mutex held
@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
- if (IS_ERR(fp))
- goto redisc;
+ if (IS_ERR(fp)) {
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ goto out;
+ }
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp)
@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
new_rdata->disc_id = disc->disc_id;
fc_rport_login(new_rdata);
}
- goto out;
+ goto free_fp;
}
rdata->disc_id = disc->disc_id;
mutex_unlock(&rdata->rp_mutex);
@@ -626,10 +630,10 @@ redisc:
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
+free_fp:
+ fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
- if (!IS_ERR(fp))
- fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 13a2e7c33cb1..8d3006edbe12 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -15,7 +15,7 @@
#include <scsi/fc/fc_ns.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
/**
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
new file mode 100644
index 000000000000..7dcac3b6baa7
--- /dev/null
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -0,0 +1,951 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ENCODE_H_
+#define _FC_ENCODE_H_
+#include <asm/unaligned.h>
+#include <linux/utsname.h>
+#include <scsi/fc/fc_ms.h>
+
+/*
+ * F_CTL values for simple requests and responses.
+ */
+#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
+#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT)
+
+struct fc_ns_rft {
+ struct fc_ns_fid fid; /* port ID object */
+ struct fc_ns_fts fts; /* FC4-types object */
+};
+
+struct fc_ct_req {
+ struct fc_ct_hdr hdr;
+ union {
+ struct fc_ns_gid_ft gid;
+ struct fc_ns_rn_id rn;
+ struct fc_ns_rft rft;
+ struct fc_ns_rff_id rff;
+ struct fc_ns_fid fid;
+ struct fc_ns_rsnn snn;
+ struct fc_ns_rspn spn;
+ struct fc_fdmi_rhba rhba;
+ struct fc_fdmi_rpa rpa;
+ struct fc_fdmi_dprt dprt;
+ struct fc_fdmi_dhba dhba;
+ } payload;
+};
+
+/**
+ * fc_adisc_fill() - Fill in adisc request frame
+ * @lport: local port.
+ * @fp: fc frame where payload will be placed.
+ */
+static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
+ put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
+ hton24(adisc->adisc_port_id, lport->port_id);
+}
+
+/**
+ * fc_ct_hdr_fill() - fill the CT header and reset the CT payload
+ * Returns a pointer to the CT request.
+ */
+static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
+ unsigned int op, size_t req_size,
+ enum fc_ct_fs_type fs_type,
+ u8 subtype)
+{
+ struct fc_ct_req *ct;
+ size_t ct_plen;
+
+ ct_plen = sizeof(struct fc_ct_hdr) + req_size;
+ ct = fc_frame_payload_get(fp, ct_plen);
+ memset(ct, 0, ct_plen);
+ ct->hdr.ct_rev = FC_CT_REV;
+ ct->hdr.ct_fs_type = fs_type;
+ ct->hdr.ct_fs_subtype = subtype;
+ ct->hdr.ct_cmd = htons((u16) op);
+ return ct;
+}
+
+/**
+ * fc_ct_ns_fill() - Fill in a name service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_ns_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type)
+{
+ struct fc_ct_req *ct;
+ size_t len;
+
+ switch (op) {
+ case FC_NS_GPN_FT:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+ break;
+
+ case FC_NS_GPN_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+ hton24(ct->payload.fid.fp_fid, fc_id);
+ break;
+
+ case FC_NS_RFT_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
+ ct->payload.rft.fts = lport->fcts;
+ break;
+
+ case FC_NS_RFF_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
+ ct->payload.rff.fr_type = FC_TYPE_FCP;
+ if (lport->service_params & FCP_SPPF_INIT_FCN)
+ ct->payload.rff.fr_feat = FCP_FEAT_INIT;
+ if (lport->service_params & FCP_SPPF_TARG_FCN)
+ ct->payload.rff.fr_feat |= FCP_FEAT_TARG;
+ break;
+
+ case FC_NS_RNN_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
+ put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
+ break;
+
+ case FC_NS_RSPN_ID:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
+ strncpy(ct->payload.spn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
+ ct->payload.spn.fr_name_len = len;
+ break;
+
+ case FC_NS_RSNN_NN:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
+ strncpy(ct->payload.snn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
+ ct->payload.snn.fr_name_len = len;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ *r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ *fh_type = FC_TYPE_CT;
+ return 0;
+}
+
+static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
+ const char *in, size_t len)
+{
+ int copied;
+
+ copied = strscpy(entry->value, in, len);
+ if (copied > 0 && copied + 1 < len)
+ memset(entry->value + copied + 1, 0, len - copied - 1);
+}
+
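fc_ct_ms_fill_attr() relies on strscpy() returning the number of bytes copied (excluding the terminating NUL) or -E2BIG on truncation, then zeroes the rest of the slot so that fixed-width FDMI attribute fields never carry uninitialized padding. A hypothetical call against one of the attribute lengths used below:

	/* short value: the remainder of the fixed-width field is zeroed */
	fc_ct_ms_fill_attr(entry, "Intel Corporation",
			   FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);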
+/**
+ * fc_ct_ms_fill() - Fill in a mgmt service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_ms_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type)
+{
+ struct fc_ct_req *ct;
+ size_t len;
+ struct fc_fdmi_attr_entry *entry;
+ struct fs_fdmi_attrs *hba_attrs;
+ int numattrs = 0;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
+ switch (op) {
+ case FC_FDMI_RHBA:
+ numattrs = 11;
+ len = sizeof(struct fc_fdmi_rhba);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+
+ /* HBA Identifier */
+ put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
+ /* Number of Ports - always 1 */
+ put_unaligned_be32(1, &ct->payload.rhba.port.numport);
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.rhba.port.port[0].portname);
+
+ /* HBA Attributes */
+ put_unaligned_be32(numattrs,
+ &ct->payload.rhba.hba_attrs.numattrs);
+ hba_attrs = &ct->payload.rhba.hba_attrs;
+ entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
+ /* NodeName*/
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwnn,
+ (__be64 *)&entry->value);
+
+ /* Manufacturer */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_manufacturer(lport->host),
+ FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
+
+ /* SerialNumber */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_serial_number(lport->host),
+ FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
+
+ /* Model */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_model(lport->host),
+ FC_FDMI_HBA_ATTR_MODEL_LEN);
+
+ /* Model Description */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MODEL_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_model_description(lport->host),
+ FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
+
+ /* Hardware Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_hardware_version(lport->host),
+ FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
+
+ /* Driver Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_driver_version(lport->host),
+ FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
+
+ /* OptionROM Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ "unknown",
+ FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
+
+ /* Firmware Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_firmware_version(lport->host),
+ FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
+
+ /* OS Name and Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ snprintf((char *)&entry->value,
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
+ "%s v%s",
+ init_utsname()->sysname,
+ init_utsname()->release);
+
+ /* Max CT payload */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_max_ct_payload(lport->host),
+ &entry->value);
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ /* Node symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODESYMBLNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+
+ /* Vendor specific info */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(0,
+ &entry->value);
+
+ /* Number of ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NUMBEROFPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_ports(lport->host),
+ &entry->value);
+
+ /* Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* BIOS version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_bootbios_version(lport->host),
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+
+ /* BIOS state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_bootbios_state(lport->host),
+ &entry->value);
+
+ /* Vendor identifier */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORIDENTIFIER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_vendor_identifier(lport->host),
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN);
+ }
+
+ break;
+ case FC_FDMI_RPA:
+ numattrs = 6;
+ len = sizeof(struct fc_fdmi_rpa);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ }
+
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.rpa.port.portname);
+
+ /* Port Attributes */
+ put_unaligned_be32(numattrs,
+ &ct->payload.rpa.hba_attrs.numattrs);
+
+ hba_attrs = &ct->payload.rpa.hba_attrs;
+ entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
+
+ /* FC4 types */
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_supported_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+
+ /* Supported Speed */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+
+ put_unaligned_be32(fc_host_supported_speeds(lport->host),
+ &entry->value);
+
+ /* Current Port Speed */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(lport->link_speed,
+ &entry->value);
+
+ /* Max Frame Size */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_maxframe_size(lport->host),
+ &entry->value);
+
+ /* OS Device Name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ /* Use the sysfs device name */
+ fc_ct_ms_fill_attr(entry,
+ dev_name(&lport->host->shost_gendev),
+ strnlen(dev_name(&lport->host->shost_gendev),
+ FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN));
+
+ /* Host Name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ if (strlen(fc_host_system_hostname(lport->host)))
+ fc_ct_ms_fill_attr(entry,
+ fc_host_system_hostname(lport->host),
+ strnlen(fc_host_system_hostname(lport->host),
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
+ else
+ fc_ct_ms_fill_attr(entry,
+ init_utsname()->nodename,
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ /* Node name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_node_name(lport->host),
+ &entry->value);
+
+ /* Port name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwpn,
+ &entry->value);
+
+ /* Port symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SYMBOLICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+
+ /* Port type */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTTYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_type(lport->host),
+ &entry->value);
+
+ /* Supported class of service */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTTYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_supported_classes(lport->host),
+ &entry->value);
+
+ /* Port Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* Port active FC-4 */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTFC4TYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_active_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+
+ /* Port state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_state(lport->host),
+ &entry->value);
+
+ /* Discovered ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_DISCOVEREDPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_discovered_ports(lport->host),
+ &entry->value);
+
+ /* Port ID */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTID,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_id(lport->host),
+ &entry->value);
+ }
+
+ break;
+ case FC_FDMI_DPRT:
+ len = sizeof(struct fc_fdmi_dprt);
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.dprt.port.portname);
+ break;
+ case FC_FDMI_DHBA:
+ len = sizeof(struct fc_fdmi_dhba);
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+ /* HBA Identifier */
+ put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ *fh_type = FC_TYPE_CT;
+ return 0;
+}
+
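Each RHBA/RPA attribute above follows the same fixed walk: write the type, write the total length, copy the value, then step to the byte just past the value for the next entry. A minimal sketch of a helper capturing that walk; fc_fdmi_put_string_attr is an illustrative name, not an existing libfc function, and it assumes the same fc_fdmi_attr_entry layout used above.

static struct fc_fdmi_attr_entry *
fc_fdmi_put_string_attr(struct fc_fdmi_attr_entry *entry, u16 type,
			const char *value, size_t value_len)
{
	u16 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN + value_len;

	put_unaligned_be16(type, &entry->type);
	put_unaligned_be16(len, &entry->len);
	fc_ct_ms_fill_attr(entry, value, value_len);
	/* the next attribute entry starts right after this value */
	return (struct fc_fdmi_attr_entry *)((char *)entry->value + value_len);
}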
+/**
+ * fc_ct_fill() - Fill in a common transport service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ * @did: pointer to the destination ID, set to the well-known address of the service used.
+ */
+static inline int fc_ct_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type, u32 *did)
+{
+ int rc = -EINVAL;
+
+ switch (fc_id) {
+ case FC_FID_MGMT_SERV:
+ rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type);
+ *did = FC_FID_MGMT_SERV;
+ break;
+ case FC_FID_DIR_SERV:
+ default:
+ rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type);
+ *did = FC_FID_DIR_SERV;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * fc_plogi_fill - Fill in plogi request frame
+ */
+static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
+ unsigned int op)
+{
+ struct fc_els_flogi *plogi;
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *cp;
+
+ plogi = fc_frame_payload_get(fp, sizeof(*plogi));
+ memset(plogi, 0, sizeof(*plogi));
+ plogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
+
+ csp = &plogi->fl_csp;
+ csp->sp_hi_ver = 0x20;
+ csp->sp_lo_ver = 0x20;
+ csp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ csp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ csp->sp_features = htons(FC_SP_FT_CIRO);
+ csp->sp_tot_seq = htons(255); /* seq. we accept */
+ csp->sp_rel_off = htons(0x1f);
+ csp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lport->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+}
+
+/**
+ * fc_flogi_fill - Fill in a flogi request frame.
+ */
+static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+ struct fc_els_flogi *flogi;
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) ELS_FLOGI;
+ put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (lport->does_npiv)
+ sp->sp_features = htons(FC_SP_FT_NPIV);
+}
+
+/**
+ * fc_fdisc_fill - Fill in a fdisc request frame.
+ */
+static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+ struct fc_els_flogi *fdisc;
+
+ fdisc = fc_frame_payload_get(fp, sizeof(*fdisc));
+ memset(fdisc, 0, sizeof(*fdisc));
+ fdisc->fl_cmd = (u8) ELS_FDISC;
+ put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn);
+ sp = &fdisc->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+}
+
+/**
+ * fc_logo_fill - Fill in a logo request frame.
+ */
+static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, lport->port_id);
+ logo->fl_n_port_wwn = htonll(lport->wwpn);
+}
+
+/**
+ * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
+ */
+static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_rtv *rtv;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ memset(rtv, 0, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_RTV;
+}
+
+/**
+ * fc_rec_fill - Fill in rec request frame
+ */
+static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_rec *rec;
+ struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
+
+ rec = fc_frame_payload_get(fp, sizeof(*rec));
+ memset(rec, 0, sizeof(*rec));
+ rec->rec_cmd = ELS_REC;
+ hton24(rec->rec_s_id, lport->port_id);
+ rec->rec_ox_id = htons(ep->oxid);
+ rec->rec_rx_id = htons(ep->rxid);
+}
+
+/**
+ * fc_prli_fill - Fill in prli request frame
+ */
+static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ memset(pp, 0, sizeof(*pp));
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
+ pp->prli.prli_len = htons(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = htonl(lport->service_params);
+}
+
+/**
+ * fc_scr_fill - Fill in a scr request frame.
+ */
+static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_scr *scr;
+
+ scr = fc_frame_payload_get(fp, sizeof(*scr));
+ memset(scr, 0, sizeof(*scr));
+ scr->scr_cmd = ELS_SCR;
+ scr->scr_reg_func = ELS_SCRF_FULL;
+}
+
+/**
+ * fc_els_fill - Fill in an ELS request frame
+ */
+static inline int fc_els_fill(struct fc_lport *lport,
+ u32 did,
+ struct fc_frame *fp, unsigned int op,
+ enum fc_rctl *r_ctl, enum fc_fh_type *fh_type)
+{
+ switch (op) {
+ case ELS_ADISC:
+ fc_adisc_fill(lport, fp);
+ break;
+
+ case ELS_PLOGI:
+ fc_plogi_fill(lport, fp, ELS_PLOGI);
+ break;
+
+ case ELS_FLOGI:
+ fc_flogi_fill(lport, fp);
+ break;
+
+ case ELS_FDISC:
+ fc_fdisc_fill(lport, fp);
+ break;
+
+ case ELS_LOGO:
+ fc_logo_fill(lport, fp);
+ break;
+
+ case ELS_RTV:
+ fc_rtv_fill(lport, fp);
+ break;
+
+ case ELS_REC:
+ fc_rec_fill(lport, fp);
+ break;
+
+ case ELS_PRLI:
+ fc_prli_fill(lport, fp);
+ break;
+
+ case ELS_SCR:
+ fc_scr_fill(lport, fp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *r_ctl = FC_RCTL_ELS_REQ;
+ *fh_type = FC_TYPE_ELS;
+ return 0;
+}
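A simplified sketch of how a caller consumes fc_els_fill(): allocate a frame sized for the payload, fill it, then build the FC header from the returned R_CTL and FC-4 type. fc_frame_alloc(), fc_frame_free() and fc_fill_fc_hdr() are existing libfc interfaces; the wrapper itself is illustrative, since libfc's real consumer is fc_elsct_send(), which hands the filled frame to the exchange layer rather than sending it raw.

static int example_send_els(struct fc_lport *lport, u32 did,
			    unsigned int op, size_t payload_len)
{
	enum fc_fh_type fh_type;
	enum fc_rctl r_ctl;
	struct fc_frame *fp;

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp)
		return -ENOMEM;
	if (fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type)) {
		fc_frame_free(fp);
		return -EINVAL;
	}
	/* r_ctl/fh_type returned above become the FC header fields */
	fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	return lport->tt.frame_send(lport, fp);
}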
+#endif /* _FC_ENCODE_H_ */
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 52e866659853..1d91c457527f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -20,7 +20,6 @@
#include <scsi/fc/fc_fc2.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include "fc_libfc.h"
@@ -49,6 +48,8 @@ static struct workqueue_struct *fc_exch_workqueue;
* @total_exches: Total allocated exchanges
* @lock: Exch pool lock
* @ex_list: List of exchanges
+ * @left: Cache of free slot in exch array
+ * @right: Cache of free slot in exch array
*
* This structure manages per cpu exchanges in array of exchange pointers.
* This array is allocated followed by struct fc_exch_pool memory for
@@ -60,7 +61,6 @@ struct fc_exch_pool {
u16 next_index;
u16 total_exches;
- /* two cache of free slot in exch array */
u16 left;
u16 right;
} ____cacheline_aligned_in_smp;
@@ -74,6 +74,7 @@ struct fc_exch_pool {
* @ep_pool: Reserved exchange pointers
* @pool_max_index: Max exch array index in exch pool
* @pool: Per cpu exch pool
+ * @lport: Local exchange port
* @stats: Statistics structure
*
* This structure is the center for creating exchanges and sequences.
@@ -270,7 +271,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
if (f_ctl & FC_FC_END_SEQ) {
fr_eof(fp) = FC_EOF_T;
- if (fc_sof_needs_ack(ep->class))
+ if (fc_sof_needs_ack((enum fc_sof)ep->class))
fr_eof(fp) = FC_EOF_N;
/*
* From F_CTL.
@@ -702,6 +703,9 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
/**
* fc_invoke_resp() - invoke ep->resp()
+ * @ep: The exchange to be operated on
+ * @fp: The frame pointer to pass through to ->resp()
+ * @sp: The sequence pointer to pass through to ->resp()
*
* Notes:
* It is assumed that after initialization finished (this means the
@@ -821,10 +825,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = get_cpu();
+ cpu = raw_smp_processor_id();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
- put_cpu();
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
@@ -1619,8 +1622,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already, "
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
}
/*
@@ -1639,6 +1647,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
+skip_resp:
fc_exch_release(ep);
return;
rel:
@@ -1691,6 +1700,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
if (cancel_delayed_work_sync(&ep->timeout_work)) {
FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
fc_exch_release(ep); /* release from pending timer hold */
+ return;
}
spin_lock_bh(&ep->ex_lock);
@@ -1895,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
fc_exch_hold(ep);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already, "
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
@@ -2103,7 +2119,7 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
switch (op) {
case ELS_LS_RJT:
FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
- /* fall through */
+ fallthrough;
case ELS_LS_ACC:
goto cleanup;
default:
@@ -2617,7 +2633,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
case FC_EOF_T:
if (f_ctl & FC_FC_END_SEQ)
skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
- /* fall through */
+ fallthrough;
case FC_EOF_N:
if (fh->fh_type == FC_TYPE_BLS)
fc_exch_recv_bls(ema->mp, fp);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index bf2cc9656e19..945adca5e72f 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -26,8 +26,8 @@
#include <scsi/fc/fc_fc2.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
static struct kmem_cache *scsi_pkt_cachep;
@@ -45,14 +45,10 @@ static struct kmem_cache *scsi_pkt_cachep;
#define FC_SRB_READ (1 << 1)
#define FC_SRB_WRITE (1 << 0)
-/*
- * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
- */
-#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
-#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
-#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
-#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
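scsi_cmd_priv() returns the per-command private area that the LLD reserves through its host template, so libfc_priv() is valid for every command queued to the host. A sketch of the pairing; the template name is illustrative, but the .cmd_size field is the real mechanism:

static struct scsi_host_template example_fc_sht = {
	/* ...usual template fields... */
	.cmd_size	= sizeof(struct libfc_cmd_priv),
};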
/**
* struct fc_fcp_internal - FCP layer internal data
@@ -147,8 +143,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
} else {
- per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
- put_cpu();
+ this_cpu_inc(lport->stats->FcpPktAllocFails);
}
return fsp;
}
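this_cpu_inc() folds the CPU lookup and the increment into one preemption-safe operation, which is what lets the explicit get_cpu()/put_cpu() bracketing disappear. Roughly:

	/* before: pin the CPU for the whole read-modify-write */
	per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
	put_cpu();

	/* after: a single safe per-cpu increment, no pinning needed */
	this_cpu_inc(lport->stats->FcpPktAllocFails);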
@@ -270,8 +265,7 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
if (!fsp->seq_ptr)
return -EINVAL;
- per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
- put_cpu();
+ this_cpu_inc(fsp->lp->stats->FcpPktAborts);
fsp->state |= FC_SRB_ABORT_PENDING;
rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
@@ -289,6 +283,7 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
/**
* fc_fcp_retry_cmd() - Retry a fcp_pkt
* @fsp: The FCP packet to be retried
+ * @status_code: The FCP status code to set
*
* Sets the status code to be FC_ERROR and then calls
* fc_fcp_complete_locked() which in turn calls fc_io_compl().
@@ -439,8 +434,7 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
if (likely(fp))
return fp;
- per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
- put_cpu();
+ this_cpu_inc(lport->stats->FcpFrameAllocFails);
/* error case */
fc_fcp_can_queue_ramp_down(lport);
shost_printk(KERN_ERR, lport->host,
@@ -474,7 +468,6 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
struct scsi_cmnd *sc = fsp->cmd;
struct fc_lport *lport = fsp->lp;
- struct fc_stats *stats;
struct fc_frame_header *fh;
size_t start_offset;
size_t offset;
@@ -536,14 +529,12 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->ErrorFrames++;
+ this_cpu_inc(lport->stats->ErrorFrames);
/* per cpu count, not total count, but OK for limit */
- if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
printk(KERN_WARNING "libfc: CRC error on data "
"frame for port (%6.6x)\n",
lport->port_id);
- put_cpu();
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@@ -580,7 +571,7 @@ err:
/**
* fc_fcp_send_data() - Send SCSI data to a target
* @fsp: The FCP packet the data is on
- * @sp: The sequence the data is to be sent on
+ * @seq: The sequence the data is to be sent on
* @offset: The starting offset for this data request
* @seq_blen: The burst length for this data request
*
@@ -751,7 +742,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
brp = fc_frame_payload_get(fp, sizeof(*brp));
if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
break;
- /* fall thru */
+ fallthrough;
default:
/*
* we will let the command timeout
@@ -1136,7 +1127,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
unsigned long flags;
int rc;
- fsp->cmd->SCp.ptr = (char *)fsp;
+ libfc_priv(fsp->cmd)->fsp = fsp;
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
@@ -1149,7 +1140,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
- fsp->cmd->SCp.ptr = NULL;
+ libfc_priv(fsp->cmd)->fsp = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
@@ -1283,7 +1274,7 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
/**
* fc_lun_reset_send() - Send LUN reset command
- * @data: The FCP packet that identifies the LUN to be reset
+ * @t: Timer context used to fetch the FSP packet
*/
static void fc_lun_reset_send(struct timer_list *t)
{
@@ -1409,7 +1400,7 @@ static void fc_fcp_cleanup(struct fc_lport *lport)
/**
* fc_fcp_timeout() - Handler for fcp_pkt timeouts
- * @data: The FCP packet that has timed out
+ * @t: Timer context used to fetch the FSP packet
*
* If REC is supported then just issue it and return. The REC exchange will
* complete or time out and recovery can continue at that point. Otherwise,
@@ -1535,7 +1526,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
"device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
- /* fall through */
+ fallthrough;
case ELS_RJT_UNSUP:
FC_FCP_DBG(fsp, "device does not support REC\n");
rpriv = fsp->rport->dd_data;
@@ -1667,7 +1658,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
- /* fall through */
+ fallthrough;
case -FC_EX_TIMEOUT:
/*
@@ -1691,6 +1682,7 @@ out:
/**
* fc_fcp_recovery() - Handler for fcp_pkt recovery
* @fsp: The FCP pkt that needs to be aborted
+ * @code: The FCP status code to set
*/
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
{
@@ -1709,6 +1701,7 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
* fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
* @fsp: The FCP packet the SRR is to be sent on
* @r_ctl: The R_CTL field for the SRR request
+ * @offset: The SRR relative offset
* This is called after receiving status but insufficient data, or
* when expecting status but the request has timed out.
*/
@@ -1827,7 +1820,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
break;
case -FC_EX_CLOSED: /* e.g., link failure */
FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
- /* fall through */
+ fallthrough;
default:
fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
@@ -1851,7 +1844,7 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
/**
* fc_queuecommand() - The queuecommand function of the SCSI template
* @shost: The Scsi_Host that the command was issued to
- * @cmd: The scsi_cmnd to be executed
+ * @sc_cmd: The scsi_cmnd to be executed
*
* This is the i/o strategy routine, called by the SCSI layer.
*/
@@ -1862,12 +1855,11 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_fcp_pkt *fsp;
int rval;
int rc = 0;
- struct fc_stats *stats;
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -1877,7 +1869,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
goto out;
}
@@ -1914,20 +1906,18 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
/*
* setup the data direction
*/
- stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
- stats->InputRequests++;
- stats->InputBytes += fsp->data_len;
+ this_cpu_inc(lport->stats->InputRequests);
+ this_cpu_add(lport->stats->InputBytes, fsp->data_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
fsp->req_flags = FC_SRB_WRITE;
- stats->OutputRequests++;
- stats->OutputBytes += fsp->data_len;
+ this_cpu_inc(lport->stats->OutputRequests);
+ this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
} else {
fsp->req_flags = 0;
- stats->ControlRequests++;
+ this_cpu_inc(lport->stats->ControlRequests);
}
- put_cpu();
/*
* send it to the lower layer
@@ -1980,7 +1970,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
fc_fcp_can_queue_ramp_up(lport);
sc_cmd = fsp->cmd;
- CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
+ libfc_priv(sc_cmd)->status = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
if (fsp->cdb_status == 0) {
@@ -1989,7 +1979,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
*/
sc_cmd->result = DID_OK << 16;
if (fsp->scsi_resid)
- CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
} else {
/*
* transport level I/O was ok but scsi
@@ -2022,7 +2012,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
*/
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_DATA_UNDRUN (scsi)\n");
- CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
break;
@@ -2082,9 +2072,9 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
- sc_cmd->SCp.ptr = NULL;
+ libfc_priv(sc_cmd)->fsp = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);
@@ -2118,7 +2108,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
si = fc_get_scsi_internal(lport);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
- fsp = CMD_SP(sc_cmd);
+ fsp = libfc_priv(sc_cmd)->fsp;
if (!fsp) {
/* command completed while scsi eh was setting up */
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
@@ -2245,7 +2235,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL(fc_slave_alloc);
/**
- * fc_fcp_destory() - Tear down the FCP layer for a given local port
+ * fc_fcp_destroy() - Tear down the FCP layer for a given local port
* @lport: The local port that no longer needs the FCP layer
*/
void fc_fcp_destroy(struct fc_lport *lport)
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 19c4ab4e0f4d..0e6a1355d020 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -12,8 +12,8 @@
#include <linux/module.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
MODULE_AUTHOR("Open-FCoE.org");
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 684c5e361a28..9c02c9523c4d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -84,16 +84,19 @@
#include <scsi/fc/fc_gs.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO 0x010101
#define FC_LOCAL_PTP_FID_HI 0x010102
-#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds) */
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
static void fc_lport_error(struct fc_lport *, struct fc_frame *);
@@ -305,21 +308,21 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
stats = per_cpu_ptr(lport->stats, cpu);
- fc_stats->tx_frames += stats->TxFrames;
- fc_stats->tx_words += stats->TxWords;
- fc_stats->rx_frames += stats->RxFrames;
- fc_stats->rx_words += stats->RxWords;
- fc_stats->error_frames += stats->ErrorFrames;
- fc_stats->invalid_crc_count += stats->InvalidCRCCount;
- fc_stats->fcp_input_requests += stats->InputRequests;
- fc_stats->fcp_output_requests += stats->OutputRequests;
- fc_stats->fcp_control_requests += stats->ControlRequests;
- fcp_in_bytes += stats->InputBytes;
- fcp_out_bytes += stats->OutputBytes;
- fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
- fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
- fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
- fc_stats->link_failure_count += stats->LinkFailureCount;
+ fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
+ fc_stats->tx_words += READ_ONCE(stats->TxWords);
+ fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
+ fc_stats->rx_words += READ_ONCE(stats->RxWords);
+ fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
+ fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
+ fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
+ fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
+ fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
+ fcp_in_bytes += READ_ONCE(stats->InputBytes);
+ fcp_out_bytes += READ_ONCE(stats->OutputBytes);
+ fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
+ fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
+ fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
+ fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
}
fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
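The this_cpu_*() writers introduced in fc_fcp.c update these counters locklessly, so the aggregation side pairs them with READ_ONCE() to get one untorn load per counter. The pairing, in sketch form:

	/* writer: hot path, no lock held */
	this_cpu_inc(lport->stats->TxFrames);

	/* reader: stats aggregation */
	fc_stats->tx_frames += READ_ONCE(stats->TxFrames);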
@@ -405,7 +408,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_lport_recv_echo_req() - Handle received ECHO request
* @lport: The local port receiving the ECHO
- * @fp: ECHO request frame
+ * @in_fp: ECHO request frame
*/
static void fc_lport_recv_echo_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -440,7 +443,7 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
/**
* fc_lport_recv_rnid_req() - Handle received Request Node ID data request
* @lport: The local port receiving the RNID
- * @fp: The RNID request frame
+ * @in_fp: The RNID request frame
*/
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -703,7 +706,7 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
}
/**
- * fc_rport_enter_ready() - Enter the ready state and start discovery
+ * fc_lport_enter_ready() - Enter the ready state and start discovery
* @lport: The local port that is ready
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
@@ -747,7 +750,7 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
}
/**
- * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint
+ * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
*
@@ -1185,7 +1188,7 @@ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
-
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
@@ -1219,7 +1222,13 @@ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
switch (lport->state) {
case LPORT_ST_RHBA:
- if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+ if ((ntohs(ct->ct_cmd) == FC_FS_RJT) && fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "Error for FDMI-V2, fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ } else if (ntohs(ct->ct_cmd) == FC_FS_ACC)
fc_lport_enter_ms(lport, LPORT_ST_RPA);
else /* Error Skip RPA */
fc_lport_enter_scr(lport);
@@ -1325,6 +1334,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
/**
* fc_lport_enter_ns() - register some object with the name server
* @lport: Fibre Channel local port to register
+ * @state: Local port state
*/
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1392,7 +1402,7 @@ static struct fc_rport_operations fc_lport_rport_ops = {
};
/**
- * fc_rport_enter_dns() - Create a fc_rport for the name server
+ * fc_lport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
@@ -1423,6 +1433,7 @@ err:
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
+ * @state: Local port state
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1431,7 +1442,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;
int numattrs;
-
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
@@ -1444,10 +1455,10 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
case LPORT_ST_RHBA:
cmd = FC_FDMI_RHBA;
/* Number of HBA Attributes */
- numattrs = 10;
+ numattrs = 11;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
@@ -1458,6 +1469,21 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
size += len;
break;
@@ -1467,7 +1493,6 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
numattrs = 6;
len = sizeof(struct fc_fdmi_rpa);
len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
@@ -1475,6 +1500,22 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
size += len;
break;
case LPORT_ST_DPRT:
@@ -1507,7 +1548,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
}
/**
- * fc_rport_enter_fdmi() - Create a fc_rport for the management server
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
@@ -1544,6 +1585,7 @@ static void fc_lport_timeout(struct work_struct *work)
struct fc_lport *lport =
container_of(work, struct fc_lport,
retry_work.work);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
mutex_lock(&lport->lp_mutex);
@@ -1571,12 +1613,19 @@ static void fc_lport_timeout(struct work_struct *work)
fc_lport_enter_fdmi(lport);
break;
case LPORT_ST_RHBA:
+ if (fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "timeout for FDMI-V2 RHBA, fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ }
+ fallthrough;
case LPORT_ST_RPA:
case LPORT_ST_DHBA:
case LPORT_ST_DPRT:
FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
fc_lport_state(lport));
- /* fall thru */
+ fallthrough;
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;
@@ -1638,7 +1687,7 @@ err:
EXPORT_SYMBOL(fc_lport_logo_resp);
/**
- * fc_rport_enter_logo() - Logout of the fabric
+ * fc_lport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
@@ -1729,7 +1778,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
- "lport->mfs:%hu\n", mfs, lport->mfs);
+ "lport->mfs:%u\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
goto out;
}
@@ -1780,7 +1829,7 @@ err:
EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
- * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
+ * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
@@ -1837,6 +1886,13 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
+ struct fc_host_attrs *fc_host;
+
+ fc_host = shost_to_fc_host(lport->host);
+
+ /* Set FDMI version to the FDMI-V2 specification */
+ fc_host->fdmi_version = FDMI_V2;
+
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1845,6 +1901,7 @@ int fc_lport_init(struct fc_lport *lport)
sizeof(fc_host_supported_fc4s(lport->host)));
fc_host_supported_fc4s(lport->host)[2] = 1;
fc_host_supported_fc4s(lport->host)[7] = 1;
/* This value is also unchanging */
memset(fc_host_active_fc4s(lport->host), 0,
@@ -1857,8 +1914,27 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_40GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_40GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_25GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_25GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_50GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_50GBIT;
+
fc_fc4_add_lport(lport);
+ fc_host_num_discovered_ports(lport->host) = DISCOVERED_PORTS;
+ fc_host_port_state(lport->host) = FC_PORTSTATE_ONLINE;
+ fc_host_max_ct_payload(lport->host) = MAX_CT_PAYLOAD;
+ fc_host_num_ports(lport->host) = NUMBER_OF_PORTS;
+ fc_host_bootbios_state(lport->host) = 0x00000000;
+ snprintf(fc_host_bootbios_version(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "Unknown");
+
return 0;
}
EXPORT_SYMBOL(fc_lport_init);
@@ -1932,6 +2008,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination port id
+ * @tov: The timeout period (in ms)
*/
static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index da6e97d8dc3b..33da3c1085f0 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -58,8 +58,8 @@
#include <asm/unaligned.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
static struct workqueue_struct *rport_event_queue;
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
/**
* fc_rport_create() - Create a new remote port
* @lport: The local port this remote port will be associated with
- * @ids: The identifiers for the new remote port
+ * @port_id: The identifiers for the new remote port
*
* The remote port will start in the INIT state.
*/
@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id);
- if (rdata)
+ if (rdata) {
+ kref_put(&rdata->kref, fc_rport_destroy);
return rdata;
+ }
if (lport->rport_priv_size > 0)
rport_priv_size = lport->rport_priv_size;
@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- kref_get(&rdata->kref);
- if (rdata->event == RPORT_EV_NONE &&
- !queue_work(rport_event_queue, &rdata->event_work))
- kref_put(&rdata->kref, fc_rport_destroy);
+ if (rdata->event == RPORT_EV_NONE) {
+ kref_get(&rdata->kref);
+ if (!queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
rdata->event = event;
}
@@ -632,6 +635,8 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
fc_rport_enter_ready(rdata);
break;
case RPORT_ST_PRLI:
+ fc_rport_enter_plogi(rdata);
+ break;
case RPORT_ST_ADISC:
fc_rport_enter_logo(rdata);
break;
@@ -1157,6 +1162,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
pp->spp.spp_flags, pp->spp.spp_type);
+
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
@@ -1179,11 +1185,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
/*
* Call prli provider if we should act as a target
*/
- prov = fc_passive_prov[rdata->spp_type];
- if (prov) {
- memset(&temp_spp, 0, sizeof(temp_spp));
- prov->prli(rdata, pp->prli.prli_spp_len,
- &pp->spp, &temp_spp);
+ if (rdata->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_passive_prov[rdata->spp_type];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
}
/*
* Check if the image pair could be established
@@ -1208,9 +1216,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PRLI bad response\n");
- else
+ else {
FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
+ if (rjt->er_reason == ELS_RJT_UNAB &&
+ rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
+ fc_rport_enter_plogi(rdata);
+ goto out;
+ }
+ }
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
@@ -1434,7 +1448,7 @@ drop:
* fc_rport_logo_resp() - Handler for logout (LOGO) responses
* @sp: The sequence the LOGO was on
* @fp: The LOGO response frame
- * @lport_arg: The local port
+ * @rdata_arg: The remote port
*/
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
@@ -1475,7 +1489,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
}
/**
- * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
* @sp: The sequence the ADISC response was on
* @fp: The ADISC response frame
* @rdata_arg: The remote port that sent the ADISC response
@@ -1712,7 +1726,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
kref_put(&rdata->kref, fc_rport_destroy);
goto busy;
}
- /* fall through */
+ fallthrough;
default:
FC_RPORT_DBG(rdata,
"Reject ELS 0x%02x while in state %s\n",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 70b99c0e2e67..d95f4bcdeb2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -83,7 +83,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
"%s " dbg_fmt, __func__, ##arg); \
} while (0);
-inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
+#define ISCSI_CMD_COMPL_WAIT 5
+
+inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
@@ -91,7 +93,17 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
if (ihost->workq)
queue_work(ihost->workq, &conn->xmitwork);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);
+
+inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
+ queue_work(ihost->workq, &conn->recvwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);
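Transport drivers call this from their socket data-ready callback to defer receive processing to the iscsi workqueue unless RX is suspended. A sketch modeled on iscsi_sw_tcp_data_ready(); the handler name is illustrative:

static void example_data_ready(struct sock *sk)
{
	struct iscsi_conn *conn = sk->sk_user_data;

	/* recvwork runs PDU processing outside softirq context */
	iscsi_conn_queue_recv(conn);
}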
static void __iscsi_update_cmdsn(struct iscsi_session *session,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -230,11 +242,11 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
*/
static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
{
- struct iscsi_conn *conn = task->conn;
- struct iscsi_tm *tmf = &conn->tmhdr;
+ struct iscsi_session *session = task->conn->session;
+ struct iscsi_tm *tmf = &session->tmhdr;
u64 hdr_lun;
- if (conn->tmf_state == TMF_INITIAL)
+ if (session->tmf_state == TMF_INITIAL)
return 0;
if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
@@ -248,30 +260,25 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
hdr_lun = scsilun_to_int(&tmf->lun);
if (hdr_lun != task->sc->device->lun)
return 0;
- /* fall through */
+ fallthrough;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
/*
* Fail all SCSI cmd PDUs
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
- iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x itt "
- "0x%x/0x%x] "
- "rejected.\n",
- opcode, task->itt,
- task->hdr_itt);
+ iscsi_session_printk(KERN_INFO, session,
+ "task [op %x itt 0x%x/0x%x] rejected.\n",
+ opcode, task->itt, task->hdr_itt);
return -EACCES;
}
/*
* And also all data-out PDUs in response to R2T
* if fast_abort is set.
*/
- if (conn->session->fast_abort) {
- iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x itt "
- "0x%x/0x%x] fast abort.\n",
- opcode, task->itt,
- task->hdr_itt);
+ if (session->fast_abort) {
+ iscsi_session_printk(KERN_INFO, session,
+ "task [op %x itt 0x%x/0x%x] fast abort.\n",
+ opcode, task->itt, task->hdr_itt);
return -EACCES;
}
break;
@@ -284,7 +291,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
task->hdr_itt == tmf->rtt) {
- ISCSI_DBG_SESSION(conn->session,
+ ISCSI_DBG_SESSION(session,
"Preventing task %x/%x from sending "
"data-out due to abort task in "
"progress\n", task->itt,
@@ -467,22 +474,28 @@ static void iscsi_free_task(struct iscsi_task *task)
if (sc) {
/* SCSI eh reuses commands to verify us */
- sc->SCp.ptr = NULL;
+ iscsi_cmd(sc)->task = NULL;
/*
* queue command may call this to free the task, so
* it will decide how to return sc to scsi-ml.
*/
if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
- sc->scsi_done(sc);
+ scsi_done(sc);
}
}
-void __iscsi_get_task(struct iscsi_task *task)
+bool iscsi_get_task(struct iscsi_task *task)
{
- refcount_inc(&task->refcount);
+ return refcount_inc_not_zero(&task->refcount);
}
-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+EXPORT_SYMBOL_GPL(iscsi_get_task);
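With refcount_inc_not_zero() the getter can now observe a task whose final reference is already gone, so lookup paths must treat a false return as a task in the middle of being freed. A sketch under that assumption, using the existing iscsi_itt_to_task() lookup:

	spin_lock_bh(&session->back_lock);
	task = iscsi_itt_to_task(conn, itt);
	if (task && !iscsi_get_task(task))
		task = NULL;	/* raced with the final put */
	spin_unlock_bh(&session->back_lock);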
+/**
+ * __iscsi_put_task - drop the refcount on a task
+ * @task: iscsi_task to drop the refcount on
+ *
+ * The back_lock must be held when calling in case it frees the task.
+ */
void __iscsi_put_task(struct iscsi_task *task)
{
if (refcount_dec_and_test(&task->refcount))
@@ -494,10 +507,11 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ if (refcount_dec_and_test(&task->refcount)) {
+ spin_lock_bh(&session->back_lock);
+ iscsi_free_task(task);
+ spin_unlock_bh(&session->back_lock);
+ }
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
@@ -523,18 +537,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- spin_lock_bh(&conn->taskqueuelock);
- if (!list_empty(&task->running)) {
- pr_debug_once("%s while task on list", __func__);
- list_del_init(&task->running);
- }
- spin_unlock_bh(&conn->taskqueuelock);
-
- if (conn->task == task)
- conn->task = NULL;
-
- if (conn->ping_task == task)
- conn->ping_task = NULL;
+ if (READ_ONCE(conn->ping_task) == task)
+ WRITE_ONCE(conn->ping_task, NULL);
/* release get from queueing */
__iscsi_put_task(task);
@@ -564,25 +568,57 @@ void iscsi_complete_scsi_task(struct iscsi_task *task,
}
EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
+/*
+ * Must be called with back and frwd lock
+ */
+static bool cleanup_queued_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ bool early_complete = false;
+
+ /*
+ * We might have raced where we handled a R2T early and got a response
+ * but have not yet taken the task off the requeue list, then a TMF or
+ * recovery happened and so we can still see it here.
+ */
+ if (task->state == ISCSI_TASK_COMPLETED)
+ early_complete = true;
+
+ if (!list_empty(&task->running)) {
+ list_del_init(&task->running);
+ /*
+ * If it's on a list but still running this could be cleanup
+ * from a TMF or session recovery.
+ */
+ if (task->state == ISCSI_TASK_RUNNING ||
+ task->state == ISCSI_TASK_COMPLETED)
+ __iscsi_put_task(task);
+ }
+
+ if (conn->session->running_aborted_task == task) {
+ conn->session->running_aborted_task = NULL;
+ __iscsi_put_task(task);
+ }
+
+ if (conn->task == task) {
+ conn->task = NULL;
+ __iscsi_put_task(task);
+ }
+
+ return early_complete;
+}
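Both session locks guard the state touched here, and libiscsi nests back_lock inside frwd_lock, so a caller looks roughly like this sketch:

	bool early_complete;

	spin_lock_bh(&session->frwd_lock);
	spin_lock_bh(&session->back_lock);
	early_complete = cleanup_queued_task(task);
	spin_unlock_bh(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);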
/*
- * session back_lock must be held and if not called for a task that is
- * still pending or from the xmit thread, then xmit thread must
- * be suspended.
+ * session back and frwd lock must be held and if not called for a task that
+ * is still pending or from the xmit thread, then xmit thread must be suspended
*/
-static void fail_scsi_task(struct iscsi_task *task, int err)
+static void __fail_scsi_task(struct iscsi_task *task, int err)
{
struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
int state;
- /*
- * if a command completes and we get a successful tmf response
- * we will hit this because the scsi eh abort code does not take
- * a ref to the task.
- */
- sc = task->sc;
- if (!sc)
+ if (cleanup_queued_task(task))
return;
if (task->state == ISCSI_TASK_PENDING) {
@@ -598,13 +634,19 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
else
state = ISCSI_TASK_ABRT_TMF;
+ sc = task->sc;
sc->result = err << 16;
scsi_set_resid(sc, scsi_bufflen(sc));
-
- /* regular RX path uses back_lock */
- spin_lock_bh(&conn->session->back_lock);
iscsi_complete_task(task, state);
- spin_unlock_bh(&conn->session->back_lock);
+}
+
+static void fail_scsi_task(struct iscsi_task *task, int err)
+{
+ struct iscsi_session *session = task->conn->session;
+
+ spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, err);
+ spin_unlock_bh(&session->back_lock);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -653,17 +695,24 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
return 0;
}
+/**
+ * iscsi_alloc_mgmt_task - allocate and set up a mgmt task.
+ * @conn: iscsi conn that the task will be sent on.
+ * @hdr: iscsi pdu that will be sent.
+ * @data: buffer for data segment if needed.
+ * @data_size: length of data in bytes.
+ */
static struct iscsi_task *
-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_host *ihost = shost_priv(session->host);
uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
struct iscsi_task *task;
itt_t itt;
- if (session->state == ISCSI_STATE_TERMINATE)
+ if (session->state == ISCSI_STATE_TERMINATE ||
+ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -738,27 +787,57 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
+ return task;
+
+free_task:
+ iscsi_put_task(task);
+ return NULL;
+}
+
+/**
+ * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task.
+ * @task: iscsi task to send.
+ *
+ * On failure this returns a non-zero error code, and the driver must free
+ * the task with iscsi_put_task().
+ */
+static int iscsi_send_mgmt_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(conn->session->host);
+ int rc = 0;
+
if (!ihost->workq) {
- if (iscsi_prep_mgmt_task(conn, task))
- goto free_task;
+ rc = iscsi_prep_mgmt_task(conn, task);
+ if (rc)
+ return rc;
- if (session->tt->xmit_task(task))
- goto free_task;
+ rc = session->tt->xmit_task(task);
+ if (rc)
+ return rc;
} else {
- spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
- spin_unlock_bh(&conn->taskqueuelock);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
- return task;
+ return 0;
+}
-free_task:
- /* regular RX path uses back_lock */
- spin_lock(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&session->back_lock);
- return NULL;
+static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_task *task;
+ int rc;
+
+ task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
+ if (!task)
+ return -ENOMEM;
+
+ rc = iscsi_send_mgmt_task(task);
+ if (rc)
+ iscsi_put_task(task);
+ return rc;
}
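
Splitting __iscsi_conn_send_pdu() into an alloc step and a send step lets a caller publish state between the two, which the nopout path below relies on. A hedged usage sketch, with error handling trimmed:

	task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
	if (!task)
		return -ENOMEM;

	/* Publish anything the response path may need before the PDU
	 * can possibly be answered, e.g. conn->ping_task for nopouts. */

	if (iscsi_send_mgmt_task(task)) {
		/* Unpublish, then drop the allocation ref. */
		iscsi_put_task(task);
		return -EIO;
	}
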
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -769,7 +848,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
int err = 0;
spin_lock_bh(&session->frwd_lock);
- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
spin_unlock_bh(&session->frwd_lock);
return err;
@@ -777,7 +856,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
/**
- * iscsi_cmd_rsp - SCSI Command Response processing
+ * iscsi_scsi_cmd_rsp - SCSI Command Response processing
* @conn: iscsi connection
* @hdr: iscsi header
* @task: scsi command task
@@ -813,10 +892,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
ascq = session->tt->check_protection(task, &sector);
if (ascq) {
- sc->result = DRIVER_SENSE << 24 |
- SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(1, sc->sense_buffer,
- ILLEGAL_REQUEST, 0x10, ascq);
+ scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
scsi_set_sense_information(sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
sector);
@@ -920,20 +996,21 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
conn->tmfrsp_pdus_cnt++;
- if (conn->tmf_state != TMF_QUEUED)
+ if (session->tmf_state != TMF_QUEUED)
return;
if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
- conn->tmf_state = TMF_SUCCESS;
+ session->tmf_state = TMF_SUCCESS;
else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
- conn->tmf_state = TMF_NOT_FOUND;
+ session->tmf_state = TMF_NOT_FOUND;
else
- conn->tmf_state = TMF_FAILED;
- wake_up(&conn->ehwait);
+ session->tmf_state = TMF_FAILED;
+ wake_up(&session->ehwait);
}
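
Moving tmf_state, tmhdr, tmf_timer and ehwait from the conn to the session makes the one-outstanding-TMF rule session-wide, matching how the EH paths below use it. The handshake, sketched in compressed form with the fields this patch introduces (the real wait also checks session age and state):

	/* EH side (frwd_lock held): */
	if (session->tmf_state != TMF_INITIAL)	/* one TMF at a time */
		return FAILED;
	session->tmf_state = TMF_QUEUED;
	/* ...send the TMF PDU, arm session->tmf_timer, then sleep... */
	wait_event_interruptible(session->ehwait,
				 session->tmf_state != TMF_QUEUED);

	/* RX side (iscsi_tmf_rsp above): set TMF_SUCCESS, TMF_NOT_FOUND
	 * or TMF_FAILED and wake_up(&session->ehwait). */
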
static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
@@ -941,8 +1018,10 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
struct iscsi_nopout hdr;
struct iscsi_task *task;
- if (!rhdr && conn->ping_task)
- return -EINVAL;
+ if (!rhdr) {
+ if (READ_ONCE(conn->ping_task))
+ return -EINVAL;
+ }
memset(&hdr, 0, sizeof(struct iscsi_nopout));
hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -955,13 +1034,22 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
} else
hdr.ttt = RESERVED_ITT;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!task) {
+ task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
+ return -ENOMEM;
+
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, task);
+
+ if (iscsi_send_mgmt_task(task)) {
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, NULL);
+ iscsi_put_task(task);
+
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
/* only track our nops */
- conn->ping_task = task;
conn->last_ping = jiffies;
}
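
The nopout change is ordering-sensitive: ping_task must be visible before the PDU can be answered, because nop-in processing may run on another CPU as soon as the request is on the wire. Hence the WRITE_ONCE() before iscsi_send_mgmt_task() and the rollback on failure; a compressed view of the protocol:

	/* TX: publish first, send second, unpublish on failure. */
	WRITE_ONCE(conn->ping_task, task);
	if (iscsi_send_mgmt_task(task))
		WRITE_ONCE(conn->ping_task, NULL);

	/* RX (iscsi_nop_out_rsp below): a marked racy read suffices. */
	if (READ_ONCE(conn->ping_task) != task)
		;	/* nop-in from userspace, not our ping */
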
@@ -984,7 +1072,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
struct iscsi_conn *conn = task->conn;
int rc = 0;
- if (conn->ping_task != task) {
+ if (READ_ONCE(conn->ping_task) != task) {
/*
* If this is not in response to one of our
* nops then it must be from userspace.
@@ -1326,10 +1414,10 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
if (!task || !task->sc)
return NULL;
- if (task->sc->SCp.phase != conn->session->age) {
+ if (iscsi_cmd(task->sc)->age != conn->session->age) {
iscsi_session_printk(KERN_ERR, conn->session,
"task's session age %d, expected %d\n",
- task->sc->SCp.phase, conn->session->age);
+ iscsi_cmd(task->sc)->age, conn->session->age);
return NULL;
}
@@ -1341,7 +1429,6 @@ void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err)
{
struct iscsi_conn *conn;
- struct device *dev;
spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
@@ -1350,10 +1437,8 @@ void iscsi_session_failure(struct iscsi_session *session,
return;
}
- dev = get_device(&conn->cls_conn->dev);
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- if (!dev)
- return;
/*
* if the host is being removed bypass the connection
* recovery initialization because we are going to kill
@@ -1363,27 +1448,36 @@ void iscsi_session_failure(struct iscsi_session *session,
iscsi_conn_error_event(conn->cls_conn, err);
else
iscsi_conn_failure(conn, err);
- put_device(dev);
+ iscsi_put_conn(conn->cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);
-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
- spin_lock_bh(&session->frwd_lock);
- if (session->state == ISCSI_STATE_FAILED) {
- spin_unlock_bh(&session->frwd_lock);
- return;
- }
+ if (session->state == ISCSI_STATE_FAILED)
+ return false;
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
+
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ return true;
+}
+
+void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+{
+ struct iscsi_session *session = conn->session;
+ bool needs_evt;
+
+ spin_lock_bh(&session->frwd_lock);
+ needs_evt = iscsi_set_conn_failed(conn);
spin_unlock_bh(&session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- iscsi_conn_error_event(conn->cls_conn, err);
+ if (needs_evt)
+ iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -1404,33 +1498,63 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
return 0;
}
-static int iscsi_xmit_task(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
+ bool was_requeue)
{
- struct iscsi_task *task = conn->task;
int rc;
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
- return -ENODATA;
+ if (!conn->task) {
+ /*
+ * Take a ref so we can access it after xmit_task().
+ *
+ * This should never fail because the failure paths will have
+ * stopped the xmit thread.
+ */
+ if (!iscsi_get_task(task)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+ } else {
+ /* Already have a ref from when we failed to send it last call */
+ conn->task = NULL;
+ }
+
+ /*
+ * If this was a requeue for a R2T we have an extra ref on the task in
+ * case a bad target sends a cmd rsp before we have handled the task.
+ */
+ if (was_requeue)
+ iscsi_put_task(task);
- spin_lock_bh(&conn->session->back_lock);
- if (conn->task == NULL) {
- spin_unlock_bh(&conn->session->back_lock);
+ /*
+ * Do this after dropping the extra ref because if this was a requeue
+ * it's removed from that list and cleanup_queued_task would miss it.
+ */
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
+ /*
+ * Save the task and ref in case we weren't cleaning up this
+ * task and get woken up again.
+ */
+ conn->task = task;
return -ENODATA;
}
- __iscsi_get_task(task);
- spin_unlock_bh(&conn->session->back_lock);
+
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
- conn->task = NULL;
+ } else {
+ /*
+ * get an extra ref that is released next time we access it
+ * as conn->task above.
+ */
+ iscsi_get_task(task);
+ conn->task = task;
}
- /* regular RX path uses back_lock */
- spin_lock(&conn->session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&conn->session->back_lock);
+
+ iscsi_put_task(task);
return rc;
}
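
The reference counting in the reworked iscsi_xmit_task() is easiest to read as an ownership ledger; a hedged summary of who holds what:

	/*
	 * entry:        +1  fresh iscsi_get_task(), or the ref already
	 *                   parked in conn->task by a failed send
	 * was_requeue:  -1  drops the extra ref taken when the R2T
	 *                   response requeued the task
	 * TX suspended:     entry ref is parked in conn->task; -ENODATA
	 * xmit fails:   +1  parked in conn->task for the next call
	 * exit:         -1  the entry ref is always dropped at the end
	 */
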
@@ -1438,9 +1562,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
* iscsi_requeue_task - requeue task to run from session workqueue
* @task: task to requeue
*
- * LLDs that need to run a task from the session workqueue should call
- * this. The session frwd_lock must be held. This should only be called
- * by software drivers.
+ * Callers must have taken a ref to the task that is going to be requeued.
*/
void iscsi_requeue_task(struct iscsi_task *task)
{
@@ -1450,11 +1572,18 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
- spin_lock_bh(&conn->taskqueuelock);
- if (list_empty(&task->running))
+ spin_lock_bh(&conn->session->frwd_lock);
+ if (list_empty(&task->running)) {
list_add_tail(&task->running, &conn->requeue);
- spin_unlock_bh(&conn->taskqueuelock);
- iscsi_conn_queue_work(conn);
+ } else {
+ /*
+ * Don't need the extra ref since it's already requeued and
+ * has a ref.
+ */
+ iscsi_put_task(task);
+ }
+ iscsi_conn_queue_xmit(conn);
+ spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
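
Under the new contract documented above, a software LLD requeueing a task for R2T processing must hand over a reference; a hedged caller-side sketch (iscsi_tcp_r2t_rsp at the end of this patch takes the ref while still under back_lock):

	if (iscsi_get_task(task))
		iscsi_requeue_task(task);	/* consumes the ref; drops it
						 * itself if already queued */
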
@@ -1473,14 +1602,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
}
if (conn->task) {
- rc = iscsi_xmit_task(conn);
+ rc = iscsi_xmit_task(conn, conn->task, false);
if (rc)
goto done;
}
@@ -1490,54 +1619,63 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
- spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
- conn->task = list_entry(conn->mgmtqueue.next,
- struct iscsi_task, running);
- list_del_init(&conn->task->running);
- spin_unlock_bh(&conn->taskqueuelock);
- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&task->running);
+ if (iscsi_prep_mgmt_task(conn, task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
- __iscsi_put_task(conn->task);
+ __iscsi_put_task(task);
spin_unlock_bh(&conn->session->back_lock);
- conn->task = NULL;
- spin_lock_bh(&conn->taskqueuelock);
continue;
}
- rc = iscsi_xmit_task(conn);
+ rc = iscsi_xmit_task(conn, task, false);
+ if (rc)
+ goto done;
+ }
+
+check_requeue:
+ while (!list_empty(&conn->requeue)) {
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ task = list_entry(conn->requeue.next, struct iscsi_task,
+ running);
+
+ if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+ break;
+
+ list_del_init(&task->running);
+ rc = iscsi_xmit_task(conn, task, true);
if (rc)
goto done;
- spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
}
/* process pending command queue */
while (!list_empty(&conn->cmdqueue)) {
- conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
- running);
- list_del_init(&conn->task->running);
- spin_unlock_bh(&conn->taskqueuelock);
+ task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&task->running);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
- fail_scsi_task(conn->task, DID_IMM_RETRY);
- spin_lock_bh(&conn->taskqueuelock);
+ fail_scsi_task(task, DID_IMM_RETRY);
continue;
}
- rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ rc = iscsi_prep_scsi_cmd_pdu(task);
if (rc) {
- if (rc == -ENOMEM || rc == -EACCES) {
- spin_lock_bh(&conn->taskqueuelock);
- list_add_tail(&conn->task->running,
- &conn->cmdqueue);
- conn->task = NULL;
- spin_unlock_bh(&conn->taskqueuelock);
- goto done;
- } else
- fail_scsi_task(conn->task, DID_ABORT);
- spin_lock_bh(&conn->taskqueuelock);
+ if (rc == -ENOMEM || rc == -EACCES)
+ fail_scsi_task(task, DID_IMM_RETRY);
+ else
+ fail_scsi_task(task, DID_ABORT);
continue;
}
- rc = iscsi_xmit_task(conn);
+ rc = iscsi_xmit_task(conn, task, false);
if (rc)
goto done;
/*
@@ -1545,35 +1683,12 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
 * be sent to avoid starvation
*/
- spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
+ if (!list_empty(&conn->requeue))
+ goto check_requeue;
}
- while (!list_empty(&conn->requeue)) {
- /*
- * we always do fastlogout - conn stop code will clean up.
- */
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
- break;
-
- task = list_entry(conn->requeue.next, struct iscsi_task,
- running);
- if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
- break;
-
- conn->task = task;
- list_del_init(&conn->task->running);
- conn->task->state = ISCSI_TASK_RUNNING;
- spin_unlock_bh(&conn->taskqueuelock);
- rc = iscsi_xmit_task(conn);
- if (rc)
- goto done;
- spin_lock_bh(&conn->taskqueuelock);
- if (!list_empty(&conn->mgmtqueue))
- goto check_mgmt;
- }
- spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
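
After the restructuring, iscsi_data_xmit() services the queues in a fixed priority order, bouncing back to the management queue between items so nops and TMFs are never starved by bulk data; in outline:

	/*
	 * check_mgmt:    drain conn->mgmtqueue      (nops, TMFs, logout)
	 * check_requeue: drain conn->requeue        (R2T data-outs), and
	 *                jump back to check_mgmt if mgmt PDUs arrived
	 * cmdqueue:      prep and send new SCSI cmds, jumping back to
	 *                check_mgmt/check_requeue after every command
	 */
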
@@ -1604,8 +1719,8 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
(void *) &task, sizeof(void *)))
return NULL;
- sc->SCp.phase = conn->session->age;
- sc->SCp.ptr = (char *) task;
+ iscsi_cmd(sc)->age = conn->session->age;
+ iscsi_cmd(sc)->task = task;
refcount_set(&task->refcount, 1);
task->state = ISCSI_TASK_PENDING;
@@ -1642,7 +1757,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
struct iscsi_task *task = NULL;
sc->result = 0;
- sc->SCp.ptr = NULL;
+ iscsi_cmd(sc)->task = NULL;
ihost = shost_priv(host);
@@ -1674,7 +1789,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
sc->result = DID_NO_CONNECT << 16;
break;
}
- /* fall through */
+ fallthrough;
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1705,7 +1820,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1739,10 +1854,8 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
- spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
- spin_unlock_bh(&conn->taskqueuelock);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
session->queued_cmdsn++;
@@ -1768,7 +1881,7 @@ fault:
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
scsi_set_resid(sc, scsi_bufflen(sc));
- sc->scsi_done(sc);
+ scsi_done(sc);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);
@@ -1785,15 +1898,14 @@ EXPORT_SYMBOL_GPL(iscsi_target_alloc);
static void iscsi_tmf_timedout(struct timer_list *t)
{
- struct iscsi_conn *conn = from_timer(conn, t, tmf_timer);
- struct iscsi_session *session = conn->session;
+ struct iscsi_session *session = from_timer(session, t, tmf_timer);
spin_lock(&session->frwd_lock);
- if (conn->tmf_state == TMF_QUEUED) {
- conn->tmf_state = TMF_TIMEDOUT;
+ if (session->tmf_state == TMF_QUEUED) {
+ session->tmf_state = TMF_TIMEDOUT;
ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock(&session->frwd_lock);
}
@@ -1804,11 +1916,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
__must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
- struct iscsi_task *task;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (!task) {
+ if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -1816,8 +1925,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
- conn->tmf_timer.expires = timeout * HZ + jiffies;
- add_timer(&conn->tmf_timer);
+ session->tmf_timer.expires = timeout * HZ + jiffies;
+ add_timer(&session->tmf_timer);
ISCSI_DBG_EH(session, "tmf set timeout\n");
spin_unlock_bh(&session->frwd_lock);
@@ -1831,12 +1940,12 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* 3) session is terminated or restarted or userspace has
* given up on recovery
*/
- wait_event_interruptible(conn->ehwait, age != session->age ||
+ wait_event_interruptible(session->ehwait, age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN ||
- conn->tmf_state != TMF_QUEUED);
+ session->tmf_state != TMF_QUEUED);
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&conn->tmf_timer);
+ del_timer_sync(&session->tmf_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -1848,27 +1957,43 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
}
/*
- * Fail commands. session lock held and recv side suspended and xmit
- * thread flushed
+ * Fail commands. session frwd lock held and xmit thread flushed.
*/
static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
{
+ struct iscsi_session *session = conn->session;
struct iscsi_task *task;
int i;
- for (i = 0; i < conn->session->cmds_max; i++) {
- task = conn->session->cmds[i];
+restart_cmd_loop:
+ spin_lock_bh(&session->back_lock);
+ for (i = 0; i < session->cmds_max; i++) {
+ task = session->cmds[i];
if (!task->sc || task->state == ISCSI_TASK_FREE)
continue;
if (lun != -1 && lun != task->sc->device->lun)
continue;
+ /*
+ * The cmd is completing but if this is called from an eh
+ * callout path then when we return scsi-ml owns the cmd. Wait
+ * for the completion path to finish freeing the cmd.
+ */
+ if (!iscsi_get_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ spin_lock_bh(&session->frwd_lock);
+ goto restart_cmd_loop;
+ }
- ISCSI_DBG_SESSION(conn->session,
+ ISCSI_DBG_SESSION(session,
"failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
- fail_scsi_task(task, error);
+ __fail_scsi_task(task, error);
+ __iscsi_put_task(task);
}
+ spin_unlock_bh(&session->back_lock);
}
/**
@@ -1885,14 +2010,14 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
/**
* iscsi_suspend_tx - suspend iscsi_data_xmit
- * @conn: iscsi conn tp stop processing IO on.
+ * @conn: iscsi conn to stop processing IO on.
*
* This function sets the suspend bit to prevent iscsi_data_xmit
* from sending new IO, and if work is queued on the xmit thread
@@ -1903,17 +2028,32 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
- flush_workqueue(ihost->workq);
+ flush_work(&conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- iscsi_conn_queue_work(conn);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ iscsi_conn_queue_xmit(conn);
+}
+
+/**
+ * iscsi_suspend_rx - Prevent recvwork from running again.
+ * @conn: iscsi conn to stop.
+ */
+void iscsi_suspend_rx(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ if (ihost->workq)
+ flush_work(&conn->recvwork);
}
+EXPORT_SYMBOL_GPL(iscsi_suspend_rx);
/*
* We want to make sure a ping is in flight. It has timed out.
@@ -1923,7 +2063,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
*/
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
- if (conn->ping_task &&
+ if (READ_ONCE(conn->ping_task) &&
time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
return 1;
@@ -1946,15 +2086,28 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
spin_lock_bh(&session->frwd_lock);
- task = (struct iscsi_task *)sc->SCp.ptr;
+ spin_lock(&session->back_lock);
+ task = iscsi_cmd(sc)->task;
if (!task) {
/*
* Raced with completion. Blk layer has taken ownership
* so let timeout code complete it now.
*/
rc = BLK_EH_DONE;
+ spin_unlock(&session->back_lock);
+ goto done;
+ }
+ if (!iscsi_get_task(task)) {
+ /*
+ * Racing with the completion path right now, so give it more
+ * time so that path can complete it like normal.
+ */
+ rc = BLK_EH_RESET_TIMER;
+ task = NULL;
+ spin_unlock(&session->back_lock);
goto done;
}
+ spin_unlock(&session->back_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
@@ -2013,6 +2166,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
goto done;
}
+ spin_lock(&session->back_lock);
for (i = 0; i < conn->session->cmds_max; i++) {
running_task = conn->session->cmds[i];
if (!running_task->sc || running_task == task ||
@@ -2045,10 +2199,12 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
"last xfer %lu/%lu. Last check %lu.\n",
task->last_xfer, running_task->last_xfer,
task->last_timeout);
+ spin_unlock(&session->back_lock);
rc = BLK_EH_RESET_TIMER;
goto done;
}
}
+ spin_unlock(&session->back_lock);
/* Assumes nop timeout is shorter than scsi cmd timeout */
if (task->have_checked_conn)
@@ -2058,7 +2214,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* Checking the transport already or nop from a cmd timeout still
* running
*/
- if (conn->ping_task) {
+ if (READ_ONCE(conn->ping_task)) {
task->have_checked_conn = true;
rc = BLK_EH_RESET_TIMER;
goto done;
@@ -2070,9 +2226,12 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
rc = BLK_EH_RESET_TIMER;
done:
- if (task)
- task->last_timeout = jiffies;
spin_unlock_bh(&session->frwd_lock);
+
+ if (task) {
+ task->last_timeout = jiffies;
+ iscsi_put_task(task);
+ }
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "shutdown or nh");
return rc;
@@ -2123,6 +2282,53 @@ done:
spin_unlock(&session->frwd_lock);
}
+/**
+ * iscsi_conn_unbind - prevent queueing to conn.
+ * @cls_conn: iscsi conn ep is bound to.
+ * @is_active: is the conn in use for boot or is this for EH/termination
+ *
+ * This must be called by drivers implementing the ep_disconnect callout.
+ * It disables queueing to the connection from libiscsi in preparation for
+ * an ep_disconnect call.
+ */
+void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
+{
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+
+ if (!cls_conn)
+ return;
+
+ conn = cls_conn->dd_data;
+ session = conn->session;
+ /*
+ * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
+ * complete or timeout. The caller just wants to know what's running
+ * is everything that needs to be cleaned up, and no cmds will be
+ * queued.
+ */
+ mutex_lock(&session->eh_mutex);
+
+ iscsi_suspend_queue(conn);
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
+ if (!is_active) {
+ /*
+ * if logout timed out before userspace could even send a PDU
+ * the state might still be in ISCSI_STATE_LOGGED_IN and
+ * allowing new cmds and TMFs.
+ */
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+ iscsi_set_conn_failed(conn);
+ }
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
+
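
A hedged sketch of how an offload driver's ep_disconnect callout would use the new hook (the driver names here are hypothetical):

	static void my_ep_disconnect(struct iscsi_endpoint *ep)
	{
		struct my_endpoint *my_ep = ep->dd_data;

		/* Stop libiscsi queueing new cmds/TMFs to the conn before
		 * the hardware connection goes away underneath it. */
		iscsi_conn_unbind(my_ep->cls_conn, my_ep->in_boot_session);

		/* ...driver-specific teardown of the offloaded conn... */
	}
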
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
struct iscsi_tm *hdr)
{
@@ -2149,13 +2355,14 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
+completion_check:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
/*
* if session was ISCSI_STATE_IN_RECOVERY then we may not have
* got the command.
*/
- if (!sc->SCp.ptr) {
+ if (!iscsi_cmd(sc)->task) {
ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
"it completed.\n");
spin_unlock_bh(&session->frwd_lock);
@@ -2168,7 +2375,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* then let the host reset code handle this
*/
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
- sc->SCp.phase != session->age) {
+ iscsi_cmd(sc)->age != session->age) {
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
ISCSI_DBG_EH(session, "failing abort due to dropped "
@@ -2176,37 +2383,51 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- conn = session->leadconn;
- conn->eh_abort_cnt++;
- age = session->age;
+ spin_lock(&session->back_lock);
+ task = iscsi_cmd(sc)->task;
+ if (!task || !task->sc) {
+ /* task completed before time out */
+ ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
- task = (struct iscsi_task *)sc->SCp.ptr;
- ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
- sc, task->itt);
+ spin_unlock(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+ }
- /* task completed before time out */
- if (!task->sc) {
- ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
- goto success;
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ goto completion_check;
}
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
+ conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
+ conn->eh_abort_cnt++;
+ age = session->age;
+ spin_unlock(&session->back_lock);
+
if (task->state == ISCSI_TASK_PENDING) {
fail_scsi_task(task, DID_ABORT);
goto success;
}
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto failed;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_abort_task_pdu(task, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
goto failed;
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
spin_unlock_bh(&session->frwd_lock);
/*
@@ -2221,27 +2442,28 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
*/
spin_lock_bh(&session->frwd_lock);
fail_scsi_task(task, DID_ABORT);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto success_unlocked;
case TMF_TIMEDOUT:
+ session->running_aborted_task = task;
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto failed_unlocked;
case TMF_NOT_FOUND:
- if (!sc->SCp.ptr) {
- conn->tmf_state = TMF_INITIAL;
+ if (iscsi_task_is_completed(task)) {
+ session->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
/* task completed before tmf abort response */
ISCSI_DBG_EH(session, "sc completed while abort in "
"progress\n");
goto success;
}
- /* fall through */
+ fallthrough;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto failed;
}
@@ -2250,6 +2472,8 @@ success:
success_unlocked:
ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
sc, task->itt);
+ iscsi_put_task(task);
+ iscsi_put_conn(conn->cls_conn);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
@@ -2258,6 +2482,15 @@ failed:
failed_unlocked:
ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
task ? task->itt : 0);
+ /*
+ * The driver might be accessing the task so hold the ref. The conn
+ * stop cleanup will drop the ref after ep_disconnect so we know the
+ * driver's no longer touching the task.
+ */
+ if (!session->running_aborted_task)
+ iscsi_put_task(task);
+
+ iscsi_put_conn(conn->cls_conn);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
@@ -2298,11 +2531,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto unlock;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_lun_reset_pdu(sc, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
@@ -2311,7 +2544,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
goto unlock;
}
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
@@ -2319,7 +2552,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto unlock;
}
@@ -2331,7 +2564,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
@@ -2354,8 +2587,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
spin_lock_bh(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
session->state = ISCSI_STATE_RECOVERY_FAILED;
- if (session->leadconn)
- wake_up(&session->leadconn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock_bh(&session->frwd_lock);
}
@@ -2376,7 +2608,6 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- conn = session->leadconn;
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -2391,16 +2622,17 @@ failed:
return FAILED;
}
+ conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
+
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
- /*
- * we drop the lock here but the leadconn cannot be destoyed while
- * we are in the scsi eh
- */
+
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ iscsi_put_conn(conn->cls_conn);
ISCSI_DBG_EH(session, "wait for relogin\n");
- wait_event_interruptible(conn->ehwait,
+ wait_event_interruptible(session->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
session->state == ISCSI_STATE_RECOVERY_FAILED);
@@ -2461,11 +2693,11 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto unlock;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_tgt_reset_pdu(sc, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
@@ -2474,7 +2706,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
goto unlock;
}
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
@@ -2482,7 +2714,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto unlock;
}
@@ -2494,7 +2726,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, -1, DID_ERROR);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
@@ -2584,6 +2816,56 @@ void iscsi_pool_free(struct iscsi_pool *q)
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
+int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
+ uint16_t requested_cmds_max)
+{
+ int scsi_cmds, total_cmds = requested_cmds_max;
+
+check:
+ if (!total_cmds)
+ total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+ /*
+ * The iscsi layer needs some tasks for nop handling and tmfs,
+ * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+ * + 1 command for scsi IO.
+ */
+ if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+ printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n",
+ total_cmds, ISCSI_TOTAL_CMDS_MIN);
+ return -EINVAL;
+ }
+
+ if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+ printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n",
+ requested_cmds_max, ISCSI_TOTAL_CMDS_MAX,
+ ISCSI_TOTAL_CMDS_MAX);
+ total_cmds = ISCSI_TOTAL_CMDS_MAX;
+ }
+
+ if (!is_power_of_2(total_cmds)) {
+ total_cmds = rounddown_pow_of_two(total_cmds);
+ if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+ printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n",
+ requested_cmds_max, total_cmds);
+ }
+
+ scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+ if (shost->can_queue && scsi_cmds > shost->can_queue) {
+ total_cmds = shost->can_queue;
+
+ printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n",
+ requested_cmds_max, shost->can_queue);
+ goto check;
+ }
+
+ return scsi_cmds;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds);
+
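
A worked example of the new helper; the arithmetic assumes ISCSI_MGMT_CMDS_MAX is 15, its usual value:

	/* Hedged driver-side usage: 1000 is not a power of two, so the
	 * helper rounds it down to 512 internally. */
	shost->can_queue = 0;	/* no driver limit yet */
	scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, 1000);
	if (scsi_cmds < 0)
		return scsi_cmds;
	/* With ISCSI_MGMT_CMDS_MAX == 15 this yields 512 - 15 = 497
	 * SCSI commands; the rest are reserved for nops and TMFs. */
	shost->can_queue = scsi_cmds;
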
/**
* iscsi_host_add - add host to system
* @shost: scsi host
@@ -2625,9 +2907,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
ihost = shost_priv(shost);
if (xmit_can_sleep) {
- snprintf(ihost->workq_name, sizeof(ihost->workq_name),
- "iscsi_q_%d", shost->host_no);
- ihost->workq = create_singlethread_workqueue(ihost->workq_name);
+ ihost->workq = alloc_workqueue("iscsi_q_%d",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 1, shost->host_no);
if (!ihost->workq)
goto free_host;
}
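
For reference, create_singlethread_workqueue() is itself a thin wrapper in this era's workqueue.h, so the conversion mostly spells the flags out and adds WQ_SYSFS; roughly (hedged):

	#define create_singlethread_workqueue(name)			\
		alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, (name))

A max_active of 1 together with WQ_UNBOUND preserves the one-work-item-at-a-time ordering that the xmit path depends on.
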
@@ -2652,11 +2934,12 @@ static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
+ * @is_shutdown: true if called from a driver shutdown callout
*
* If there are any sessions left, this will initiate the removal and wait
* for the completion.
*/
-void iscsi_host_remove(struct Scsi_Host *shost)
+void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
{
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
@@ -2665,15 +2948,17 @@ void iscsi_host_remove(struct Scsi_Host *shost)
ihost->state = ISCSI_HOST_REMOVED;
spin_unlock_irqrestore(&ihost->lock, flags);
- iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ if (!is_shutdown)
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ else
+ iscsi_host_for_each_session(shost, iscsi_force_destroy_session);
+
wait_event_interruptible(ihost->session_removal_wq,
ihost->num_sessions == 0);
if (signal_pending(current))
flush_signals(current);
scsi_remove_host(shost);
- if (ihost->workq)
- destroy_workqueue(ihost->workq);
}
EXPORT_SYMBOL_GPL(iscsi_host_remove);
@@ -2681,6 +2966,9 @@ void iscsi_host_free(struct Scsi_Host *shost)
{
struct iscsi_host *ihost = shost_priv(shost);
+ if (ihost->workq)
+ destroy_workqueue(ihost->workq);
+
kfree(ihost->netdev);
kfree(ihost->hwaddress);
kfree(ihost->initiatorname);
@@ -2734,7 +3022,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
struct iscsi_host *ihost = shost_priv(shost);
struct iscsi_session *session;
struct iscsi_cls_session *cls_session;
- int cmd_i, scsi_cmds, total_cmds = cmds_max;
+ int cmd_i, scsi_cmds;
unsigned long flags;
spin_lock_irqsave(&ihost->lock, flags);
@@ -2745,37 +3033,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
ihost->num_sessions++;
spin_unlock_irqrestore(&ihost->lock, flags);
- if (!total_cmds)
- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
- /*
- * The iscsi layer needs some tasks for nop handling and tmfs,
- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
- * + 1 command for scsi IO.
- */
- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
- "must be a power of two that is at least %d.\n",
- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+ scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
+ if (scsi_cmds < 0)
goto dec_session_count;
- }
-
- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
- "must be a power of 2 less than or equal to %d.\n",
- cmds_max, ISCSI_TOTAL_CMDS_MAX);
- total_cmds = ISCSI_TOTAL_CMDS_MAX;
- }
-
- if (!is_power_of_2(total_cmds)) {
- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
- "must be a power of 2.\n", total_cmds);
- total_cmds = rounddown_pow_of_two(total_cmds);
- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
- return NULL;
- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
- total_cmds);
- }
- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
cls_session = iscsi_alloc_session(shost, iscsit,
sizeof(struct iscsi_session) +
@@ -2791,7 +3051,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->lu_reset_timeout = 15;
session->abort_timeout = 10;
session->scsi_cmds_max = scsi_cmds;
- session->cmds_max = total_cmds;
+ session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
session->queued_cmdsn = session->cmdsn = initial_cmdsn;
session->exp_cmdsn = initial_cmdsn + 1;
session->max_cmdsn = initial_cmdsn + 1;
@@ -2799,7 +3059,11 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->tt = iscsit;
session->dd_data = cls_session->dd_data + sizeof(*session);
+ session->tmf_state = TMF_INITIAL;
+ timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
mutex_init(&session->eh_mutex);
+ init_waitqueue_head(&session->ehwait);
+
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
@@ -2850,10 +3114,9 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
- iscsi_pool_free(&session->cmdpool);
-
iscsi_remove_session(cls_session);
+ iscsi_pool_free(&session->cmdpool);
kfree(session->password);
kfree(session->password_in);
kfree(session->username);
@@ -2889,13 +3152,13 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
char *data;
+ int err;
- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+ cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size,
conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- memset(conn, 0, sizeof(*conn) + dd_size);
conn->dd_data = cls_conn->dd_data + sizeof(*conn);
conn->session = session;
@@ -2903,14 +3166,12 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
conn->id = conn_idx;
conn->exp_statsn = 0;
- conn->tmf_state = TMF_INITIAL;
timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
- spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
@@ -2929,16 +3190,21 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
- timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0);
- init_waitqueue_head(&conn->ehwait);
+ err = iscsi_add_conn(cls_conn);
+ if (err)
+ goto login_task_add_dev_fail;
return cls_conn;
+login_task_add_dev_fail:
+ free_pages((unsigned long) conn->data,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+
login_task_data_alloc_fail:
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
login_task_alloc_fail:
- iscsi_destroy_conn(cls_conn);
+ iscsi_put_conn(cls_conn);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_conn_setup);
@@ -2955,6 +3221,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ iscsi_remove_conn(cls_conn);
+
del_timer_sync(&conn->transport_timer);
mutex_lock(&session->eh_mutex);
@@ -2965,7 +3233,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
* leading connection? then give up on recovery.
*/
session->state = ISCSI_STATE_TERMINATE;
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock_bh(&session->frwd_lock);
@@ -2987,7 +3255,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
- iscsi_destroy_conn(cls_conn);
+ iscsi_put_conn(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
@@ -3040,7 +3308,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
* commands after successful recovery
*/
conn->stop_stage = 0;
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
session->age++;
if (session->age == 16)
session->age = 0;
@@ -3054,7 +3322,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
spin_unlock_bh(&session->frwd_lock);
iscsi_unblock_session(session->cls_session);
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_start);
@@ -3076,18 +3344,25 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
ISCSI_DBG_SESSION(conn->session,
"failing mgmt itt 0x%x state %d\n",
task->itt, task->state);
+
+ spin_lock_bh(&session->back_lock);
+ if (cleanup_queued_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ continue;
+ }
+
state = ISCSI_TASK_ABRT_SESS_RECOV;
if (task->state == ISCSI_TASK_PENDING)
state = ISCSI_TASK_COMPLETED;
- spin_lock_bh(&session->back_lock);
iscsi_complete_task(task, state);
spin_unlock_bh(&session->back_lock);
}
}
-static void iscsi_start_session_recovery(struct iscsi_session *session,
- struct iscsi_conn *conn, int flag)
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
int old_stop_stage;
mutex_lock(&session->eh_mutex);
@@ -3141,26 +3416,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
spin_lock_bh(&session->frwd_lock);
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
fail_mgmt_tasks(session, conn);
- memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
+ memset(&session->tmhdr, 0, sizeof(session->tmhdr));
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
-
-void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
-
- switch (flag) {
- case STOP_CONN_RECOVER:
- case STOP_CONN_TERM:
- iscsi_start_session_recovery(session, conn, flag);
- break;
- default:
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- }
-}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
@@ -3172,13 +3431,22 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
+
+ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
+ * The target could have reduced its window size between logins, so
+ * we have to reset max/exp cmdsn so we can see the new values.
+ */
+ spin_lock_bh(&session->back_lock);
+ session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1;
+ spin_unlock_bh(&session->back_lock);
+ /*
* Unblock xmitworker(), Login Phase will pass through.
*/
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
@@ -3324,125 +3592,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
switch(param) {
case ISCSI_PARAM_FAST_ABORT:
- len = sprintf(buf, "%d\n", session->fast_abort);
+ len = sysfs_emit(buf, "%d\n", session->fast_abort);
break;
case ISCSI_PARAM_ABORT_TMO:
- len = sprintf(buf, "%d\n", session->abort_timeout);
+ len = sysfs_emit(buf, "%d\n", session->abort_timeout);
break;
case ISCSI_PARAM_LU_RESET_TMO:
- len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+ len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
break;
case ISCSI_PARAM_TGT_RESET_TMO:
- len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
+ len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
break;
case ISCSI_PARAM_INITIAL_R2T_EN:
- len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
break;
case ISCSI_PARAM_MAX_R2T:
- len = sprintf(buf, "%hu\n", session->max_r2t);
+ len = sysfs_emit(buf, "%hu\n", session->max_r2t);
break;
case ISCSI_PARAM_IMM_DATA_EN:
- len = sprintf(buf, "%d\n", session->imm_data_en);
+ len = sysfs_emit(buf, "%d\n", session->imm_data_en);
break;
case ISCSI_PARAM_FIRST_BURST:
- len = sprintf(buf, "%u\n", session->first_burst);
+ len = sysfs_emit(buf, "%u\n", session->first_burst);
break;
case ISCSI_PARAM_MAX_BURST:
- len = sprintf(buf, "%u\n", session->max_burst);
+ len = sysfs_emit(buf, "%u\n", session->max_burst);
break;
case ISCSI_PARAM_PDU_INORDER_EN:
- len = sprintf(buf, "%d\n", session->pdu_inorder_en);
+ len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
break;
case ISCSI_PARAM_DATASEQ_INORDER_EN:
- len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
+ len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
break;
case ISCSI_PARAM_DEF_TASKMGMT_TMO:
- len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
+ len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
break;
case ISCSI_PARAM_ERL:
- len = sprintf(buf, "%d\n", session->erl);
+ len = sysfs_emit(buf, "%d\n", session->erl);
break;
case ISCSI_PARAM_TARGET_NAME:
- len = sprintf(buf, "%s\n", session->targetname);
+ len = sysfs_emit(buf, "%s\n", session->targetname);
break;
case ISCSI_PARAM_TARGET_ALIAS:
- len = sprintf(buf, "%s\n", session->targetalias);
+ len = sysfs_emit(buf, "%s\n", session->targetalias);
break;
case ISCSI_PARAM_TPGT:
- len = sprintf(buf, "%d\n", session->tpgt);
+ len = sysfs_emit(buf, "%d\n", session->tpgt);
break;
case ISCSI_PARAM_USERNAME:
- len = sprintf(buf, "%s\n", session->username);
+ len = sysfs_emit(buf, "%s\n", session->username);
break;
case ISCSI_PARAM_USERNAME_IN:
- len = sprintf(buf, "%s\n", session->username_in);
+ len = sysfs_emit(buf, "%s\n", session->username_in);
break;
case ISCSI_PARAM_PASSWORD:
- len = sprintf(buf, "%s\n", session->password);
+ len = sysfs_emit(buf, "%s\n", session->password);
break;
case ISCSI_PARAM_PASSWORD_IN:
- len = sprintf(buf, "%s\n", session->password_in);
+ len = sysfs_emit(buf, "%s\n", session->password_in);
break;
case ISCSI_PARAM_IFACE_NAME:
- len = sprintf(buf, "%s\n", session->ifacename);
+ len = sysfs_emit(buf, "%s\n", session->ifacename);
break;
case ISCSI_PARAM_INITIATOR_NAME:
- len = sprintf(buf, "%s\n", session->initiatorname);
+ len = sysfs_emit(buf, "%s\n", session->initiatorname);
break;
case ISCSI_PARAM_BOOT_ROOT:
- len = sprintf(buf, "%s\n", session->boot_root);
+ len = sysfs_emit(buf, "%s\n", session->boot_root);
break;
case ISCSI_PARAM_BOOT_NIC:
- len = sprintf(buf, "%s\n", session->boot_nic);
+ len = sysfs_emit(buf, "%s\n", session->boot_nic);
break;
case ISCSI_PARAM_BOOT_TARGET:
- len = sprintf(buf, "%s\n", session->boot_target);
+ len = sysfs_emit(buf, "%s\n", session->boot_target);
break;
case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
- len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
+ len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
break;
case ISCSI_PARAM_DISCOVERY_SESS:
- len = sprintf(buf, "%u\n", session->discovery_sess);
+ len = sysfs_emit(buf, "%u\n", session->discovery_sess);
break;
case ISCSI_PARAM_PORTAL_TYPE:
- len = sprintf(buf, "%s\n", session->portal_type);
+ len = sysfs_emit(buf, "%s\n", session->portal_type);
break;
case ISCSI_PARAM_CHAP_AUTH_EN:
- len = sprintf(buf, "%u\n", session->chap_auth_en);
+ len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
break;
case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
- len = sprintf(buf, "%u\n", session->discovery_logout_en);
+ len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
break;
case ISCSI_PARAM_BIDI_CHAP_EN:
- len = sprintf(buf, "%u\n", session->bidi_chap_en);
+ len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
break;
case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
- len = sprintf(buf, "%u\n", session->discovery_auth_optional);
+ len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
break;
case ISCSI_PARAM_DEF_TIME2WAIT:
- len = sprintf(buf, "%d\n", session->time2wait);
+ len = sysfs_emit(buf, "%d\n", session->time2wait);
break;
case ISCSI_PARAM_DEF_TIME2RETAIN:
- len = sprintf(buf, "%d\n", session->time2retain);
+ len = sysfs_emit(buf, "%d\n", session->time2retain);
break;
case ISCSI_PARAM_TSID:
- len = sprintf(buf, "%u\n", session->tsid);
+ len = sysfs_emit(buf, "%u\n", session->tsid);
break;
case ISCSI_PARAM_ISID:
- len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
session->isid[0], session->isid[1],
session->isid[2], session->isid[3],
session->isid[4], session->isid[5]);
break;
case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
- len = sprintf(buf, "%u\n", session->discovery_parent_idx);
+ len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
break;
case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
if (session->discovery_parent_type)
- len = sprintf(buf, "%s\n",
+ len = sysfs_emit(buf, "%s\n",
session->discovery_parent_type);
else
- len = sprintf(buf, "\n");
+ len = sysfs_emit(buf, "\n");
break;
default:
return -ENOSYS;
@@ -3474,16 +3742,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_HOST_PARAM_IPADDRESS:
if (sin)
- len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
else
- len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
break;
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_LOCAL_PORT:
if (sin)
- len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
else
- len = sprintf(buf, "%hu\n",
+ len = sysfs_emit(buf, "%hu\n",
be16_to_cpu(sin6->sin6_port));
break;
default:
@@ -3502,88 +3770,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
switch(param) {
case ISCSI_PARAM_PING_TMO:
- len = sprintf(buf, "%u\n", conn->ping_timeout);
+ len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
break;
case ISCSI_PARAM_RECV_TMO:
- len = sprintf(buf, "%u\n", conn->recv_timeout);
+ len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
break;
case ISCSI_PARAM_MAX_RECV_DLENGTH:
- len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
- len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
+ len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
break;
case ISCSI_PARAM_HDRDGST_EN:
- len = sprintf(buf, "%d\n", conn->hdrdgst_en);
+ len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
- len = sprintf(buf, "%d\n", conn->datadgst_en);
+ len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
break;
case ISCSI_PARAM_IFMARKER_EN:
- len = sprintf(buf, "%d\n", conn->ifmarker_en);
+ len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
break;
case ISCSI_PARAM_OFMARKER_EN:
- len = sprintf(buf, "%d\n", conn->ofmarker_en);
+ len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
break;
case ISCSI_PARAM_EXP_STATSN:
- len = sprintf(buf, "%u\n", conn->exp_statsn);
+ len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
break;
case ISCSI_PARAM_PERSISTENT_PORT:
- len = sprintf(buf, "%d\n", conn->persistent_port);
+ len = sysfs_emit(buf, "%d\n", conn->persistent_port);
break;
case ISCSI_PARAM_PERSISTENT_ADDRESS:
- len = sprintf(buf, "%s\n", conn->persistent_address);
+ len = sysfs_emit(buf, "%s\n", conn->persistent_address);
break;
case ISCSI_PARAM_STATSN:
- len = sprintf(buf, "%u\n", conn->statsn);
+ len = sysfs_emit(buf, "%u\n", conn->statsn);
break;
case ISCSI_PARAM_MAX_SEGMENT_SIZE:
- len = sprintf(buf, "%u\n", conn->max_segment_size);
+ len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
break;
case ISCSI_PARAM_KEEPALIVE_TMO:
- len = sprintf(buf, "%u\n", conn->keepalive_tmo);
+ len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
break;
case ISCSI_PARAM_LOCAL_PORT:
- len = sprintf(buf, "%u\n", conn->local_port);
+ len = sysfs_emit(buf, "%u\n", conn->local_port);
break;
case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
- len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
break;
case ISCSI_PARAM_TCP_NAGLE_DISABLE:
- len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
break;
case ISCSI_PARAM_TCP_WSF_DISABLE:
- len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
break;
case ISCSI_PARAM_TCP_TIMER_SCALE:
- len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
break;
case ISCSI_PARAM_TCP_TIMESTAMP_EN:
- len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
break;
case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
- len = sprintf(buf, "%u\n", conn->fragment_disable);
+ len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
break;
case ISCSI_PARAM_IPV4_TOS:
- len = sprintf(buf, "%u\n", conn->ipv4_tos);
+ len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
break;
case ISCSI_PARAM_IPV6_TC:
- len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
+ len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
break;
case ISCSI_PARAM_IPV6_FLOW_LABEL:
- len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
+ len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
break;
case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
- len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
+ len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
break;
case ISCSI_PARAM_TCP_XMIT_WSF:
- len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
break;
case ISCSI_PARAM_TCP_RECV_WSF:
- len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
break;
case ISCSI_PARAM_LOCAL_IPADDR:
- len = sprintf(buf, "%s\n", conn->local_ipaddr);
+ len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
break;
default:
return -ENOSYS;
@@ -3601,13 +3869,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
- len = sprintf(buf, "%s\n", ihost->netdev);
+ len = sysfs_emit(buf, "%s\n", ihost->netdev);
break;
case ISCSI_HOST_PARAM_HWADDRESS:
- len = sprintf(buf, "%s\n", ihost->hwaddress);
+ len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- len = sprintf(buf, "%s\n", ihost->initiatorname);
+ len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
break;
default:
return -ENOSYS;
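
The sprintf() to sysfs_emit() conversions above and throughout this series apply the sysfs rule that a ->show() callback is handed a full PAGE_SIZE buffer: sysfs_emit() enforces that bound (and warns on a buffer that is not page aligned), where sprintf() simply trusts the caller. A minimal sketch of the idiom outside this driver (attribute and value are hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t ping_tmo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        unsigned int ping_timeout = 30;        /* placeholder value */

        /* Bounded by PAGE_SIZE; returns bytes written, like sprintf(). */
        return sysfs_emit(buf, "%u\n", ping_timeout);
}
static DEVICE_ATTR_RO(ping_tmo);
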
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 6ef93c7af954..c182aa83f2c9 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -128,7 +128,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
* coalescing neighboring slab objects into a single frag which
* triggers one of hardened usercopy checks.
*/
- if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
+ if (!recv && sendpage_ok(sg_page(sg)))
return;
if (recv) {
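
sendpage_ok() from <linux/net.h> bundles exactly the open-coded test deleted above into a single predicate; a sketch of what the call evaluates:

#include <linux/net.h>

static bool can_use_sendpage(struct page *page)
{
        /* sendpage_ok() == !PageSlab(page) && page_count(page) >= 1 */
        return sendpage_ok(page);
}
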
@@ -524,48 +524,83 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
/**
* iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
* @conn: iscsi connection
- * @task: scsi command task
+ * @hdr: PDU header
*/
-static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_session *session = conn->session;
- struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ struct iscsi_tcp_task *tcp_task;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_r2t_rsp *rhdr;
struct iscsi_r2t_info *r2t;
- int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ struct iscsi_task *task;
u32 data_length;
u32 data_offset;
+ int r2tsn;
int rc;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task) {
+ spin_unlock(&session->back_lock);
+ return ISCSI_ERR_BAD_ITT;
+ } else if (task->sc->sc_data_direction != DMA_TO_DEVICE) {
+ spin_unlock(&session->back_lock);
+ return ISCSI_ERR_PROTO;
+ }
+ /*
+ * A bad target might complete the cmd before we have handled R2Ts
+ * so get a ref to the task that will be dropped in the xmit path.
+ */
+ if (task->state != ISCSI_TASK_RUNNING) {
+ spin_unlock(&session->back_lock);
+ /* Let the path that got the early rsp complete it */
+ return 0;
+ }
+ task->last_xfer = jiffies;
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ /* Let the path that got the early rsp complete it */
+ return 0;
+ }
+
+ tcp_conn = conn->dd_data;
+ rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin *)rhdr);
+ spin_unlock(&session->back_lock);
+
if (tcp_conn->in.datalen) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2t with datalen %d\n",
tcp_conn->in.datalen);
- return ISCSI_ERR_DATALEN;
+ rc = ISCSI_ERR_DATALEN;
+ goto put_task;
}
+ tcp_task = task->dd_data;
+ r2tsn = be32_to_cpu(rhdr->r2tsn);
if (tcp_task->exp_datasn != r2tsn){
ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
tcp_task->exp_datasn, r2tsn);
- return ISCSI_ERR_R2TSN;
+ rc = ISCSI_ERR_R2TSN;
+ goto put_task;
}
- /* fill-in new R2T associated with the task */
- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
-
- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
iscsi_conn_printk(KERN_INFO, conn,
"dropping R2T itt %d in recovery.\n",
task->itt);
- return 0;
+ rc = 0;
+ goto put_task;
}
data_length = be32_to_cpu(rhdr->data_length);
if (data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- return ISCSI_ERR_DATALEN;
+ rc = ISCSI_ERR_DATALEN;
+ goto put_task;
}
if (data_length > session->max_burst)
@@ -579,7 +614,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
"invalid R2T with data len %u at offset %u "
"and total length %d\n", data_length,
data_offset, task->sc->sdb.length);
- return ISCSI_ERR_DATALEN;
+ rc = ISCSI_ERR_DATALEN;
+ goto put_task;
}
spin_lock(&tcp_task->pool2queue);
@@ -589,7 +625,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
"Target has sent more R2Ts than it "
"negotiated for or driver has leaked.\n");
spin_unlock(&tcp_task->pool2queue);
- return ISCSI_ERR_PROTO;
+ rc = ISCSI_ERR_PROTO;
+ goto put_task;
}
r2t->exp_statsn = rhdr->statsn;
@@ -607,6 +644,10 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
iscsi_requeue_task(task);
return 0;
+
+put_task:
+ iscsi_put_task(task);
+ return rc;
}
/*
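
For orientation, the reworked iscsi_tcp_r2t_rsp() above follows a lookup-then-reference shape: resolve the task under session->back_lock, take a reference before the lock is dropped, and route every failure through one put label. A reduced sketch of that shape (not the full function):

static int r2t_pattern(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
        struct iscsi_session *session = conn->session;
        struct iscsi_task *task;
        int rc = 0;

        spin_lock(&session->back_lock);
        task = iscsi_itt_to_ctask(conn, hdr->itt);
        if (!task) {
                spin_unlock(&session->back_lock);
                return ISCSI_ERR_BAD_ITT;
        }
        if (!iscsi_get_task(task)) {
                /* already completing; let that path finish the command */
                spin_unlock(&session->back_lock);
                return 0;
        }
        spin_unlock(&session->back_lock);

        /* ... validate the R2T without holding back_lock ... */

        iscsi_put_task(task);
        return rc;
}
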
@@ -730,20 +771,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
case ISCSI_OP_R2T:
- spin_lock(&conn->session->back_lock);
- task = iscsi_itt_to_ctask(conn, hdr->itt);
- spin_unlock(&conn->session->back_lock);
- if (!task)
- rc = ISCSI_ERR_BAD_ITT;
- else if (ahslen)
+ if (ahslen) {
rc = ISCSI_ERR_AHSLEN;
- else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
- task->last_xfer = jiffies;
- spin_lock(&conn->session->frwd_lock);
- rc = iscsi_tcp_r2t_rsp(conn, task);
- spin_unlock(&conn->session->frwd_lock);
- } else
- rc = ISCSI_ERR_PROTO;
+ break;
+ }
+ rc = iscsi_tcp_r2t_rsp(conn, hdr);
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
@@ -772,7 +804,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
iscsi_tcp_data_recv_prep(tcp_conn);
return 0;
}
- /* fall through */
+ fallthrough;
case ISCSI_OP_LOGOUT_RSP:
case ISCSI_OP_NOOP_IN:
case ISCSI_OP_SCSI_TMFUNC_RSP:
@@ -899,7 +931,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
*/
conn->last_recv = jiffies;
- if (unlikely(conn->suspend_rx)) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
*status = ISCSI_TCP_SUSPENDED;
return 0;
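
The test above reflects a conversion from a dedicated suspend_rx field to one bit in an atomic conn->flags word, so suspend state can be flipped from any context without a lock. Sketch (the bit number here is an assumption; the patch defines the real ISCSI_CONN_FLAG_* values):

#include <linux/bitops.h>

#define EX_CONN_FLAG_SUSPEND_RX        1        /* illustrative bit number */

static void rx_suspend(unsigned long *flags)
{
        set_bit(EX_CONN_FLAG_SUSPEND_RX, flags);        /* atomic RMW */
}

static bool rx_suspended(unsigned long *flags)
{
        return test_bit(EX_CONN_FLAG_SUSPEND_RX, flags);
}
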
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 5c6a5eff2f8e..c640535d1ac0 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -10,7 +10,6 @@ config SCSI_SAS_LIBSAS
tristate "SAS Domain Transport Attributes"
depends on SCSI
select SCSI_SAS_ATTRS
- select BLK_DEV_BSGLIB
help
This provides transport specific helpers for SAS drivers which
use the domain device construct (like the aic94xxx).
@@ -19,6 +18,7 @@ config SCSI_SAS_ATA
bool "ATA support for libsas (requires libata)"
depends on SCSI_SAS_LIBSAS
depends on ATA = y || ATA = SCSI_SAS_LIBSAS
+ select SATA_HOST
help
Builds in ATA support into libsas. Will necessitate
the loading of libata along with libsas.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index e63a54f5ab8c..9dc32736cf21 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -18,4 +18,4 @@ libsas-y += sas_init.o \
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
-ccflags-y := -DDEBUG
+ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index c5a828a041e0..d35c9296f738 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -20,8 +20,8 @@
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
#include <scsi/scsi_eh.h>
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
@@ -35,46 +35,38 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
/* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
- case SAS_DEV_NO_RESPONSE:
- return AC_ERR_TIMEOUT;
-
- case SAS_INTERRUPTED:
- case SAS_PHY_DOWN:
- case SAS_NAK_R_ERR:
- return AC_ERR_ATA_BUS;
-
-
- case SAS_DATA_UNDERRUN:
- /*
- * Some programs that use the taskfile interface
- * (smartctl in particular) can cause underrun
- * problems. Ignore these errors, perhaps at our
- * peril.
- */
- return 0;
-
- case SAS_DATA_OVERRUN:
- case SAS_QUEUE_FULL:
- case SAS_DEVICE_UNKNOWN:
- case SAS_SG_ERR:
- return AC_ERR_INVALID;
-
- case SAS_OPEN_TO:
- case SAS_OPEN_REJECT:
- pr_warn("%s: Saw error %d. What to do?\n",
- __func__, ts->stat);
- return AC_ERR_OTHER;
-
- case SAM_STAT_CHECK_CONDITION:
- case SAS_ABORTED_TASK:
- return AC_ERR_DEV;
-
- case SAS_PROTO_RESPONSE:
- /* This means the ending_fis has the error
- * value; return 0 here to collect it */
- return 0;
- default:
- return 0;
+ case SAS_DEV_NO_RESPONSE:
+ return AC_ERR_TIMEOUT;
+ case SAS_INTERRUPTED:
+ case SAS_PHY_DOWN:
+ case SAS_NAK_R_ERR:
+ return AC_ERR_ATA_BUS;
+ case SAS_DATA_UNDERRUN:
+ /*
+ * Some programs that use the taskfile interface
+ * (smartctl in particular) can cause underrun
+ * problems. Ignore these errors, perhaps at our
+ * peril.
+ */
+ return 0;
+ case SAS_DATA_OVERRUN:
+ case SAS_QUEUE_FULL:
+ case SAS_DEVICE_UNKNOWN:
+ case SAS_OPEN_TO:
+ case SAS_OPEN_REJECT:
+ pr_warn("%s: Saw error %d. What to do?\n",
+ __func__, ts->stat);
+ return AC_ERR_OTHER;
+ case SAM_STAT_CHECK_CONDITION:
+ case SAS_ABORTED_TASK:
+ return AC_ERR_DEV;
+ case SAS_PROTO_RESPONSE:
+ /* This means the ending_fis has the error
+ * value; return 0 here to collect it
+ */
+ return 0;
+ default:
+ return 0;
}
}
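
Besides the re-indent, the rewritten switch drops SAS_SG_ERR and moves the overrun/queue-full/unknown-device cases from AC_ERR_INVALID to AC_ERR_OTHER. How the mapping is consumed is unchanged; roughly (mirroring sas_ata_task_done() in this file):

static void fold_sas_status(struct ata_queued_cmd *qc,
                            struct task_status_struct *ts)
{
        /* Fold the SAS completion status into libata's error mask. */
        qc->err_mask |= sas_to_ata_err(ts);
        ata_qc_complete(qc);
}
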
@@ -122,9 +114,10 @@ static void sas_ata_task_done(struct sas_task *task)
}
}
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
- ((stat->stat == SAM_STAT_CHECK_CONDITION &&
- dev->sata_dev.class == ATA_DEV_ATAPI))) {
+ if (stat->stat == SAS_PROTO_RESPONSE ||
+ stat->stat == SAS_SAM_STAT_GOOD ||
+ (stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.class == ATA_DEV_ATAPI)) {
memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
@@ -160,6 +153,7 @@ qc_already_gone:
}
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ __must_hold(ap->lock)
{
struct sas_task *task;
struct scatterlist *sg;
@@ -185,14 +179,9 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->task_proto = SAS_PROTOCOL_STP;
task->task_done = sas_ata_task_done;
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ ||
- qc->tf.command == ATA_CMD_FPDMA_RECV ||
- qc->tf.command == ATA_CMD_FPDMA_SEND ||
- qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
- /* Need to zero out the tag libata assigned us */
+ /* For NCQ commands, zero out the tag libata assigned us */
+ if (ata_is_ncq(qc->tf.protocol))
qc->tf.nsect = 0;
- }
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
task->uldd_task = qc;
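
ata_is_ncq() from <linux/libata.h> keys off the protocol's NCQ flag instead of enumerating FPDMA opcodes, so the tag clearing above covers future NCQ commands automatically:

#include <linux/libata.h>

static void clear_libata_tag(struct ata_queued_cmd *qc)
{
        /* For NCQ, libata keeps its tag in tf.nsect; the LLDD owns tags here. */
        if (ata_is_ncq(qc->tf.protocol))
                qc->tf.nsect = 0;
}
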
@@ -200,18 +189,19 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
+ task->data_dir = qc->dma_dir;
+ } else if (!ata_is_data(qc->tf.protocol)) {
+ task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
+ task->data_dir = qc->dma_dir;
}
-
- task->data_dir = qc->dma_dir;
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
- task->task_state_flags = SAS_TASK_STATE_PENDING;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
@@ -323,7 +313,7 @@ static int smp_ata_check_ready(struct ata_link *link)
case SAS_END_DEVICE:
if (ex_phy->attached_sata_dev)
return sas_ata_clear_pending(dev, ex_phy);
- /* fall through */
+ fallthrough;
default:
return -ENODEV;
}
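
The fallthrough conversions scattered through this series replace the old comment convention with the pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) where supported and lets -Wimplicit-fallthrough flag only the unannotated cases. Minimal illustration:

#include <linux/compiler_attributes.h>

static int classify(int v)
{
        switch (v) {
        case 0:
                v++;
                fallthrough;        /* visible to the compiler, unlike a comment */
        default:
                return v;
        }
}
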
@@ -368,22 +358,14 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev,
return r;
}
-static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
{
- int ret = 0, res;
- struct sas_phy *phy;
- struct ata_port *ap = link->ap;
+ struct sata_device *sata_dev = &dev->sata_dev;
int (*check_ready)(struct ata_link *link);
- struct domain_device *dev = ap->private_data;
- struct sas_internal *i = dev_to_sas_internal(dev);
-
- res = i->dft->lldd_I_T_nexus_reset(dev);
- if (res == -ENODEV)
- return res;
-
- if (res != TMF_RESP_FUNC_COMPLETE)
- sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+ struct ata_port *ap = sata_dev->ap;
+ struct ata_link *link = &ap->link;
+ struct sas_phy *phy;
+ int ret;
phy = sas_get_local_phy(dev);
if (scsi_is_sas_phy_local(phy))
@@ -396,6 +378,27 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
if (ret && ret != -EAGAIN)
sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sas_ata_wait_after_reset);
+
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+ int ret;
+
+ ret = i->dft->lldd_I_T_nexus_reset(dev);
+ if (ret == -ENODEV)
+ return ret;
+
+ if (ret != TMF_RESP_FUNC_COMPLETE)
+ sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+
+ ret = sas_ata_wait_after_reset(dev, deadline);
+
*class = dev->sata_dev.class;
ap->cbl = ATA_CBL_SATA;
@@ -506,10 +509,23 @@ void sas_ata_end_eh(struct ata_port *ap)
spin_unlock_irqrestore(&ha->lock, flags);
}
+static int sas_ata_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_phy *local_phy = sas_get_local_phy(dev);
+ int res = 0;
+
+ if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state))
+ res = -ENOENT;
+ sas_put_local_phy(local_phy);
+
+ return res;
+}
+
static struct ata_port_operations sas_sata_ops = {
- .prereset = ata_std_prereset,
+ .prereset = sas_ata_prereset,
.hardreset = sas_ata_hard_reset,
- .postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
@@ -585,7 +601,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- blk_abort_request(qc->scsicmd->request);
+ blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
return;
}
@@ -709,19 +725,13 @@ void sas_resume_sata(struct asd_sas_port *port)
*/
int sas_discover_sata(struct domain_device *dev)
{
- int res;
-
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
dev->sata_dev.class = sas_get_ata_command_set(dev);
sas_fill_in_rphy(dev, dev->rphy);
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- return res;
-
- return 0;
+ return sas_notify_lldd_dev_found(dev);
}
static void async_sas_ata_eh(void *data, async_cookie_t cookie)
@@ -778,8 +788,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
sas_enable_revalidation(sas_ha);
}
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
struct domain_device *eh_dev;
@@ -851,3 +860,11 @@ void sas_ata_wait_eh(struct domain_device *dev)
ap = dev->sata_dev.ap;
ata_port_wait_eh(ap);
}
+
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
+{
+ struct sas_tmf_task tmf_task = {};
+ return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
+ force_phy_id, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);
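
A hypothetical caller sketch for the newly exported sas_execute_ata_cmd(): an LLDD fills in a host_to_dev_fis and libsas drives it through the internal TMF machinery. Passing -1 for force_phy_id is assumed to mean no phy is forced, matching the ata_task.force_phy handling in sas_execute_tmf() later in this series:

#include <scsi/sas.h>
#include <scsi/sas_ata.h>

static int send_h2d_fis(struct domain_device *dev)
{
        struct host_to_dev_fis fis = {
                .fis_type = 0x27,        /* Register FIS - Host to Device */
        };

        return sas_execute_ata_cmd(dev, (u8 *)&fis, -1 /* no forced phy */);
}
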
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index daf951b0b3f5..d5bc1314c341 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -8,7 +8,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
@@ -16,7 +15,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
/* ---------- Basic task processing for discovery purposes ---------- */
@@ -75,7 +74,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
- fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
+ fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96
&& (fis->device & ~0x10) == 0)
dev->dev_type = SAS_SATA_PM;
else
@@ -108,7 +107,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
rphy = NULL;
break;
}
- /* fall through */
+ fallthrough;
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port->port);
break;
@@ -182,10 +181,11 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
+ return res;
}
set_bit(SAS_DEV_FOUND, &dev->state);
kref_get(&dev->kref);
- return res;
+ return 0;
}
@@ -277,13 +277,7 @@ static void sas_resume_devices(struct work_struct *work)
*/
int sas_discover_end_dev(struct domain_device *dev)
{
- int res;
-
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- return res;
-
- return 0;
+ return sas_notify_lldd_dev_found(dev);
}
/* ---------- Device registration and unregistration ---------- */
@@ -466,7 +460,7 @@ static void sas_discover_domain(struct work_struct *work)
break;
#else
pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
- /* Fall through */
+ fallthrough;
#endif
/* Fall through - only for the #else condition above. */
default:
@@ -551,19 +545,17 @@ static void sas_chain_event(int event, unsigned long *pending,
}
}
-int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
+void sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
if (!port)
- return 0;
+ return;
disc = &port->disc;
BUG_ON(ev >= DISC_NUM_EVENTS);
sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
-
- return 0;
}
/**
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index a1852f6c042b..f3a17191a4fe 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -10,29 +10,26 @@
#include <scsi/scsi_host.h>
#include "sas_internal.h"
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- /* it's added to the defer_q when draining so return succeed */
- int rc = 1;
-
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
- return 0;
+ return false;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
- } else
- rc = queue_work(ha->event_q, &sw->work);
+ return true;
+ }
- return rc;
+ return queue_work(ha->event_q, &sw->work);
}
-static int sas_queue_event(int event, struct sas_work *work,
+static bool sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
unsigned long flags;
- int rc;
+ bool rc;
spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
@@ -41,12 +38,24 @@ static int sas_queue_event(int event, struct sas_work *work,
return rc;
}
-
-void __sas_drain_work(struct sas_ha_struct *ha)
+void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
- int ret;
+ spin_lock_irq(&ha->lock);
+ list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ list_del_init(&sw->drain_node);
+
+ if (!sas_queue_work(ha, sw)) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(to_asd_sas_event(&sw->work));
+ }
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
@@ -55,16 +64,8 @@ void __sas_drain_work(struct sas_ha_struct *ha)
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);
- spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
- list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
- list_del_init(&sw->drain_node);
- ret = sas_queue_work(ha, sw);
- if (ret != 1)
- sas_free_event(to_asd_sas_event(&sw->work));
-
- }
- spin_unlock_irq(&ha->lock);
+ sas_queue_deferred_work(ha);
}
int sas_drain_work(struct sas_ha_struct *ha)
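
Factoring sas_queue_deferred_work() out of __sas_drain_work() makes the drain handshake reusable by the resume path later in this series. The handshake, reduced: while SAS_HA_DRAINING is set, sas_queue_work() parks new work on ha->defer_q; the drainer flushes the queues, clears the bit, then requeues what was parked.

#include <scsi/libsas.h>

static void drain_handshake(struct sas_ha_struct *ha)
{
        set_bit(SAS_HA_DRAINING, &ha->state);        /* park new work */
        drain_workqueue(ha->event_q);
        drain_workqueue(ha->disco_q);
        clear_bit(SAS_HA_DRAINING, &ha->state);

        /* Requeue parked work; failures drop the PM ref and free the event. */
        sas_queue_deferred_work(ha);
}
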
@@ -104,12 +105,17 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
if (!test_and_clear_bit(ev, &d->pending))
continue;
- if (list_empty(&port->phy_list))
+ spin_lock(&port->phy_list_lock);
+ if (list_empty(&port->phy_list)) {
+ spin_unlock(&port->phy_list_lock);
continue;
+ }
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
- ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ spin_unlock(&port->phy_list_lock);
+ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
mutex_unlock(&ha->disco_mutex);
}
@@ -118,65 +124,93 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_port_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_phy_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
-static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+/* defer works of new phys during suspend */
+static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
{
- struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
- int ret;
+ unsigned long flags;
+ bool deferred = false;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
+ struct sas_work *sw = &ev->work;
+
+ list_add_tail(&sw->drain_node, &ha->defer_q);
+ deferred = true;
+ }
+ spin_unlock_irqrestore(&ha->lock, flags);
+ return deferred;
+}
+
+void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags)
+{
+ struct sas_ha_struct *ha = phy->ha;
+ struct asd_sas_event *ev;
BUG_ON(event >= PORT_NUM_EVENTS);
- ev = sas_alloc_event(phy);
+ ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
- return -ENOMEM;
+ return;
+
+ /* Call pm_runtime_put() with pairs in sas_port_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
- sas_free_event(ev);
+ if (sas_defer_event(phy, ev))
+ return;
- return ret;
+ if (!sas_queue_event(event, &ev->work, ha)) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(ev);
+ }
}
+EXPORT_SYMBOL_GPL(sas_notify_port_event);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags)
{
- struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
- int ret;
+ struct asd_sas_event *ev;
BUG_ON(event >= PHY_NUM_EVENTS);
- ev = sas_alloc_event(phy);
+ ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
- return -ENOMEM;
+ return;
- INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
-
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
- sas_free_event(ev);
+ /* Call pm_runtime_put() with pairs in sas_phy_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
- return ret;
-}
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
-int sas_init_events(struct sas_ha_struct *sas_ha)
-{
- sas_ha->notify_port_event = sas_notify_port_event;
- sas_ha->notify_phy_event = sas_notify_phy_event;
+ if (sas_defer_event(phy, ev))
+ return;
- return 0;
+ if (!sas_queue_event(event, &ev->work, ha)) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(ev);
+ }
}
+EXPORT_SYMBOL_GPL(sas_notify_phy_event);
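
Taken together, the notifier changes in this file set a new contract: callers state their allocation context explicitly instead of libsas guessing with in_interrupt(), and each queued event pins the host with a runtime-PM reference that the worker releases. Caller-side sketch:

#include <scsi/libsas.h>

static void notify_examples(struct asd_sas_phy *phy)
{
        /* Process context: allocation may sleep. */
        sas_notify_port_event(phy, PORTE_BROADCAST_RCVD, GFP_KERNEL);

        /* Hard-IRQ context: must not sleep. */
        sas_notify_phy_event(phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
}
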
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index ab671cdd4cfb..5ce251830104 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -18,7 +18,7 @@
#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static int sas_discover_expander(struct domain_device *dev);
static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
@@ -28,26 +28,6 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
/* ---------- SMP task management ---------- */
-static void smp_task_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
-static void smp_task_done(struct sas_task *task)
-{
- del_timer(&task->slow_task->timer);
- complete(&task->slow_task->completion);
-}
-
/* Give it some long enough timeout. In seconds. */
#define SMP_TIMEOUT 10
@@ -58,7 +38,9 @@ static int smp_execute_task_sg(struct domain_device *dev,
struct sas_task *task = NULL;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
+ struct sas_ha_struct *ha = dev->port->ha;
+ pm_runtime_get_sync(ha->dev);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
@@ -76,16 +58,16 @@ static int smp_execute_task_sg(struct domain_device *dev,
task->smp_task.smp_req = *req;
task->smp_task.smp_resp = *resp;
- task->task_done = smp_task_done;
+ task->task_done = sas_task_internal_done;
- task->slow_task->timer.function = smp_task_timedout;
+ task->slow_task->timer.function = sas_task_internal_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
@@ -101,7 +83,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = 0;
break;
}
@@ -131,6 +113,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
mutex_unlock(&dev->ex_dev.cmd_mutex);
+ pm_runtime_put_sync(ha->dev);
BUG_ON(retry == 3 && task != NULL);
sas_free_task(task);
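
The pm_runtime_get_sync()/pm_runtime_put_sync() pair added around the retry loop keeps the host resumed for the whole SMP exchange. The bracketing pattern in isolation:

#include <linux/pm_runtime.h>

static int with_host_resumed(struct device *dev, int (*fn)(void *), void *arg)
{
        int res;

        pm_runtime_get_sync(dev);        /* resume now and hold a reference */
        res = fn(arg);
        pm_runtime_put_sync(dev);        /* allow runtime suspend again */
        return res;
}
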
@@ -192,13 +175,13 @@ static enum sas_device_type to_dev_type(struct discover_resp *dr)
return dr->attached_dev_type;
}
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ struct smp_disc_resp *disc_resp)
{
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
- struct smp_resp *resp = rsp;
- struct discover_resp *dr = &resp->disc;
+ struct discover_resp *dr = &disc_resp->disc;
struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -215,7 +198,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
BUG_ON(!phy->phy);
}
- switch (resp->result) {
+ switch (disc_resp->result) {
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
break;
@@ -364,12 +347,13 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
}
#define DISCOVER_REQ_SIZE 16
-#define DISCOVER_RESP_SIZE 56
+#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp)
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
- u8 *disc_resp, int single)
+ struct smp_disc_resp *disc_resp,
+ int single)
{
- struct discover_resp *dr;
+ struct discover_resp *dr = &disc_resp->disc;
int res;
disc_req[9] = single;
@@ -378,7 +362,6 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
- dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
pr_notice("Found loopback topology, just ignore it!\n");
return 0;
@@ -392,7 +375,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
struct expander_device *ex = &dev->ex_dev;
int res = 0;
u8 *disc_req;
- u8 *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
@@ -427,7 +410,7 @@ out_err:
static int sas_expander_discover(struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
- int res = -ENOMEM;
+ int res;
ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL);
if (!ex->ex_phy)
@@ -446,27 +429,14 @@ static int sas_expander_discover(struct domain_device *dev)
#define MAX_EXPANDER_PHYS 128
-static void ex_assign_report_general(struct domain_device *dev,
- struct smp_resp *resp)
-{
- struct report_general_resp *rg = &resp->rg;
-
- dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
- dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
- dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
- dev->ex_dev.t2t_supp = rg->t2t_supp;
- dev->ex_dev.conf_route_table = rg->conf_route_table;
- dev->ex_dev.configuring = rg->configuring;
- memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
-}
-
#define RG_REQ_SIZE 8
-#define RG_RESP_SIZE 32
+#define RG_RESP_SIZE sizeof(struct smp_rg_resp)
static int sas_ex_general(struct domain_device *dev)
{
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
+ struct report_general_resp *rg;
int res;
int i;
@@ -497,7 +467,15 @@ static int sas_ex_general(struct domain_device *dev)
goto out;
}
- ex_assign_report_general(dev, rg_resp);
+ rg = &rg_resp->rg;
+ dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
+ dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
+ dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
+ dev->ex_dev.conf_route_table = rg->conf_route_table;
+ dev->ex_dev.configuring = rg->configuring;
+ memcpy(dev->ex_dev.enclosure_logical_id,
+ rg->enclosure_logical_id, 8);
if (dev->ex_dev.configuring) {
pr_debug("RG: ex %016llx self-configuring...\n",
@@ -553,7 +531,7 @@ static int sas_ex_manuf_info(struct domain_device *dev)
mi_req[1] = SMP_REPORT_MANUF_INFO;
- res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
+ res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp, MI_RESP_SIZE);
if (res) {
pr_notice("MI: ex %016llx failed:0x%x\n",
SAS_ADDR(dev->sas_addr), res);
@@ -594,13 +572,13 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
pc_req[1] = SMP_PHY_CONTROL;
pc_req[9] = phy_id;
- pc_req[10]= phy_func;
+ pc_req[10] = phy_func;
if (rates) {
pc_req[32] = rates->minimum_linkrate << 4;
pc_req[33] = rates->maximum_linkrate << 4;
}
- res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
+ res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp, PC_RESP_SIZE);
if (res) {
pr_err("ex %016llx phy%02d PHY control failed: %d\n",
SAS_ADDR(dev->sas_addr), phy_id, res);
@@ -678,7 +656,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
req[9] = phy->number;
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
- resp, RPEL_RESP_SIZE);
+ resp, RPEL_RESP_SIZE);
if (res)
goto out;
@@ -698,10 +676,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
-#define RPS_RESP_SIZE 60
+#define RPS_RESP_SIZE sizeof(struct smp_rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp)
+ struct smp_rps_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -714,7 +692,7 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
rps_req[9] = phy_id;
res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
- rps_resp, RPS_RESP_SIZE);
+ rps_resp, RPS_RESP_SIZE);
/* 0x34 is the FIS type for the D2H fis. There's a potential
* standards cockup here. sas-2 explicitly specifies the FIS
@@ -1096,7 +1074,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
} else
memcpy(dev->port->disc.fanout_sas_addr,
ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
- /* fallthrough */
+ fallthrough;
case SAS_EDGE_EXPANDER_DEVICE:
child = sas_ex_discover_expander(dev, phy_id);
break;
@@ -1506,7 +1484,8 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
if (res)
return res;
if (include ^ present)
- return sas_configure_set(dev, phy_id, sas_addr, index,include);
+ return sas_configure_set(dev, phy_id, sas_addr, index,
+ include);
return res;
}
@@ -1673,7 +1652,7 @@ out_err:
/* ---------- Domain revalidation ---------- */
static int sas_get_phy_discover(struct domain_device *dev,
- int phy_id, struct smp_resp *disc_resp)
+ int phy_id, struct smp_disc_resp *disc_resp)
{
int res;
u8 *disc_req;
@@ -1689,10 +1668,8 @@ static int sas_get_phy_discover(struct domain_device *dev,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out;
- else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
res = disc_resp->result;
- goto out;
- }
out:
kfree(disc_req);
return res;
@@ -1702,7 +1679,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
int phy_id, int *pcc)
{
int res;
- struct smp_resp *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
@@ -1720,19 +1697,17 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type)
{
int res;
- struct smp_resp *disc_resp;
- struct discover_resp *dr;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
- dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
SAS_ADDR_SIZE);
- *type = to_dev_type(dr);
+ *type = to_dev_type(&disc_resp->disc);
if (*type == 0)
memset(sas_addr, 0, SAS_ADDR_SIZE);
}
@@ -1776,7 +1751,7 @@ static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
{
int res;
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index eca2a6bf3601..32cdc969b736 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -14,7 +14,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 phy_id)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 21c43b18d5d5..e4f77072a58d 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -19,7 +19,7 @@
#include "sas_internal.h"
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;
@@ -123,12 +123,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_phys;
}
- error = sas_init_events(sas_ha);
- if (error) {
- pr_notice("couldn't start event thread:%d\n", error);
- goto Undo_ports;
- }
-
error = -ENOMEM;
snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
sas_ha->event_q = create_singlethread_workqueue(name);
@@ -153,6 +147,7 @@ Undo_phys:
return error;
}
+EXPORT_SYMBOL_GPL(sas_register_ha);
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
@@ -182,6 +177,7 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_unregister_ha);
static int sas_get_linkerrors(struct sas_phy *phy)
{
@@ -258,7 +254,7 @@ static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
}
-static int sas_phy_enable(struct sas_phy *phy, int enable)
+int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
enum phy_func cmd;
@@ -290,6 +286,7 @@ static int sas_phy_enable(struct sas_phy *phy, int enable)
}
return ret;
}
+EXPORT_SYMBOL_GPL(sas_phy_enable);
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
@@ -319,6 +316,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
return ret;
}
+EXPORT_SYMBOL_GPL(sas_phy_reset);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates)
@@ -364,6 +362,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
+ set_bit(SAS_HA_RESUMING, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
@@ -389,7 +388,31 @@ static int phys_suspended(struct sas_ha_struct *ha)
return rc;
}
-void sas_resume_ha(struct sas_ha_struct *ha)
+static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ struct domain_device *dev = port->port_dev;
+
+ if (dev && dev_is_expander(dev->dev_type)) {
+ struct asd_sas_phy *first_phy;
+
+ spin_lock(&port->phy_list_lock);
+ first_phy = list_first_entry_or_null(
+ &port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ spin_unlock(&port->phy_list_lock);
+
+ if (first_phy)
+ sas_notify_port_event(first_phy,
+ PORTE_BROADCAST_RCVD, GFP_KERNEL);
+ }
+ }
+}
+
+static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
@@ -410,7 +433,8 @@ void sas_resume_ha(struct sas_ha_struct *ha)
if (phy->suspended) {
dev_warn(&phy->phy->dev, "resume timeout\n");
- sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
+ sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT,
+ GFP_KERNEL);
}
}
@@ -418,10 +442,30 @@ void sas_resume_ha(struct sas_ha_struct *ha)
* flush out disks that did not return
*/
scsi_unblock_requests(ha->core.shost);
- sas_drain_work(ha);
+ if (drain)
+ sas_drain_work(ha);
+ clear_bit(SAS_HA_RESUMING, &ha->state);
+
+ sas_queue_deferred_work(ha);
+ /* send event PORTE_BROADCAST_RCVD to identify some new inserted
+ * disks for expander
+ */
+ sas_resume_insert_broadcast_ha(ha);
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);
+/* A no-sync variant, which does not call sas_drain_ha(). */
+void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, false);
+}
+EXPORT_SYMBOL(sas_resume_ha_no_sync);
+
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;
@@ -487,6 +531,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
@@ -500,6 +545,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
@@ -514,6 +560,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
@@ -527,6 +574,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
@@ -590,16 +638,15 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
-
-struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
+ gfp_t gfp_flags)
{
struct asd_sas_event *event;
- gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- event = kmem_cache_zalloc(sas_event_cache, flags);
+ event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
if (!event)
return NULL;
@@ -610,7 +657,8 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
pr_notice("The phy%d bursting events, shut it down.\n",
phy->id);
- sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN,
+ gfp_flags);
}
} else {
/* Do not support PHY control, stop allocating events */
@@ -664,5 +712,3 @@ MODULE_LICENSE("GPL v2");
module_init(sas_class_init);
module_exit(sas_class_exit);
-EXPORT_SYMBOL_GPL(sas_register_ha);
-EXPORT_SYMBOL_GPL(sas_unregister_ha);
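
Moving the two exports up from the file tail follows the usual kernel convention that EXPORT_SYMBOL*() sits directly under the definition it exports, e.g. (symbol name illustrative):

int sas_example_op(void)
{
        return 0;
}
EXPORT_SYMBOL_GPL(sas_example_op);
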
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 1f1d01901978..8d0ad3abc7b5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -14,6 +14,7 @@
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
+#include <linux/pm_runtime.h>
#ifdef pr_fmt
#undef pr_fmt
@@ -48,15 +49,15 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
-struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
-int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
+void sas_queue_deferred_work(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);
@@ -66,7 +67,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -77,13 +78,12 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
int sas_ex_phy_discover(struct domain_device *dev, int single);
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp);
+ struct smp_rps_resp *rps_resp);
int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
@@ -93,6 +93,12 @@ void sas_destruct_devices(struct asd_sas_port *port);
extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
+void sas_task_internal_done(struct sas_task *task);
+void sas_task_internal_timedout(struct timer_list *t);
+int sas_execute_tmf(struct domain_device *device, void *parameter,
+ int para_len, int force_phy_id,
+ struct sas_tmf_task *tmf);
+
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
#else
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 4ca4b1f30bd0..a0d592d11dfb 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -10,7 +10,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
/* ---------- Phy events ---------- */
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 19cf418928fa..11599c0e3fc3 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -10,7 +10,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
{
@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
static void sas_resume_port(struct asd_sas_phy *phy)
{
- struct domain_device *dev;
+ struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
- list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
@@ -67,6 +67,34 @@ static void sas_resume_port(struct asd_sas_phy *phy)
sas_discover_event(port, DISCE_RESUME);
}
+static void sas_form_port_add_phy(struct asd_sas_port *port,
+ struct asd_sas_phy *phy, bool wideport)
+{
+ list_add_tail(&phy->port_phy_el, &port->phy_list);
+ sas_phy_set_target(phy, port->port_dev);
+ phy->port = port;
+ port->num_phys++;
+ port->phy_mask |= (1U << phy->id);
+
+ if (wideport)
+ pr_debug("phy%d matched wide port%d\n", phy->id,
+ port->id);
+ else
+ memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
+
+ if (*(u64 *)port->attached_sas_addr == 0) {
+ port->class = phy->class;
+ memcpy(port->attached_sas_addr, phy->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ port->iproto = phy->iproto;
+ port->tproto = phy->tproto;
+ port->oob_mode = phy->oob_mode;
+ port->linkrate = phy->linkrate;
+ } else {
+ port->linkrate = max(port->linkrate, phy->linkrate);
+ }
+}
+
/**
* sas_form_port - add this phy to a port
* @phy: the phy of interest
@@ -79,7 +107,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
int i;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
- struct domain_device *port_dev;
+ struct domain_device *port_dev = NULL;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
@@ -110,8 +138,9 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (*(u64 *) port->sas_addr &&
phy_is_wideport_member(port, phy) && port->num_phys > 0) {
/* wide port */
- pr_debug("phy%d matched wide port%d\n", phy->id,
- port->id);
+ port_dev = port->port_dev;
+ sas_form_port_add_phy(port, phy, true);
+ spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
@@ -122,40 +151,22 @@ static void sas_form_port(struct asd_sas_phy *phy)
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *)port->sas_addr == 0
- && port->num_phys == 0) {
- memcpy(port->sas_addr, phy->sas_addr,
- SAS_ADDR_SIZE);
+ && port->num_phys == 0) {
+ port_dev = port->port_dev;
+ sas_form_port_add_phy(port, phy, false);
+ spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
}
- }
- if (i >= sas_ha->num_phys) {
- pr_err("%s: couldn't find a free port, bug?\n", __func__);
- spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
- return;
+ if (i >= sas_ha->num_phys) {
+ pr_err("%s: couldn't find a free port, bug?\n",
+ __func__);
+ spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+ return;
+ }
}
-
- /* add the phy to the port */
- port_dev = port->port_dev;
- list_add_tail(&phy->port_phy_el, &port->phy_list);
- sas_phy_set_target(phy, port_dev);
- phy->port = port;
- port->num_phys++;
- port->phy_mask |= (1U << phy->id);
-
- if (*(u64 *)port->attached_sas_addr == 0) {
- port->class = phy->class;
- memcpy(port->attached_sas_addr, phy->attached_sas_addr,
- SAS_ADDR_SIZE);
- port->iproto = phy->iproto;
- port->tproto = phy->tproto;
- port->oob_mode = phy->oob_mode;
- port->linkrate = phy->linkrate;
- } else
- port->linkrate = max(port->linkrate, phy->linkrate);
- spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
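
The switch to list_for_each_entry_safe() in sas_resume_port() is the standard guard for a loop whose body may unlink the current entry; generic sketch with a hypothetical predicate:

#include <linux/list.h>
#include <scsi/libsas.h>

static bool should_prune(struct domain_device *dev);        /* hypothetical */

static void prune_dev_list(struct list_head *head)
{
        struct domain_device *dev, *n;

        /* 'n' caches the next node so the body may safely unlink 'dev'. */
        list_for_each_entry_safe(dev, n, head, dev_list_node) {
                if (should_prune(dev))
                        list_del_init(&dev->dev_list_node);
        }
}
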
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9e0975e55c27..a36fa1c128a8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -22,9 +22,9 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
-#include "../scsi_priv.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
+#include "scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
@@ -37,7 +37,8 @@
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
- int hs = 0, stat = 0;
+ enum scsi_host_status hs = DID_OK;
+ enum exec_status stat = SAS_SAM_STAT_GOOD;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
@@ -66,9 +67,6 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_DEVICE_UNKNOWN:
hs = DID_BAD_TARGET;
break;
- case SAS_SG_ERR:
- hs = DID_PARITY;
- break;
case SAS_OPEN_REJECT:
if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
hs = DID_SOFT_ERROR; /* retry */
@@ -82,10 +80,10 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
- case SAM_STAT_CHECK_CONDITION:
+ case SAS_SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
- stat = SAM_STAT_CHECK_CONDITION;
+ stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
@@ -125,7 +123,7 @@ static void sas_scsi_task_done(struct sas_task *task)
}
sas_end_task(sc, task);
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
@@ -198,9 +196,10 @@ out_free_task:
else
cmd->result = DID_ERROR << 16;
out_done:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
+EXPORT_SYMBOL_GPL(sas_queuecommand);
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
@@ -314,11 +313,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
pr_notice("%s: task 0x%p failed to abort\n",
__func__, task);
return TASK_ABORT_FAILED;
+ default:
+ pr_notice("%s: task 0x%p result code %d not handled\n",
+ __func__, task, res);
}
-
}
}
- return res;
+ return TASK_ABORT_FAILED;
}
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
@@ -511,6 +512,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
+EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
@@ -532,6 +534,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
+EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
@@ -622,7 +625,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
- /* fallthrough */
+ fallthrough;
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
pr_notice("task 0x%p is not at LU: I_T recover\n",
@@ -753,7 +756,7 @@ retry:
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
- sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
+ sas_ata_eh(shost, &eh_work_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
@@ -790,6 +793,7 @@ int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(sas_ioctl);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
@@ -832,6 +836,7 @@ int sas_target_alloc(struct scsi_target *starget)
starget->hostdata = found_dev;
return 0;
}
+EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
@@ -860,18 +865,21 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_slave_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
- return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+ return ata_change_queue_depth(dev->sata_dev.ap,
+ sas_to_ata_dev(dev), sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
return scsi_change_queue_depth(sdev, depth);
}
+EXPORT_SYMBOL_GPL(sas_change_queue_depth);
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
@@ -884,6 +892,316 @@ int sas_bios_param(struct scsi_device *scsi_dev,
return 0;
}
+EXPORT_SYMBOL_GPL(sas_bios_param);
+
+void sas_task_internal_done(struct sas_task *task)
+{
+ del_timer(&task->slow_task->timer);
+ complete(&task->slow_task->completion);
+}
+
+void sas_task_internal_timedout(struct timer_list *t)
+{
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
+ bool is_completed = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ is_completed = false;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (!is_completed)
+ complete(&task->slow_task->completion);
+}
+
+#define TASK_TIMEOUT (20 * HZ)
+#define TASK_RETRY 3
+
+static int sas_execute_internal_abort(struct domain_device *device,
+ enum sas_internal_abort type, u16 tag,
+ unsigned int qid, void *data)
+{
+ struct sas_ha_struct *ha = device->port->ha;
+ struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
+ struct sas_task *task = NULL;
+ int res, retry;
+
+ for (retry = 0; retry < TASK_RETRY; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
+ task->task_done = sas_task_internal_done;
+ task->slow_task->timer.function = sas_task_internal_timedout;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
+ add_timer(&task->slow_task->timer);
+
+ task->abort_task.tag = tag;
+ task->abort_task.type = type;
+ task->abort_task.qid = qid;
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+ if (res) {
+ del_timer_sync(&task->slow_task->timer);
+ pr_err("Executing internal abort failed %016llx (%d)\n",
+ SAS_ADDR(device->sas_addr), res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+ res = TMF_RESP_FUNC_FAILED;
+
+ /* Even if the internal abort timed out, return direct. */
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ bool quit = true;
+
+ if (i->dft->lldd_abort_timeout)
+ quit = i->dft->lldd_abort_timeout(task, data);
+ else
+ pr_err("Internal abort: timeout %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EIO;
+ if (quit)
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr), task->task_status.resp,
+ task->task_status.stat);
+ sas_free_task(task);
+ task = NULL;
+ }
+ BUG_ON(retry == TASK_RETRY && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
+ unsigned int qid, void *data)
+{
+ return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
+ tag, qid, data);
+}
+EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
+
+int sas_execute_internal_abort_dev(struct domain_device *device,
+ unsigned int qid, void *data)
+{
+ return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
+ SCSI_NO_TAG, qid, data);
+}
+EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
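
For orientation, a minimal sketch of how an LLDD might drive the new device-wide abort helper from its own recovery path; the wrapper name and the qid/cookie semantics here are hypothetical (both are driver-defined):

static int example_lldd_abort_dev(struct domain_device *dev, unsigned int qid)
{
	/* Hypothetical wrapper: abort everything queued for this device.
	 * The tag is implied (SCSI_NO_TAG); the final argument is the
	 * driver-private cookie handed back through lldd_abort_timeout(),
	 * unused in this sketch.
	 */
	return sas_execute_internal_abort_dev(dev, qid, NULL);
}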
+
+int sas_execute_tmf(struct domain_device *device, void *parameter,
+ int para_len, int force_phy_id,
+ struct sas_tmf_task *tmf)
+{
+ struct sas_task *task;
+ struct sas_internal *i =
+ to_sas_internal(device->port->ha->core.shost->transportt);
+ int res, retry;
+
+ for (retry = 0; retry < TASK_RETRY; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = device->tproto;
+
+ if (dev_is_sata(device)) {
+ task->ata_task.device_control_reg_update = 1;
+ if (force_phy_id >= 0) {
+ task->ata_task.force_phy = true;
+ task->ata_task.force_phy_id = force_phy_id;
+ }
+ memcpy(&task->ata_task.fis, parameter, para_len);
+ } else {
+ memcpy(&task->ssp_task, parameter, para_len);
+ }
+
+ task->task_done = sas_task_internal_done;
+ task->tmf = tmf;
+
+ task->slow_task->timer.function = sas_task_internal_timedout;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
+ add_timer(&task->slow_task->timer);
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+ if (res) {
+ del_timer_sync(&task->slow_task->timer);
+ pr_err("executing TMF task failed %016llx (%d)\n",
+ SAS_ADDR(device->sas_addr), res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+
+ if (i->dft->lldd_tmf_exec_complete)
+ i->dft->lldd_tmf_exec_complete(device);
+
+ res = TMF_RESP_FUNC_FAILED;
+
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ pr_err("TMF task timeout for %016llx and not done\n",
+ SAS_ADDR(device->sas_addr));
+ if (i->dft->lldd_tmf_aborted)
+ i->dft->lldd_tmf_aborted(task);
+ break;
+ }
+ pr_warn("TMF task timeout for %016llx and done\n",
+ SAS_ADDR(device->sas_addr));
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun
+ */
+ pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ res = task->task_status.residual;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ pr_warn("TMF task blocked task error %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EMSGSIZE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_OPEN_REJECT) {
+ pr_warn("TMF task open reject failed %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EIO;
+ } else {
+ pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ }
+ sas_free_task(task);
+ task = NULL;
+ }
+
+ if (retry == TASK_RETRY)
+ pr_warn("executing TMF for %016llx failed after %d attempts!\n",
+ SAS_ADDR(device->sas_addr), TASK_RETRY);
+ sas_free_task(task);
+
+ return res;
+}
+
+static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
+ struct sas_tmf_task *tmf)
+{
+ struct sas_ssp_task ssp_task;
+
+ if (!(device->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ memcpy(ssp_task.LUN, lun, 8);
+
+ return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
+}
+
+int sas_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_ABORT_TASK_SET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_abort_task_set);
+
+int sas_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_CLEAR_TASK_SET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_clear_task_set);
+
+int sas_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_LU_RESET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_lu_reset);
+
+int sas_query_task(struct sas_task *task, u16 tag)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_QUERY_TASK,
+ .tag_of_task_to_be_managed = tag,
+ };
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct scsi_lun lun;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+
+ return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_query_task);
+
+int sas_abort_task(struct sas_task *task, u16 tag)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_ABORT_TASK,
+ .tag_of_task_to_be_managed = tag,
+ };
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct scsi_lun lun;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+
+ return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_abort_task);
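
Taken together, these exports let an LLDD's SCSI error handling escalate in the usual order. A sketch of one plausible escalation step (illustrative only; example_eh_abort is not part of this patch):

static int example_eh_abort(struct sas_task *task, u16 tag, u8 *lun)
{
	/* Try to abort the single command first ... */
	int res = sas_abort_task(task, tag);

	/* ... and fall back to a LU reset if the abort did not complete. */
	if (res != TMF_RESP_FUNC_COMPLETE)
		res = sas_lu_reset(task->dev, lun);
	return res;
}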
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
@@ -908,8 +1226,18 @@ void sas_task_abort(struct sas_task *task)
if (dev_is_sata(task->dev))
sas_ata_task_abort(task);
else
- blk_abort_request(sc->request);
+ blk_abort_request(scsi_cmd_to_rq(sc));
}
+EXPORT_SYMBOL_GPL(sas_task_abort);
+
+int sas_slave_alloc(struct scsi_device *sdev)
+{
+ if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
+ return -ENXIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sas_slave_alloc);
void sas_target_destroy(struct scsi_target *starget)
{
@@ -921,6 +1249,7 @@ void sas_target_destroy(struct scsi_target *starget)
starget->hostdata = NULL;
sas_put_device(found_dev);
}
+EXPORT_SYMBOL_GPL(sas_target_destroy);
#define SAS_STRING_ADDR_SIZE 16
@@ -948,14 +1277,3 @@ out:
}
EXPORT_SYMBOL_GPL(sas_request_addr);
-EXPORT_SYMBOL_GPL(sas_queuecommand);
-EXPORT_SYMBOL_GPL(sas_target_alloc);
-EXPORT_SYMBOL_GPL(sas_slave_configure);
-EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_bios_param);
-EXPORT_SYMBOL_GPL(sas_task_abort);
-EXPORT_SYMBOL_GPL(sas_phy_reset);
-EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
-EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
-EXPORT_SYMBOL_GPL(sas_target_destroy);
-EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index e2d42593ce52..e9d291007817 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -15,12 +15,15 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
tstat->resp = SAS_TASK_COMPLETE;
- if (iu->datapres == 0)
+ switch (iu->datapres) {
+ case SAS_DATAPRES_NO_DATA:
tstat->stat = iu->status;
- else if (iu->datapres == 1)
+ break;
+ case SAS_DATAPRES_RESPONSE_DATA:
tstat->stat = iu->resp_data[3];
- else if (iu->datapres == 2) {
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ case SAS_DATAPRES_SENSE_DATA:
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
tstat->buf_valid_size =
min_t(int, SAS_STATUS_BUF_SIZE,
be32_to_cpu(iu->sense_data_len));
@@ -29,10 +32,11 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
if (iu->status != SAM_STAT_CHECK_CONDITION)
dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
SAS_ADDR(task->dev->sas_addr), iu->status);
- }
- else
+ break;
+ default:
/* when datapres contains corrupt/unknown value... */
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
+ }
}
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
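
For reference, the datapres values this switch now names were previously compared as raw literals 0/1/2; a sketch of the encoding assumed behind the new SAS_DATAPRES_* names (matching the old comparisons):

enum {
	SAS_DATAPRES_NO_DATA       = 0,	/* status field alone is valid */
	SAS_DATAPRES_RESPONSE_DATA = 1,	/* status lives in resp_data[3] */
	SAS_DATAPRES_SENSE_DATA    = 2,	/* sense data follows: CHECK CONDITION */
};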
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 092a971d066b..bbd1faf41e80 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -33,4 +33,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
- lpfc_nvme.o lpfc_nvmet.o
+ lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 04d73e2be373..9ad233b40a9e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -22,6 +22,7 @@
*******************************************************************/
#include <scsi/scsi_host.h>
+#include <linux/hashtable.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
@@ -47,9 +48,6 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
- cmnd for menlo needs nearly twice as for firmware
- downloads using bsg */
#define LPFC_DEFAULT_XPSGL_SIZE 256
#define LPFC_MAX_SG_TABLESIZE 0xffff
@@ -70,8 +68,6 @@ struct lpfc_sli2_slim;
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
-#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
- collection. */
/*
* Following time intervals are used of adjusting SCSI device
* queue depths when there are driver resource error or Firmware
@@ -114,6 +110,12 @@ struct lpfc_sli2_slim;
#define LPFC_MBX_NO_WAIT 0
#define LPFC_MBX_WAIT 1
+#define LPFC_CFG_PARAM_MAGIC_NUM 0xFEAA0005
+#define LPFC_PORT_CFG_NAME "/cfg/port.cfg"
+
+#define lpfc_rangecheck(val, min, max) \
+ ((uint)(val) >= (uint)(min) && (val) <= (max))
+
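Note that the macro arguments are evaluated more than once, so callers should pass side-effect-free expressions. A sketch of a typical call site, mirroring the inline helper this macro replaces in lpfc_attr.c (the assignment shown is illustrative):

/* Typical use; val is evaluated twice, so pass a plain variable. */
if (lpfc_rangecheck(val, 0, 1))
	phba->cfg_enable_bbcr = val;
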
enum lpfc_polling_flags {
ENABLE_FCP_RING_POLLING = 0x1,
DISABLE_FCP_RING_INT = 0x2
@@ -143,7 +145,7 @@ struct lpfc_dmabuf {
struct lpfc_nvmet_ctxbuf {
struct list_head list;
- struct lpfc_nvmet_rcv_ctx *context;
+ struct lpfc_async_xchg_ctx *context;
struct lpfc_iocbq *iocbq;
struct lpfc_sglq *sglq;
struct work_struct defer_work;
@@ -207,8 +209,7 @@ typedef struct lpfc_vpd {
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd3 :19; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 :20; /* Reserved */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
@@ -230,8 +231,7 @@ typedef struct lpfc_vpd {
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 3; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd3 :19; /* Reserved */
+ uint32_t rsvd3 :20; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
@@ -262,13 +262,13 @@ struct lpfc_stats {
uint32_t elsRcvPRLI;
uint32_t elsRcvLIRR;
uint32_t elsRcvRLS;
- uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsRcvRRQ;
uint32_t elsRcvRTV;
uint32_t elsRcvECHO;
uint32_t elsRcvLCB;
uint32_t elsRcvRDP;
+ uint32_t elsRcvRDF;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
@@ -306,6 +306,64 @@ struct lpfc_stats {
struct lpfc_hba;
+#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
+
+#define LPFC_MAX_VMID_SIZE 256
+#define LPFC_COMPRESS_VMID_SIZE 16
+
+union lpfc_vmid_io_tag {
+ u32 app_id; /* App Id vmid */
+ u8 cs_ctl_vmid; /* Priority tag vmid */
+};
+
+#define JIFFIES_PER_HR (HZ * 60 * 60)
+
+struct lpfc_vmid {
+ u8 flag;
+#define LPFC_VMID_SLOT_FREE 0x0
+#define LPFC_VMID_SLOT_USED 0x1
+#define LPFC_VMID_REQ_REGISTER 0x2
+#define LPFC_VMID_REGISTERED 0x4
+#define LPFC_VMID_DE_REGISTER 0x8
+ char host_vmid[LPFC_MAX_VMID_SIZE];
+ union lpfc_vmid_io_tag un;
+ struct hlist_node hnode;
+ u64 io_rd_cnt;
+ u64 io_wr_cnt;
+ u8 vmid_len;
+ u8 delete_inactive; /* Delete if inactive flag 0 = no, 1 = yes */
+ u32 hash_index;
+ u64 __percpu *last_io_time;
+};
+
+#define lpfc_vmid_is_type_priority_tag(vport)\
+ (vport->vmid_priority_tagging ? 1 : 0)
+
+#define LPFC_VMID_HASH_SIZE 256
+#define LPFC_VMID_HASH_MASK 255
+#define LPFC_VMID_HASH_SHIFT 6
+
+struct lpfc_vmid_context {
+ struct lpfc_vmid *vmp;
+ struct lpfc_nodelist *nlp;
+ bool instantiated;
+};
+
+struct lpfc_vmid_priority_range {
+ u8 low;
+ u8 high;
+ u8 qos;
+};
+
+struct lpfc_vmid_priority_info {
+ u32 num_descriptors;
+ struct lpfc_vmid_priority_range *vmid_range;
+};
+
+#define QFPA_EVEN_ONLY 0x01
+#define QFPA_ODD_ONLY 0x02
+#define QFPA_EVEN_ODD 0x03
+
enum discovery_state {
LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
LPFC_VPORT_FAILED = 1, /* vport has failed */
@@ -345,6 +403,159 @@ struct lpfc_trunk_link {
link1,
link2,
link3;
+ u32 phy_lnk_speed;
+};
+
+/* Format of congestion module parameters */
+struct lpfc_cgn_param {
+ uint32_t cgn_param_magic;
+ uint8_t cgn_param_version; /* version 1 */
+ uint8_t cgn_param_mode; /* 0=off 1=managed 2=monitor only */
+#define LPFC_CFG_OFF 0
+#define LPFC_CFG_MANAGED 1
+#define LPFC_CFG_MONITOR 2
+ uint8_t cgn_rsvd1;
+ uint8_t cgn_rsvd2;
+ uint8_t cgn_param_level0;
+ uint8_t cgn_param_level1;
+ uint8_t cgn_param_level2;
+ uint8_t byte11;
+ uint8_t byte12;
+ uint8_t byte13;
+ uint8_t byte14;
+ uint8_t byte15;
+};
+
+/* Max number of days of congestion data */
+#define LPFC_MAX_CGN_DAYS 10
+
+/* Format of congestion buffer info
+ * This structure defines memory that is allocated and registered with
+ * the HBA firmware. When adding or removing fields from this structure,
+ * the alignment must match the HBA firmware.
+ */
+
+struct lpfc_cgn_info {
+ /* Header */
+ __le16 cgn_info_size; /* is sizeof(struct lpfc_cgn_info) */
+ uint8_t cgn_info_version; /* represents format of structure */
+#define LPFC_CGN_INFO_V1 1
+#define LPFC_CGN_INFO_V2 2
+#define LPFC_CGN_INFO_V3 3
+ uint8_t cgn_info_mode; /* 0=off 1=managed 2=monitor only */
+ uint8_t cgn_info_detect;
+ uint8_t cgn_info_action;
+ uint8_t cgn_info_level0;
+ uint8_t cgn_info_level1;
+ uint8_t cgn_info_level2;
+
+ /* Start Time */
+ uint8_t cgn_info_month;
+ uint8_t cgn_info_day;
+ uint8_t cgn_info_year;
+ uint8_t cgn_info_hour;
+ uint8_t cgn_info_minute;
+ uint8_t cgn_info_second;
+
+ /* minute / hours / daily indices */
+ uint8_t cgn_index_minute;
+ uint8_t cgn_index_hour;
+ uint8_t cgn_index_day;
+
+ __le16 cgn_warn_freq;
+ __le16 cgn_alarm_freq;
+ __le16 cgn_lunq;
+ uint8_t cgn_pad1[8];
+
+ /* Driver Information */
+ __le16 cgn_drvr_min[60];
+ __le32 cgn_drvr_hr[24];
+ __le32 cgn_drvr_day[LPFC_MAX_CGN_DAYS];
+
+ /* Congestion Warnings */
+ __le16 cgn_warn_min[60];
+ __le32 cgn_warn_hr[24];
+ __le32 cgn_warn_day[LPFC_MAX_CGN_DAYS];
+
+ /* Latency Information */
+ __le32 cgn_latency_min[60];
+ __le32 cgn_latency_hr[24];
+ __le32 cgn_latency_day[LPFC_MAX_CGN_DAYS];
+
+ /* Bandwidth Information */
+ __le16 cgn_bw_min[60];
+ __le16 cgn_bw_hr[24];
+ __le16 cgn_bw_day[LPFC_MAX_CGN_DAYS];
+
+ /* Congestion Alarms */
+ __le16 cgn_alarm_min[60];
+ __le32 cgn_alarm_hr[24];
+ __le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
+
+ struct_group(cgn_stat,
+ uint8_t cgn_stat_npm; /* Notifications per minute */
+
+ /* Start Time */
+ uint8_t cgn_stat_month;
+ uint8_t cgn_stat_day;
+ uint8_t cgn_stat_year;
+ uint8_t cgn_stat_hour;
+ uint8_t cgn_stat_minute;
+ uint8_t cgn_pad2[2];
+
+ __le32 cgn_notification;
+ __le32 cgn_peer_notification;
+ __le32 link_integ_notification;
+ __le32 delivery_notification;
+
+ uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
+ uint8_t cgn_stat_cgn_day;
+ uint8_t cgn_stat_cgn_year;
+ uint8_t cgn_stat_cgn_hour;
+ uint8_t cgn_stat_cgn_min;
+ uint8_t cgn_stat_cgn_sec;
+
+ uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
+ uint8_t cgn_stat_peer_day;
+ uint8_t cgn_stat_peer_year;
+ uint8_t cgn_stat_peer_hour;
+ uint8_t cgn_stat_peer_min;
+ uint8_t cgn_stat_peer_sec;
+
+ uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
+ uint8_t cgn_stat_lnk_day;
+ uint8_t cgn_stat_lnk_year;
+ uint8_t cgn_stat_lnk_hour;
+ uint8_t cgn_stat_lnk_min;
+ uint8_t cgn_stat_lnk_sec;
+
+ uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
+ uint8_t cgn_stat_del_day;
+ uint8_t cgn_stat_del_year;
+ uint8_t cgn_stat_del_hour;
+ uint8_t cgn_stat_del_min;
+ uint8_t cgn_stat_del_sec;
+ );
+
+ __le32 cgn_info_crc;
+#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
+#define LPFC_CGN_CRC32_SEED 0xFFFFFFFF
+};
+
+#define LPFC_CGN_INFO_SZ (sizeof(struct lpfc_cgn_info) - \
+ sizeof(uint32_t))
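
LPFC_CGN_INFO_SZ deliberately excludes the trailing __le32 cgn_info_crc, so the checksum covers everything before it. A sketch of that layout in use, assuming a CRC32 helper seeded with LPFC_CGN_CRC32_SEED (the driver's actual CRC routine may differ):

/* Illustrative only: checksum the buffer up to, but not including, the CRC. */
static u32 example_cgn_crc(struct lpfc_cgn_info *cp,
			   u32 (*crc32_fn)(u32 seed, const void *data, size_t len))
{
	return crc32_fn(LPFC_CGN_CRC32_SEED, cp, LPFC_CGN_INFO_SZ);
}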
+
+struct lpfc_cgn_stat {
+ atomic64_t total_bytes;
+ atomic64_t rcv_bytes;
+ atomic64_t rx_latency;
+#define LPFC_CGN_NOT_SENT 0xFFFFFFFFFFFFFFFFLL
+ atomic_t rx_io_cnt;
+};
+
+struct lpfc_cgn_acqe_stat {
+ atomic64_t alarm;
+ atomic64_t warn;
};
struct lpfc_vport {
@@ -377,6 +588,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -395,6 +607,7 @@ struct lpfc_vport {
#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
+#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
struct list_head fc_nodes;
@@ -445,16 +658,19 @@ struct lpfc_vport {
#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */
+#define WORKER_CHECK_INACTIVE_VMID 0x4000 /* hba: check inactive vmids */
+#define WORKER_CHECK_VMID_ISSUE_QFPA 0x8000 /* vport: Check if qfpa needs
+ * to be issued */
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- int unreg_vpi_cmpl;
-
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */
+#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */
+#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
@@ -473,28 +689,53 @@ struct lpfc_vport {
uint32_t cfg_tgt_queue_depth;
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
+ /* VMID parameters */
+ u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
+ u32 max_vmid; /* maximum VMIDs allowed per port */
+ u32 cur_vmid_cnt; /* Current VMID count */
+#define LPFC_MIN_VMID 4
+#define LPFC_MAX_VMID 255
+ u32 vmid_inactivity_timeout; /* Time after which the VMID */
+ /* deregisters from switch */
+ u32 vmid_priority_tagging;
+#define LPFC_VMID_PRIO_TAG_DISABLE 0 /* Disable */
+#define LPFC_VMID_PRIO_TAG_SUP_TARGETS 1 /* Allow supported targets only */
+#define LPFC_VMID_PRIO_TAG_ALL_TARGETS 2 /* Allow all targets */
+ unsigned long *vmid_priority_range;
+#define LPFC_VMID_MAX_PRIORITY_RANGE 256
+#define LPFC_VMID_PRIORITY_BITMAP_SIZE 32
+ u8 vmid_flag;
+#define LPFC_VMID_IN_USE 0x1
+#define LPFC_VMID_ISSUE_QFPA 0x2
+#define LPFC_VMID_QFPA_CMPL 0x4
+#define LPFC_VMID_QOS_ENABLED 0x8
+#define LPFC_VMID_TIMER_ENBLD 0x10
+#define LPFC_VMID_TYPE_PRIO 0x20
+ struct fc_qfpa_res *qfpa_res;
struct fc_vport *fc_vport;
+ struct lpfc_vmid *vmid;
+ DECLARE_HASHTABLE(hash_table, 8);
+ rwlock_t vmid_lock;
+ struct lpfc_vmid_priority_info vmid_priority;
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
struct dentry *debug_scsistat;
- struct dentry *debug_nvmektime;
- struct dentry *debug_cpucheck;
+ struct dentry *debug_ioktime;
+ struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
- uint8_t stat_data_enabled;
- uint8_t stat_data_blocked;
struct list_head rcv_buffer_list;
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
-#define STATIC_VPORT 1
-#define FAWWPN_SET 2
-#define FAWWPN_PARAM_CHG 4
+#define STATIC_VPORT 0x1
+#define FAWWPN_PARAM_CHG 0x2
uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
@@ -630,6 +871,32 @@ struct lpfc_ras_fwlog {
enum ras_state state; /* RAS logging running state */
};
+#define DBG_LOG_STR_SZ 256
+#define DBG_LOG_SZ 256
+
+struct dbg_log_ent {
+ char log[DBG_LOG_STR_SZ];
+ u64 t_ns;
+};
+
+enum lpfc_irq_chann_mode {
+ /* Assign IRQs to all possible CPUs that have hardware queues */
+ NORMAL_MODE,
+
+ /* Assign IRQs only to CPUs on the same NUMA node as the HBA */
+ NUMA_MODE,
+
+ /* Assign IRQs only on non-hyperthreaded CPUs. This is the
+ * same as normal_mode, but assigns IRQs only on physical CPUs.
+ */
+ NHT_MODE,
+};
+
+enum lpfc_hba_bit_flags {
+ FABRIC_COMANDS_BLOCKED,
+ HBA_PCI_ERR,
+};
+
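Unlike the hba_flag masks above, these enumerators are bit indexes into phba->bit_flags, intended for the atomic set_bit/test_bit helpers. A sketch of the expected use (the -ENODEV fast-fail is illustrative):

/* Illustrative: latch and test a PCI error condition on the HBA. */
set_bit(HBA_PCI_ERR, &phba->bit_flags);

if (test_bit(HBA_PCI_ERR, &phba->bit_flags))
	return -ENODEV;	/* fast-fail new I/O while the error is pending */
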
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -646,16 +913,25 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_io_buf *,
struct lpfc_nodelist *);
+ int (*lpfc_scsi_prep_cmnd_buf)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo);
+ int (*lpfc_scsi_prep_task_mgmt_cmd)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+ int (*__lpfc_sli_issue_fcp_io)
+ (struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag);
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
- IOCB_t * (*lpfc_get_iocb_from_iocbq)
- (struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
@@ -688,7 +964,24 @@ struct lpfc_hba {
int (*lpfc_bg_scsi_prep_dma_buf)
(struct lpfc_hba *, struct lpfc_io_buf *);
- /* Add new entries here */
+
+ /* Prep SLI WQE/IOCB jump table entries */
+ void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd,
+ u8 tmo, u8 expect_rsp);
+ void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u32 num_entry, u8 tmo);
+ void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u16 ox_id, u32 num_entry, u8 rctl,
+ u8 last_seq, u8 cr_cx_cmd);
+ void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag,
+ u8 ulp_class, u16 cqid, bool ia,
+ bool wqec);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
@@ -699,6 +992,9 @@ struct lpfc_hba {
struct workqueue_struct *wq;
struct delayed_work eq_delay_work;
+#define LPFC_IDLE_STAT_DELAY 1000
+ struct delayed_work idle_stat_delay_work;
+
struct lpfc_sli sli;
uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
@@ -723,7 +1019,9 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
+#define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -732,7 +1030,7 @@ struct lpfc_hba {
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
-#define ELS_XRI_ABORT_EVENT 0x40
+#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
@@ -742,14 +1040,23 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
-#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
-
+#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
+#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
+#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
+#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
+#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
+#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
+#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
+#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
+#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */
+
+ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -797,7 +1104,6 @@ struct lpfc_hba {
uint8_t wwpn[8];
uint32_t RandomData[7];
uint8_t fcp_embed_io;
- uint8_t nvme_support; /* Firmware supports NVME */
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
@@ -837,7 +1143,6 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
- uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -851,8 +1156,6 @@ struct lpfc_hba {
uint32_t cfg_nvme_seg_cnt;
uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
- uint64_t cfg_soft_wwnn;
- uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
@@ -877,17 +1180,27 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
+ u32 cfg_fcp_wait_abts_rsp;
uint32_t cfg_delay_discovery;
uint32_t cfg_sli_mode;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
- uint32_t cfg_enable_dss;
uint32_t cfg_fdmi_on;
#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
@@ -898,13 +1211,18 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
+ uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
+ u32 cfg_max_vmid; /* maximum VMIDs allowed per port */
+ u32 cfg_vmid_app_header;
+#define LPFC_VMID_APP_HEADER_DISABLE 0
+#define LPFC_VMID_APP_HEADER_ENABLE 1
+ u32 cfg_vmid_priority_tagging;
+ u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */
+ /* deregisters from switch */
struct pci_dev *pcidev;
struct list_head work_list;
uint32_t work_ha; /* Host Attention Bits for WT */
@@ -969,7 +1287,6 @@ struct lpfc_hba {
#define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */
- uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
struct timer_list eratt_poll;
@@ -989,6 +1306,7 @@ struct lpfc_hba {
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
spinlock_t hbalock;
+ struct work_struct unblock_request_work; /* SCSI layer unblock IOs */
/* dma_mem_pools */
struct dma_pool *lpfc_sg_dma_buf_pool;
@@ -1006,6 +1324,7 @@ struct lpfc_hba {
mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
+ enum lpfc_irq_chann_mode irq_chann_mode;
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
@@ -1033,7 +1352,6 @@ struct lpfc_hba {
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
-#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
@@ -1061,6 +1379,8 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_cgn_buffer;
+ struct dentry *debug_rx_monitor;
struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
@@ -1105,26 +1425,14 @@ struct lpfc_hba {
unsigned long last_completion_time;
unsigned long skipped_hb;
struct timer_list hb_tmofunc;
- uint8_t hb_outstanding;
struct timer_list rrq_tmr;
enum hba_temp_state over_temp_state;
- /* ndlp reference management */
- spinlock_t ndlp_lock;
/*
* Following bit will be set for all buffer tags which are not
* associated with any HBQ.
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- int wait_4_mlo_maint_flg;
- wait_queue_head_t wait_4_mlo_m_q;
- /* data structure used for latency data collection */
-#define LPFC_NO_BUCKET 0
-#define LPFC_LINEAR_BUCKET 1
-#define LPFC_POWER2_BUCKET 2
- uint8_t bucket_type;
- uint32_t bucket_base;
- uint32_t bucket_step;
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
@@ -1147,17 +1455,14 @@ struct lpfc_hba {
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
+ struct timer_list inactive_vmid_poll;
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
- uint8_t menlo_flag; /* menlo generic flags */
-#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
- uint8_t fips_spec_rev;
- uint8_t fips_level;
spinlock_t devicelock; /* lock for luns list */
mempool_t *device_data_mem_pool;
struct list_head luns;
@@ -1175,12 +1480,11 @@ struct lpfc_hba {
uint16_t sfp_warning;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint16_t cpucheck_on;
+ uint16_t hdwqstat_on;
#define LPFC_CHECK_OFF 0
#define LPFC_CHECK_NVME_IO 1
-#define LPFC_CHECK_NVMET_RCV 2
-#define LPFC_CHECK_NVMET_IO 4
-#define LPFC_CHECK_SCSI_IO 8
+#define LPFC_CHECK_NVMET_IO 2
+#define LPFC_CHECK_SCSI_IO 4
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
@@ -1216,6 +1520,73 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+ /* CMF objects */
+ struct lpfc_cgn_stat __percpu *cmf_stat;
+ uint32_t cmf_interval_rate; /* timer interval limit in ms */
+ uint32_t cmf_timer_cnt;
+#define LPFC_CMF_INTERVAL 90
+ uint64_t cmf_link_byte_count;
+ uint64_t cmf_max_line_rate;
+ uint64_t cmf_max_bytes_per_interval;
+ uint64_t cmf_last_sync_bw;
+#define LPFC_CMF_BLK_SIZE 512
+ struct hrtimer cmf_timer;
+ atomic_t cmf_bw_wait;
+ atomic_t cmf_busy;
+ atomic_t cmf_stop_io; /* To block request and stop IO's */
+ uint32_t cmf_active_mode;
+ uint32_t cmf_info_per_interval;
+#define LPFC_MAX_CMF_INFO 32
+ struct timespec64 cmf_latency; /* Interval congestion timestamp */
+ uint32_t cmf_last_ts; /* Interval congestion time (ms) */
+ uint32_t cmf_active_info;
+
+ /* Signal / FPIN handling for Congestion Mgmt */
+ u8 cgn_reg_fpin; /* Negotiated value from RDF */
+ u8 cgn_init_reg_fpin; /* Initial value from READ_CONFIG */
+#define LPFC_CGN_FPIN_NONE 0x0
+#define LPFC_CGN_FPIN_WARN 0x1
+#define LPFC_CGN_FPIN_ALARM 0x2
+#define LPFC_CGN_FPIN_BOTH (LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
+
+ u8 cgn_reg_signal; /* Negotiated value from EDC */
+ u8 cgn_init_reg_signal; /* Initial value from READ_CONFIG */
+ /* cgn_reg_signal and cgn_init_reg_signal use
+ * enum fc_edc_cg_signal_cap_types
+ */
+ u16 cgn_fpin_frequency; /* In units of msecs */
+#define LPFC_FPIN_INIT_FREQ 0xffff
+ u32 cgn_sig_freq;
+ u32 cgn_acqe_cnt;
+
+ /* RX monitor handling for CMF */
+ struct lpfc_rx_info_monitor *rx_monitor;
+ atomic_t rx_max_read_cnt; /* Maximum read bytes */
+ uint64_t rx_block_cnt;
+
+ /* Congestion parameters from flash */
+ struct lpfc_cgn_param cgn_p;
+
+ /* Statistics counter for ACQE cgn alarms and warnings */
+ struct lpfc_cgn_acqe_stat cgn_acqe_stat;
+
+ /* Congestion buffer information */
+ struct lpfc_dmabuf *cgn_i; /* Congestion Info buffer */
+ atomic_t cgn_fabric_warn_cnt; /* Total warning cgn events for info */
+ atomic_t cgn_fabric_alarm_cnt; /* Total alarm cgn events for info */
+ atomic_t cgn_sync_warn_cnt; /* Total warning events for SYNC wqe */
+ atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */
+ atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */
+ atomic_t cgn_latency_evt_cnt;
+ struct timespec64 cgn_daily_ts;
+ atomic64_t cgn_latency_evt; /* Avg latency per minute */
+ unsigned long cgn_evt_timestamp;
+#define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */
+ uint32_t cgn_evt_minute;
+#define LPFC_SEC_MIN 60
+#define LPFC_MIN_HOUR 60
+#define LPFC_HOUR_DAY 24
+#define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY)
struct hlist_node cpuhp; /* used for cpuhp per hba callback */
struct timer_list cpuhp_poll_timer;
@@ -1225,6 +1596,40 @@ struct lpfc_hba {
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN];
+
+ /* LD Signaling */
+ u32 degrade_activate_threshold;
+ u32 degrade_deactivate_threshold;
+ u32 fec_degrade_interval;
+
+ atomic_t dbg_log_idx;
+ atomic_t dbg_log_cnt;
+ atomic_t dbg_log_dmping;
+ struct dbg_log_ent dbg_log[DBG_LOG_SZ];
+};
+
+#define LPFC_MAX_RXMONITOR_ENTRY 800
+#define LPFC_MAX_RXMONITOR_DUMP 32
+struct rx_info_entry {
+ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
+ uint64_t total_bytes; /* Total no of read bytes requested */
+ uint64_t rcv_bytes; /* Total no of read bytes completed */
+ uint64_t avg_io_size;
+ uint64_t avg_io_latency;/* Average io latency in microseconds */
+ uint64_t max_read_cnt; /* Maximum read bytes */
+ uint64_t max_bytes_per_interval;
+ uint32_t cmf_busy;
+ uint32_t cmf_info; /* CMF_SYNC_WQE info */
+ uint32_t io_cnt;
+ uint32_t timer_utilization;
+ uint32_t timer_interval;
+};
+
+struct lpfc_rx_info_monitor {
+ struct rx_info_entry *ring; /* info organized in a circular buffer */
+ u32 head_idx, tail_idx; /* index to head/tail of ring */
+ spinlock_t lock; /* spinlock for ring */
+ u32 entries; /* number of entries / size of the ring */
};
static inline struct Scsi_Host *
@@ -1315,19 +1720,19 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
- * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
- * @numa_mask: Pointer to phba's numa_mask member.
+ * lpfc_next_online_cpu - Finds next online CPU on cpumask
+ * @mask: Pointer to phba's cpumask member.
* @start: starting cpu index
*
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
static inline unsigned int
-lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
unsigned int cpu_it;
- for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ for_each_cpu_wrap(cpu_it, mask, start) {
if (cpu_online(cpu_it))
break;
}
@@ -1353,3 +1758,128 @@ lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
eq->q_mode = delay;
}
+
+
+/*
+ * Macro that declares a table and a routine to perform an enum type to
+ * ASCII string lookup.
+ *
+ * Defines a <key,value> table for an enum. Uses the xxx_INIT defines for
+ * the enum to populate the table. The macro defines a routine (named
+ * by the caller) that will search all elements of the table for the key
+ * and return the name string if found, or "Unrecognized" if not found.
+ */
+#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init) \
+static struct { \
+ enum enum_name value; \
+ char *name; \
+} fc_##enum_name##_e2str_names[] = enum_init; \
+static const char *routine(enum enum_name table_key) \
+{ \
+ int i; \
+ char *name = "Unrecognized"; \
+ \
+ for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\
+ if (fc_##enum_name##_e2str_names[i].value == table_key) {\
+ name = fc_##enum_name##_e2str_names[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
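
A usage sketch of the macro; the enum and its _INIT table below are hypothetical, shown only to illustrate the expected <key,value> shape:

enum example_speed { EX_1G, EX_2G };
#define EX_SPEED_INIT { { EX_1G, "1 Gbit" }, { EX_2G, "2 Gbit" } }

DECLARE_ENUM2STR_LOOKUP(example_speed_to_str, example_speed, EX_SPEED_INIT);

/* example_speed_to_str(EX_2G) yields "2 Gbit"; any unknown key yields
 * "Unrecognized".
 */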
+
+/**
+ * lpfc_is_vmid_enabled - returns whether VMID is enabled for either switch type
+ * @phba: Pointer to HBA context object.
+ *
+ * Relationship between the enable flag, target support, and whether a VMID
+ * tag is required for the particular combination:
+ * ---------------------------------------------------
+ * Switch Enable Flag Target Support VMID Needed
+ * ---------------------------------------------------
+ * App Id 0 NA N
+ * App Id 1 0 N
+ * App Id 1 1 Y
+ * Pr Tag 0 NA N
+ * Pr Tag 1 0 N
+ * Pr Tag 1 1 Y
+ * Pr Tag 2 * Y
+ ---------------------------------------------------
+ *
+ **/
+static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
+{
+ return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
+}
+
+static inline
+u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
+ else
+ return iocbq->iocb.ulpStatus;
+}
+
+static inline
+u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.parameter;
+ else
+ return iocbq->iocb.un.ulpWord[4];
+}
+
+static inline
+u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpCommand;
+}
+
+static inline
+u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpContext;
+}
+
+static inline
+u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.unsli3.rcvsli3.ox_id;
+}
+
+static inline
+u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.total_data_placed;
+ else
+ return iocbq->iocb.un.genreq64.bdl.bdeSize;
+}
+
+static inline
+u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wqe.abort_cmd.wqe_com.abort_tag;
+ else
+ return iocbq->iocb.un.acxri.abortIoTag;
+}
+
+static inline
+u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest);
+ else
+ return iocbq->iocb.un.elsreq64.remoteID;
+}
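
A minimal sketch of how a completion path can stay SLI-revision agnostic through these accessors (illustrative only; a real handler also decodes the extended status carried in word4):

static bool example_cmpl_ok(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocbq)
{
	/* Both reads dispatch on phba->sli_rev internally. */
	u8 status = get_job_ulpstatus(phba, rspiocbq);
	u32 word4 = get_job_word4(phba, rspiocbq);

	return status == 0 && word4 == 0;	/* assumes IOSTAT_SUCCESS == 0 */
}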
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 46f56f30f77e..ef1481326fd7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -37,8 +37,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -48,7 +46,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
@@ -60,10 +57,8 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
-#define LPFC_DEF_MRQ_POST 512
-#define LPFC_MIN_MRQ_POST 512
-#define LPFC_MAX_MRQ_POST 2048
-
+#define LPFC_MAX_INFO_TMP_LEN 100
+#define LPFC_INFO_MORE_STR "\nCould be more info...\n"
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -119,6 +114,183 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
return;
}
+static ssize_t
+lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_cgn_info *cp = NULL;
+ struct lpfc_cgn_stat *cgs;
+ int len = 0;
+ int cpu;
+ u64 rcv, total;
+ char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
+
+ if (phba->cgn_i)
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ scnprintf(tmp, sizeof(tmp),
+ "Congestion Mgmt Info: E2Eattr %d Ver %d "
+ "CMF %d cnt %d\n",
+ phba->sli4_hba.pc_sli4_params.mi_ver,
+ cp ? cp->cgn_info_version : 0,
+ phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
+
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (!phba->sli4_hba.pc_sli4_params.cmf)
+ goto buffer_done;
+
+ switch (phba->cgn_init_reg_signal) {
+ case EDC_CG_SIG_WARN_ONLY:
+ scnprintf(tmp, sizeof(tmp),
+ "Register: Init: Signal:WARN ");
+ break;
+ case EDC_CG_SIG_WARN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ "Register: Init: Signal:WARN|ALARM ");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ "Register: Init: Signal:NONE ");
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ switch (phba->cgn_init_reg_fpin) {
+ case LPFC_CGN_FPIN_WARN:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN\n");
+ break;
+ case LPFC_CGN_FPIN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:ALARM\n");
+ break;
+ case LPFC_CGN_FPIN_BOTH:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN|ALARM\n");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:NONE\n");
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ switch (phba->cgn_reg_signal) {
+ case EDC_CG_SIG_WARN_ONLY:
+ scnprintf(tmp, sizeof(tmp),
+ " Current: Signal:WARN ");
+ break;
+ case EDC_CG_SIG_WARN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ " Current: Signal:WARN|ALARM ");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ " Current: Signal:NONE ");
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ switch (phba->cgn_reg_fpin) {
+ case LPFC_CGN_FPIN_WARN:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ case LPFC_CGN_FPIN_ALARM:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ case LPFC_CGN_FPIN_BOTH:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp),
+ "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+ break;
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
+ switch (phba->cmf_active_mode) {
+ case LPFC_CFG_OFF:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
+ break;
+ case LPFC_CFG_MANAGED:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
+ break;
+ case LPFC_CFG_MONITOR:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ switch (phba->cgn_p.cgn_param_mode) {
+ case LPFC_CFG_OFF:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Off ");
+ break;
+ case LPFC_CFG_MANAGED:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
+ break;
+ case LPFC_CFG_MONITOR:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
+ break;
+ default:
+ scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
+ }
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ total = 0;
+ rcv = 0;
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ total += atomic64_read(&cgs->total_bytes);
+ rcv += atomic64_read(&cgs->rcv_bytes);
+ }
+
+ scnprintf(tmp, sizeof(tmp),
+ "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
+ atomic_read(&phba->cmf_busy),
+ phba->cmf_active_info, rcv, total);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "Port_speed:%d Link_byte_cnt:%ld "
+ "Max_byte_per_interval:%ld\n",
+ lpfc_sli_port_speed_get(phba),
+ (unsigned long)phba->cmf_link_byte_count,
+ (unsigned long)phba->cmf_max_bytes_per_interval);
+ strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+
+ if (unlikely(len >= (PAGE_SIZE - 1))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6312 Catching potential buffer "
+ "overflow > PAGE_SIZE = %lu bytes\n",
+ PAGE_SIZE);
+ strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+ LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
+ }
+ return len;
+}
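
lpfc_cmf_info_show builds its sysfs output with a repeated scnprintf-into-tmp then strlcat-with-bail idiom; distilled, the pattern looks like this (a sketch; example_append is not in the driver):

/* Append one formatted chunk; returns false once the PAGE_SIZE buffer fills. */
static bool example_append(char *buf, const char *fmt, ...)
{
	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
	va_list args;

	va_start(args, fmt);
	vscnprintf(tmp, sizeof(tmp), fmt, args);
	va_end(args);

	return strlcat(buf, tmp, PAGE_SIZE) < PAGE_SIZE;
}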
+
/**
* lpfc_drvr_version_show - Return the Emulex driver string with version number
* @dev: class unused variable.
@@ -175,7 +347,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *statep;
int i;
int len = 0;
- char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+ char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -375,11 +547,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock(&vport->phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (!nrport)
continue;
@@ -494,8 +666,8 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
- atomic_read(&lport->xmt_fcp_err),
- atomic_read(&lport->xmt_fcp_wqerr));
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_err));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
@@ -519,11 +691,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
"6314 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE);
- strlcpy(buf + PAGE_SIZE - 1 -
- strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
- LPFC_NVME_INFO_MORE_STR,
- strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
- + 1);
+ strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+ LPFC_INFO_MORE_STR,
+ sizeof(LPFC_INFO_MORE_STR) + 1);
}
return len;
@@ -752,25 +922,6 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the Menlo Maintenance sli flag.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->sli.sli_flag & LPFC_MENLO_MAINT));
-}
-
-/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -871,7 +1022,7 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_state_show - Return the link state of the port
+ * lpfc_link_state_show - Return the link state of the port
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the state of the link.
@@ -939,10 +1090,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Menlo Maint Mode\n");
- else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -950,12 +1098,22 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (vport->fc_flag & FC_FABRIC)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Fabric\n");
- else
+ if (vport->fc_flag & FC_FABRIC) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag &
+ LPFC_FAWWPN_FABRIC)
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ " Fabric FA-PWWN\n");
+ else
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ " Fabric\n");
+ } else {
len += scnprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
+ }
}
}
@@ -1145,6 +1303,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
@@ -1183,8 +1344,7 @@ lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
msleep(20);
if (cnt++ > 250) { /* 5 secs */
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0466 %s %s\n",
- "Outstanding IO when ",
+ "0466 Outstanding IO when "
"bringing Adapter offline\n");
return 0;
}
@@ -1508,6 +1668,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/**
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
* @phba: lpfc_hba pointer.
+ * @opcode: The sli4 config command opcode.
*
* Description:
* Request SLI4 interface type-2 device to perform a physical register set
@@ -1539,25 +1700,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
- /* Disable SR-IOV virtual functions if enabled */
- if (phba->cfg_sriov_nr_virtfn) {
- pci_disable_sriov(pdev);
- phba->cfg_sriov_nr_virtfn = 0;
- }
+ if (opcode == LPFC_FW_DUMP) {
+ init_completion(&online_compl);
+ phba->fw_dump_cmpl = &online_compl;
+ } else {
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
- if (opcode == LPFC_FW_DUMP)
- phba->hba_flag |= HBA_FW_DUMP_OP;
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (status != 0)
+ return status;
- if (status != 0) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return status;
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
}
- /* wait for the device to be quiesced before firmware reset */
- msleep(100);
-
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
@@ -1586,24 +1747,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
return rc;
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE)
- goto out;
-
- init_completion(&online_compl);
- job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (!job_posted)
+ if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
goto out;
+ }
- wait_for_completion(&online_compl);
+ /* Firmware dump will trigger an HA_ERATT event, and
+ * lpfc_handle_eratt_s4 routine already handles bringing the port back
+ * online.
+ */
+ if (opcode == LPFC_FW_DUMP) {
+ wait_for_completion(phba->fw_dump_cmpl);
+ } else {
+ init_completion(&online_compl);
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
+ wait_for_completion(&online_compl);
+ }
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
+ /* If fw_dump was performed, first disable to clean up */
+ if (opcode == LPFC_FW_DUMP) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)
@@ -1693,8 +1872,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"0071 Set trunk mode failed with status: %d",
rc);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return 0;
}
@@ -1794,6 +1972,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
== 0)
status = lpfc_reset_pci_bus(phba);
+ else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
+ lpfc_issue_hb_tmo(phba);
else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
else
@@ -2231,66 +2411,6 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_fips_level_show - Return the current FIPS level for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
-}
-
-/**
- * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
-}
-
-/**
- * lpfc_dss_show - Return the current state of dss and the configured state
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the formatted text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_dss_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
- (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
- (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
- "" : "Not ");
-}
-
-/**
* lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -2316,11 +2436,6 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
-static inline bool lpfc_rangecheck(uint val, uint min, uint max)
-{
- return val >= min && val <= max;
-}
-
/**
* lpfc_enable_bbcr_set: Sets an attribute value.
* @phba: pointer to the adapter structure.
@@ -2340,18 +2455,18 @@ lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
{
if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3068 %s_enable_bbcr changed from %d to %d\n",
- LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+ "3068 lpfc_enable_bbcr changed from %d to "
+ "%d\n", phba->cfg_enable_bbcr, val);
phba->cfg_enable_bbcr = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
- LPFC_DRIVER_NAME, val);
+ "0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
+ "1\n", val);
return -EINVAL;
}
-/**
+/*
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -2377,7 +2492,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
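These comment blocks are demoted from kernel-doc (/**) to plain comments (/*) because they document generator macros: each lpfc_param_* macro stamps out a per-attribute function, and kernel-doc has no function body at the macro itself to attach to. A reduced, compilable model of the pattern (names and plumbing simplified):

#include <stdio.h>

struct cfg { unsigned int lun_queue_depth; };

/* Reduced model of the lpfc_param_show() generator: for each attribute
 * name, the macro emits a show function bound to the matching cfg field. */
#define param_show(attr)                                                \
static int attr##_show(const struct cfg *c, char *buf, int len)         \
{                                                                       \
        return snprintf(buf, len, "%u\n", c->attr);                     \
}

param_show(lun_queue_depth)   /* expands to lun_queue_depth_show() */

int main(void)
{
        struct cfg c = { .lun_queue_depth = 64 };
        char buf[16];

        lun_queue_depth_show(&c, buf, sizeof(buf));
        printf("%s", buf);    /* prints "64" */
        return 0;
}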
-/**
+/*
* lpfc_param_hex_show - Return a cfg attribute value in hex
*
* Description:
@@ -2405,7 +2520,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
-/**
+/*
* lpfc_param_init - Initializes a cfg attribute
*
* Description:
@@ -2439,7 +2554,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_param_set - Set a cfg attribute value
*
* Description:
@@ -2476,7 +2591,7 @@ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_param_store - Set a vport attribute value
*
* Description:
@@ -2516,7 +2631,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_show - Return decimal formatted cfg attribute value
*
* Description:
@@ -2540,7 +2655,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
-/**
+/*
* lpfc_vport_param_hex_show - Return hex formatted attribute value
*
* Description:
@@ -2565,7 +2680,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
-/**
+/*
* lpfc_vport_param_init - Initialize a vport cfg attribute
*
* Description:
@@ -2598,7 +2713,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_set - Set a vport cfg attribute
*
* Description:
@@ -2634,7 +2749,7 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_store - Set a vport attribute
*
* Description:
@@ -2690,7 +2805,6 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
-static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR_RO(lpfc_drvr_version);
static DEVICE_ATTR_RO(lpfc_enable_fip);
@@ -2705,15 +2819,12 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR_RO(lpfc_temp_sensor);
-static DEVICE_ATTR_RO(lpfc_fips_level);
-static DEVICE_ATTR_RO(lpfc_fips_rev);
-static DEVICE_ATTR_RO(lpfc_dss);
static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
+static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
-static char *lpfc_soft_wwn_key = "C99G71SL8032A";
#define WWN_SZ 8
/**
* lpfc_wwn_set - Convert string to the 8 byte WWN value.
@@ -2757,228 +2868,7 @@ lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
}
return 0;
}
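lpfc_wwn_set() turns a 16-hex-digit string into the 8-byte WWN that wwn_to_u64() later folds into a u64 for fc_host_port_name and friends. A standalone sketch of that conversion; error handling is trimmed and the optional "0x" prefix the driver tolerates is omitted here:

#include <stdio.h>
#include <stdint.h>
#include <ctype.h>

/* Fold 8 WWN bytes big-endian into a u64, like the kernel's wwn_to_u64(). */
static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | wwn[i];
        return v;
}

/* Minimal take on lpfc_wwn_set(): 16 hex digits -> 8 bytes. */
static int wwn_set(const char *buf, uint8_t wwn[8])
{
        int i;

        for (i = 0; i < 16; i++) {
                int c = buf[i], nibble;

                if (!isxdigit(c))
                        return -1;
                nibble = isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
                if (i % 2 == 0)
                        wwn[i / 2] = nibble << 4;
                else
                        wwn[i / 2] |= nibble;
        }
        return 0;
}

int main(void)
{
        uint8_t wwn[8];

        if (wwn_set("10000090fa8d2a00", wwn) == 0)
                printf("0x%llx\n", (unsigned long long)wwn_to_u64(wwn));
        return 0;
}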
-/**
- * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: containing the string lpfc_soft_wwn_key.
- * @count: must be size of lpfc_soft_wwn_key.
- *
- * Returns:
- * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
- * length of buf indicates success
- **/
-static ssize_t
-lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- unsigned int cnt = count;
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
-
- /*
- * We're doing a simple sanity check for soft_wwpn setting.
- * We require that the user write a specific key to enable
- * the soft_wwpn attribute to be settable. Once the attribute
- * is written, the enable key resets. If further updates are
- * desired, the key must be written again to re-enable the
- * attribute.
- *
- * The "key" is not secret - it is a hardcoded string shown
- * here. The intent is to protect against the random user or
- * application that is just writing attributes.
- */
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
- " be enabled: fawwpn is enabled\n");
- return -EINVAL;
- }
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if ((cnt != strlen(lpfc_soft_wwn_key)) ||
- (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
- return -EINVAL;
-
- phba->soft_wwn_enable = 1;
-
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- "lpfc%d: soft_wwpn assignment has been enabled.\n",
- phba->brd_no);
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- " The soft_wwpn feature is not supported by Broadcom.");
-
- return count;
-}
-static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
-
-/**
- * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwpn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwpn);
-}
-
-/**
- * lpfc_soft_wwpn_store - Set the ww port name of the adapter
- * @dev class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the wwpn in hexadecimal.
- * @count: number of wwpn bytes in buf
- *
- * Returns:
- * -EACCES hba reset not enabled, adapter over temp
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
- * -EIO error taking adapter offline or online
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct completion online_compl;
- int stat1 = 0, stat2 = 0;
- unsigned int cnt = count;
- u8 wwpn[WWN_SZ];
- int rc;
-
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
- spin_lock_irq(&phba->hbalock);
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
- spin_unlock_irq(&phba->hbalock);
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
- /* lock setting wwpn, wwnn down */
- phba->soft_wwn_enable = 0;
-
- rc = lpfc_wwn_set(buf, cnt, wwpn);
- if (rc) {
- /* not able to set wwpn, unlock it */
- phba->soft_wwn_enable = 1;
- return rc;
- }
-
- phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
- fc_host_port_name(shost) = phba->cfg_soft_wwpn;
- if (phba->cfg_soft_wwnn)
- fc_host_node_name(shost) = phba->cfg_soft_wwnn;
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
-
- stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (stat1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0463 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat1);
- init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
-
- wait_for_completion(&online_compl);
- if (stat2)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0464 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat2);
- return (stat1 || stat2) ? -EIO : count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwpn);
-
-/**
- * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwnn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwnn);
-}
-
-/**
- * lpfc_soft_wwnn_store - sets the ww node name of the adapter
- * @cdev: class device that is converted into a Scsi_host.
- * @buf: contains the ww node name in hexadecimal.
- * @count: number of wwnn bytes in buf.
- *
- * Returns:
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int cnt = count;
- u8 wwnn[WWN_SZ];
- int rc;
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- rc = lpfc_wwn_set(buf, cnt, wwnn);
- if (rc) {
- /* Allow wwnn to be set many times, as long as the enable
- * is set. However, once the wwpn is set, everything locks.
- */
- return rc;
- }
-
- phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: soft_wwnn set. Value will take effect upon "
- "setting of the soft_wwpn\n", phba->brd_no);
-
- return count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwnn);
/**
* lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
@@ -3273,9 +3163,11 @@ static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
* lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
* (OAS) operations.
* @phba: lpfc_hba pointer.
- * @ndlp: pointer to fcp target node.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
* @lun: the fc lun for setting oas state.
* @oas_state: the oas state to be set to the lun.
+ * @pri: priority
*
* Returns:
* SUCCESS : 0
@@ -3313,6 +3205,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
* @vpt_wwpn: wwpn of the vport associated with the returned lun
* @tgt_wwpn: wwpn of the target associated with the returned lun
* @lun_status: status of the returned lun
+ * @lun_pri: priority of the returned lun
*
* Returns the first or next lun enabled for OAS operations for the vport/target
* specified. If a lun is found, its vport wwpn, target wwpn and status is
@@ -3351,6 +3244,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
* @tgt_wwpn: target wwpn by reference.
* @lun: the fc lun for setting oas state.
* @oas_state: the oas state to be set to the oas_lun.
+ * @pri: priority
*
* This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
* a lun for OAS operations.
@@ -3425,6 +3319,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: buffer for passing information.
+ * @count: size of the data buffer
*
* This function sets the OAS state for lun. Before this function is called,
* the vport wwpn, target wwpn, and oas state need to be set.
@@ -3504,11 +3399,8 @@ unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
-LPFC_ATTR(sli_mode, 0, 0, 3,
- "SLI mode selector:"
- " 0 - auto (SLI-3 if supported),"
- " 2 - select SLI-2 even on SLI-3 capable HBAs,"
- " 3 - select SLI-3");
+LPFC_ATTR(sli_mode, 3, 3, 3,
+ "SLI mode selector: 3 - select SLI-3");
LPFC_ATTR_R(enable_npiv, 1, 0, 1,
"Enable NPIV functionality");
@@ -3517,6 +3409,15 @@ LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
/*
+ * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of
+ * aborted IO.
+ * The range is [0,1]. Default value is 0.
+ * 0, IO completes after ABTS issued (default).
+ * 1, IO completes after receipt of ABTS response or timeout.
+ */
+LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
+
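LPFC_ATTR_R() generates a module parameter plus a read-only sysfs attribute, with an init-time range check that falls back to the default when the supplied value is out of bounds (the "out of range, using default" messages elsewhere in this patch come from the same pattern). A reduced model of the generated init path; the real macro also creates the sysfs node and goes through lpfc_rangecheck():

#include <stdio.h>

static unsigned int cfg_fcp_wait_abts_rsp;

/* Model of the generated lpfc_fcp_wait_abts_rsp_init(): validate, else
 * restore the default. Range and default mirror the LPFC_ATTR_R() above. */
static int fcp_wait_abts_rsp_init(unsigned int val)
{
        if (val <= 1) {                 /* range [0,1] */
                cfg_fcp_wait_abts_rsp = val;
                return 0;
        }
        fprintf(stderr, "out of range, using default\n");
        cfg_fcp_wait_abts_rsp = 0;      /* default */
        return -1;
}

int main(void)
{
        fcp_wait_abts_rsp_init(1);
        printf("wait for ABTS response: %u\n", cfg_fcp_wait_abts_rsp);
        return 0;
}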
+/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
@@ -3697,16 +3598,14 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
- spin_lock(&vport->phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
remoteport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (rport && remoteport)
nvme_fc_set_remoteport_devloss(remoteport,
vport->cfg_devloss_tmo);
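The hunk above narrows the locking from the adapter-wide hbalock to the per-node ndlp->lock: the nvme rport pointer is snapshotted under the node's own lock, and the snapshot is then used with no lock held. A minimal sketch of that pattern with illustrative types (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct remoteport { int devloss_tmo; };
struct nrport { struct remoteport *remoteport; };
struct node {
        pthread_mutex_t lock;       /* stands in for ndlp->lock */
        struct nrport *nrport;
};

static void update_devloss(struct node *ndlp, int tmo)
{
        struct remoteport *remoteport = NULL;

        /* Snapshot the rport under the node's own lock only... */
        pthread_mutex_lock(&ndlp->lock);
        if (ndlp->nrport)
                remoteport = ndlp->nrport->remoteport;
        pthread_mutex_unlock(&ndlp->lock);

        /* ...then act on the snapshot without holding any lock. */
        if (remoteport)
                remoteport->devloss_tmo = tmo;
}

int main(void)
{
        struct remoteport rp = { 30 };
        struct nrport nr = { &rp };
        struct node n = { PTHREAD_MUTEX_INITIALIZER, &nr };

        update_devloss(&n, 60);
        printf("devloss_tmo = %d\n", rp.devloss_tmo);
        return 0;
}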
@@ -3846,8 +3745,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
@@ -3868,12 +3767,9 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,512]. Default value is 30.
-# If this parameter value is greater than 1/8th the maximum number of exchanges
-# supported by the HBA port, then the lun queue depth will be reduced to
-# 1/8th the maximum number of exchanges.
+# commands per FCP LUN.
*/
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
/*
@@ -3888,8 +3784,8 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
/**
- * lpfc_tgt_queue_depth_store: Sets an attribute value.
- * @phba: pointer the the adapter structure.
+ * lpfc_tgt_queue_depth_set: Sets an attribute value.
+ * @vport: lpfc vport structure pointer.
* @val: integer attribute value.
*
* Description: Sets the parameter to the new value.
@@ -4073,9 +3969,11 @@ LPFC_ATTR(topology, 0, 0, 6,
"Select Fibre Channel topology");
/**
- * lpfc_topology_set - Set the adapters topology field
- * @phba: lpfc_hba pointer.
- * @val: topology value.
+ * lpfc_topology_store - Set the adapters topology field
+ * @dev: class device that is converted into a scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
*
* Description:
* If val is in a valid range then set the adapter's topology field and
@@ -4100,6 +3998,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
const char *val_buf = buf;
int err;
uint32_t prev_val;
+ u8 sli_family, if_type;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -4123,13 +4022,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
/*
* The 'topology' is not a configurable parameter if :
* - persistent topology enabled
- * - G7/G6 with no private loop support
+ * - ASIC_GEN_NUM >= 0xC, with no private loop support
*/
-
+ sli_family = bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
- (!phba->sli4_hba.pc_sli4_params.pls &&
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+ if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
@@ -4191,340 +4093,17 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
*/
static DEVICE_ATTR_RO(lpfc_static_vport);
-/**
- * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @buf: Data buffer.
- * @count: Size of the data buffer.
- *
- * This function get called when a user write to the lpfc_stat_data_ctrl
- * sysfs file. This function parse the command written to the sysfs file
- * and take appropriate action. These commands are used for controlling
- * driver statistical data collection.
- * Following are the command this function handles.
- *
- * setbucket <bucket_type> <base> <step>
- * = Set the latency buckets.
- * destroybucket = destroy all the buckets.
- * start = start data collection
- * stop = stop data collection
- * reset = reset the collected data
- **/
-static ssize_t
-lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-#define LPFC_MAX_DATA_CTRL_LEN 1024
- static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
- unsigned long i;
- char *str_ptr, *token;
- struct lpfc_vport **vports;
- struct Scsi_Host *v_shost;
- char *bucket_type_str, *base_str, *step_str;
- unsigned long base, step, bucket_type;
-
- if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
- if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
- return -EINVAL;
-
- strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
- str_ptr = &bucket_data[0];
- /* Ignore this token - this is command token */
- token = strsep(&str_ptr, "\t ");
- if (!token)
- return -EINVAL;
-
- bucket_type_str = strsep(&str_ptr, "\t ");
- if (!bucket_type_str)
- return -EINVAL;
-
- if (!strncmp(bucket_type_str, "linear", strlen("linear")))
- bucket_type = LPFC_LINEAR_BUCKET;
- else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
- bucket_type = LPFC_POWER2_BUCKET;
- else
- return -EINVAL;
-
- base_str = strsep(&str_ptr, "\t ");
- if (!base_str)
- return -EINVAL;
- base = simple_strtoul(base_str, NULL, 0);
-
- step_str = strsep(&str_ptr, "\t ");
- if (!step_str)
- return -EINVAL;
- step = simple_strtoul(step_str, NULL, 0);
- if (!step)
- return -EINVAL;
-
- /* Block the data collection for every vport */
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(v_shost->host_lock);
- /* Block and reset data collection */
- vports[i]->stat_data_blocked = 1;
- if (vports[i]->stat_data_enabled)
- lpfc_vport_reset_stat_data(vports[i]);
- spin_unlock_irq(v_shost->host_lock);
- }
-
- /* Set the bucket attributes */
- phba->bucket_type = bucket_type;
- phba->bucket_base = base;
- phba->bucket_step = step;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
-
- /* Unblock data collection */
- spin_lock_irq(v_shost->host_lock);
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(v_shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->stat_data_blocked = 1;
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- phba->bucket_type = LPFC_NO_BUCKET;
- phba->bucket_base = 0;
- phba->bucket_step = 0;
- return strlen(buf);
- }
-
- if (!strncmp(buf, "start", strlen("start"))) {
- /* If no buckets configured return error */
- if (phba->bucket_type == LPFC_NO_BUCKET)
- return -EINVAL;
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_alloc_bucket(vport);
- vport->stat_data_enabled = 1;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "stop", strlen("stop"))) {
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled == 0) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "reset", strlen("reset"))) {
- if ((phba->bucket_type == LPFC_NO_BUCKET)
- || !vport->stat_data_enabled)
- return strlen(buf);
- spin_lock_irq(shost->host_lock);
- vport->stat_data_blocked = 1;
- lpfc_vport_reset_stat_data(vport);
- vport->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- return -EINVAL;
-}
-
-
-/**
- * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device object.
- * @buf: Data buffer.
- *
- * This function is the read call back function for
- * lpfc_stat_data_ctrl sysfs file. This function report the
- * current statistical data collection state.
- **/
-static ssize_t
-lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int index = 0;
- int i;
- char *bucket_type;
- unsigned long bucket_value;
-
- switch (phba->bucket_type) {
- case LPFC_LINEAR_BUCKET:
- bucket_type = "linear";
- break;
- case LPFC_POWER2_BUCKET:
- bucket_type = "power2";
- break;
- default:
- bucket_type = "No Bucket";
- break;
- }
-
- sprintf(&buf[index], "Statistical Data enabled :%d, "
- "blocked :%d, Bucket type :%s, Bucket base :%d,"
- " Bucket step :%d\nLatency Ranges :",
- vport->stat_data_enabled, vport->stat_data_blocked,
- bucket_type, phba->bucket_base, phba->bucket_step);
- index = strlen(buf);
- if (phba->bucket_type != LPFC_NO_BUCKET) {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- if (phba->bucket_type == LPFC_LINEAR_BUCKET)
- bucket_value = phba->bucket_base +
- phba->bucket_step * i;
- else
- bucket_value = phba->bucket_base +
- (1 << i) * phba->bucket_step;
-
- if (index + 10 > PAGE_SIZE)
- break;
- sprintf(&buf[index], "%08ld ", bucket_value);
- index = strlen(buf);
- }
- }
- sprintf(&buf[index], "\n");
- return strlen(buf);
-}
-
-/*
- * Sysfs attribute to control the statistical data collection.
- */
-static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
-
-/*
- * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
- */
-
-/*
- * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
- * for each target.
- */
-#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
-#define MAX_STAT_DATA_SIZE_PER_TARGET \
- STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
-
-
-/**
- * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
- * @filp: sysfs file
- * @kobj: Pointer to the kernel object
- * @bin_attr: Attribute object
- * @buff: Buffer pointer
- * @off: File offset
- * @count: Buffer size
- *
- * This function is the read call back function for lpfc_drvr_stat_data
- * sysfs file. This function export the statistical data to user
- * applications.
- **/
-static ssize_t
-sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev = container_of(kobj, struct device,
- kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int i = 0, index = 0;
- unsigned long nport_index;
- struct lpfc_nodelist *ndlp = NULL;
- nport_index = (unsigned long)off /
- MAX_STAT_DATA_SIZE_PER_TARGET;
-
- if (!vport->stat_data_enabled || vport->stat_data_blocked
- || (phba->bucket_type == LPFC_NO_BUCKET))
- return 0;
-
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
- continue;
-
- if (nport_index > 0) {
- nport_index--;
- continue;
- }
-
- if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
- > count)
- break;
-
- if (!ndlp->lat_data)
- continue;
-
- /* Print the WWN */
- sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
-
- index = strlen(buf);
-
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- sprintf(&buf[index], "%010u,",
- ndlp->lat_data[i].cmd_count);
- index = strlen(buf);
- }
- sprintf(&buf[index], "\n");
- index = strlen(buf);
- }
- spin_unlock_irq(shost->host_lock);
- return index;
-}
-
-static struct bin_attribute sysfs_drvr_stat_data_attr = {
- .attr = {
- .name = "lpfc_drvr_stat_data",
- .mode = S_IRUSR,
- },
- .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
- .read = sysfs_drvr_stat_data_read,
- .write = NULL,
-};
-
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
# Value range is [0,16]. Default value is 0.
*/
/**
- * lpfc_link_speed_set - Set the adapters link speed
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * lpfc_link_speed_store - Set the adapters link speed
+ * @dev: Pointer to class device.
+ * @attr: Unused.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
*
* Description:
* If val is in a valid range then set the adapter's link speed field and
@@ -4783,7 +4362,7 @@ static DEVICE_ATTR_RW(lpfc_aer_support);
* Description:
* If the @buf contains 1 and the device currently has the AER support
* enabled, then invokes the kernel AER helper routine
- * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
* error status register.
*
* Notes:
@@ -4809,7 +4388,7 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (phba->hba_flag & HBA_AER_ENABLED)
- rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+ rc = pci_aer_clear_nonfatal_status(phba->pcidev);
if (rc == 0)
return strlen(buf);
@@ -4921,7 +4500,7 @@ lpfc_param_show(sriov_nr_virtfn)
static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
/**
- * lpfc_request_firmware_store - Request for Linux generic firmware upgrade
+ * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -4943,7 +4522,7 @@ lpfc_request_firmware_upgrade_store(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- int val = 0, rc = -EINVAL;
+ int val = 0, rc;
/* Sanity check on user data */
if (!isdigit(buf[0]))
@@ -5275,8 +4854,8 @@ lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
- "%d out of range, using default\n",
+ "0371 lpfc_cq_max_proc_limit: %d out of range, using "
+ "default\n",
phba->cfg_cq_max_proc_limit);
return 0;
@@ -5285,7 +4864,7 @@ lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
/**
- * lpfc_state_show - Display current driver CPU affinity
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the state of the link.
@@ -5407,8 +4986,7 @@ static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int status = -EINVAL;
- return status;
+ return -EINVAL;
}
/*
@@ -5471,9 +5049,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-# is [0,1]. Default value is 0.
+# is [0,1]. Default value is 1.
*/
-LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
@@ -5533,8 +5111,6 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
spin_lock_irq(shost->host_lock);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
@@ -5770,17 +5346,69 @@ LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
-static inline void
-lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+#if IS_ENABLED(CONFIG_X86)
+/**
+ * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
+ * irq_chann_mode
+ * @phba: Pointer to HBA context object.
+ **/
+static void
+lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
+ const struct cpumask *sibling_mask;
+ struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ cpumask_clear(aff_mask);
+
+ if (phba->irq_chann_mode == NUMA_MODE) {
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE) {
+ phba->irq_chann_mode = NORMAL_MODE;
+ return;
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ switch (phba->irq_chann_mode) {
+ case NUMA_MODE:
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, aff_mask);
+ break;
+ case NHT_MODE:
+ sibling_mask = topology_sibling_cpumask(cpu);
+ first_cpu = cpumask_first(sibling_mask);
+ if (first_cpu < nr_cpu_ids)
+ cpumask_set_cpu(first_cpu, aff_mask);
+ break;
+ default:
+ break;
+ }
+ }
+}
+#endif
+
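In NHT_MODE the function above keeps only cpumask_first(topology_sibling_cpumask(cpu)) for each CPU, i.e. one thread per physical core. A worked standalone example on a hypothetical 8-CPU, 2-way-SMT topology:

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
        /* first_sibling[cpu] models cpumask_first(topology_sibling_cpumask(cpu))
         * for sibling pairs {0,4} {1,5} {2,6} {3,7} (hypothetical layout). */
        int first_sibling[NR_CPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };
        unsigned int aff_mask = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                aff_mask |= 1u << first_sibling[cpu];

        printf("aff_mask = 0x%x\n", aff_mask);  /* 0x0f: one thread per core */
        return 0;
}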
+static void
+lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
{
#if IS_ENABLED(CONFIG_X86)
- /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- phba->cfg_irq_numa = 1;
- else
- phba->cfg_irq_numa = 0;
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ /* If AMD architecture, then default is NUMA_MODE */
+ phba->irq_chann_mode = NUMA_MODE;
+ break;
+ case X86_VENDOR_INTEL:
+ /* If Intel architecture, then default is no hyperthread mode */
+ phba->irq_chann_mode = NHT_MODE;
+ break;
+ default:
+ phba->irq_chann_mode = NORMAL_MODE;
+ break;
+ }
+ lpfc_cpumask_irq_mode_init(phba);
#else
- phba->cfg_irq_numa = 0;
+ phba->irq_chann_mode = NORMAL_MODE;
#endif
}
@@ -5792,6 +5420,7 @@ lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
*
* 0 = Configure number of IRQ Channels to:
* if AMD architecture, number of CPUs on HBA's NUMA node
+ * if Intel architecture, number of physical CPUs.
* otherwise, number of active CPUs.
* [1,256] = Manually specify how many IRQ Channels to use.
*
@@ -5817,35 +5446,46 @@ MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
static int
lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
{
- const struct cpumask *numa_mask;
+ const struct cpumask *aff_mask;
if (phba->cfg_use_msi != 2) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8532 use_msi = %u ignoring cfg_irq_numa\n",
phba->cfg_use_msi);
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return 0;
}
/* Check if default setting was passed */
- if (val == LPFC_IRQ_CHANN_DEF)
- lpfc_assign_default_irq_numa(phba);
+ if (val == LPFC_IRQ_CHANN_DEF &&
+ phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
+ phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_assign_default_irq_chann(phba);
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
+ if (phba->irq_chann_mode != NORMAL_MODE) {
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
- if (cpumask_empty(numa_mask)) {
+ if (cpumask_empty(aff_mask)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "8533 Could not identify NUMA node, "
- "ignoring cfg_irq_numa\n");
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ "8533 Could not identify CPUS for "
+ "mode %d, ignoring\n",
+ phba->irq_chann_mode);
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
} else {
- phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ phba->cfg_irq_chann = cpumask_weight(aff_mask);
+
+ /* If no hyperthread mode, then set hdwq count to
+ * aff_mask weight as well
+ */
+ if (phba->irq_chann_mode == NHT_MODE)
+ phba->cfg_hdw_queue = phba->cfg_irq_chann;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8543 lpfc_irq_chann set to %u "
- "(numa)\n", phba->cfg_irq_chann);
+ "(mode: %d)\n", phba->cfg_irq_chann,
+ phba->irq_chann_mode);
}
} else {
if (val > LPFC_IRQ_CHANN_MAX) {
@@ -5856,10 +5496,15 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
val,
LPFC_IRQ_CHANN_MIN,
LPFC_IRQ_CHANN_MAX);
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return -EINVAL;
}
- phba->cfg_irq_chann = val;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ phba->cfg_irq_chann = val;
+ } else {
+ phba->cfg_irq_chann = 2;
+ phba->cfg_hdw_queue = 1;
+ }
}
return 0;
@@ -5927,7 +5572,7 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
-# lpfc_prot_mask: i
+# lpfc_prot_mask:
# - Bit mask of host protection capabilities used to register with the
# SCSI mid-layer
# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
@@ -5952,7 +5597,7 @@ LPFC_ATTR(prot_mask,
"T10-DIF host protection capabilities mask");
/*
-# lpfc_prot_guard: i
+# lpfc_prot_guard:
# - Bit mask of protection guard types to register with the SCSI mid-layer
# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
# - Allows you to ultimately specify which profiles to use
@@ -6014,7 +5659,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
- len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Cfg: %d SCSI: %d NVME: %d\n",
phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
return len;
@@ -6044,8 +5690,8 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
- "be set to %d, allowed range is [%d, %d]\n",
+ "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
+ "allowed range is [%d, %d]\n",
val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
return -EINVAL;
@@ -6138,6 +5784,19 @@ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
*/
LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
+/* Signaling module parameters */
+int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+module_param(lpfc_fabric_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
+
+int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
+
+int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
+module_param(lpfc_use_cgn_signal, int, 0444);
+MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
+
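Because these are bare module_param()s rather than LPFC_ATTR-generated attributes, they get no macro-generated range check; lpfc_get_cfgparam() clamps lpfc_fabric_cgn_frequency by hand (see the hunk near the end of this patch). A sketch of that manual validation; the EDC_CG_SIGFREQ bound values here are assumptions:

#include <stdio.h>

#define EDC_CG_SIGFREQ_CNT_MIN 1     /* assumed bound */
#define EDC_CG_SIGFREQ_CNT_MAX 999   /* assumed bound */

static int lpfc_fabric_cgn_frequency = 100;  /* 100 ms default */

int main(void)
{
        /* Same shape as the check in lpfc_get_cfgparam(). */
        if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
            lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
                lpfc_fabric_cgn_frequency = 100;  /* restore default */

        printf("fabric cgn freq: %d ms\n", lpfc_fabric_cgn_frequency);
        return 0;
}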
/*
* lpfc_enable_dpp: Enable DPP on G7
* 0 = DPP on G7 disabled
@@ -6146,157 +5805,219 @@ LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
*/
LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
-struct device_attribute *lpfc_hba_attrs[] = {
- &dev_attr_nvme_info,
- &dev_attr_scsi_stat,
- &dev_attr_bg_info,
- &dev_attr_bg_guard_err,
- &dev_attr_bg_apptag_err,
- &dev_attr_bg_reftag_err,
- &dev_attr_info,
- &dev_attr_serialnum,
- &dev_attr_modeldesc,
- &dev_attr_modelname,
- &dev_attr_programtype,
- &dev_attr_portnum,
- &dev_attr_fwrev,
- &dev_attr_hdw,
- &dev_attr_option_rom_version,
- &dev_attr_link_state,
- &dev_attr_num_discovered_ports,
- &dev_attr_menlo_mgmt_mode,
- &dev_attr_lpfc_drvr_version,
- &dev_attr_lpfc_enable_fip,
- &dev_attr_lpfc_temp_sensor,
- &dev_attr_lpfc_log_verbose,
- &dev_attr_lpfc_lun_queue_depth,
- &dev_attr_lpfc_tgt_queue_depth,
- &dev_attr_lpfc_hba_queue_depth,
- &dev_attr_lpfc_peer_port_login,
- &dev_attr_lpfc_nodev_tmo,
- &dev_attr_lpfc_devloss_tmo,
- &dev_attr_lpfc_enable_fc4_type,
- &dev_attr_lpfc_fcp_class,
- &dev_attr_lpfc_use_adisc,
- &dev_attr_lpfc_first_burst_size,
- &dev_attr_lpfc_ack0,
- &dev_attr_lpfc_xri_rebalancing,
- &dev_attr_lpfc_topology,
- &dev_attr_lpfc_scan_down,
- &dev_attr_lpfc_link_speed,
- &dev_attr_lpfc_fcp_io_sched,
- &dev_attr_lpfc_ns_query,
- &dev_attr_lpfc_fcp2_no_tgt_reset,
- &dev_attr_lpfc_cr_delay,
- &dev_attr_lpfc_cr_count,
- &dev_attr_lpfc_multi_ring_support,
- &dev_attr_lpfc_multi_ring_rctl,
- &dev_attr_lpfc_multi_ring_type,
- &dev_attr_lpfc_fdmi_on,
- &dev_attr_lpfc_enable_SmartSAN,
- &dev_attr_lpfc_max_luns,
- &dev_attr_lpfc_enable_npiv,
- &dev_attr_lpfc_fcf_failover_policy,
- &dev_attr_lpfc_enable_rrq,
- &dev_attr_nport_evt_cnt,
- &dev_attr_board_mode,
- &dev_attr_max_vpi,
- &dev_attr_used_vpi,
- &dev_attr_max_rpi,
- &dev_attr_used_rpi,
- &dev_attr_max_xri,
- &dev_attr_used_xri,
- &dev_attr_npiv_info,
- &dev_attr_issue_reset,
- &dev_attr_lpfc_poll,
- &dev_attr_lpfc_poll_tmo,
- &dev_attr_lpfc_task_mgmt_tmo,
- &dev_attr_lpfc_use_msi,
- &dev_attr_lpfc_nvme_oas,
- &dev_attr_lpfc_nvme_embed_cmd,
- &dev_attr_lpfc_fcp_imax,
- &dev_attr_lpfc_force_rscn,
- &dev_attr_lpfc_cq_poll_threshold,
- &dev_attr_lpfc_cq_max_proc_limit,
- &dev_attr_lpfc_fcp_cpu_map,
- &dev_attr_lpfc_fcp_mq_threshold,
- &dev_attr_lpfc_hdw_queue,
- &dev_attr_lpfc_irq_chann,
- &dev_attr_lpfc_suppress_rsp,
- &dev_attr_lpfc_nvmet_mrq,
- &dev_attr_lpfc_nvmet_mrq_post,
- &dev_attr_lpfc_nvme_enable_fb,
- &dev_attr_lpfc_nvmet_fb_size,
- &dev_attr_lpfc_enable_bg,
- &dev_attr_lpfc_soft_wwnn,
- &dev_attr_lpfc_soft_wwpn,
- &dev_attr_lpfc_soft_wwn_enable,
- &dev_attr_lpfc_enable_hba_reset,
- &dev_attr_lpfc_enable_hba_heartbeat,
- &dev_attr_lpfc_EnableXLane,
- &dev_attr_lpfc_XLanePriority,
- &dev_attr_lpfc_xlane_lun,
- &dev_attr_lpfc_xlane_tgt,
- &dev_attr_lpfc_xlane_vpt,
- &dev_attr_lpfc_xlane_lun_state,
- &dev_attr_lpfc_xlane_lun_status,
- &dev_attr_lpfc_xlane_priority,
- &dev_attr_lpfc_sg_seg_cnt,
- &dev_attr_lpfc_max_scsicmpl_time,
- &dev_attr_lpfc_stat_data_ctrl,
- &dev_attr_lpfc_aer_support,
- &dev_attr_lpfc_aer_state_cleanup,
- &dev_attr_lpfc_sriov_nr_virtfn,
- &dev_attr_lpfc_req_fw_upgrade,
- &dev_attr_lpfc_suppress_link_up,
- &dev_attr_iocb_hw,
- &dev_attr_pls,
- &dev_attr_pt,
- &dev_attr_txq_hw,
- &dev_attr_txcmplq_hw,
- &dev_attr_lpfc_fips_level,
- &dev_attr_lpfc_fips_rev,
- &dev_attr_lpfc_dss,
- &dev_attr_lpfc_sriov_hw_max_virtfn,
- &dev_attr_protocol,
- &dev_attr_lpfc_xlane_supported,
- &dev_attr_lpfc_enable_mds_diags,
- &dev_attr_lpfc_ras_fwlog_buffsize,
- &dev_attr_lpfc_ras_fwlog_level,
- &dev_attr_lpfc_ras_fwlog_func,
- &dev_attr_lpfc_enable_bbcr,
- &dev_attr_lpfc_enable_dpp,
+/*
+ * lpfc_enable_mi: Enable FDMI MIB
+ * 0 = disabled
+ * 1 = enabled (default)
+ * Value range is [0,1].
+ */
+LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
+
+/*
+ * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if
+ * either vmid_app_header or vmid_priority_tagging is enabled.
+ * 4 - 255 = vmid support enabled for 4-255 VMs
+ * Value range is [4,255].
+ */
+LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
+ "Maximum number of VMs supported");
+
+/*
+ * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours
+ * 0 = Timeout is disabled
+ * Value range is [0,24].
+ */
+LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
+ "Inactivity timeout in hours");
+
+/*
+ * lpfc_vmid_app_header: Enable App Header VMID support
+ * 0 = Support is disabled (default)
+ * 1 = Support is enabled
+ * Value range is [0,1].
+ */
+LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
+ LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
+ "Enable App Header VMID support");
+
+/*
+ * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support
+ * 0 = Support is disabled (default)
+ * 1 = Allow supported targets only
+ * 2 = Allow all targets
+ * Value range is [0,2].
+ */
+LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
+ LPFC_VMID_PRIO_TAG_DISABLE,
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS,
+ "Enable Priority Tagging VMID support");
+
+static struct attribute *lpfc_hba_attrs[] = {
+ &dev_attr_nvme_info.attr,
+ &dev_attr_scsi_stat.attr,
+ &dev_attr_bg_info.attr,
+ &dev_attr_bg_guard_err.attr,
+ &dev_attr_bg_apptag_err.attr,
+ &dev_attr_bg_reftag_err.attr,
+ &dev_attr_info.attr,
+ &dev_attr_serialnum.attr,
+ &dev_attr_modeldesc.attr,
+ &dev_attr_modelname.attr,
+ &dev_attr_programtype.attr,
+ &dev_attr_portnum.attr,
+ &dev_attr_fwrev.attr,
+ &dev_attr_hdw.attr,
+ &dev_attr_option_rom_version.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_num_discovered_ports.attr,
+ &dev_attr_lpfc_drvr_version.attr,
+ &dev_attr_lpfc_enable_fip.attr,
+ &dev_attr_lpfc_temp_sensor.attr,
+ &dev_attr_lpfc_log_verbose.attr,
+ &dev_attr_lpfc_lun_queue_depth.attr,
+ &dev_attr_lpfc_tgt_queue_depth.attr,
+ &dev_attr_lpfc_hba_queue_depth.attr,
+ &dev_attr_lpfc_peer_port_login.attr,
+ &dev_attr_lpfc_nodev_tmo.attr,
+ &dev_attr_lpfc_devloss_tmo.attr,
+ &dev_attr_lpfc_enable_fc4_type.attr,
+ &dev_attr_lpfc_fcp_class.attr,
+ &dev_attr_lpfc_use_adisc.attr,
+ &dev_attr_lpfc_first_burst_size.attr,
+ &dev_attr_lpfc_ack0.attr,
+ &dev_attr_lpfc_xri_rebalancing.attr,
+ &dev_attr_lpfc_topology.attr,
+ &dev_attr_lpfc_scan_down.attr,
+ &dev_attr_lpfc_link_speed.attr,
+ &dev_attr_lpfc_fcp_io_sched.attr,
+ &dev_attr_lpfc_ns_query.attr,
+ &dev_attr_lpfc_fcp2_no_tgt_reset.attr,
+ &dev_attr_lpfc_cr_delay.attr,
+ &dev_attr_lpfc_cr_count.attr,
+ &dev_attr_lpfc_multi_ring_support.attr,
+ &dev_attr_lpfc_multi_ring_rctl.attr,
+ &dev_attr_lpfc_multi_ring_type.attr,
+ &dev_attr_lpfc_fdmi_on.attr,
+ &dev_attr_lpfc_enable_SmartSAN.attr,
+ &dev_attr_lpfc_max_luns.attr,
+ &dev_attr_lpfc_enable_npiv.attr,
+ &dev_attr_lpfc_fcf_failover_policy.attr,
+ &dev_attr_lpfc_enable_rrq.attr,
+ &dev_attr_lpfc_fcp_wait_abts_rsp.attr,
+ &dev_attr_nport_evt_cnt.attr,
+ &dev_attr_board_mode.attr,
+ &dev_attr_max_vpi.attr,
+ &dev_attr_used_vpi.attr,
+ &dev_attr_max_rpi.attr,
+ &dev_attr_used_rpi.attr,
+ &dev_attr_max_xri.attr,
+ &dev_attr_used_xri.attr,
+ &dev_attr_npiv_info.attr,
+ &dev_attr_issue_reset.attr,
+ &dev_attr_lpfc_poll.attr,
+ &dev_attr_lpfc_poll_tmo.attr,
+ &dev_attr_lpfc_task_mgmt_tmo.attr,
+ &dev_attr_lpfc_use_msi.attr,
+ &dev_attr_lpfc_nvme_oas.attr,
+ &dev_attr_lpfc_nvme_embed_cmd.attr,
+ &dev_attr_lpfc_fcp_imax.attr,
+ &dev_attr_lpfc_force_rscn.attr,
+ &dev_attr_lpfc_cq_poll_threshold.attr,
+ &dev_attr_lpfc_cq_max_proc_limit.attr,
+ &dev_attr_lpfc_fcp_cpu_map.attr,
+ &dev_attr_lpfc_fcp_mq_threshold.attr,
+ &dev_attr_lpfc_hdw_queue.attr,
+ &dev_attr_lpfc_irq_chann.attr,
+ &dev_attr_lpfc_suppress_rsp.attr,
+ &dev_attr_lpfc_nvmet_mrq.attr,
+ &dev_attr_lpfc_nvmet_mrq_post.attr,
+ &dev_attr_lpfc_nvme_enable_fb.attr,
+ &dev_attr_lpfc_nvmet_fb_size.attr,
+ &dev_attr_lpfc_enable_bg.attr,
+ &dev_attr_lpfc_enable_hba_reset.attr,
+ &dev_attr_lpfc_enable_hba_heartbeat.attr,
+ &dev_attr_lpfc_EnableXLane.attr,
+ &dev_attr_lpfc_XLanePriority.attr,
+ &dev_attr_lpfc_xlane_lun.attr,
+ &dev_attr_lpfc_xlane_tgt.attr,
+ &dev_attr_lpfc_xlane_vpt.attr,
+ &dev_attr_lpfc_xlane_lun_state.attr,
+ &dev_attr_lpfc_xlane_lun_status.attr,
+ &dev_attr_lpfc_xlane_priority.attr,
+ &dev_attr_lpfc_sg_seg_cnt.attr,
+ &dev_attr_lpfc_max_scsicmpl_time.attr,
+ &dev_attr_lpfc_aer_support.attr,
+ &dev_attr_lpfc_aer_state_cleanup.attr,
+ &dev_attr_lpfc_sriov_nr_virtfn.attr,
+ &dev_attr_lpfc_req_fw_upgrade.attr,
+ &dev_attr_lpfc_suppress_link_up.attr,
+ &dev_attr_iocb_hw.attr,
+ &dev_attr_pls.attr,
+ &dev_attr_pt.attr,
+ &dev_attr_txq_hw.attr,
+ &dev_attr_txcmplq_hw.attr,
+ &dev_attr_lpfc_sriov_hw_max_virtfn.attr,
+ &dev_attr_protocol.attr,
+ &dev_attr_lpfc_xlane_supported.attr,
+ &dev_attr_lpfc_enable_mds_diags.attr,
+ &dev_attr_lpfc_ras_fwlog_buffsize.attr,
+ &dev_attr_lpfc_ras_fwlog_level.attr,
+ &dev_attr_lpfc_ras_fwlog_func.attr,
+ &dev_attr_lpfc_enable_bbcr.attr,
+ &dev_attr_lpfc_enable_dpp.attr,
+ &dev_attr_lpfc_enable_mi.attr,
+ &dev_attr_cmf_info.attr,
+ &dev_attr_lpfc_max_vmid.attr,
+ &dev_attr_lpfc_vmid_inactivity_timeout.attr,
+ &dev_attr_lpfc_vmid_app_header.attr,
+ &dev_attr_lpfc_vmid_priority_tagging.attr,
NULL,
};
-struct device_attribute *lpfc_vport_attrs[] = {
- &dev_attr_info,
- &dev_attr_link_state,
- &dev_attr_num_discovered_ports,
- &dev_attr_lpfc_drvr_version,
- &dev_attr_lpfc_log_verbose,
- &dev_attr_lpfc_lun_queue_depth,
- &dev_attr_lpfc_tgt_queue_depth,
- &dev_attr_lpfc_nodev_tmo,
- &dev_attr_lpfc_devloss_tmo,
- &dev_attr_lpfc_hba_queue_depth,
- &dev_attr_lpfc_peer_port_login,
- &dev_attr_lpfc_restrict_login,
- &dev_attr_lpfc_fcp_class,
- &dev_attr_lpfc_use_adisc,
- &dev_attr_lpfc_first_burst_size,
- &dev_attr_lpfc_max_luns,
- &dev_attr_nport_evt_cnt,
- &dev_attr_npiv_info,
- &dev_attr_lpfc_enable_da_id,
- &dev_attr_lpfc_max_scsicmpl_time,
- &dev_attr_lpfc_stat_data_ctrl,
- &dev_attr_lpfc_static_vport,
- &dev_attr_lpfc_fips_level,
- &dev_attr_lpfc_fips_rev,
+static const struct attribute_group lpfc_hba_attr_group = {
+ .attrs = lpfc_hba_attrs
+};
+
+const struct attribute_group *lpfc_hba_groups[] = {
+ &lpfc_hba_attr_group,
+ NULL
+};
+
+static struct attribute *lpfc_vport_attrs[] = {
+ &dev_attr_info.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_num_discovered_ports.attr,
+ &dev_attr_lpfc_drvr_version.attr,
+ &dev_attr_lpfc_log_verbose.attr,
+ &dev_attr_lpfc_lun_queue_depth.attr,
+ &dev_attr_lpfc_tgt_queue_depth.attr,
+ &dev_attr_lpfc_nodev_tmo.attr,
+ &dev_attr_lpfc_devloss_tmo.attr,
+ &dev_attr_lpfc_hba_queue_depth.attr,
+ &dev_attr_lpfc_peer_port_login.attr,
+ &dev_attr_lpfc_restrict_login.attr,
+ &dev_attr_lpfc_fcp_class.attr,
+ &dev_attr_lpfc_use_adisc.attr,
+ &dev_attr_lpfc_first_burst_size.attr,
+ &dev_attr_lpfc_max_luns.attr,
+ &dev_attr_nport_evt_cnt.attr,
+ &dev_attr_npiv_info.attr,
+ &dev_attr_lpfc_enable_da_id.attr,
+ &dev_attr_lpfc_max_scsicmpl_time.attr,
+ &dev_attr_lpfc_static_vport.attr,
+ &dev_attr_cmf_info.attr,
NULL,
};
+static const struct attribute_group lpfc_vport_attr_group = {
+ .attrs = lpfc_vport_attrs
+};
+
+const struct attribute_group *lpfc_vport_groups[] = {
+ &lpfc_vport_attr_group,
+ NULL
+};
+
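The conversion above replaces the old bare device_attribute arrays with the attribute-group shape sysfs expects: a NULL-terminated attribute array, wrapped in one attribute_group, wrapped in a NULL-terminated array of groups, which the SCSI host template then consumes through its group pointers (presumably .shost_groups for lpfc_hba_groups). A reduced userspace model of the data-structure nesting:

#include <stdio.h>

struct attribute { const char *name; };
struct attribute_group { struct attribute **attrs; };

static struct attribute dev_attr_info = { "info" };
static struct attribute dev_attr_cmf_info = { "cmf_info" };

static struct attribute *hba_attrs[] = {
        &dev_attr_info,
        &dev_attr_cmf_info,
        NULL,
};

static const struct attribute_group hba_attr_group = { hba_attrs };

static const struct attribute_group *hba_groups[] = {
        &hba_attr_group,
        NULL,
};

int main(void)
{
        const struct attribute_group **g;
        struct attribute **a;

        for (g = hba_groups; *g; g++)           /* walk groups */
                for (a = (*g)->attrs; *a; a++)  /* walk attributes */
                        printf("%s\n", (*a)->name);
        return 0;
}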
/**
* sysfs_ctlreg_write - Write method for writing to ctlreg
* @filp: open sysfs file
@@ -6495,17 +6216,14 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
-
/* Virtual ports do not need ctrl_reg and mbox */
- if (error || vport->port_type == LPFC_NPIV_PORT)
- goto out;
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return 0;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
- goto out_remove_stat_attr;
+ goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
@@ -6515,9 +6233,6 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
-out_remove_stat_attr:
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
out:
return error;
}
@@ -6530,8 +6245,7 @@ void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
+
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
@@ -6685,15 +6399,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LPFC_LINK_SPEED_128GHZ:
fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
break;
+ case LPFC_LINK_SPEED_256GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
+ break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
} else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
case LPFC_ASYNC_LINK_SPEED_10GBPS:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
+ break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
@@ -6787,31 +6510,52 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
- return NULL;
+ return NULL;
+ }
+ } else {
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return NULL;
+ }
}
memset(hs, 0, sizeof (struct fc_host_statistics));
hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+ hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+
/*
- * The MBX_READ_STATUS returns tx_k_bytes which has to
- * converted to words
+ * The MBX_READ_STATUS returns tx_k_bytes which has to be
+ * converted to words.
+ *
+ * Check if extended byte flag is set, to know when to collect upper
+ * bits of 64 bit wide statistics counter.
*/
- hs->tx_words = (uint64_t)
- ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
- * (uint64_t)256);
- hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
- hs->rx_words = (uint64_t)
- ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
- * (uint64_t)256);
+ if (pmb->un.varRdStatus.xkb & RD_ST_XKB) {
+ hs->tx_words = (u64)
+ ((((u64)(pmb->un.varRdStatus.xmit_xkb &
+ RD_ST_XMIT_XKB_MASK) << 32) |
+ (u64)pmb->un.varRdStatus.xmitByteCnt) *
+ (u64)256);
+ hs->rx_words = (u64)
+ ((((u64)(pmb->un.varRdStatus.rcv_xkb &
+ RD_ST_RCV_XKB_MASK) << 32) |
+ (u64)pmb->un.varRdStatus.rcvByteCnt) *
+ (u64)256);
+ } else {
+ hs->tx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+ * (uint64_t)256);
+ hs->rx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+ * (uint64_t)256);
+ }
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
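When the xkb flag is set, the firmware splits each 64-bit byte counter across two mailbox fields; the code above reassembles the halves and scales from KB to words (x256). A worked standalone example of the arithmetic; treating the upper-half mask as full 32 bits is an assumption:

#include <stdio.h>
#include <stdint.h>

#define XMIT_XKB_MASK 0xffffffffu   /* assumed width of RD_ST_XMIT_XKB_MASK */

int main(void)
{
        uint32_t xmitByteCnt = 0x80000000u;  /* low 32 bits of the counter */
        uint32_t xmit_xkb = 0x2;             /* upper bits */
        uint64_t tx_words;

        /* Same reassembly as lpfc_get_stats(): (upper << 32 | lower) * 256 */
        tx_words = (((uint64_t)(xmit_xkb & XMIT_XKB_MASK) << 32) |
                    xmitByteCnt) * 256;
        printf("tx_words = %llu\n", (unsigned long long)tx_words);
        return 0;
}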
@@ -6819,15 +6563,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
- return NULL;
+ return NULL;
+ }
+ } else {
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return NULL;
+ }
}
hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
@@ -6838,6 +6586,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
hs->error_frames = pmb->un.varRdLnk.crcCnt;
+ hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
+ hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
+
hs->link_failure_count -= lso->link_failure_count;
hs->loss_of_sync_count -= lso->loss_of_sync_count;
hs->loss_of_signal_count -= lso->loss_of_signal_count;
@@ -6900,15 +6651,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
- return;
+ return;
+ }
+ } else {
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
@@ -6918,15 +6673,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
+ if (rc != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+ } else {
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
- mempool_free( pmboxq, phba->mbox_mem_pool);
- return;
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
}
lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
@@ -6941,6 +6700,12 @@ lpfc_reset_stats(struct Scsi_Host *shost)
else
lso->link_events = (phba->fc_eventTag >> 1);
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+ memset(&shost_to_fc_host(shost)->fpin_stats, 0,
+ sizeof(shost_to_fc_host(shost)->fpin_stats));
+
psli->stats_start = ktime_get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6971,8 +6736,7 @@ lpfc_get_node_by_target(struct scsi_target *starget)
spin_lock_irq(shost->host_lock);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
return ndlp;
@@ -7047,7 +6811,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
else
rport->dev_loss_tmo = 1;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
dev_info(&rport->dev, "Cannot find remote node to "
"set rport dev loss tmo, port_id x%x\n",
rport->port_id);
@@ -7063,7 +6827,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
#endif
}
-/**
+/*
* lpfc_rport_show_function - Return rport target information
*
* Description:
@@ -7112,6 +6876,7 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
/**
* lpfc_hba_log_verbose_init - Set hba's log verbose level
* @phba: Pointer to lpfc_hba struct.
+ * @verbose: Verbose level to set.
*
* This function is called by the lpfc_get_cfgparam() routine to set the
* module lpfc_log_verbose into the @phba cfg_log_verbose for use with
@@ -7271,7 +7036,6 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
case PCI_DEVICE_ID_ZEPHYR_DCSP:
- case PCI_DEVICE_ID_HORNET:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
@@ -7307,6 +7071,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+ lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
@@ -7321,6 +7086,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ /* VMID Inits */
+ lpfc_max_vmid_init(phba, lpfc_max_vmid);
+ lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
+ lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
+ lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
if (phba->sli_rev != LPFC_SLI_REV4)
phba->cfg_EnableXLane = 0;
lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
@@ -7366,6 +7136,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
+ lpfc_enable_mi_init(phba, lpfc_enable_mi);
+
+ phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
+ lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
+ lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
@@ -7389,11 +7166,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
if (phba->cfg_irq_chann == 0)
phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
+ phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
- phba->cfg_soft_wwnn = 0L;
- phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_aer_support_init(phba, lpfc_aer_support);
@@ -7402,7 +7178,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
- phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
@@ -7419,12 +7194,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
- if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
+ int logit = 0;
+
+ if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
+ logit = 1;
+ }
+ if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+ logit = 1;
+ }
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
phba->cfg_irq_chann = phba->cfg_hdw_queue;
+ logit = 1;
+ }
+ if (logit)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2006 Reducing Queues - CPU limitation: "
+ "IRQ %d HDWQ %d\n",
+ phba->cfg_irq_chann,
+ phba->cfg_hdw_queue);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) {
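
The lpfc_nvme_mod_param_dep() hunk above replaces three silent clamps with one consolidated warning emitted only after all limits are applied, so the logged IRQ/HDWQ values are the final ones. Condensed into a sketch (helper name illustrative; fields and log call as in the hunk, message number omitted):

```c
/* Sketch of the clamp-then-log-once pattern from the hunk above. */
static void clamp_queue_counts(struct lpfc_hba *phba)
{
	int logit = 0;

	if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
		logit = 1;
	}
	if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
		phba->cfg_irq_chann = phba->cfg_hdw_queue;
		logit = 1;
	}
	if (logit)	/* one message, printed after all clamping is done */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"Reducing Queues - CPU limitation: IRQ %d HDWQ %d\n",
				phba->cfg_irq_chann, phba->cfg_hdw_queue);
}
```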
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 0ea03ae93d91..852b025e2fec 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -88,17 +88,9 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct bsg_job *set_job; /* job waiting for this iocb to finish */
@@ -106,7 +98,6 @@ struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -303,15 +294,14 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
- unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -325,22 +315,24 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
- ndlp = iocb->ndlp;
+ ndlp = iocb->cmdiocbq->ndlp;
rmp = iocb->rmp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
/* Copy the completed data or set the error status */
if (job) {
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -355,10 +347,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
+ total_data_placed, 0);
}
}
@@ -366,8 +357,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
kfree(dd_data);
/* Complete the job if the job is still active */
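
Throughout these completion handlers the code stops reading rspiocbq->iocb directly and instead goes through get_job_ulpstatus()/get_job_word4()/get_job_data_placed(), which hide whether the completion arrived as an SLI-3 IOCB or an SLI-4 WCQE. The shape of such an accessor, roughly (the real helpers live in the lpfc headers; this is a sketch of the idea, not necessarily their exact definition):

```c
/* Sketch: one accessor per field, branching on the SLI revision. */
static inline u32 job_ulpstatus_sketch(struct lpfc_hba *phba,
				       struct lpfc_iocbq *iocbq)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
	return iocbq->iocb.ulpStatus;	/* SLI-3 layout */
}
```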
@@ -388,26 +379,28 @@ static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
- uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
- IOCB_t *cmd;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
+ int request_nseg, reply_nseg;
+ u32 num_entry;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
+ u16 ulp_context;
/* in case no data is transferred */
bsg_reply->reply_payload_rcv_len = 0;
+ if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
+ return -ENODEV;
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -417,24 +410,12 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto no_dd_data;
}
- if (!lpfc_nlp_get(ndlp)) {
- rc = -ENODEV;
- goto no_ndlp;
- }
-
- if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
- rc = -ENODEV;
- goto free_ndlp;
- }
-
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
- goto free_ndlp;
+ goto free_dd;
}
- cmd = &cmdiocbq->iocb;
-
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
@@ -468,39 +449,28 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto free_cmp;
}
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = ndlp->nlp_rpi;
+ num_entry = request_nseg + reply_nseg;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- cmd->ulpOwner = OWN_CHIP;
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
+
+ lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
+ phba->fc_ratov * 2);
+
+ cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
- cmdiocbq->context3 = bmp;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- timeout = phba->fc_ratov * 2;
- cmd->ulpTimeout = timeout;
-
- cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
- cmdiocbq->context_un.ndlp = ndlp;
+ cmdiocbq->cmd_dmabuf = cmp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+
+ cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->context_un.dd_data = dd_data;
+
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
- dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = rmp;
job->dd_data = dd_data;
@@ -514,14 +484,19 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp) {
+ rc = -ENODEV;
+ goto free_rmp;
+ }
+ iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -532,7 +507,7 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
}
/* iocb failed so cleanup */
- job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
@@ -544,9 +519,7 @@ free_bmp:
kfree(bmp);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
-free_ndlp:
- lpfc_nlp_put(ndlp);
-no_ndlp:
+free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
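
Note the reference-count rework running through this function: lpfc_nlp_get() moves from function entry to just before lpfc_sli_issue_iocb(), and the failure path drops exactly that one reference. Condensed (real calls; error labels and locking elided):

```c
/* Sketch: take the node reference only once the I/O will really be sent. */
cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->ndlp)
	return -ENODEV;				/* nothing to undo yet */

if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0) != IOCB_SUCCESS)
	lpfc_nlp_put(ndlp);			/* balance the get above */
```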
@@ -580,7 +553,6 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
@@ -588,10 +560,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
ndlp = dd_data->context_un.iocb.ndlp;
- cmdiocbq->context1 = ndlp;
+ cmdiocbq->ndlp = ndlp;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -605,11 +578,13 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
- rsp = &rspiocbq->iocb;
- pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
+ pcmd = cmdiocbq->cmd_dmabuf;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
/* Copy the completed job data or determine the job status if job is
@@ -617,31 +592,36 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
*/
if (job) {
- if (rsp->ulpStatus == IOSTAT_SUCCESS) {
- rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+ if (ulp_status == IOSTAT_SUCCESS) {
+ rsp_size = total_data_placed;
bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
- } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ } else if (ulp_status == IOSTAT_LS_RJT) {
bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
- rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ rjt_data = (uint8_t *)&ulp_word4;
els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
els_reply->rjt_data.reason_explanation = rjt_data[1];
els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT) {
+ rc = -ETIMEDOUT;
} else {
rc = -EIO;
}
}
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, cmdiocbq);
+
+ lpfc_nlp_put(ndlp);
kfree(dd_data);
/* Complete the job if the job is still active */
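
With the interleaved -/+ lines applied, the LS_RJT branch reduces to unpacking the reject fields byte by byte from the raw 32-bit word-4 value. Shown consolidated for readability (this is the post-patch text of the branch above, nothing new):

```c
} else if (ulp_status == IOSTAT_LS_RJT) {
	bsg_reply->reply_payload_rcv_len =
		sizeof(struct fc_bsg_ctels_reply);
	/* LS_RJT data returned in word 4 */
	rjt_data = (uint8_t *)&ulp_word4;
	els_reply = &bsg_reply->reply_data.ctels_reply;
	els_reply->status = FC_CTELS_STATUS_REJECT;
	els_reply->rjt_data.action = rjt_data[3];
	els_reply->rjt_data.reason_code = rjt_data[2];
	els_reply->rjt_data.reason_explanation = rjt_data[1];
	els_reply->rjt_data.vendor_unique = rjt_data[0];
}
```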
@@ -710,7 +690,6 @@ lpfc_bsg_rport_els(struct bsg_job *job)
 * we won't DMA into memory that is no longer allocated for the
* request.
*/
-
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
@@ -718,23 +697,23 @@ lpfc_bsg_rport_els(struct bsg_job *job)
goto release_ndlp;
}
- rpi = ndlp->nlp_rpi;
-
/* Transfer the request payload to allocated command dma buffer */
-
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
- ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+ cmdiocbq->cmd_dmabuf->virt,
cmdsize);
+ rpi = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
+ phba->sli4_hba.rpi_ids[rpi]);
else
cmdiocbq->iocb.ulpContext = rpi;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context_un.ndlp = ndlp;
- cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context_un.dd_data = dd_data;
+ cmdiocbq->ndlp = ndlp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -753,13 +732,12 @@ lpfc_bsg_rport_els(struct bsg_job *job)
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -769,11 +747,9 @@ lpfc_bsg_rport_els(struct bsg_job *job)
rc = -EIO;
}
- /* iocb failed so cleanup */
- job->dd_data = NULL;
+ /* I/O issue failed. Cleanup resources. */
linkdown_err:
- cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq);
release_ndlp:
@@ -902,11 +878,8 @@ diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
return 0;
}
-/**
+/*
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
- * @phba:
- * @pring:
- * @piocbq:
*
* This function is called when an unsolicited CT command is received. It
* forwards the event to any processes registered to receive CT events.
@@ -921,46 +894,28 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
+ IOCB_t *iocb = NULL;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
- struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
- struct lpfc_hbq_entry *hbqe;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
struct lpfc_sli_ct_request *ct_req;
struct bsg_job *job = NULL;
struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
+ u32 bde_count = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
- if (piocbq->iocb.ulpBdeCount == 0 ||
- piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
- goto error_ct_unsol_exit;
-
- if (phba->link_state == LPFC_HBA_ERROR ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
- goto error_ct_unsol_exit;
-
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
- dmabuf = bdeBuf1;
- else {
- dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
- piocbq->iocb.un.cont64[0].addrLow);
- dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
- }
- if (dmabuf == NULL)
- goto error_ct_unsol_exit;
- ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+ ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
- if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
@@ -983,12 +938,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
- evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
+ else
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ iocb = &iocbq->iocb;
+ for (i = 0; i < iocb->ulpBdeCount;
+ i++)
evt_dat->len +=
- iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ iocb->un.cont64[i].tus.f.bdeSize;
}
}
@@ -1008,22 +968,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- bdeBuf1 = iocbq->context2;
- bdeBuf2 = iocbq->context3;
+ bdeBuf1 = iocbq->cmd_dmabuf;
+ bdeBuf2 = iocbq->bpl_dmabuf;
}
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocbq->wcqe_cmpl.word3;
+ else
+ bde_count = iocbq->iocb.ulpBdeCount;
+ for (i = 0; i < bde_count; i++) {
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.un.ulpWord[0];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.unsli3.
- sli3Words[4];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->unsol_rcv_len;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
@@ -1077,17 +1036,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_in_buf_free(phba,
dmabuf);
} else {
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
}
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
break;
}
}
@@ -1110,14 +1069,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->ct_ctx[
evt_dat->immed_dat].SID);
phba->ct_ctx[evt_dat->immed_dat].rxid =
- piocbq->iocb.ulpContext;
+ get_job_ulpcontext(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.unsli3.rcvsli3.ox_id;
+ get_job_rcvoxid(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].SID =
- piocbq->iocb.un.rcvels.remoteID;
+ bf_get(wqe_els_did,
+ &piocbq->wqe.xmit_els_rsp.wqe_dest);
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
- evt_dat->immed_dat = piocbq->iocb.ulpContext;
+ evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
@@ -1400,13 +1360,13 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
+ u32 ulp_status, ulp_word4;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1419,21 +1379,23 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
/* Copy the completed job data or set the error status */
if (job) {
bsg_reply = job->reply;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -1474,7 +1436,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @job: Pointer to the job object.
* @tag: tag index value into the ports context exchange array.
- * @bmp: Pointer to a dma buffer descriptor.
+ * @cmp: Pointer to a cmp dma buffer descriptor.
+ * @bmp: Pointer to a bmp dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
**/
static int
@@ -1482,13 +1445,22 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
- IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
+ u16 ulp_context, iotag;
+
+ ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2721 ndlp null for oxid %x SID %x\n",
+ phba->ct_ctx[tag].rxid,
+ phba->ct_ctx[tag].SID);
+ return IOCB_ERROR;
+ }
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -1506,81 +1478,53 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
goto no_ctiocb;
}
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- /* Fill in rest of iocb */
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].rxid;
- icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
- ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
- if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2721 ndlp null for oxid %x SID %x\n",
- icmd->ulpContext,
- phba->ct_ctx[tag].SID);
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
- /* Check if the ndlp is active */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- /* get a refernece count so the ndlp doesn't go away while
- * we respond
- */
- if (!lpfc_nlp_get(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ phba->ct_ctx[tag].oxid, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].valid = UNSOL_INVALID;
- } else
- icmd->ulpContext = (ushort) tag;
+ iotag = get_wqe_reqtag(ctiocb);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ ctiocb->num_bdes = num_entry;
+ iotag = ctiocb->iocb.ulpIoTag;
+ }
- icmd->ulpTimeout = phba->fc_ratov * 2;
+ ulp_context = get_job_ulpcontext(phba, ctiocb);
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ ulp_context, iotag, tag, phba->link_state);
- ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
- ctiocb->context1 = dd_data;
- ctiocb->context2 = cmp;
- ctiocb->context3 = bmp;
- ctiocb->context_un.ndlp = ndlp;
- ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+ ctiocb->context_un.dd_data = dd_data;
+ ctiocb->cmd_dmabuf = cmp;
+ ctiocb->bpl_dmabuf = bmp;
+ ctiocb->ndlp = ndlp;
+ ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = ctiocb;
- dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
+ if (!dd_data->context_un.iocb.ndlp) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
dd_data->context_un.iocb.rmp = NULL;
job->dd_data = dd_data;
@@ -1595,13 +1539,12 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
-
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -1609,6 +1552,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
/* iocb failed so cleanup */
job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
@@ -2033,8 +1977,6 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- int rc;
-
if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
@@ -2044,8 +1986,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
- rc = lpfc_issue_reg_vfi(phba->pport);
- return rc;
+ return lpfc_issue_reg_vfi(phba->pport);
}
/**
@@ -2404,33 +2345,27 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
union lpfc_sli4_cfg_shdr *shdr;
uint32_t shdr_status, shdr_add_status;
struct diag_status *diag_status_reply;
- int mbxstatus, rc = 0;
+ int mbxstatus, rc = -ENODEV, rc1 = 0;
shost = fc_bsg_to_shost(job);
- if (!shost) {
- rc = -ENODEV;
+ if (!shost)
goto job_error;
- }
+
vport = shost_priv(shost);
- if (!vport) {
- rc = -ENODEV;
+ if (!vport)
goto job_error;
- }
+
phba = vport->phba;
- if (!phba) {
- rc = -ENODEV;
+ if (!phba)
goto job_error;
- }
- if (phba->sli_rev < LPFC_SLI_REV4) {
- rc = -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
goto job_error;
- }
+
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
- LPFC_SLI_INTF_IF_TYPE_2) {
- rc = -ENODEV;
+ LPFC_SLI_INTF_IF_TYPE_2)
goto job_error;
- }
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct sli4_link_diag)) {
@@ -2457,16 +2392,20 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
goto job_error;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmboxq)
+ if (!pmboxq) {
+ rc = -ENOMEM;
goto link_diag_test_exit;
+ }
req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
sizeof(struct lpfc_sli4_cfg_mhdr));
alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
req_len, LPFC_SLI4_MBX_EMBED);
- if (alloc_len != req_len)
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
goto link_diag_test_exit;
+ }
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
@@ -2498,13 +2437,12 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
diag_status_reply = (struct diag_status *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3012 Received Run link diag test reply "
"below minimum size (%d): reply_len:%d\n",
- (int)(sizeof(struct fc_bsg_request) +
- sizeof(struct diag_status)),
+ (int)(sizeof(*bsg_reply) +
+ sizeof(*diag_status_reply)),
job->reply_len);
rc = -EINVAL;
goto job_error;
@@ -2515,7 +2453,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
diag_status_reply->shdr_add_status = shdr_add_status;
link_diag_test_exit:
- rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+ rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -2524,6 +2462,8 @@ link_diag_test_exit:
job_error:
/* make error code available to userspace */
+ if (rc1 && !rc)
+ rc = rc1;
bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
@@ -2642,7 +2582,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
*
* This function obtains the transmit and receive ids required to send
* an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
- * flags are used to the unsolicted response handler is able to process
+ * flags are used so that the unsolicited response handler is able to process
* the ct command sent on the same port.
**/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
@@ -2650,7 +2590,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
{
struct lpfc_bsg_event *evt;
struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl = NULL;
struct lpfc_sli_ct_request *ctreq = NULL;
@@ -2658,6 +2597,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
int time_left;
int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
+ u32 status;
*txxri = 0;
*rxxri = 0;
@@ -2701,9 +2641,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
goto err_get_xri_exit;
}
- cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
-
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
@@ -2713,36 +2650,24 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
-
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
-
- cmd->un.xseq64.w5.hcsw.Fctl = LA;
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = rpi;
-
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->bpl_dmabuf = dmabuf;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+ cmdiocbq->cmd_cmpl = NULL;
+
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
+ FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
- rspiocbq,
- (phba->fc_ratov * 2)
- + LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
+ rspiocbq, (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+
+ status = get_job_ulpstatus(phba, rspiocbq);
+ if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
ret_val = -EIO;
goto err_get_xri_exit;
}
- *txxri = rsp->ulpContext;
+ *txxri = get_job_ulpcontext(phba, rspiocbq);
evt->waiting = 1;
evt->wait_time_stamp = jiffies;
@@ -2943,16 +2868,16 @@ out:
}
/**
- * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
* @phba: Pointer to HBA context object
* @rxxri: Receive exchange id
* @len: Number of data bytes
*
* This function allocates and posts a data buffer of sufficient size to receive
- * an unsolicted CT command.
+ * an unsolicited CT command.
**/
-static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
- size_t len)
+static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
@@ -2989,7 +2914,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
/* Queue buffers for the receive exchange */
num_bde = (uint32_t)rxbuffer->flag;
dmp = &rxbuffer->dma;
-
cmd = &cmdiocbq->iocb;
i = 0;
@@ -3057,7 +2981,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
-
cmd = &cmdiocbq->iocb;
i = 0;
}
@@ -3109,7 +3032,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
- IOCB_t *cmd, *rsp = NULL;
+ union lpfc_wqe128 *cmdwqe, *rspwqe;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
@@ -3202,7 +3125,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
goto loopback_test_exit;
}
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
@@ -3245,9 +3168,12 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
goto err_loopback_test_exit;
}
- cmd = &cmdiocbq->iocb;
- if (phba->sli_rev < LPFC_SLI_REV4)
- rsp = &rspiocbq->iocb;
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rspwqe = &rspiocbq->wqe;
+ memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ }
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -3279,41 +3205,32 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
/* Build the XMIT_SEQUENCE iocb */
num_bde = (uint32_t)txbuffer->flag;
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
-
- cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
+ cmdiocbq->num_bdes = num_bde;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->cmd_cmpl = NULL;
+ cmdiocbq->bpl_dmabuf = txbmp;
if (phba->sli_rev < LPFC_SLI_REV4) {
- cmd->ulpContext = txxri;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
+ num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+
} else {
- cmd->un.xseq64.bdl.ulpIoTag32 = 0;
- cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
- cmdiocbq->context3 = txbmp;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
+ phba->sli4_hba.rpi_ids[rpi], 0xffff,
+ full_size, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
cmdiocbq->sli4_xritag = NO_XRI;
- cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
- cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
-
- if ((iocb_stat != IOCB_SUCCESS) ||
- ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOSTAT_SUCCESS))) {
+ if (iocb_stat != IOCB_SUCCESS ||
+ (phba->sli_rev < LPFC_SLI_REV4 &&
+ (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
@@ -3420,8 +3337,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job)
event_reply = (struct get_mgmt_rev_reply *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2741 Received GET_DFC_REV reply below "
"minimum size\n");
@@ -3447,7 +3363,7 @@ job_error:
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
- * will wake up thread waiting on the wait queue pointed by context1
+ * will wake up the thread waiting on the wait queue pointed to by dd_data
* of the mailbox.
**/
static void
@@ -3539,6 +3455,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
mb->mbxCommand);
return -EPERM;
}
+ break;
case MBX_WRITE_NV:
case MBX_WRITE_VPARMS:
case MBX_LOAD_SM:
@@ -3573,15 +3490,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -3599,7 +3507,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
- * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
* @phba: Pointer to HBA context object.
*
 * This routine cleans up and resets BSG handling of multi-buffer mbox
@@ -3888,14 +3796,14 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
}
/**
- * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
+ * @job: Pointer to the job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
- * non-embedded external bufffers.
+ * non-embedded external buffers.
**/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
@@ -4079,11 +3987,12 @@ job_error:
/**
* lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
- * non-embedded external bufffers.
+ * non-embedded external buffers.
**/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
@@ -4227,7 +4136,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
goto job_error;
}
- /* wait for additoinal external buffers */
+ /* wait for additional external buffers */
bsg_reply->result = 0;
bsg_job_done(job, bsg_reply->result,
@@ -4245,12 +4154,12 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
- * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B
- * with embedded sussystem 0x1 and opcodes with external HBDs.
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
@@ -4306,6 +4215,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
case COMN_OPCODE_GET_PROFILE_CONFIG:
+ case COMN_OPCODE_SET_FEATURES:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
@@ -4377,7 +4287,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
}
/**
- * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
* @phba: Pointer to HBA context object.
*
* This routine is for requesting to abort a pass-through mailbox command with
@@ -4396,7 +4306,7 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
/**
* lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
* @phba: Pointer to HBA context object.
- * @dmabuf: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
*
* This routine extracts the next mailbox read external buffer back to
* user space through BSG.
@@ -4466,6 +4376,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
/**
* lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
* @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine sets up the next mailbox read external buffer obtained
@@ -4573,7 +4484,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
goto job_error;
}
- /* wait for additoinal external buffers */
+ /* wait for additional external buffers */
bsg_reply->result = 0;
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
@@ -4591,8 +4502,8 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
* command with multiple non-embedded external buffers.
@@ -4636,10 +4547,10 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
/**
* lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
- * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
* (0x9B) mailbox commands and external buffers.
**/
static int
@@ -4710,14 +4621,14 @@ sli_cfg_ext_error:
/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a mailbox object.
+ * @job: Pointer to the job object.
* @vport: Pointer to a vport object.
*
* Allocate a tracking object, mailbox command memory, get a mailbox
* from the mailbox pool, copy the caller mailbox command.
*
* If offline and the sli is active we need to poll for the command (port is
- * being reset) and com-plete the job, otherwise issue the mailbox command and
+ * being reset) and complete the job, otherwise issue the mailbox command and
* let our completion handler finish the command.
**/
static int
@@ -5060,283 +4971,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job)
return rc;
}
-/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
- * @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
- *
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
- **/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
-{
- struct bsg_job_data *dd_data;
- struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
-
- dd_data = cmdiocbq->context1;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
-
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- bsg_reply = job->reply;
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
- }
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- /* Copy the job data or set the failing status for the job */
-
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
-
- menlo_resp = (struct menlo_response *)
- bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- bsg_reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
- }
-
- }
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
-
- /* Complete the job if active */
-
- if (job) {
- bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
- }
-
- return;
-}
-
-/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
- * @job: fc_bsg_job to handle
- *
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
- **/
-static int
-lpfc_menlo_cmd(struct bsg_job *job)
-{
- struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct fc_bsg_request *bsg_request = job->request;
- struct fc_bsg_reply *bsg_reply = job->reply;
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
- int rc = 0;
- struct menlo_command *menlo_cmd;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
-
- /* in case no data is returned return just the return code */
- bsg_reply->reply_payload_rcv_len = 0;
-
- if (job->request_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
- }
-
- menlo_cmd = (struct menlo_command *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
-
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
- }
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_dd;
- }
-
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
- }
-
- INIT_LIST_HEAD(&bmp->list);
-
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
- }
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
-
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
- }
-
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
- }
-
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
- }
-
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- bsg_reply->result = rc;
- job->dd_data = NULL;
- return rc;
-}
-
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
@@ -5360,9 +4994,7 @@ lpfc_forced_link_speed(struct bsg_job *job)
forced_reply = (struct forced_link_speed_support_reply *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct forced_link_speed_support_reply)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"0049 Received FORCED_LINK_SPEED reply below "
"minimum size\n");
@@ -5394,9 +5026,9 @@ lpfc_check_fwlog_support(struct lpfc_hba *phba)
ras_fwlog = &phba->ras_fwlog;
- if (ras_fwlog->ras_hwsupport == false)
+ if (!ras_fwlog->ras_hwsupport)
return -EACCES;
- else if (ras_fwlog->ras_enabled == false)
+ else if (!ras_fwlog->ras_enabled)
return -EPERM;
else
return 0;
@@ -5716,8 +5348,7 @@ lpfc_get_trunk_info(struct bsg_job *job)
event_reply = (struct lpfc_trunk_info *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2728 Received GET TRUNK _INFO reply below "
"minimum size\n");
@@ -5768,6 +5399,92 @@ job_error:
}
+static int
+lpfc_get_cgnbuf_info(struct bsg_job *job)
+{
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct get_cgnbuf_info_req *cgnbuf_req;
+ struct lpfc_cgn_info *cp;
+ uint8_t *cgn_buff;
+ int size, cinfosz;
+ int rc = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct get_cgnbuf_info_req)) {
+ rc = -ENOMEM;
+ goto job_exit;
+ }
+
+ if (!phba->sli4_hba.pc_sli4_params.cmf) {
+ rc = -ENOENT;
+ goto job_exit;
+ }
+
+ if (!phba->cgn_i || !phba->cgn_i->virt) {
+ rc = -ENOENT;
+ goto job_exit;
+ }
+
+ cp = phba->cgn_i->virt;
+ if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
+ rc = -EPERM;
+ goto job_exit;
+ }
+
+ cgnbuf_req = (struct get_cgnbuf_info_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+ /* For reset or size == 0 */
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
+ lpfc_init_congestion_stat(phba);
+ goto job_exit;
+ }
+
+ /* We don't want to include the CRC at the end */
+ cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
+
+ size = cgnbuf_req->read_size;
+ if (!size)
+ goto job_exit;
+
+ if (size < cinfosz) {
+ /* Just copy back what we can */
+ cinfosz = size;
+ rc = -E2BIG;
+ }
+
+ /* Allocate memory to read congestion info */
+ cgn_buff = vmalloc(cinfosz);
+ if (!cgn_buff) {
+ rc = -ENOMEM;
+ goto job_exit;
+ }
+
+ memcpy(cgn_buff, cp, cinfosz);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ cgn_buff, cinfosz);
+
+ vfree(cgn_buff);
+
+job_exit:
+ bsg_reply->result = rc;
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2724 GET CGNBUF error: %d\n", rc);
+ return rc;
+}
+
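
/* A minimal user-space sketch (not part of this patch) of the vendor
 * request that lpfc_get_cgnbuf_info() consumes. The struct and the
 * defines come from the lpfc_bsg.h hunk later in this diff; the FC BSG
 * transport plumbing (an sg_io_v4 ioctl on the host's /dev/bsg node) is
 * assumed rather than shown.
 */
struct get_cgnbuf_info_req req = {
	.command   = LPFC_BSG_VENDOR_GET_CGNBUF_INFO,
	/* read everything except the trailing CRC word, matching cinfosz */
	.read_size = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t),
	.reset     = 0,	/* LPFC_BSG_CGN_RESET_STAT clears stats instead */
};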
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
@@ -5808,10 +5525,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
- break;
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
@@ -5830,6 +5543,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
rc = lpfc_get_trunk_info(job);
break;
+ case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
+ rc = lpfc_get_cgnbuf_info(job);
+ break;
default:
rc = -EINVAL;
bsg_reply->reply_payload_rcv_len = 0;
@@ -5929,7 +5645,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O abort window is still open */
- if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return -EAGAIN;
}
@@ -5941,7 +5657,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
}
}
if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
@@ -5961,31 +5677,6 @@ lpfc_bsg_timeout(struct bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index d1708133fd54..3c04ca2d7455 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -33,8 +33,6 @@
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
@@ -43,6 +41,7 @@
#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
#define LPFC_BSG_VENDOR_GET_TRUNK_INFO 20
+#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO 21
struct set_ct_event {
uint32_t command;
@@ -130,16 +129,6 @@ struct dfc_mbox_req {
uint32_t extSeqNum;
};
-/* Used for menlo command or menlo data. The xri is only used for menlo data */
-struct menlo_command {
- uint32_t cmd;
- uint32_t xri;
-};
-
-struct menlo_response {
- uint32_t xri; /* return the xri of the iocb exchange */
-};
-
/*
* macros and data structures for handling sli-config mailbox command
* pass-through support, this header file is shared between user and
@@ -225,6 +214,10 @@ struct lpfc_sli_config_hdr {
uint32_t reserved5;
};
+#define LPFC_CSF_BOOT_DEV 0x1D
+#define LPFC_CSF_QUERY 0
+#define LPFC_CSF_SAVE 1
+
struct lpfc_sli_config_emb0_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
@@ -243,6 +236,15 @@ struct lpfc_sli_config_emb0_subsys {
#define FCOE_OPCODE_ADD_FCF 0x09
#define FCOE_OPCODE_SET_DPORT_MODE 0x27
#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28
+ uint32_t timeout; /* comn_set_feature timeout */
+ uint32_t request_length; /* comn_set_feature request len */
+ uint32_t version; /* comn_set_feature version */
+ uint32_t csf_feature; /* comn_set_feature feature */
+ uint32_t word69; /* comn_set_feature parameter len */
+ uint32_t word70; /* comn_set_feature parameter val0 */
+#define lpfc_emb0_subcmnd_csf_p0_SHIFT 0
+#define lpfc_emb0_subcmnd_csf_p0_MASK 0x3
+#define lpfc_emb0_subcmnd_csf_p0_WORD word70
};
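
/* Sketch (not in the patch): the SHIFT/MASK/WORD triplet above follows
 * the driver's bf_set()/bf_get() bitfield convention, so a caller
 * saving the boot device via COMN_OPCODE_SET_FEATURES would do roughly
 * the following, emb0 being a hypothetical pointer to this struct:
 */
emb0->csf_feature = LPFC_CSF_BOOT_DEV;
bf_set(lpfc_emb0_subcmnd_csf_p0, emb0, LPFC_CSF_SAVE);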
struct lpfc_sli_config_emb1_subsys {
@@ -261,6 +263,7 @@ struct lpfc_sli_config_emb1_subsys {
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
#define COMN_OPCODE_DELETE_OBJECT 0xAE
+#define COMN_OPCODE_SET_FEATURES 0xBF
#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
#define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20
uint32_t timeout;
@@ -372,6 +375,13 @@ struct get_trunk_info_req {
uint32_t command;
};
+struct get_cgnbuf_info_req {
+ uint32_t command;
+ uint32_t read_size;
+ uint32_t reset;
+#define LPFC_BSG_CGN_RESET_STAT 1
+};
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 25d3dd39bc05..d2d207791056 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -24,7 +24,6 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
struct fc_frame_header;
-struct lpfc_nvmet_rcv_ctx;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
@@ -33,7 +32,9 @@ int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-
+int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox);
+void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ enum lpfc_mbox_ctx locked);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -56,12 +57,11 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
-void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_pc_sli4_params(struct lpfcMboxq *);
-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_congestion_buf(struct lpfc_hba *phba);
+int lpfc_unreg_congestion_buf(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -78,20 +78,44 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx);
+int lpfc_read_lds_params(struct lpfc_hba *phba);
+uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
+void lpfc_cmf_signal_init(struct lpfc_hba *phba);
+void lpfc_cmf_start(struct lpfc_hba *phba);
+void lpfc_cmf_stop(struct lpfc_hba *phba);
+void lpfc_init_congestion_stat(struct lpfc_hba *phba);
+void lpfc_init_congestion_buf(struct lpfc_hba *phba);
+int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
+uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
+int lpfc_config_cgn_signal(struct lpfc_hba *phba);
+int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
+void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+void lpfc_unblock_requests(struct lpfc_hba *phba);
+void lpfc_block_requests(struct lpfc_hba *phba);
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries);
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry);
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries);
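
/* Expected lifecycle of the rx-monitor ring API above (a sketch
 * inferred from the prototypes, not stated by this patch):
 * lpfc_rx_monitor_create_ring() sizes the ring for 'entries' samples,
 * lpfc_rx_monitor_record() appends one rx_info_entry per sample,
 * lpfc_rx_monitor_report() formats up to max_read_entries into 'buf',
 * and lpfc_rx_monitor_destroy_ring() frees the ring at teardown.
 */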
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
-struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
- struct lpfc_nodelist *, int);
void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
+void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
int lpfc_can_disctmo(struct lpfc_vport *);
@@ -106,12 +130,17 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
+void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
+void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd);
void lpfc_disc_timeout(struct timer_list *);
int lpfc_unregister_fcf_prep(struct lpfc_hba *);
@@ -140,9 +169,12 @@ int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
-int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
+int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
+int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry);
+void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -170,6 +202,7 @@ void lpfc_els_timeout_handler(struct lpfc_vport *);
struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
uint8_t, struct lpfc_nodelist *,
uint32_t, uint32_t);
+void lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -187,10 +220,11 @@ void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_sli4_refresh_params(struct lpfc_hba *phba);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -215,6 +249,9 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
+ uint32_t len);
+
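/* A plausible call shape for lpfc_read_object() above; the object name
 * and the return convention here are assumptions, not confirmed by
 * this patch:
 *
 *	u32 data[64];
 *	int n = lpfc_read_object(phba, "/driver/objname", data, sizeof(data));
 *
 * with a negative return indicating failure and a non-negative return
 * the number of bytes placed in 'data'.
 */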
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
void lpfc_sli4_poll_hbtimer(struct timer_list *t);
@@ -257,7 +294,6 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
void lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, struct lpfc_queue *wq);
-void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
@@ -322,8 +358,28 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+int lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe);
+int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb, void *cmpl);
+void lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp);
+void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry,
+ u8 tmo);
+void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq,
+ u8 cr_cx_cmd);
+void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia, bool wqec);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
@@ -348,16 +404,18 @@ int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *);
+ struct lpfc_iocbq *, void *);
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
-int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
- uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
+ lpfc_ctx_cmd abort_cmd);
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
uint16_t, uint64_t, lpfc_ctx_cmd);
void lpfc_mbox_timeout(struct timer_list *t);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+int lpfc_issue_hb_mbox(struct lpfc_hba *phba);
+void lpfc_issue_hb_tmo(struct lpfc_hba *phba);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
@@ -383,10 +441,11 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+void lpfc_setup_fdmi_mask(struct lpfc_vport *vport);
int lpfc_link_reset(struct lpfc_vport *vport);
/* Function prototypes. */
-int lpfc_check_pci_resettable(const struct lpfc_hba *phba);
+int lpfc_check_pci_resettable(struct lpfc_hba *phba);
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
@@ -400,10 +459,9 @@ void lpfc_get_cfgparam(struct lpfc_hba *);
void lpfc_get_vport_cfgparam(struct lpfc_vport *);
int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
void lpfc_free_sysfs_attr(struct lpfc_vport *);
-extern struct device_attribute *lpfc_hba_attrs[];
-extern struct device_attribute *lpfc_vport_attrs[];
+extern const struct attribute_group *lpfc_hba_groups[];
+extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
@@ -456,6 +514,9 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
void lpfc_create_static_vport(struct lpfc_hba *);
void lpfc_stop_hba_timers(struct lpfc_hba *);
void lpfc_stop_port(struct lpfc_hba *);
+int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz);
+int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz,
+ struct Scsi_Host *shost);
void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
@@ -565,15 +626,19 @@ void lpfc_nvme_update_localport(struct lpfc_vport *vport);
int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
-void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
+int lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg);
+int lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg);
void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
struct rqb_dmabuf *nvmebuf, uint64_t isr_ts,
uint8_t cqflag);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
+void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl);
+ struct lpfc_iocbq *rspiocb);
void lpfc_create_multixri_pools(struct lpfc_hba *phba);
void lpfc_create_destroy_pools(struct lpfc_hba *phba);
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
@@ -589,11 +654,36 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
int);
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
-void lpfc_nvme_cmd_template(void);
+void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
+void lpfc_wqe_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
-void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
-void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ uint32_t stat, uint32_t param);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
+extern int lpfc_acqe_cgn_frequency;
+extern int lpfc_fabric_cgn_frequency;
+extern int lpfc_use_cgn_signal;
+
+extern union lpfc_wqe128 lpfc_iread_cmd_template;
+extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
+extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
+
+/* vmid interface */
+int lpfc_vmid_uvem(struct lpfc_vport *vport, struct lpfc_vmid *vmid, bool ins);
+uint32_t lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport);
+int lpfc_vmid_cmd(struct lpfc_vport *vport,
+ int cmdcode, struct lpfc_vmid *vmid);
+int lpfc_vmid_hash_fn(const char *vmid, int len);
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ uint32_t hash, uint8_t *buf);
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag);
+void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
+int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
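/* The vmid hash helpers above are expected to pair as follows (a
 * sketch; the pairing is not spelled out in this patch):
 *
 *	hash = lpfc_vmid_hash_fn(uuid, strlen(uuid));
 *	vmid = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
 */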
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 58b35a1442c1..e941a99aa965 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,7 +44,6 @@
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
-#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
@@ -76,6 +75,9 @@
static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+static void
+lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
static void
lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
@@ -85,12 +87,12 @@ lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0146 Ignoring unsolicited CT No HBQ "
"status = x%x\n",
- piocbq->iocb.ulpStatus);
+ get_job_ulpstatus(phba, piocbq));
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0145 Ignoring unsolicted CT HBQ Size:%d "
+ "0145 Ignoring unsolicited CT HBQ Size:%d "
"status = x%x\n",
- size, piocbq->iocb.ulpStatus);
+ size, get_job_ulpstatus(phba, piocbq));
}
static void
@@ -100,79 +102,348 @@ lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
}
+/**
+ * lpfc_ct_unsol_cmpl - Completion callback for unsolicited CT commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback for the unsolicited CT reject
+ * command. The memory allocated in the reject command path is freed here.
+ **/
+static void
+lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp, *bmp;
+
+ ndlp = cmdiocb->ndlp;
+ if (ndlp)
+ lpfc_nlp_put(ndlp);
+
+ mp = cmdiocb->rsp_dmabuf;
+ bmp = cmdiocb->bpl_dmabuf;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ cmdiocb->rsp_dmabuf = NULL;
+ }
+
+ if (bmp) {
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ cmdiocb->bpl_dmabuf = NULL;
+ }
+
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
+ * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
+ * @ndlp: pointer to a node-list data structure.
+ * @ct_req: pointer to the CT request data structure.
+ * @ulp_context: context of received UNSOL CT command
+ * @ox_id: ox_id of the UNSOL CT command
+ *
+ * This routine is invoked by lpfc_ct_handle_mibreq to send a reject
+ * response for unhandled commands.
+ **/
+static void
+lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
+ struct lpfc_sli_ct_request *ct_req,
+ u16 ulp_context, u16 ox_id)
+{
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ct_request *ct_rsp;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ struct lpfc_dmabuf *bmp = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct ulp_bde64 *bpl;
+ u8 rc = 0;
+ u32 tmo;
+
+ /* fill in BDEs for command */
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp) {
+ rc = 1;
+ goto ct_exit;
+ }
+
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
+ if (!mp->virt) {
+ rc = 2;
+ goto ct_free_mp;
+ }
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp) {
+ rc = 3;
+ goto ct_free_mpvirt;
+ }
+
+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
+ if (!bmp->virt) {
+ rc = 4;
+ goto ct_free_bmp;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ memset(bpl, 0, sizeof(struct ulp_bde64));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
+ memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));
+
+ ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ct_rsp->RevisionId.bits.InId = 0;
+ ct_rsp->FsType = ct_req->FsType;
+ ct_rsp->FsSubType = ct_req->FsSubType;
+ ct_rsp->CommandResponse.bits.Size = 0;
+ ct_rsp->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
+ ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
+ ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = 5;
+ goto ct_free_bmpvirt;
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ }
+
+ /* Save for completion so we can release these resources */
+ cmdiocbq->rsp_dmabuf = mp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
+ tmo = (3 * phba->fc_ratov);
+
+ cmdiocbq->retry = 0;
+ cmdiocbq->vport = vport;
+ cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
+
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp)
+ goto ct_no_ndlp;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ if (rc) {
+ lpfc_nlp_put(ndlp);
+ goto ct_no_ndlp;
+ }
+ return;
+
+ct_no_ndlp:
+ rc = 6;
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ct_free_bmpvirt:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ct_free_bmp:
+ kfree(bmp);
+ct_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ct_free_mp:
+ kfree(mp);
+ct_exit:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "6440 Unsol CT: Rsp err %d Data: x%x\n",
+ rc, vport->fc_flag);
+}
+
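/* Note on the error handling above: the rc values 1-6 are positional
 * failure codes rather than errnos; each one identifies which
 * allocation or issue step failed, and they surface only through the
 * "6440 Unsol CT: Rsp err" message logged at ct_exit.
 */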
+/**
+ * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @ctiocbq: pointer to lpfc CT command iocb data structure.
+ *
+ * This routine processes the IOCB associated with an unsolicited CT MIB
+ * request. It first determines whether there is an existing ndlp that
+ * matches the DID from the unsolicited IOCB; if not, it returns.
+ **/
+static void
+lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
+{
+ struct lpfc_sli_ct_request *ct_req;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_vport *vport = ctiocbq->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
+ u32 ulp_word4 = get_job_word4(phba, ctiocbq);
+ u32 did;
+ u32 mi_cmd;
+
+ did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
+ if (ulp_status) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6438 Unsol CT: status:x%x/x%x did : x%x\n",
+ ulp_status, ulp_word4, did);
+ return;
+ }
+
+ /* Ignore traffic received during vport shutdown */
+ if (vport->fc_flag & FC_UNLOADING)
+ return;
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6439 Unsol CT: NDLP Not Found for DID : x%x",
+ did);
+ return;
+ }
+
+ ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
+
+ mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
+ lpfc_ct_reject_event(ndlp, ct_req,
+ bf_get(wqe_ctxt_tag,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com),
+ bf_get(wqe_rcvoxid,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com));
+}
+
+/**
+ * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @ctiocbq: pointer to lpfc ct iocb data structure.
+ *
+ * This routine processes an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The data buffer associated with the
+ * unsolicited event is processed by invoking the appropriate routine
+ * after the iocb buffer from the SLI ring on which the event was
+ * received has been properly set up.
+ **/
void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocbq)
+ struct lpfc_iocbq *ctiocbq)
{
struct lpfc_dmabuf *mp = NULL;
- IOCB_t *icmd = &piocbq->iocb;
+ IOCB_t *icmd = &ctiocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
- dma_addr_t paddr;
+ struct lpfc_iocbq *iocb;
+ dma_addr_t dma_addr;
uint32_t size;
struct list_head head;
- struct lpfc_dmabuf *bdeBuf;
-
- if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
- return;
+ struct lpfc_sli_ct_request *ct_req;
+ struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
+ u32 status, parameter, bde_count = 0;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
+
+ ctiocbq->cmd_dmabuf = NULL;
+ ctiocbq->rsp_dmabuf = NULL;
+ ctiocbq->bpl_dmabuf = NULL;
+
+ wcqe_cmpl = &ctiocbq->wcqe_cmpl;
+ status = get_job_ulpstatus(phba, ctiocbq);
+ parameter = get_job_word4(phba, ctiocbq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = icmd->ulpBdeCount;
- if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ if (unlikely(status == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if ((status == IOSTAT_LOCAL_REJECT) &&
+ ((parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 2);
+ lpfc_sli3_post_buffer(phba, pring, 2);
return;
}
- /* If there are no BDEs associated with this IOCB,
- * there is nothing to do.
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
+ ctiocbq->cmd_dmabuf = bdeBuf1;
+ if (bde_count == 2)
+ ctiocbq->bpl_dmabuf = bdeBuf2;
+
+ ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
+
+ if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
+ ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
+ lpfc_ct_handle_mibreq(phba, ctiocbq);
+ } else {
+ if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
+ return;
+ }
+
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
- list_for_each_entry(iocbq, &head, list) {
- icmd = &iocbq->iocb;
- if (icmd->ulpBdeCount == 0)
+ list_add_tail(&head, &ctiocbq->list);
+ list_for_each_entry(iocb, &head, list) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocb->wcqe_cmpl.word3;
+ else
+ bde_count = iocb->iocb.ulpBdeCount;
+
+ if (!bde_count)
continue;
- bdeBuf = iocbq->context2;
- iocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
- lpfc_in_buf_free(phba, bdeBuf);
- if (icmd->ulpBdeCount == 2) {
- bdeBuf = iocbq->context3;
- iocbq->context3 = NULL;
- size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+ bdeBuf1 = iocb->cmd_dmabuf;
+ iocb->cmd_dmabuf = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
+ else
+ size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
+ lpfc_in_buf_free(phba, bdeBuf1);
+ if (bde_count == 2) {
+ bdeBuf2 = iocb->bpl_dmabuf;
+ iocb->bpl_dmabuf = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->unsol_rcv_len;
+ else
+ size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
size);
- lpfc_in_buf_free(phba, bdeBuf);
+ lpfc_in_buf_free(phba, bdeBuf2);
}
}
list_del(&head);
} else {
INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
+ list_add_tail(&head, &ctiocbq->list);
list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
- paddr = getPaddr(icmd->un.cont64[i].addrHigh,
- icmd->un.cont64[i].addrLow);
+ dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
+ icmd->un.cont64[i].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring,
- paddr);
+ dma_addr);
size = icmd->un.cont64[i].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
- lpfc_post_buffer(phba, pring, i);
+ lpfc_sli3_post_buffer(phba, pring, i);
}
list_del(&head);
}
@@ -276,32 +547,31 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;
- if (ctiocb->context_un.ndlp) {
- lpfc_nlp_put(ctiocb->context_un.ndlp);
- ctiocb->context_un.ndlp = NULL;
- }
- if (ctiocb->context1) {
- buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+ /* IOCBQ job structure gets cleaned during release. Just release
+ * the dma buffers here.
+ */
+ if (ctiocb->cmd_dmabuf) {
+ buf_ptr = ctiocb->cmd_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context1 = NULL;
+ ctiocb->cmd_dmabuf = NULL;
}
- if (ctiocb->context2) {
- lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
- ctiocb->context2 = NULL;
+ if (ctiocb->rsp_dmabuf) {
+ lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
+ ctiocb->rsp_dmabuf = NULL;
}
- if (ctiocb->context3) {
- buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+ if (ctiocb->bpl_dmabuf) {
+ buf_ptr = ctiocb->bpl_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context3 = NULL;
+ ctiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, ctiocb);
return 0;
}
-/**
+/*
* lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer
* @vport: pointer to a host virtual N_Port data structure.
* @bmp: Pointer to BPL for SLI command
@@ -315,15 +585,15 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
static int
lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
- void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *),
- struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+ void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
+ struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
+ u16 ulp_context;
/* Allocate buffer for command iocb */
geniocb = lpfc_sli_get_iocbq(phba);
@@ -331,71 +601,56 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
if (geniocb == NULL)
return 1;
- icmd = &geniocb->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ /* Update the num_entry bde count */
+ geniocb->num_bdes = num_entry;
- if (usr_flg)
- geniocb->context3 = NULL;
- else
- geniocb->context3 = (uint8_t *) bmp;
+ geniocb->bpl_dmabuf = bmp;
/* Save for completion so we can release these resources */
- geniocb->context1 = (uint8_t *) inp;
- geniocb->context2 = (uint8_t *) outp;
- geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
+ geniocb->cmd_dmabuf = inp;
+ geniocb->rsp_dmabuf = outp;
- /* Fill in payload, bp points to frame payload */
- icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
-
- /* Fill in rest of iocb */
- icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ geniocb->event_tag = event_tag;
if (!tmo) {
/* FC spec states we need 3 * ratov for CT requests */
tmo = (3 * phba->fc_ratov);
}
- icmd->ulpTimeout = tmo;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- /* For GEN_REQUEST64_CR, use the RPI */
- icmd->ulpCt_h = 0;
- icmd->ulpCt_l = 0;
- }
+ lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n",
- ndlp->nlp_DID, icmd->ulpIoTag,
+ ndlp->nlp_DID, geniocb->iotag,
vport->port_state);
- geniocb->iocb_cmpl = cmpl;
- geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ geniocb->cmd_cmpl = cmpl;
+ geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+ geniocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!geniocb->ndlp)
+ goto out;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_sli_release_iocbq(phba, geniocb);
- return 1;
+ lpfc_nlp_put(ndlp);
+ goto out;
}
return 0;
+out:
+ lpfc_sli_release_iocbq(phba, geniocb);
+ return 1;
}
-/**
+/*
* lpfc_ct_cmd - Build and issue a CT command
* @vport: pointer to a host virtual N_Port data structure.
* @inmp: Pointer to data buffer for response data.
@@ -431,8 +686,8 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
* lpfc_alloc_ct_rsp.
*/
cnt += 1;
- status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
- cnt, 0, retry);
+ status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
+ phba->fc_eventTag, cnt, 0, retry);
if (status) {
lpfc_free_ct_rsp(phba, outmp);
return -ENOMEM;
@@ -462,12 +717,11 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
- (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
@@ -502,7 +756,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x %p\n",
+ "Data: x%x x%x x%px\n",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt, ndlp);
}
@@ -518,7 +772,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
* Don't even bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
(ndlp->nlp_type &
(NLP_FCP_TARGET | NLP_NVME_TARGET))) {
if (fc4_type == FC_TYPE_FCP)
@@ -550,7 +804,6 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
char *str;
if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
@@ -579,12 +832,12 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_DID == Did)
ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
else
ndlp->nlp_flag |= NLP_NVMET_RECOV;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
}
}
@@ -600,7 +853,6 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
uint32_t Did, CTentry;
int Cnt;
struct list_head head;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = NULL;
lpfc_set_disctmo(vport);
@@ -646,9 +898,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
}
@@ -663,26 +915,33 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc, type;
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ cmdiocb->rsp_iocb = rspiocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+ ulp_status, ulp_word4, vport->fc_ns_retry);
+
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9043 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
@@ -699,11 +958,17 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0226 NS query failed due to link event\n");
+ "0226 NS query failed due to link event: "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "port_state x%x gidft_inp x%x\n",
+ ulp_status, ulp_word4, vport->fc_flag,
+ vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
goto out;
}
@@ -715,7 +980,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a GID_FT completing so the gidft_inp counter was
* incremented before the GID_FT was issued to the wire.
*/
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
/*
* Skip processing the NS response
@@ -730,11 +996,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
@@ -743,18 +1009,21 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* CT command is being retried */
- vport->gidft_inp--;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, type);
if (rc == 0)
goto out;
+ else { /* Unable to send NS cmd */
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
+ }
}
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0257 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *) inp->virt;
@@ -768,12 +1037,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
@@ -813,7 +1082,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
} else {
/* NameServer Rsp Error */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0241 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
CTrsp->CommandResponse.bits.CmdRsp,
@@ -827,7 +1096,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation);
}
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -856,8 +1126,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -867,28 +1137,35 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc;
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *)cmdiocb->context1;
- outp = (struct lpfc_dmabuf *)cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ cmdiocb->rsp_iocb = rspiocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_PT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_ns_retry);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9044 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
if (vport->fc_flag & FC_RSCN_MODE)
@@ -904,11 +1181,17 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4166 NS query failed due to link event\n");
+ "4166 NS query failed due to link event: "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "port_state x%x gidft_inp x%x\n",
+ ulp_status, ulp_word4, vport->fc_flag,
+ vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
goto out;
}
@@ -920,7 +1203,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a GID_PT completing so the gidft_inp counter was
* incremented before the GID_PT was issued to the wire.
*/
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
/*
* Skip processing the NS response
@@ -935,27 +1219,30 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
/* CT command is being retried */
- vport->gidft_inp--;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
vport->fc_ns_retry, GID_PT_N_PORT);
if (rc == 0)
goto out;
+ else { /* Unable to send NS cmd */
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
+ }
}
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4103 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *)inp->virt;
@@ -969,12 +1256,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
@@ -1014,7 +1301,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
} else {
/* NameServer Rsp Error */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4109 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
CTrsp->CommandResponse.bits.CmdRsp,
@@ -1029,7 +1316,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation);
}
- vport->gidft_inp--;
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1058,8 +1346,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
static void
@@ -1068,22 +1356,30 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFF_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
+
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9045 Event tag mismatch. Ignoring NS rsp\n");
+ goto iocb_free;
+ }
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
@@ -1113,8 +1409,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
retry = 1;
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] &
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 &
IOERR_PARAM_MASK)) {
case IOERR_NO_RESOURCES:
@@ -1140,21 +1436,23 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->retry, did);
if (rc == 0) {
/* success */
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
}
}
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
- did, irsp->ulpStatus, irsp->un.ulpWord[4],
+ did, ulp_status, ulp_word4,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
/* This is a target port, unregistered port, or the GFF_ID failed */
ndlp = lpfc_setup_disc_node(vport, did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n",
@@ -1193,31 +1491,44 @@ out:
}
lpfc_disc_start(vport);
}
+
+iocb_free:
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
static void
lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
- struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
uint32_t fc4_data_0, fc4_data_1;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ /* Ignore response if link flipped after this request was made */
+ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9046 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
@@ -1232,6 +1543,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
"NVME" : " ");
+ /* Look up the NPort_ID queried in the GFT_ID and find the
+ * driver's local node. It's an error if the driver
+ * doesn't have one.
+ */
ndlp = lpfc_findnode_did(vport, did);
if (ndlp) {
/* The bitmask value for FCP and NVME FCP types is
@@ -1273,10 +1588,12 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
} else
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "3065 GFT_ID failed x%08x\n", ulp_status);
+out:
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ns_ndlp);
}
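
Note how the gff_id/gft_id completions now read status through get_job_ulpstatus()/get_job_word4() instead of touching rspiocb->iocb directly, so one completion body serves both SLI3 and SLI4. A hedged sketch of what such an accessor looks like (the real helper lives elsewhere in lpfc; the SLI4 field name here is an assumption):

    static inline u32 get_job_ulpstatus(struct lpfc_hba *phba,
                                        struct lpfc_iocbq *iocbq)
    {
        if (phba->sli_rev == LPFC_SLI_REV4)
            /* SLI4: status lives in the WQE completion
             * (assumed field name)
             */
            return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
        /* SLI3: legacy IOCB response layout */
        return iocbq->iocb.ulpStatus;
    }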
static void
@@ -1286,22 +1603,22 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
- IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
int cmdcode, rc;
uint8_t retry;
uint32_t latt;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
CommandResponse.bits.CmdRsp);
@@ -1309,28 +1626,28 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
latt = lpfc_els_chk_latt(vport);
- /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ /* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0209 CT Request completes, latt %d, "
- "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
- latt, irsp->ulpStatus,
+ "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ latt, ulp_status,
CTrsp->CommandResponse.bits.CmdRsp,
- cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+ get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"CT cmd cmpl: status:x%x/x%x cmd:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+ ulp_status, ulp_word4, cmdcode);
- if (irsp->ulpStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ if (ulp_status) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0268 NS cmd x%x Error (x%x x%x)\n",
- cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+ cmdcode, ulp_status, ulp_word4);
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_DOWN) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_ABORTED)))
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_DOWN) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;
@@ -1346,8 +1663,8 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
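
The teardown above is the reference-counting idiom this patch standardizes on: snapshot cmdiocb->ndlp first, free the CT iocb, then drop the reference taken at submit time. Taken straight from the completions in this patch:

    free_ndlp = cmdiocb->ndlp;    /* snapshot before the iocb goes away */
    lpfc_ct_free_iocb(phba, cmdiocb);
    lpfc_nlp_put(free_ndlp);      /* release the submit-time reference */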
@@ -1355,15 +1672,15 @@ static void
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFT_ID;
@@ -1376,14 +1693,14 @@ static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1397,15 +1714,15 @@ static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RSPN_ID;
@@ -1418,14 +1735,14 @@ static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1451,15 +1768,15 @@ static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFF_ID;
@@ -1555,7 +1872,7 @@ lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
struct lpfc_dmabuf *mp;
uint32_t type;
- mp = cmdiocb->context1;
+ mp = cmdiocb->cmd_dmabuf;
if (mp == NULL)
return 0;
CtReq = (struct lpfc_sli_ct_request *)mp->virt;
@@ -1589,8 +1906,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
int rc = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
- || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
rc=1;
goto ns_cmd_exit;
}
@@ -1709,28 +2025,30 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_RFT_ID);
- CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+ CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);
+
+ /* Register Application Services type if vmid enabled. */
+ if (phba->cfg_vmid_app_header)
+ CtReq->un.rft.app_serv_reg =
+ cpu_to_be32(RFT_APP_SERV_REG);
/* Register FC4 FCP type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
- CtReq->un.rft.fcpReg = 1;
+ CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
- /* Register NVME type if enabled. Defined LE and swapped.
- * rsvd[0] is used as word1 because of the hard-coded
- * word0 usage in the ct_request data structure.
- */
+ /* Register NVME type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
- CtReq->un.rft.rsvd[0] =
- cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
+ CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
ptr = (uint32_t *)CtReq;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
- "%08x %08x %08x %08x\n",
- CtReq->un.rft.fcpReg ? "FCP" : " ",
- CtReq->un.rft.rsvd[0] ? "NVME" : " ",
+ "6433 Issue RFT (%s %s %s): %08x %08x %08x "
+ "%08x %08x %08x %08x %08x\n",
+ CtReq->un.rft.fcp_reg ? "FCP" : " ",
+ CtReq->un.rft.nvme_reg ? "NVME" : " ",
+ CtReq->un.rft.app_serv_reg ? "APPS" : " ",
*ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
*(ptr + 4), *(ptr + 5),
*(ptr + 6), *(ptr + 7));
@@ -1831,11 +2149,6 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
}
rc=6;
- /* Decrement ndlp reference count to release ndlp reference held
- * for the failed command's callback function.
- */
- lpfc_nlp_put(ndlp);
-
ns_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
@@ -1845,13 +2158,48 @@ ns_cmd_free_mpvirt:
ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
return 1;
}
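
The ns_cmd_free_* labels form the usual kernel unwind ladder: resources are released in reverse order of acquisition, and each failure site jumps to the deepest label matching what has been allocated so far. The ladder, as visible in the hunk above:

    ns_cmd_free_bmpvirt:
        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
    ns_cmd_free_bmp:
        kfree(bmp);
    ns_cmd_free_mpvirt:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
    ns_cmd_free_mp:
        kfree(mp);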
/**
+ * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
+ * @phba: Pointer to HBA context object.
+ * @mask: Initial port attributes mask
+ *
+ * This function checks whether any vports have deferred their FDMI RPRT.
+ * A vport's RPRT may be deferred if it is issued before the primary port's
+ * RHBA completes.
+ */
+static void
+lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ int i;
+
+ phba->hba_flag |= HBA_RHBA_CMPL;
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ vport = vports[i];
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ continue;
+ if (vport->ct_flags & FC_CT_RPRT_DEFER) {
+ vport->ct_flags &= ~FC_CT_RPRT_DEFER;
+ vport->fdmi_port_mask = mask;
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
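
lpfc_fdmi_rprt_defer() is the replay half of a two-sided handshake; the request half (in the SLI_MGMT_DPRT completion below) defers when the physical port's RHBA has not finished:

    /* Request side: only send RPRT once RHBA has completed */
    if (phba->hba_flag & HBA_RHBA_CMPL)
        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
    else
        vport->ct_flags |= FC_CT_RPRT_DEFER; /* replayed later by
                                              * lpfc_fdmi_rprt_defer
                                              */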
+
+/**
* lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to the command IOCBQ.
@@ -1865,26 +2213,27 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct lpfc_dmabuf *inp = cmdiocb->context1;
- struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTcmd = inp->virt;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
uint32_t latt, cmd, err;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
latt = lpfc_els_chk_latt(vport);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"FDMI cmpl: status:x%x/x%x latt:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+ ulp_status, ulp_word4, latt);
- if (latt || irsp->ulpStatus) {
+ if (latt || ulp_status) {
/* Look for a retryable error */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
case IOERR_SLI_DOWN:
/* Driver aborted this IO. No retry as error
@@ -1914,29 +2263,36 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0229 FDMI cmd %04x failed, latt = %d "
- "ulpStatus: x%x, rid x%x\n",
- be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "ulp_status: x%x, rid x%x\n",
+ be16_to_cpu(fdmi_cmd), latt, ulp_status,
+ ulp_word4);
}
+
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
/* Check for a CT LS_RJT response */
cmd = be16_to_cpu(fdmi_cmd);
if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
"0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
/* Should we fallback to FDMI-2 / FDMI-1 ? */
switch (cmd) {
case SLI_MGMT_RHBA:
if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
- /* Fallback to FDMI-1 */
+ /* Fallback to FDMI-1 for HBA attributes */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
+
+ /* If HBA attributes are FDMI1, port attributes
+ * should be FDMI1 as well, for consistency.
+ */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
@@ -1944,11 +2300,17 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
+ }
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
+ return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -1958,12 +2320,23 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPA:
+ /* No retry on Vendor RPA; RPA is only done on the physical port */
+ if (phba->link_flag & LS_CT_VEN_RPA) {
+ phba->link_flag &= ~LS_CT_VEN_RPA;
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ return;
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_DISCOVERY | LOG_ELS,
+ "6460 VEN FDMI RPA RJT\n");
+ return;
+ }
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -1981,6 +2354,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
switch (cmd) {
case SLI_MGMT_RHBA:
+ /* Check for any RPRTs deferred till after RHBA completes */
+ lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);
+
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
break;
@@ -1989,10 +2365,60 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case SLI_MGMT_DPRT:
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
- else
- lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ } else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
+
+ /* Only issue an RPRT for the vport if the RHBA
+ * for the physical port completes successfully.
+ * We may have to defer the RPRT accordingly.
+ */
+ if (phba->hba_flag & HBA_RHBA_CMPL) {
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "6078 RPRT deferred\n");
+ vport->ct_flags |= FC_CT_RPRT_DEFER;
+ }
+ }
+ break;
+ case SLI_MGMT_RPA:
+ if (vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.pc_sli4_params.mi_ver) {
+ /* MI is only for the physical port, no vports */
+ if (phba->link_flag & LS_CT_VEN_RPA) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS |
+ LOG_CGN_MGMT,
+ "6449 VEN RPA FDMI Success\n");
+ phba->link_flag &= ~LS_CT_VEN_RPA;
+ break;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY | LOG_CGN_MGMT,
+ "6210 Issue Vendor MI FDMI %x\n",
+ phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ /* CGN is only for the physical port, no vports */
+ if (lpfc_fdmi_cmd(vport, ndlp, cmd,
+ LPFC_FDMI_VENDOR_ATTR_mi) == 0)
+ phba->link_flag |= LS_CT_VEN_RPA;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6458 Send MI FDMI:%x Flag x%x\n",
+ phba->sli4_hba.pc_sli4_params.mi_ver,
+ phba->link_flag);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6459 No FDMI VEN MI support - "
+ "RPA Success\n");
+ }
break;
}
return;
@@ -2023,7 +2449,7 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
return;
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
/* Check if system hostname changed */
@@ -2038,10 +2464,16 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
* DHBA -> DPRT -> RHBA -> RPA (physical port)
* DPRT -> RPRT (vports)
*/
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ /* For extra Vendor RPA */
+ phba->link_flag &= ~LS_CT_VEN_RPA;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
- else
+ } else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+ }
/* Since this code path registers all the port attributes
* we can just return without further checking.
@@ -2061,914 +2493,625 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
} else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
LPFC_FDMI_PORT_ATTR_num_disc);
}
}
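
For orientation, these are the command sequences lpfc_fdmi_change_check() kicks off, each step chained from the previous step's completion in lpfc_cmpl_ct_disc_fdmi():

    /*
     * Physical port: DHBA -> DPRT -> RHBA -> RPA (plus a Vendor MI RPA
     *                when mi_ver is supported)
     * Vport:         DPRT -> RPRT (RPRT deferred until the physical
     *                port's RHBA completes)
     */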
-/* Routines for all individual HBA attributes */
-static int
-lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_fdmi_attr_u32 *ae = attr;
+ int size = sizeof(*ae);
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ ae->value_u32 = cpu_to_be32(attrval);
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NODENAME);
return size;
}
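
Each of these helpers emits one FDMI attribute as a big-endian TLV: a 16-bit type, a 16-bit length covering the 4-byte header plus the value, then the value itself. A sketch of the u32 flavor's assumed wire layout:

    struct lpfc_fdmi_attr_u32 {   /* assumed layout, 8 bytes on the wire */
        __be16 type;              /* e.g. RHBA_MAX_CT_PAYLOAD_LEN */
        __be16 len;               /* FOURBYTES + sizeof(value_u32) == 8 */
        __be32 value_u32;
    } __packed;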
-static int
-lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+
+static inline int
+lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_wwn *ae = attr;
+ int size = sizeof(*ae);
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(ae->name, wwn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- /* This string MUST be consistent with other FC platforms
- * supported by Broadcom.
- */
- strncpy(ae->un.AttrString,
- "Emulex Corporation",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
+ struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fullwwn *ae = attr;
+ u8 *nname = ae->nname;
+ u8 *pname = ae->pname;
+ int size = sizeof(*ae);
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(nname, wwnn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
+ memcpy(pname, wwpn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- strncpy(ae->un.AttrString, phba->SerialNumber,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_string *ae = attr;
+ int len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ /*
+ * We are trusting the caller that if an FDMI string field
+ * is capped at 64 bytes, the caller passes in a string of
+ * 64 bytes or less.
+ */
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ len = strnlen(ae->value_string, sizeof(ae->value_string));
+ /* round string length to a 32-bit boundary; ensure there's a NUL */
len += (len & 3) ? (4 - (len & 3)) : 4;
+ /* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL);
+
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
return size;
}
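
The rounding line deserves a second look: it pads len up to the next 4-byte boundary, and when len is already a multiple of 4 it adds a whole extra word, so the padding always has room for a NUL terminator. Worked examples:

    /* len += (len & 3) ? (4 - (len & 3)) : 4;
     *   len = 5  -> 8   (3 pad bytes, NUL fits in padding)
     *   len = 18 -> 20  (2 pad bytes)
     *   len = 8  -> 12  (full extra word guarantees a NUL)
     */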
-static int
-lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+/* Bitfields for FC4 Types that can be reported */
+#define ATTR_FC4_CT 0x00000001
+#define ATTR_FC4_FCP 0x00000002
+#define ATTR_FC4_NVME 0x00000004
+
+static inline int
+lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fc4types *ae = attr;
+ int size = sizeof(*ae);
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
+ if (typemask & ATTR_FC4_FCP)
+ ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
+
+ if (typemask & ATTR_FC4_CT)
+ ae->value_types[7] = 0x01; /* Type 0x20 - CT */
+
+ if (typemask & ATTR_FC4_NVME)
+ ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
- strncpy(ae->un.AttrString, phba->ModelDesc,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
return size;
}
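
The byte indices above follow the FC-GS FC-4 Types bitmap: 8 big-endian 32-bit words with one bit per type, so type T sits in word T/32, bit T%32, which within a big-endian word lands in byte (T/32)*4 + (3 - (T%32)/8) with mask 1 << (T%8). A small sanity-check sketch:

    /* Derive the byte/mask used above for a given FC-4 type */
    static void fc4_type_to_slot(u8 type, int *byte, u8 *mask)
    {
        *byte = (type / 32) * 4 + (3 - (type % 32) / 8);
        *mask = 1 << (type % 8);
    }
    /* type 0x08 (FCP)  -> value_types[2], mask 0x01
     * type 0x20 (CT)   -> value_types[7], mask 0x01
     * type 0x28 (NVME) -> value_types[6], mask 0x01
     */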
+/* Routines for all individual HBA attributes */
static int
-lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- lpfc_vpd_t *vp = &phba->vpd;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t i, j, incr, size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- /* Convert JEDEC ID to ascii for hardware version */
- incr = vp->rev.biuRev;
- for (i = 0; i < 8; i++) {
- j = (incr & 0xf);
- if (j <= 9)
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
- else
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
- incr = (incr >> 4);
- }
- size = FOURBYTES + 8;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
+ "Emulex Corporation");
+}
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+static int
+lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
+{
+ struct lpfc_hba *phba = vport->phba;
- strncpy(ae->un.AttrString, lpfc_release_version,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
+ phba->SerialNumber);
}
static int
-lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- else
- strncpy(ae->un.AttrString, phba->OptionROMVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
+ phba->ModelDesc);
}
static int
-lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ lpfc_vpd_t *vp = &phba->vpd;
+ char buf[16] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
+ return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
+}
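
The snprintf() replaces the deleted nibble-at-a-time JEDEC decode without changing the output: both walk the 32-bit biuRev from high nibble to low and emit lowercase hex ('0'-'9' via 0x30+j, 'a'-'f' via 0x61+(j-10)), which is exactly what "%08x" produces:

    /* biuRev = 0x0042a1f3:
     *   old manual loop  -> "0042a1f3"
     *   snprintf("%08x") -> "0042a1f3"
     */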
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
+ lpfc_release_version);
}
static int
-lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_decode_firmware_rev(phba, buf, 1);
- ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ buf);
+ }
+
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ phba->OptionROMVersion);
}
static int
-lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ lpfc_decode_firmware_rev(phba, buf, 1);
- len = lpfc_vport_symbolic_node_name(vport,
- ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ snprintf(buf, sizeof(buf), "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
- /* Nothing is defined for this currently */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-
- /* Each driver instance corresponds to a single port */
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
+ LPFC_MAX_CT_SIZE);
}
static int
-lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
- memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}
static int
-lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
+}
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+static int
+lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
+{
+ /* Each driver instance corresponds to a single port */
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
+}
- strlcat(ae->un.AttrString, phba->BIOSVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
+ &vport->fabric_nodename);
}
static int
-lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
+ phba->BIOSVersion);
+}
+static int
+lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
+{
/* Driver doesn't have access to this information */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}
static int
-lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- strncpy(ae->un.AttrString, "EMULEX",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}
-/* Routines for all individual PORT attributes */
+/*
+ * Routines for all individual PORT attributes
+ */
+
static int
-lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ u32 fc4types;
- ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if Firmware supports NVME and on physical port */
if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
phba->sli4_hba.pc_sli4_params.nvme)
- ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ struct lpfc_hba *phba = vport->phba;
+ u32 speeds = 0;
+ u32 tcfg;
+ u8 i, cnt;
- ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
- if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
- if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
- if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
- if (phba->lmt & LMT_10Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
- if (phba->lmt & LMT_8Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
- if (phba->lmt & LMT_4Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
- if (phba->lmt & LMT_2Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
- if (phba->lmt & LMT_1Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+ cnt = 0;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tcfg = phba->sli4_hba.conf_trunk;
+ for (i = 0; i < 4; i++, tcfg >>= 1)
+ if (tcfg & 1)
+ cnt++;
+ }
+
+ if (cnt > 2) { /* 4-lane trunk group */
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ } else if (cnt) { /* 2-lane trunk group */
+ if (phba->lmt & LMT_128Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_32GFC;
+ } else {
+ if (phba->lmt & LMT_256Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_128Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_32GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_16GFC;
+ if (phba->lmt & LMT_10Gb)
+ speeds |= HBA_PORTSPEED_10GFC;
+ if (phba->lmt & LMT_8Gb)
+ speeds |= HBA_PORTSPEED_8GFC;
+ if (phba->lmt & LMT_4Gb)
+ speeds |= HBA_PORTSPEED_4GFC;
+ if (phba->lmt & LMT_2Gb)
+ speeds |= HBA_PORTSPEED_2GFC;
+ if (phba->lmt & LMT_1Gb)
+ speeds |= HBA_PORTSPEED_1GFC;
+ }
} else {
/* FCoE links support only one speed */
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}
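
The lane count is just a population count of the low four bits of conf_trunk; with kernel helpers the open-coded loop could read (a sketch, behavior-equivalent to the loop above):

    #include <linux/bitops.h>

    u8 cnt = 0;

    if (phba->sli_rev == LPFC_SLI_REV4)
        cnt = hweight8(phba->sli4_hba.conf_trunk & 0xF);
    /* cnt > 2 -> report 4-lane trunk speeds, cnt 1..2 -> 2-lane */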
static int
-lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ u32 speeds = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_1GFC;
+ speeds = HBA_PORTSPEED_1GFC;
break;
case LPFC_LINK_SPEED_2GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_2GFC;
+ speeds = HBA_PORTSPEED_2GFC;
break;
case LPFC_LINK_SPEED_4GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_4GFC;
+ speeds = HBA_PORTSPEED_4GFC;
break;
case LPFC_LINK_SPEED_8GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_8GFC;
+ speeds = HBA_PORTSPEED_8GFC;
break;
case LPFC_LINK_SPEED_10GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_10GFC;
+ speeds = HBA_PORTSPEED_10GFC;
break;
case LPFC_LINK_SPEED_16GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_16GFC;
+ speeds = HBA_PORTSPEED_16GFC;
break;
case LPFC_LINK_SPEED_32GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_32GFC;
+ speeds = HBA_PORTSPEED_32GFC;
break;
case LPFC_LINK_SPEED_64GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_64GFC;
+ speeds = HBA_PORTSPEED_64GFC;
break;
case LPFC_LINK_SPEED_128GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+ speeds = HBA_PORTSPEED_128GFC;
+ break;
+ case LPFC_LINK_SPEED_256GHZ:
+ speeds = HBA_PORTSPEED_256GFC;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
} else {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
- struct serv_parm *hsp;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
- hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) hsp->cmn.bbRcvSizeLsb;
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
+ (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t)hsp->cmn.bbRcvSizeLsb);
}
static int
-lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
+ shost->host_no);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
- "/sys/class/scsi_host/host%d", shost->host_no);
- len = strnlen((char *)ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
- scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- vport->phba->os_host_name);
-
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[256] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
- len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}
static int
-lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
- else
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
+ LPFC_FDMI_PORTTYPE_NLPORT :
+ LPFC_FDMI_PORTTYPE_NPORT);
}
static int
-lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
+ FC_COS_CLASS2 | FC_COS_CLASS3);
}
static int
-lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
-
- memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
+ &vport->fabric_portname);
}
static int
-lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ struct lpfc_hba *phba = vport->phba;
+ u32 fc4types;
- ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if NVME is configured or not */
- if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+ if (vport == phba->pport &&
+ phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- /* Link Up - operational */
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
+ LPFC_FDMI_PORTSTATE_ONLINE);
}
static int
-lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
- ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
+ vport->fdmi_num_disc);
}
static int
-lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}
static int
-lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- strncpy(ae->un.AttrString, "Smart SAN Initiator",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
+ "Smart SAN Initiator");
}
static int
-lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy((((uint8_t *)&ae->un.AttrString) +
- sizeof(struct lpfc_name)),
- &vport->fc_sparam.portName, sizeof(struct lpfc_name));
- size = FOURBYTES + (2 * sizeof(struct lpfc_name));
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_GUID);
- return size;
+ return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
+ &vport->fc_sparam.nodeName,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
+ "Smart SAN Version 2.0");
}
static int
-lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
-
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-
/* SRIOV (type 3) is not supported */
- if (vport->vpi)
- ae->un.AttrInt = cpu_to_be32(2); /* NPIV */
- else
- ae->un.AttrInt = cpu_to_be32(1); /* Physical */
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
+ (vport->vpi) ? 2 /* NPIV */ : 1 /* Physical */);
}
static int
-lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
+}
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_QOS);
- return size;
+static int
+lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}
static int
-lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[32] = { 0 };
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
- return size;
+ sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */
lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */
@@ -2992,7 +3135,7 @@ int (*lpfc_fdmi_hba_action[])
/* RPA / RPRT attribute jump table */
int (*lpfc_fdmi_port_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */
lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */
@@ -3017,14 +3160,15 @@ int (*lpfc_fdmi_port_action[])
lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */
lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */
lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */
+ lpfc_fdmi_vendor_attr_mi, /* bit23 RPRT_VENDOR_MI */
};
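
Callers consume these jump tables with a mask-driven walk: each set bit selects one attribute routine, which writes a TLV at the current offset and returns its size; the caller then advances by that size and bumps the entry count. A sketch approximating the loop in lpfc_fdmi_cmd() (names follow the variables declared there, but this is illustrative, not verbatim):

    bit_pos = 0;
    while (mask) {
        if (mask & 0x1) {
            func = lpfc_fdmi_port_action[bit_pos];
            size += func(vport, ((uint8_t *)base + size));
            pab->ab.EntryCnt++;    /* one more attribute TLV */
        }
        mask = mask >> 1;
        bit_pos++;
    }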
/**
* lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
- * cmdcode: FDMI command to send
- * mask: Mask of HBA or PORT Attributes to send
+ * @cmdcode: FDMI command to send
+ * @new_mask: Mask of HBA or PORT Attributes to send
*
* Builds and sends a FDMI command using the CT subsystem.
*/
@@ -3033,56 +3177,61 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int cmdcode, uint32_t new_mask)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_dmabuf *rq, *rsp;
struct lpfc_sli_ct_request *CtReq;
- struct ulp_bde64 *bpl;
+ struct ulp_bde64_le *bde;
uint32_t bit_pos;
- uint32_t size;
+ uint32_t size, addsz;
uint32_t rsp_size;
uint32_t mask;
struct lpfc_fdmi_reg_hba *rh;
struct lpfc_fdmi_port_entry *pe;
- struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
struct lpfc_fdmi_attr_block *ab = NULL;
- int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad);
- void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
+ int (*func)(struct lpfc_vport *vport, void *attrbuf);
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return 0;
cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ rq = kmalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
goto fdmi_cmd_exit;
- mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
- if (!mp->virt)
- goto fdmi_cmd_free_mp;
+ rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
+ if (!rq->virt)
+ goto fdmi_cmd_free_rq;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
- goto fdmi_cmd_free_mpvirt;
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ goto fdmi_cmd_free_rqvirt;
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
- if (!bmp->virt)
- goto fdmi_cmd_free_bmp;
+ rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
+ if (!rsp->virt)
+ goto fdmi_cmd_free_rsp;
- INIT_LIST_HEAD(&mp->list);
- INIT_LIST_HEAD(&bmp->list);
+ INIT_LIST_HEAD(&rq->list);
+ INIT_LIST_HEAD(&rsp->list);
+
+ /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
+ memset(rq->virt, 0, LPFC_BPL_SIZE);
+ rsp_size = LPFC_BPL_SIZE;
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0218 FDMI Request Data: x%x x%x x%x\n",
- vport->fc_flag, vport->port_state, cmdcode);
- CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
+ cmdcode, new_mask, vport->fdmi_port_mask,
+ vport->fc_flag, vport->port_state);
+
+ CtReq = (struct lpfc_sli_ct_request *)rq->virt;
/* First populate the CT_IU preamble */
- memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
@@ -3090,34 +3239,33 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
- rsp_size = LPFC_BPL_SIZE;
+
size = 0;
/* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
- rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
/* HBA Identifier */
memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_fdmi_hba_ident);
if (cmdcode == SLI_MGMT_RHBA) {
/* Registered Port List */
/* One entry (port) per adapter */
rh->rpl.EntryCnt = cpu_to_be32(1);
- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
+ memcpy(&rh->rpl.pe.PortName,
+ &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof(struct lpfc_name) +
- FOURBYTES;
- } else {
- size = sizeof(struct lpfc_name);
+ size += sizeof(struct lpfc_fdmi_reg_port_list);
}
+
ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
ab->EntryCnt = 0;
- size += FOURBYTES;
+ size += FOURBYTES; /* add length of EntryCnt field */
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3128,11 +3276,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_hba_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)rh + size));
- ab->EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)rh + size));
+ if (addsz) {
+ ab->EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto hba_out;
}
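
    The loop above walks the attribute mask one bit at a time, skipping
    handlers that return 0 and bailing out once a worst-case attribute might
    no longer fit in the 1K request buffer. A stand-alone sketch of the same
    pattern (constants and names are stand-ins for the driver's):

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_ATTRLEN     256     /* stand-in for FDMI_MAX_ATTRLEN */
    #define BUF_LIMIT       1024    /* stand-in for LPFC_BPL_SIZE - preamble */

    typedef int (*attr_fn)(void *attrbuf);

    static size_t walk_attr_mask(uint32_t mask, attr_fn *tbl,
                                 uint8_t *buf, size_t size, uint32_t *entries)
    {
            uint32_t bit_pos = 0;

            while (mask) {
                    if (mask & 0x1) {
                            int addsz = tbl[bit_pos](buf + size);

                            if (addsz) {    /* a handler may decline (return 0) */
                                    (*entries)++;
                                    size += addsz;
                            }
                            /* stop if a worst-case attribute could no longer fit */
                            if (size + MAX_ATTRLEN > BUF_LIMIT)
                                    break;
                    }
                    mask >>= 1;
                    bit_pos++;
            }
            return size;
    }
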
@@ -3142,27 +3292,40 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hba_out:
ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_RPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return 0;
+ }
+ fallthrough;
case SLI_MGMT_RPA:
- pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
+ /* Store base ptr right after preamble */
+ base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
+
if (cmdcode == SLI_MGMT_RPRT) {
- rh = (struct lpfc_fdmi_reg_hba *)pab;
+ rh = (struct lpfc_fdmi_reg_hba *)base;
/* HBA Identifier */
memcpy(&rh->hi.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
pab = (struct lpfc_fdmi_reg_portattr *)
- ((uint8_t *)pab + sizeof(struct lpfc_name));
+ ((uint8_t *)base + sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_name);
+ } else {
+ pab = base;
}
memcpy((uint8_t *)&pab->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
- size += sizeof(struct lpfc_name) + FOURBYTES;
pab->ab.EntryCnt = 0;
+ /* add length of name and EntryCnt field */
+ size += sizeof(struct lpfc_name) + FOURBYTES;
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3173,11 +3336,13 @@ hba_out:
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_port_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)pab + size));
- pab->ab.EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)base + size));
+ if (addsz) {
+ pab->ab.EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto port_out;
}
@@ -3186,19 +3351,16 @@ hba_out:
}
port_out:
pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
- /* Total size */
- if (cmdcode == SLI_MGMT_RPRT)
- size += sizeof(struct lpfc_name);
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_GHAT:
case SLI_MGMT_GRPL:
rsp_size = FC_MAX_NS_RSP;
- /* fall through */
+ fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3208,10 +3370,16 @@ port_out:
case SLI_MGMT_GPAT:
case SLI_MGMT_GPAS:
rsp_size = FC_MAX_NS_RSP;
- /* fall through */
+ fallthrough;
case SLI_MGMT_DPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return 0;
+ }
+ fallthrough;
case SLI_MGMT_DPA:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3224,37 +3392,32 @@ port_out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0298 FDMI cmdcode x%x not supported\n",
cmdcode);
- goto fdmi_cmd_free_bmpvirt;
+ goto fdmi_cmd_free_rspvirt;
}
CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *)bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = size;
+ bde = (struct ulp_bde64_le *)rsp->virt;
+ bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
+ bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
+ bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
+ ULP_BDE64_TYPE_SHIFT);
+ bde->type_size |= cpu_to_le32(size);
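
    The two statements above pack the BDE type and the byte count into the
    single little-endian type_size word of the new ulp_bde64_le layout. A
    sketch of the packing, assuming the type occupies the byte above a
    24-bit size field (mirroring ULP_BDE64_TYPE_SHIFT; the exact widths are
    an assumption here, not taken from this hunk):

    #include <stdint.h>

    #define BDE_TYPE_SHIFT  24      /* assumed, mirrors ULP_BDE64_TYPE_SHIFT */
    #define BDE_TYPE_BDE_64 0x0u

    static uint32_t bde_type_size(uint32_t type, uint32_t nbytes)
    {
            /* byte count in the low 24 bits, BDE type in the high byte */
            return (type << BDE_TYPE_SHIFT) | (nbytes & 0x00ffffffu);
    }
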
/*
 * The lpfc_ct_cmd/lpfc_get_req will increment the ndlp reference count
 * to hold the ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+ if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
return 0;
- /*
- * Decrement ndlp reference count to release ndlp reference held
- * for the failed command's callback function.
- */
- lpfc_nlp_put(ndlp);
-
-fdmi_cmd_free_bmpvirt:
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-fdmi_cmd_free_bmp:
- kfree(bmp);
-fdmi_cmd_free_mpvirt:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-fdmi_cmd_free_mp:
- kfree(mp);
+fdmi_cmd_free_rspvirt:
+ lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
+fdmi_cmd_free_rsp:
+ kfree(rsp);
+fdmi_cmd_free_rqvirt:
+ lpfc_mbuf_free(phba, rq->virt, rq->phys);
+fdmi_cmd_free_rq:
+ kfree(rq);
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3265,7 +3428,7 @@ fdmi_cmd_exit:
/**
* lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
- * @ptr - Context object of the timer.
+ * @t: Context object of the timer.
*
 * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
* the worker thread.
@@ -3397,3 +3560,258 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
}
return;
}
+
+static void
+lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
+ struct lpfc_sli_ct_request *ctcmd = inp->virt;
+ struct lpfc_sli_ct_request *ctrsp = outp->virt;
+ u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
+ struct app_id_object *app;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ u32 cmd, hash, bucket;
+ struct lpfc_vmid *vmp, *cur;
+ u8 *data = outp->virt;
+ int i;
+
+ cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
+ if (cmd == SLI_CTAS_DALLAPP_ID)
+ lpfc_ct_free_iocb(phba, cmdiocb);
+
+ if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
+ if (cmd != SLI_CTAS_DALLAPP_ID)
+ goto free_res;
+ }
+ /* Check for a CT LS_RJT response */
+ if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ if (cmd != SLI_CTAS_DALLAPP_ID)
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "3306 VMID FS_RJT Data: x%x x%x x%x\n",
+ cmd, ctrsp->ReasonCode,
+ ctrsp->Explanation);
+ if ((cmd != SLI_CTAS_DALLAPP_ID) ||
+ (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
+ (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
+ /* If DALLAPP_ID failed retry later */
+ if (cmd == SLI_CTAS_DALLAPP_ID)
+ vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
+ goto free_res;
+ }
+ }
+
+ switch (cmd) {
+ case SLI_CTAS_RAPP_IDENT:
+ app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6712 RAPP_IDENT app id %d port id x%x id "
+ "len %d\n", be32_to_cpu(app->app_id),
+ be32_to_cpu(app->port_id),
+ app->obj.entity_id_len);
+
+ if (app->obj.entity_id_len == 0 || app->port_id == 0)
+ goto free_res;
+
+ hash = lpfc_vmid_hash_fn(app->obj.entity_id,
+ app->obj.entity_id_len);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash,
+ app->obj.entity_id);
+ if (vmp) {
+ write_lock(&vport->vmid_lock);
+ vmp->un.app_id = be32_to_cpu(app->app_id);
+ vmp->flag |= LPFC_VMID_REGISTERED;
+ vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ /* Set IN USE flag */
+ vport->vmid_flag |= LPFC_VMID_IN_USE;
+ } else {
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6901 No entry found %s hash %d\n",
+ app->obj.entity_id, hash);
+ }
+ break;
+ case SLI_CTAS_DAPP_IDENT:
+ app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6713 DAPP_IDENT app id %d port id x%x\n",
+ be32_to_cpu(app->app_id),
+ be32_to_cpu(app->port_id));
+ break;
+ case SLI_CTAS_DALLAPP_ID:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "8856 Deregistered all app ids\n");
+ read_lock(&vport->vmid_lock);
+ for (i = 0; i < phba->cfg_max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ if (vmp->flag != LPFC_VMID_SLOT_FREE)
+ memset(vmp, 0, sizeof(struct lpfc_vmid));
+ }
+ read_unlock(&vport->vmid_lock);
+ /* for all elements in the hash table */
+ if (!hash_empty(vport->hash_table))
+ hash_for_each(vport->hash_table, bucket, cur, hnode)
+ hash_del(&cur->hnode);
+ vport->load_flag |= FC_ALLOW_VMID;
+ break;
+ default:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "8857 Invalid command code\n");
+ }
+free_res:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+}
+
+/**
+ * lpfc_vmid_cmd - Build and send a VMID CT cmd to the fabric management server
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdcode: application server command code to send
+ * @vmid: pointer to vmid info structure
+ *
+ * Builds and sends an application-server CT command using the CT subsystem.
+ */
+int
+lpfc_vmid_cmd(struct lpfc_vport *vport,
+ int cmdcode, struct lpfc_vmid *vmid)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *ctreq;
+ struct ulp_bde64 *bpl;
+ u32 size;
+ u32 rsp_size;
+ u8 *data;
+ struct lpfc_vmid_rapp_ident_list *rap;
+ struct lpfc_vmid_dapp_ident_list *dap;
+ u8 retry = 0;
+ struct lpfc_nodelist *ndlp;
+
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return 0;
+
+ cmpl = lpfc_cmpl_ct_cmd_vmid;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ goto vmid_free_mp_exit;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt)
+ goto vmid_free_mp_virt_exit;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp)
+ goto vmid_free_bmp_exit;
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt)
+ goto vmid_free_bmp_virt_exit;
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3275 VMID Request Data: x%x x%x x%x\n",
+ vport->fc_flag, vport->port_state, cmdcode);
+ ctreq = (struct lpfc_sli_ct_request *)mp->virt;
+ data = mp->virt;
+ /* First populate the CT_IU preamble */
+ memset(data, 0, LPFC_BPL_SIZE);
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+
+ ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+ ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;
+
+ ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+ rsp_size = LPFC_BPL_SIZE;
+ size = 0;
+
+ switch (cmdcode) {
+ case SLI_CTAS_RAPP_IDENT:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ rap = (struct lpfc_vmid_rapp_ident_list *)
+ (DAPP_IDENT_OFFSET + data);
+ rap->no_of_objects = cpu_to_be32(1);
+ rap->obj[0].entity_id_len = vmid->vmid_len;
+ memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
+ size = RAPP_IDENT_OFFSET +
+ sizeof(struct lpfc_vmid_rapp_ident_list);
+ retry = 1;
+ break;
+
+ case SLI_CTAS_GALLAPPIA_ID:
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ size = GALLAPPIA_ID_SIZE;
+ break;
+
+ case SLI_CTAS_DAPP_IDENT:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ dap = (struct lpfc_vmid_dapp_ident_list *)
+ (DAPP_IDENT_OFFSET + data);
+ dap->no_of_objects = cpu_to_be32(1);
+ dap->obj[0].entity_id_len = vmid->vmid_len;
+ memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
+ size = DAPP_IDENT_OFFSET +
+ sizeof(struct lpfc_vmid_dapp_ident_list);
+ write_lock(&vport->vmid_lock);
+ vmid->flag &= ~LPFC_VMID_REGISTERED;
+ write_unlock(&vport->vmid_lock);
+ retry = 1;
+ break;
+
+ case SLI_CTAS_DALLAPP_ID:
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ size = DALLAPP_ID_SIZE;
+ break;
+
+ default:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "7062 VMID cmdcode x%x not supported\n",
+ cmdcode);
+ goto vmid_free_all_mem;
+ }
+
+ ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = putPaddrHigh(mp->phys);
+ bpl->addrLow = putPaddrLow(mp->phys);
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = size;
+
+ /* The lpfc_ct_cmd/lpfc_get_req will increment the ndlp reference count
+ * to hold the ndlp reference for the corresponding callback function.
+ */
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
+ return 0;
+
+ vmid_free_all_mem:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ vmid_free_bmp_virt_exit:
+ kfree(bmp);
+ vmid_free_bmp_exit:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ vmid_free_mp_virt_exit:
+ kfree(mp);
+ vmid_free_mp_exit:
+
+ /* Issue CT request failed */
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "3276 VMID CT request failed Data: x%x\n", cmdcode);
+ return -EIO;
+}
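
    The label names in the unwind ladder above describe the allocation whose
    failure jumps there, so each label frees only what had already
    succeeded, in reverse order, and a successful submit returns early
    because ownership passes to the completion handler. The same shape in a
    stand-alone sketch (names hypothetical):

    #include <stdlib.h>

    static int issue_cmd(void *rq, void *rq_virt, void *bpl)
    {
            (void)rq; (void)rq_virt; (void)bpl;
            return -1;              /* pretend the submit failed */
    }

    static int send_ct_request(void)
    {
            void *rq, *rq_virt, *bpl;

            rq = malloc(32);
            if (!rq)
                    goto exit;
            rq_virt = malloc(1024);
            if (!rq_virt)
                    goto free_rq;
            bpl = malloc(32);
            if (!bpl)
                    goto free_rq_virt;

            if (issue_cmd(rq, rq_virt, bpl) == 0)
                    return 0;       /* success: the completion path frees */

            free(bpl);              /* failure: unwind in reverse order */
    free_rq_virt:
            free(rq_virt);
    free_rq:
            free(rq);
    exit:
            return -1;
    }
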
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 819335b16c2e..f5252e45a48a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -39,8 +39,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -50,7 +48,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -384,7 +381,7 @@ skipit:
static int lpfc_debugfs_last_xripool;
/**
- * lpfc_debugfs_common_xri_data - Dump Hardware Queue info to a buffer
+ * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
* @phba: The HBA to gather host buffer info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
@@ -866,16 +863,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
statep, ndlp->nlp_DID);
len += scnprintf(buf+len, size-len,
- "WWPN x%llx ",
+ "WWPN x%016llx ",
wwn_to_u64(ndlp->nlp_portname.u.wwn));
len += scnprintf(buf+len, size-len,
- "WWNN x%llx ",
+ "WWNN x%016llx ",
wwn_to_u64(ndlp->nlp_nodename.u.wwn));
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- len += scnprintf(buf+len, size-len, "RPI:%03d ",
- ndlp->nlp_rpi);
- else
- len += scnprintf(buf+len, size-len, "RPI:none ");
+ len += scnprintf(buf+len, size-len, "RPI:x%04x ",
+ ndlp->nlp_rpi);
len += scnprintf(buf+len, size-len, "flag:x%08x ",
ndlp->nlp_flag);
if (!ndlp->nlp_type)
@@ -898,9 +892,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
len += scnprintf(buf + len,
size - len, "NVME_INITIATOR ");
- len += scnprintf(buf+len, size-len, "usgmap:%x ",
- ndlp->nlp_usg_map);
- len += scnprintf(buf+len, size-len, "refcnt:%x",
+ len += scnprintf(buf+len, size-len, "refcnt:%d",
kref_read(&ndlp->kref));
if (iocnt) {
i = atomic_read(&ndlp->cmd_pending);
@@ -909,8 +901,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
i, ndlp->cmd_qdepth);
outio += i;
}
- len += scnprintf(buf + len, size - len, "defer:%x ",
- ndlp->nlp_defer_did);
+ len += scnprintf(buf+len, size-len, " xpt:x%x",
+ ndlp->fc4_xpt_flags);
+ if (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)
+ len += scnprintf(buf+len, size-len, " defer:%x",
+ ndlp->nlp_defer_did);
len += scnprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
@@ -960,13 +955,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
- spin_lock(&phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
else
nrport = NULL;
- spin_unlock(&phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (!nrport)
continue;
@@ -1035,7 +1030,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport;
struct lpfc_fc4_ctrl_stat *cstat;
struct lpfc_nvme_lport *lport;
@@ -1300,8 +1295,88 @@ buffer_done:
return len;
}
+void
+lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+{
+ uint64_t seg1, seg2, seg3, seg4;
+ uint64_t segsum;
+
+ if (!lpfc_cmd->ts_last_cmd ||
+ !lpfc_cmd->ts_cmd_start ||
+ !lpfc_cmd->ts_cmd_wqput ||
+ !lpfc_cmd->ts_isr_cmpl ||
+ !lpfc_cmd->ts_data_io)
+ return;
+
+ if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start)
+ return;
+ if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd)
+ return;
+ if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start)
+ return;
+ if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput)
+ return;
+ if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl)
+ return;
+ /*
+ * Segment 1 - Time from the last FCP command cmpl being handed
+ * off to the upper layer to the start of the next command.
+ * Segment 2 - Time from the driver receiving an IO cmd start
+ * from the upper layer to the WQ put being done on the IO cmd.
+ * Segment 3 - Time from the driver WQ put being done on the IO
+ * cmd to the MSI-X ISR for the IO cmpl.
+ * Segment 4 - Time from the MSI-X ISR for the IO cmpl to when
+ * the cmpl is handed off to the upper layer.
+ */
+ seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
+ seg1 = 0;
+
+ /* Calculate times relative to start of IO */
+ seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start);
+ segsum = seg2;
+ seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+
+ phba->ktime_data_samples++;
+ phba->ktime_seg1_total += seg1;
+ if (seg1 < phba->ktime_seg1_min)
+ phba->ktime_seg1_min = seg1;
+ else if (seg1 > phba->ktime_seg1_max)
+ phba->ktime_seg1_max = seg1;
+ phba->ktime_seg2_total += seg2;
+ if (seg2 < phba->ktime_seg2_min)
+ phba->ktime_seg2_min = seg2;
+ else if (seg2 > phba->ktime_seg2_max)
+ phba->ktime_seg2_max = seg2;
+ phba->ktime_seg3_total += seg3;
+ if (seg3 < phba->ktime_seg3_min)
+ phba->ktime_seg3_min = seg3;
+ else if (seg3 > phba->ktime_seg3_max)
+ phba->ktime_seg3_max = seg3;
+ phba->ktime_seg4_total += seg4;
+ if (seg4 < phba->ktime_seg4_min)
+ phba->ktime_seg4_min = seg4;
+ else if (seg4 > phba->ktime_seg4_max)
+ phba->ktime_seg4_max = seg4;
+
+ lpfc_cmd->ts_last_cmd = 0;
+ lpfc_cmd->ts_cmd_start = 0;
+ lpfc_cmd->ts_cmd_wqput = 0;
+ lpfc_cmd->ts_isr_cmpl = 0;
+ lpfc_cmd->ts_data_io = 0;
+}
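
    A worked example of the four-segment split, with made-up monotonic
    nanosecond timestamps: seg2 through seg4 are derived relative to
    ts_cmd_start by subtracting the sum of the earlier segments, exactly as
    above.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ts_last_cmd = 100, ts_cmd_start = 400,
                     ts_cmd_wqput = 900, ts_isr_cmpl = 5400, ts_data_io = 6000;

            uint64_t seg1 = ts_cmd_start - ts_last_cmd;                  /* 300 ns idle gap  */
            uint64_t seg2 = ts_cmd_wqput - ts_cmd_start;                 /* 500 ns submit    */
            uint64_t seg3 = (ts_isr_cmpl - ts_cmd_start) - seg2;         /* 4500 ns hardware */
            uint64_t seg4 = (ts_data_io - ts_cmd_start) - (seg2 + seg3); /* 600 ns cmpl      */

            if (seg1 > 5000000)     /* >5 ms gap: not back-to-back, ignore */
                    seg1 = 0;

            printf("seg1=%llu seg2=%llu seg3=%llu seg4=%llu\n",
                   (unsigned long long)seg1, (unsigned long long)seg2,
                   (unsigned long long)seg3, (unsigned long long)seg4);
            return 0;
    }
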
+
/**
- * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer
+ * lpfc_debugfs_ioktime_data - Dump IO ktime statistics to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
@@ -1314,13 +1389,13 @@ buffer_done:
* not exceed @size.
**/
static int
-lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
+lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
int len = 0;
if (phba->nvmet_support == 0) {
- /* NVME Initiator */
+ /* Initiator */
len += scnprintf(buf + len, PAGE_SIZE - len,
"ktime %s: Total Samples: %lld\n",
(phba->ktime_on ? "Enabled" : "Disabled"),
@@ -1330,8 +1405,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "Segment 1: Last NVME Cmd cmpl "
- "done -to- Start of next NVME cnd (in driver)\n");
+ "Segment 1: Last Cmd cmpl "
+ "done -to- Start of next Cmd (in driver)\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@@ -1341,7 +1416,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
phba->ktime_seg1_max);
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "Segment 2: Driver start of NVME cmd "
+ "Segment 2: Driver start of Cmd "
"-to- Firmware WQ doorbell\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -1364,7 +1439,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 4: MSI-X ISR cmpl -to- "
- "NVME cmpl done\n");
+ "Cmd cmpl done\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@@ -1603,89 +1678,124 @@ out:
}
/**
- * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer
+ * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
- * This routine dumps the NVME statistics associated with @vport
+ * This routine dumps the NVME + SCSI statistics associated with @vport
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
-lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli4_hdw_queue *qp;
- int i, j, max_cnt;
- int len = 0;
+ struct lpfc_hdwq_stat *c_stat;
+ int i, j, len;
uint32_t tot_xmt;
uint32_t tot_rcv;
uint32_t tot_cmpl;
+ char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPUcheck %s ",
- (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
- "Enabled" : "Disabled"));
- if (phba->nvmet_support) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%s\n",
- (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
- "Rcv Enabled\n" : "Rcv Disabled\n"));
- } else {
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
- }
- max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
+ scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- qp = &phba->sli4_hba.hdwq[i];
+ scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ",
+ (phba->hdwqstat_on &
+ (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ?
+ "Enabled" : "Disabled"));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ",
+ (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ?
+ "Enabled" : "Disabled"));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "\n\n");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
tot_rcv = 0;
tot_xmt = 0;
tot_cmpl = 0;
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
- tot_xmt += qp->cpucheck_xmt_io[j];
- tot_cmpl += qp->cpucheck_cmpl_io[j];
- if (phba->nvmet_support)
- tot_rcv += qp->cpucheck_rcv_io[j];
- }
- /* Only display Hardware Qs with something */
- if (!tot_xmt && !tot_cmpl && !tot_rcv)
- continue;
+ for_each_present_cpu(j) {
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j);
+
+ /* Only display for this HDWQ */
+ if (i != c_stat->hdwq_no)
+ continue;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "HDWQ %03d: ", i);
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
/* Only display non-zero counters */
- if (!qp->cpucheck_xmt_io[j] &&
- !qp->cpucheck_cmpl_io[j] &&
- !qp->cpucheck_rcv_io[j])
+ if (!c_stat->xmt_io && !c_stat->cmpl_io &&
+ !c_stat->rcv_io)
continue;
+
+ if (!tot_xmt && !tot_cmpl && !tot_rcv) {
+ /* Print HDWQ string only the first time */
+ scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ }
+
+ tot_xmt += c_stat->xmt_io;
+ tot_cmpl += c_stat->cmpl_io;
+ if (phba->nvmet_support)
+ tot_rcv += c_stat->rcv_io;
+
+ scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
if (phba->nvmet_support) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPU %03d: %x/%x/%x ", j,
- qp->cpucheck_rcv_io[j],
- qp->cpucheck_xmt_io[j],
- qp->cpucheck_cmpl_io[j]);
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x RCV 0x%x |",
+ c_stat->xmt_io, c_stat->cmpl_io,
+ c_stat->rcv_io);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
} else {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPU %03d: %x/%x ", j,
- qp->cpucheck_xmt_io[j],
- qp->cpucheck_cmpl_io[j]);
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x |",
+ c_stat->xmt_io, c_stat->cmpl_io);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
}
}
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Total: %x\n", tot_xmt);
- if (len >= max_cnt) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Truncated ...\n");
- return len;
+
+ /* Check if nothing to display */
+ if (!tot_xmt && !tot_cmpl && !tot_rcv)
+ continue;
+
+ scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: ");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ if (phba->nvmet_support) {
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n",
+ tot_xmt, tot_cmpl, tot_rcv);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ } else {
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x]\n\n",
+ tot_xmt, tot_cmpl);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
}
}
+
+buffer_done:
+ len = strnlen(buf, size);
return len;
}
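
    The rewrite above formats each fragment into a small scratch buffer and
    appends it with strlcat(), stopping as soon as the return value reports
    the destination is full. A stand-alone sketch of the pattern; strlcat()
    is a BSD routine that glibc lacks, so a minimal equivalent is inlined:

    #include <stdio.h>
    #include <string.h>

    static size_t my_strlcat(char *dst, const char *src, size_t size)
    {
            size_t dlen = strnlen(dst, size);

            if (dlen == size)
                    return size + strlen(src);      /* already full */
            snprintf(dst + dlen, size - dlen, "%s", src);
            return dlen + strlen(src);              /* length it tried to create */
    }

    int main(void)
    {
            char buf[64] = "", tmp[32];
            int i;

            for (i = 0; i < 16; i++) {
                    snprintf(tmp, sizeof(tmp), "[HDWQ %d] ", i);
                    if (my_strlcat(buf, tmp, sizeof(buf)) >= sizeof(buf))
                            break;                  /* buffer full: stop appending */
            }
            printf("%s\n", buf);
            return 0;
    }
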
@@ -2048,10 +2158,6 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- /* Protect copy from user */
- if (!access_ok(buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2315,15 +2421,16 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
memset(dstbuf, 0, 33);
size = (nbytes < 32) ? nbytes : 32;
if (copy_from_user(dstbuf, buf, size))
- return 0;
+ return -EFAULT;
if (dent == phba->debug_InjErrLBA) {
- if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+ if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+ (dstbuf[2] == 'f'))
tmp = (uint64_t)(-1);
}
if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
- return 0;
+ return -EINVAL;
if (dent == phba->debug_writeGuard)
phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
@@ -2500,12 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
-
- /* Protect copy from user */
- if (!access_ok(buf, nbytes))
- return -EFAULT;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2585,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
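
    Several debugfs writers in this patch converge on the same bounded-copy
    idiom: clamp nbytes to sizeof(mybuf) - 1 so a NUL terminator always
    fits, and drop the access_ok() call because copy_from_user() performs
    that check itself. A userspace stand-in for the idiom (assumes bufsz is
    at least 1):

    #include <stddef.h>
    #include <string.h>

    static size_t bounded_write(char *mybuf, size_t bufsz,
                                const char *ubuf, size_t nbytes)
    {
            if (nbytes > bufsz - 1)
                    nbytes = bufsz - 1;     /* always leave room for the NUL */

            memset(mybuf, 0, bufsz);
            memcpy(mybuf, ubuf, nbytes);    /* copy_from_user() in the driver */
            return nbytes;
    }
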
@@ -2669,10 +2772,6 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
char mybuf[6] = {0};
int i;
- /* Protect copy from user */
- if (!access_ok(buf, nbytes))
- return -EFAULT;
-
if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ?
(sizeof(mybuf) - 1) : nbytes))
return -EFAULT;
@@ -2689,7 +2788,7 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
}
static int
-lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
+lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@@ -2700,14 +2799,14 @@ lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
+ debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
- LPFC_NVMEKTIME_SIZE);
+ debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer,
+ LPFC_IOKTIME_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2718,8 +2817,8 @@ out:
}
static ssize_t
-lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
@@ -2727,8 +2826,8 @@ lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2855,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2921,7 +3020,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
}
static int
-lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@@ -2932,14 +3031,14 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
+ debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
- LPFC_CPUCHECK_SIZE);
+ debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer,
+ LPFC_SCSISTAT_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2950,19 +3049,19 @@ out:
}
static ssize_t
-lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_hdwq_stat *c_stat;
char mybuf[64];
char *pbuf;
- int i, j;
+ int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2972,41 +3071,39 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
- phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
+ phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO |
LPFC_CHECK_SCSI_IO);
return strlen(pbuf);
} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
- phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVME_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
- phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
+ if (!phba->nvmet_support)
+ phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
- } else if ((strncmp(pbuf, "rcv",
- sizeof("rcv") - 1) == 0)) {
- if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
- else
- return -EINVAL;
+ } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) {
+ phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO |
+ LPFC_CHECK_NVMET_IO);
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) {
+ phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "off",
sizeof("off") - 1) == 0)) {
- phba->cpucheck_on = LPFC_CHECK_OFF;
+ phba->hdwqstat_on = LPFC_CHECK_OFF;
return strlen(pbuf);
} else if ((strncmp(pbuf, "zero",
sizeof("zero") - 1) == 0)) {
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- qp = &phba->sli4_hba.hdwq[i];
-
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
- qp->cpucheck_rcv_io[j] = 0;
- qp->cpucheck_xmt_io[j] = 0;
- qp->cpucheck_cmpl_io[j] = 0;
- }
+ for_each_present_cpu(i) {
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i);
+ c_stat->xmt_io = 0;
+ c_stat->cmpl_io = 0;
+ c_stat->rcv_io = 0;
}
return strlen(pbuf);
}
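
    The zero branch above resets one lpfc_hdwq_stat per present CPU via
    per_cpu_ptr() instead of walking the old per-queue arrays. A flat-array
    stand-in for the per-CPU walk (sizes and names illustrative):

    #define NR_CPUS 8

    struct hdwq_stat {
            unsigned int hdwq_no;
            unsigned int xmt_io, cmpl_io, rcv_io;
    };

    static void zero_stats(struct hdwq_stat *stats)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {   /* for_each_present_cpu() */
                    stats[cpu].xmt_io = 0;
                    stats[cpu].cmpl_io = 0;
                    stats[cpu].rcv_io = 0;
            }
    }
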
@@ -3242,7 +3339,6 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
break;
case LPFC_PCI_CFG_BROWSE: /* browse all */
goto pcicfg_browse;
- break;
default:
/* illegal count */
len = 0;
@@ -4088,6 +4184,7 @@ lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
/**
* lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
* @pbuffer: The pointer to buffer to copy the read data into.
+ * @len: Length of the buffer.
* @pque: The pointer to the queue to be read.
* @index: The index into the queue entry.
*
@@ -4282,7 +4379,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
goto error_out;
- break;
+
case LPFC_IDIAG_CQ:
/* MBX complete queue */
if (phba->sli4_hba.mbx_cq &&
@@ -4334,7 +4431,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
goto error_out;
- break;
+
case LPFC_IDIAG_MQ:
/* MBX work queue */
if (phba->sli4_hba.mbx_wq &&
@@ -4348,7 +4445,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
goto error_out;
- break;
+
case LPFC_IDIAG_WQ:
/* ELS work queue */
if (phba->sli4_hba.els_wq &&
@@ -4388,9 +4485,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
-
goto error_out;
- break;
+
case LPFC_IDIAG_RQ:
/* HDR queue */
if (phba->sli4_hba.hdr_rq &&
@@ -4415,10 +4511,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
goto error_out;
- break;
default:
goto error_out;
- break;
}
pass_check:
@@ -4667,7 +4761,7 @@ error_out:
* @phba: The pointer to hba structure.
* @pbuffer: The pointer to the buffer to copy the data to.
* @len: The length of bytes to copied.
- * @drbregid: The id to doorbell registers.
+ * @ctlregid: The id to doorbell registers.
*
* Description:
* This routine reads a control register and copies its content to the
@@ -5057,12 +5151,12 @@ error_out:
* This routine is to get the available extent information.
*
* Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
**/
static int
lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
- uint16_t ext_cnt, ext_size;
+ uint16_t ext_cnt = 0, ext_size = 0;
len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAvailable Extents Information:\n");
@@ -5108,7 +5202,7 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
* This routine is to get the allocated extent information.
*
* Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
**/
static int
lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
@@ -5180,7 +5274,7 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
* This routine is to get the driver extent information.
*
* Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
**/
static int
lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
@@ -5335,6 +5429,154 @@ lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
+static int
+lpfc_cgn_buffer_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *buffer = debug->buffer;
+ uint32_t *ptr;
+ int cnt, len = 0;
+
+ if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Congestion Mgmt is not supported\n");
+ goto out;
+ }
+ ptr = (uint32_t *)phba->cgn_i->virt;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Congestion Buffer Header\n");
+ /* Dump the first 32 bytes */
+ cnt = 32;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "000: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+ *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7));
+ ptr += 8;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Congestion Buffer Data\n");
+ while (cnt < sizeof(struct lpfc_cgn_info)) {
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ goto out;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%03x: %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
+ cnt, *ptr, *(ptr + 1), *(ptr + 2),
+ *(ptr + 3), *(ptr + 4), *(ptr + 5),
+ *(ptr + 6), *(ptr + 7));
+ cnt += 32;
+ ptr += 8;
+ }
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ goto out;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Parameter Data\n");
+ ptr = (uint32_t *)&phba->cgn_p;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3));
+out:
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
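
    The read handler above renders the congestion buffer 32 bytes per line,
    eight 32-bit words each with a byte-offset column, and emits a
    truncation marker once less than one output line of space remains. The
    loop in isolation:

    #include <stdint.h>
    #include <stdio.h>

    static int dump_words(char *out, size_t outsz, const uint32_t *p, size_t bytes)
    {
            size_t cnt, len = 0;

            for (cnt = 0; cnt < bytes; cnt += 32, p += 8) {
                    if (len + 80 > outsz) {         /* one line of headroom left */
                            len += snprintf(out + len, outsz - len,
                                            "Truncated . . .\n");
                            break;
                    }
                    len += snprintf(out + len, outsz - len,
                                    "%03zx: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                                    cnt, p[0], p[1], p[2], p[3],
                                    p[4], p[5], p[6], p[7]);
            }
            return (int)len;
    }
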
+
+static int
+lpfc_cgn_buffer_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+static int
+lpfc_rx_monitor_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_rx_monitor_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_rx_monitor_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *buffer = debug->buffer;
+
+ if (!phba->rx_monitor) {
+ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
+ "Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
+ MAX_DEBUGFS_RX_INFO_SIZE,
+ LPFC_MAX_RXMONITOR_ENTRY);
+ }
+
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
+ strlen(buffer));
+}
+
+static int
+lpfc_rx_monitor_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_rx_monitor_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -5431,13 +5673,13 @@ static const struct file_operations lpfc_debugfs_op_scsistat = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_nvmektime
-static const struct file_operations lpfc_debugfs_op_nvmektime = {
+#undef lpfc_debugfs_op_ioktime
+static const struct file_operations lpfc_debugfs_op_ioktime = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_nvmektime_open,
+ .open = lpfc_debugfs_ioktime_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
- .write = lpfc_debugfs_nvmektime_write,
+ .write = lpfc_debugfs_ioktime_write,
.release = lpfc_debugfs_release,
};
@@ -5451,13 +5693,13 @@ static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_cpucheck
-static const struct file_operations lpfc_debugfs_op_cpucheck = {
+#undef lpfc_debugfs_op_hdwqstat
+static const struct file_operations lpfc_debugfs_op_hdwqstat = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_cpucheck_open,
+ .open = lpfc_debugfs_hdwqstat_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
- .write = lpfc_debugfs_cpucheck_write,
+ .write = lpfc_debugfs_hdwqstat_write,
.release = lpfc_debugfs_release,
};
@@ -5563,6 +5805,23 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.write = lpfc_idiag_extacc_write,
.release = lpfc_idiag_cmd_release,
};
+#undef lpfc_cgn_buffer_op
+static const struct file_operations lpfc_cgn_buffer_op = {
+ .owner = THIS_MODULE,
+ .open = lpfc_cgn_buffer_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_cgn_buffer_read,
+ .release = lpfc_cgn_buffer_release,
+};
+
+#undef lpfc_rx_monitor_op
+static const struct file_operations lpfc_rx_monitor_op = {
+ .owner = THIS_MODULE,
+ .open = lpfc_rx_monitor_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_rx_monitor_read,
+ .release = lpfc_rx_monitor_release,
+};
#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
@@ -5813,6 +6072,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* Congestion Info Buffer */
+ scnprintf(name, sizeof(name), "cgn_buffer");
+ phba->debug_cgn_buffer =
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_cgn_buffer_op);
+ if (!phba->debug_cgn_buffer) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6527 Cannot create debugfs "
+ "cgn_buffer\n");
+ goto debug_failed;
+ }
+
+ /* RX Monitor */
+ scnprintf(name, sizeof(name), "rx_monitor");
+ phba->debug_rx_monitor =
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_rx_monitor_op);
+ if (!phba->debug_rx_monitor) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6528 Cannot create debugfs "
+ "rx_monitor\n");
+ goto debug_failed;
+ }
+
/* RAS log */
snprintf(name, sizeof(name), "ras_log");
phba->debug_ras_log =
@@ -5842,7 +6127,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_lockstat);
if (!phba->debug_lockstat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4610 Cant create debugfs lockstat\n");
+ "4610 Can't create debugfs lockstat\n");
goto debug_failed;
}
#endif
@@ -5948,9 +6233,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
- phba->slow_ring_trc = kmalloc(
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc),
+ phba->slow_ring_trc = kcalloc(
+ lpfc_debugfs_max_slow_ring_trc,
+ sizeof(struct lpfc_debugfs_trc),
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -5959,9 +6244,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
- memset(phba->slow_ring_trc, 0,
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc));
}
snprintf(name, sizeof(name), "nvmeio_trc");
@@ -6075,17 +6357,22 @@ nvmeio_off:
goto debug_failed;
}
- snprintf(name, sizeof(name), "nvmektime");
- vport->debug_nvmektime =
+ snprintf(name, sizeof(name), "ioktime");
+ vport->debug_ioktime =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nvmektime);
+ vport, &lpfc_debugfs_op_ioktime);
+ if (!vport->debug_ioktime) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0815 Cannot create debugfs ioktime\n");
+ goto debug_failed;
+ }
- snprintf(name, sizeof(name), "cpucheck");
- vport->debug_cpucheck =
+ snprintf(name, sizeof(name), "hdwqstat");
+ vport->debug_hdwqstat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_cpucheck);
+ vport, &lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@@ -6216,11 +6503,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_scsistat); /* scsistat */
vport->debug_scsistat = NULL;
- debugfs_remove(vport->debug_nvmektime); /* nvmektime */
- vport->debug_nvmektime = NULL;
+ debugfs_remove(vport->debug_ioktime); /* ioktime */
+ vport->debug_ioktime = NULL;
- debugfs_remove(vport->debug_cpucheck); /* cpucheck */
- vport->debug_cpucheck = NULL;
+ debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
+ vport->debug_hdwqstat = NULL;
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
@@ -6236,6 +6523,12 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_cgn_buffer);
+ phba->debug_cgn_buffer = NULL;
+
+ debugfs_remove(phba->debug_rx_monitor);
+ phba->debug_rx_monitor = NULL;
+
debugfs_remove(phba->debug_ras_log);
phba->debug_ras_log = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 20f2537af511..8d2e8d05bbc0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -46,13 +46,15 @@
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
-#define LPFC_NVMEKTIME_SIZE 8192
-#define LPFC_CPUCHECK_SIZE 8192
+#define LPFC_IOKTIME_SIZE 8192
#define LPFC_NVMEIO_TRC_SIZE 8192
/* scsistat output buffer size */
#define LPFC_SCSISTAT_SIZE 8192
+/* Congestion Info Buffer size */
+#define LPFC_CGN_BUF_SIZE 8192
+
#define LPFC_DEBUG_OUT_LINE_SZ 80
/*
@@ -280,6 +282,12 @@ struct lpfc_idiag {
void *ptr_private;
};
+#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
+struct lpfc_rx_monitor_debug {
+ char *i_private;
+ char *buffer;
+};
+
#else
#define lpfc_nvmeio_data(phba, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 482e4a888dae..f82615d87c4b 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -41,6 +41,7 @@ enum lpfc_work_type {
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
LPFC_EVT_RESET_HBA,
+ LPFC_EVT_RECOVER_PORT
};
/* structure used to queue event to the discovery tasklet */
@@ -76,10 +77,29 @@ struct lpfc_node_rrqs {
unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
};
+enum lpfc_fc4_xpt_flags {
+ NLP_XPT_REGD = 0x1,
+ SCSI_XPT_REGD = 0x2,
+ NVME_XPT_REGD = 0x4,
+ NVME_XPT_UNREG_WAIT = 0x8,
+ NLP_XPT_HAS_HH = 0x10
+};
+
+enum lpfc_nlp_save_flags {
+ /* devloss occurred during recovery */
+ NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
+ /* wait for outstanding LOGO to cmpl */
+ NLP_WAIT_FOR_LOGO = 0x2,
+};
+
struct lpfc_nodelist {
struct list_head nlp_listp;
+ struct serv_parm fc_sparam; /* buffer for service params */
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
+
+ spinlock_t lock; /* Node management lock */
+
uint32_t nlp_flag; /* entry flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -113,14 +133,9 @@ struct lpfc_nodelist {
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
 u8 nlp_nvme_info; /* NVME NSLER Support */
 #define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
+ uint8_t vmid_support; /* destination VMID support */
- uint16_t nlp_usg_map; /* ndlp management usage bitmap */
-#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
-#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */
-#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */
-#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
-
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct lpfc_hba *phba;
struct fc_rport *rport; /* scsi_transport_fc port structure */
@@ -128,20 +143,25 @@ struct lpfc_nodelist {
struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
struct lpfc_work_evt dev_loss_evt;
+ struct lpfc_work_evt recovery_evt;
struct kref kref;
atomic_t cmd_pending;
uint32_t cmd_qdepth;
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
- struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
- uint32_t upcall_flags;
-#define NLP_WAIT_FOR_UNREG 0x1
+
+ /* flags to keep ndlp alive until special conditions are met */
+ enum lpfc_nlp_save_flags save_flags;
+
+ enum lpfc_fc4_xpt_flags fc4_xpt_flags;
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
+ wait_queue_head_t *logo_waitq;
};
+
struct lpfc_node_rrq {
struct list_head list;
uint16_t xritag;
@@ -149,7 +169,6 @@ struct lpfc_node_rrq {
uint16_t rxid;
uint32_t nlp_DID; /* FC D_ID of entry */
struct lpfc_vport *vport;
- struct lpfc_nodelist *ndlp;
unsigned long rrq_stop_time;
};
@@ -168,9 +187,8 @@ struct lpfc_node_rrq {
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
-#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
-#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
+#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
@@ -189,32 +207,6 @@ struct lpfc_node_rrq {
#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
-
-/* ndlp usage management macros */
-#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
- & NLP_USG_NODE_ACT_BIT) \
- && \
- !((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_ACK_BIT))
-#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_NODE_ACT_BIT)
-#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- = NLP_USG_NODE_ACT_BIT)
-#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- &= ~NLP_USG_NODE_ACT_BIT)
-#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_IACT_REQ_BIT)
-#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_IACT_REQ_BIT)
-#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_REQ_BIT)
-#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_FREE_REQ_BIT)
-#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_ACK_BIT)
-#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_FREE_ACK_BIT)
-
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
* when Link Up discovery or Registered State Change Notification (RSCN)
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 42a2bf38eaea..863b2125fed6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -55,9 +56,18 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb);
+static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
+static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
static int lpfc_max_els_tries = 3;
+static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
+static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
+static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
+
/**
* lpfc_els_chk_latt - Check host link attention event for a vport
* @vport: pointer to a host virtual N_Port data structure.
@@ -100,7 +110,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 0;
/* Pending Link Event during Discovery */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0237 Pending Link Event during "
"Discovery: State x%x\n",
phba->pport->port_state);
@@ -124,9 +134,9 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
/**
* lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
* @vport: pointer to a host virtual N_Port data structure.
- * @expectRsp: flag indicating whether response is expected.
- * @cmdSize: size of the ELS command.
- * @retry: number of retries to the command IOCB when it fails.
+ * @expect_rsp: flag indicating whether response is expected.
+ * @cmd_size: size of the ELS command.
+ * @retry: number of retries to the command when it fails.
* @ndlp: pointer to a node-list data structure.
* @did: destination identifier.
* @elscmd: the ELS command code.
@@ -142,7 +152,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* Buffer Descriptor Entries (BDEs), allocates buffers for both command
* payload and response payload (if expected). The reference count on the
* ndlp is incremented by 1 and the reference to the ndlp is put into
- * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * ndlp of the IOCB data structure for this IOCB to hold the ndlp
* reference for the command's callback function to access later.
*
* Return code
@@ -150,25 +160,23 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* NULL - when els iocb data structure allocation/preparation failed
**/
struct lpfc_iocbq *
-lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry,
- struct lpfc_nodelist *ndlp, uint32_t did,
- uint32_t elscmd)
+lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
+ u16 cmd_size, u8 retry,
+ struct lpfc_nodelist *ndlp, u32 did,
+ u32 elscmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
- struct ulp_bde64 *bpl;
- IOCB_t *icmd;
-
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
+ struct ulp_bde64_le *bpl;
+ u32 timeout = 0;
if (!lpfc_is_link_up(phba))
return NULL;
/* Allocate buffer for command iocb */
elsiocb = lpfc_sli_get_iocbq(phba);
-
- if (elsiocb == NULL)
+ if (!elsiocb)
return NULL;
/*
@@ -176,35 +184,33 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
* in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
*/
if ((did == Fabric_DID) &&
- (phba->hba_flag & HBA_FIP_SUPPORT) &&
- ((elscmd == ELS_CMD_FLOGI) ||
- (elscmd == ELS_CMD_FDISC) ||
- (elscmd == ELS_CMD_LOGO)))
+ (phba->hba_flag & HBA_FIP_SUPPORT) &&
+ ((elscmd == ELS_CMD_FLOGI) ||
+ (elscmd == ELS_CMD_FDISC) ||
+ (elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_FDISC:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_LOGO:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
}
else
- elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
-
- icmd = &elsiocb->iocb;
+ elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd)
pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
if (!pcmd || !pcmd->virt)
@@ -213,19 +219,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pcmd->list);
/* Allocate buffer for response payload */
- if (expectRsp) {
- prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (expect_rsp) {
+ prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
if (!prsp || !prsp->virt)
goto els_iocb_free_prsp_exit;
INIT_LIST_HEAD(&prsp->list);
- } else
+ } else {
prsp = NULL;
+ }
/* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
if (pbuflist)
pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&pbuflist->phys);
@@ -234,86 +241,53 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pbuflist->list);
- if (expectRsp) {
- icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
-
- icmd->un.elsreq64.remoteID = did; /* DID */
- icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
- if (elscmd == ELS_CMD_FLOGI)
- icmd->ulpTimeout = FF_DEF_RATOV * 2;
- else if (elscmd == ELS_CMD_LOGO)
- icmd->ulpTimeout = phba->fc_ratov;
- else
- icmd->ulpTimeout = phba->fc_ratov * 2;
- } else {
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
- icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
- }
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
-
- /*
- * If we have NPIV enabled, we want to send ELS traffic by VPI.
- * For SLI4, since the driver controls VPIs we also want to include
- * all ELS pt2pt protocol traffic as well.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- ((phba->sli_rev == LPFC_SLI_REV4) &&
- (vport->fc_flag & FC_PT2PT))) {
-
- if (expectRsp) {
- icmd->un.elsreq64.myID = vport->fc_myDID;
-
- /* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = phba->vpi_ids[vport->vpi];
+ if (expect_rsp) {
+ switch (elscmd) {
+ case ELS_CMD_FLOGI:
+ timeout = FF_DEF_RATOV * 2;
+ break;
+ case ELS_CMD_LOGO:
+ timeout = phba->fc_ratov;
+ break;
+ default:
+ timeout = phba->fc_ratov * 2;
}
- icmd->ulpCt_h = 0;
- /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
- if (elscmd == ELS_CMD_ECHO)
- icmd->ulpCt_l = 0; /* context = invalid RPI */
- else
- icmd->ulpCt_l = 1; /* context = VPI */
+ /* Fill SGE for the num bde count */
+ elsiocb->num_bdes = 2;
}
- bpl = (struct ulp_bde64 *) pbuflist->virt;
- bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
- bpl->tus.f.bdeSize = cmdSize;
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bmp = pcmd;
+ else
+ bmp = pbuflist;
+
+ lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
+ elscmd, timeout, expect_rsp);
- if (expectRsp) {
+ bpl = (struct ulp_bde64_le *)pbuflist->virt;
+ bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
+ bpl->type_size = cpu_to_le32(cmd_size);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ if (expect_rsp) {
bpl++;
- bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
- bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
+ bpl->type_size = cpu_to_le32(FCELSSIZE);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
}
- /* prevent preparing iocb with NULL ndlp reference */
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto els_iocb_free_pbuf_exit;
- elsiocb->context2 = pcmd;
- elsiocb->context3 = pbuflist;
+ elsiocb->cmd_dmabuf = pcmd;
+ elsiocb->bpl_dmabuf = pbuflist;
elsiocb->retry = retry;
elsiocb->vport = vport;
elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
- if (prsp) {
+ if (prsp)
list_add(&prsp->list, &pcmd->list);
- }
- if (expectRsp) {
+ if (expect_rsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
@@ -329,13 +303,14 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
"NPORT x%x I/O tag: x%x, size: x%x "
"port_state x%x rpi x%x fc_flag x%x\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
- cmdSize, vport->port_state,
+ cmd_size, vport->port_state,
ndlp->nlp_rpi, vport->fc_flag);
}
+
return elsiocb;
els_iocb_free_pbuf_exit:
- if (expectRsp)
+ if (expect_rsp)
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
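The rewritten BPL setup above also changes how endianness is handled: instead of composing bdeSize/bdeFlags in a union and byte-swapping the whole word in place, each field of struct ulp_bde64_le is stored with cpu_to_le32() at assignment time. Condensed from the hunk, using the function's own locals, one BDE for the command plus an optional one for the response:

/* Sketch: two-entry little-endian buffer pointer list, condensed
 * from lpfc_prep_els_iocb() above.
 */
struct ulp_bde64_le *bpl = (struct ulp_bde64_le *)pbuflist->virt;

bpl->addr_low  = cpu_to_le32(putPaddrLow(pcmd->phys));
bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
bpl->type_size = cpu_to_le32(cmd_size) |
		 cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

if (expect_rsp) {		/* second BDE for the response payload */
	bpl++;
	bpl->addr_low  = cpu_to_le32(putPaddrLow(prsp->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
	bpl->type_size = cpu_to_le32(FCELSSIZE) |
			 cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
}

The old le32_to_cpu() on a value being written out only worked because the swap is its own inverse; converting at store time makes the intended byte order explicit.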
@@ -370,7 +345,6 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct serv_parm *sp;
int rc;
@@ -378,7 +352,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
err = 1;
goto fail;
}
@@ -418,10 +392,14 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
* for the callback routine.
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp) {
+ err = 6;
+ goto fail_free_mbox;
+ }
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- err = 6;
+ err = 7;
goto fail_issue_reg_login;
}
@@ -432,16 +410,13 @@ fail_issue_reg_login:
* for the failed mbox command.
*/
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
fail_free_mbox:
- mempool_free(mbox, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0249 Cannot issue Register Fabric login: Err %d\n", err);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0249 Cannot issue Register Fabric login: Err %d\n",
+ err);
return -ENXIO;
}
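Two things change in the reglogin path above: a NULL return from lpfc_nlp_get() is now treated as a hard failure before the mailbox is issued, and every unwind funnels through lpfc_mbox_rsrc_cleanup() instead of hand-freeing the ctx_buf. A minimal sketch of that shape, with the mailbox assumed already prepared and the function name illustrative:

/* Sketch: hold the node across an async mailbox command; condensed
 * from the reglogin hunk above.
 */
static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
			      struct lpfc_nodelist *ndlp)
{
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp)		/* node already being torn down */
		goto fail;

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		lpfc_nlp_put(ndlp);	/* balance the get above */
		goto fail;
	}
	return 0;

fail:
	/* frees the mailbox and any attached ctx_buf in one place */
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
	return -ENXIO;
}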
@@ -470,7 +445,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
!(phba->link_flag & LS_LOOPBACK_MODE) &&
!(vport->fc_flag & FC_PT2PT)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
rc = -ENODEV;
goto fail;
}
@@ -484,48 +459,40 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!dmabuf) {
- rc = -ENOMEM;
- goto fail;
- }
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
- if (!dmabuf->virt) {
+ rc = lpfc_mbox_rsrc_prep(phba, mboxq);
+ if (rc) {
rc = -ENOMEM;
- goto fail;
+ goto fail_mbox;
}
+ dmabuf = mboxq->ctx_buf;
memcpy(dmabuf->virt, &phba->fc_fabparam,
sizeof(struct serv_parm));
}
vport->port_state = LPFC_FABRIC_CFG_LINK;
- if (dmabuf)
+ if (dmabuf) {
lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
- else
+ /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
+ mboxq->ctx_buf = dmabuf;
+ } else {
lpfc_reg_vfi(mboxq, vport, 0);
+ }
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
- mboxq->ctx_buf = dmabuf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
- goto fail;
+ goto fail_mbox;
}
return 0;
+fail_mbox:
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
- if (mboxq)
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- if (dmabuf->virt)
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
-
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0289 Issue Register VFI failed: Err %d\n", rc);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0289 Issue Register VFI failed: Err %d\n", rc);
return rc;
}
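One subtlety in the hunk above deserves a note: lpfc_reg_vfi() zeroes the whole mailbox, so the ctx_buf handed out by lpfc_mbox_rsrc_prep() must be re-attached afterwards or lpfc_mbox_rsrc_cleanup() would leak the DMA buffer. Reduced to its ordering, with an illustrative wrapper name:

/* Sketch: keep ctx_buf alive across lpfc_reg_vfi()'s memset of the
 * mailbox; condensed from lpfc_issue_reg_vfi() above.
 */
static int example_reg_vfi(struct lpfc_hba *phba, struct lpfc_vport *vport,
			   LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf;

	if (lpfc_mbox_rsrc_prep(phba, mboxq))	/* allocates mboxq->ctx_buf */
		return -ENOMEM;

	dmabuf = mboxq->ctx_buf;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(struct serv_parm));

	lpfc_reg_vfi(mboxq, vport, dmabuf->phys); /* memsets the mailbox */
	mboxq->ctx_buf = dmabuf;		  /* restore for cleanup */

	return 0;
}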
@@ -550,7 +517,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2556 UNREG_VFI mbox allocation failed"
"HBA state x%x\n", phba->pport->port_state);
return -ENOMEM;
@@ -562,7 +529,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2557 UNREG_VFI issue mbox failed rc x%x "
"HBA state x%x\n",
rc, phba->pport->port_state);
@@ -638,7 +605,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @sp: pointer to service parameter data structure.
- * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ * @ulp_word4: command response value
*
* This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
* function to handle the completion of a Fabric Login (FLOGI) into a fabric
@@ -655,7 +622,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
**/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp, IOCB_t *irsp)
+ struct serv_parm *sp, uint32_t ulp_word4)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
@@ -680,7 +647,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
}
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
@@ -730,7 +697,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(&phba->hbalock);
} else {
/* Because we asked f/w for NPIV it still expects us
- to call reg_vnpid atleast for the physcial host */
+ to call reg_vnpid at least for the physical host */
lpfc_printf_vlog(vport, KERN_WARNING,
LOG_ELS | LOG_VPORT,
"1817 Fabric does not support NPIV "
@@ -764,14 +731,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(np))
- continue;
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&np->lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&np->lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
@@ -893,10 +858,12 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (rc)
vport->fc_myDID = PT2PT_LocalID;
- /* Decrement ndlp reference count indicating that ndlp can be
- * safely released when other references to it are done.
+ /* If not registered with a transport, decrement ndlp reference
+ * count indicating that ndlp can be safely released when other
+ * references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
if (!ndlp) {
@@ -907,11 +874,6 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
if (!ndlp)
goto fail;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if(!ndlp)
- goto fail;
}
memcpy(&ndlp->nlp_portname, &sp->portName,
@@ -920,9 +882,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sizeof(struct lpfc_name));
/* Set state will put ndlp onto node list if not already done */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -938,11 +900,12 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto fail;
}
} else {
- /* This side will wait for the PLOGI, decrement ndlp reference
- * count indicating that ndlp can be released when other
- * references to it are done.
+ /* This side will wait for the PLOGI. If not registered with
+ * a transport, decrement node reference count indicating that
+ * ndlp can be released when other references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
@@ -982,28 +945,40 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
- struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
struct serv_parm *sp;
uint16_t fcf_index;
int rc;
+ u32 ulp_status, ulp_word4, tmo;
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
* trigger the release of the node
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ lpfc_nlp_put(ndlp);
goto out;
}
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FLOGI cmpl: status:x%x/x%x state:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->port_state);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/*
* In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
@@ -1014,8 +989,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto stop_rr_fcf_flogi;
if ((phba->fcoe_cvl_eventtag_attn ==
phba->fcoe_cvl_eventtag) &&
- (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ (ulp_status == IOSTAT_LOCAL_REJECT) &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SLI_ABORTED))
goto stop_rr_fcf_flogi;
else
@@ -1026,8 +1001,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"status:x%x/x%x, tmo:x%x, perform "
"roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
+ ulp_status, ulp_word4, tmo);
lpfc_sli4_set_fcf_flogi_fail(phba,
phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
@@ -1038,35 +1012,41 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x "
- "TMO:x%x Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2858 FLOGI failure Status:x%x/x%x TMO"
+ ":x%x Data x%x x%x\n",
+ ulp_status, ulp_word4, tmo,
+ phba->hba_flag, phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
- "xri x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout);
+ "xri x%x TMO:x%x refcnt %d\n",
+ ulp_status, ulp_word4, cmdiocb->sli4_xritag,
+ tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_LOOP_OPEN_FAILURE)))
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE))) {
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0100 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x\n",
+ ulp_status, ulp_word4, tmo);
goto flogifail;
+ }
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -1092,8 +1072,9 @@ stop_rr_fcf_flogi:
}
/* Do not register VFI if the driver aborted FLOGI */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
+
lpfc_nlp_put(ndlp);
goto out;
}
@@ -1115,12 +1096,17 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0101 FLOGI completes successfully, I/O tag:x%x, "
- "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
+ "0101 FLOGI completes successfully, I/O tag:x%x "
+ "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
- irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+ ulp_word4, sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
- vport->port_state, vport->fc_flag);
+ vport->port_state, vport->fc_flag,
+ sp->cmn.priority_tagging, kref_read(&ndlp->kref));
+
+ if (sp->cmn.priority_tagging)
+ vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+ LPFC_VMID_TYPE_PRIO);
if (vport->port_state == LPFC_FLOGI) {
/*
@@ -1128,12 +1114,12 @@ stop_rr_fcf_flogi:
* we are point to point, if Fport we are Fabric.
*/
if (sp->cmn.fPort)
- rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
+ ulp_word4);
else if (!(phba->hba_flag & HBA_FCOE_MODE))
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
else {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_FIP | LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2831 FLOGI response with cleared Fabric "
"bit fcf_index 0x%x "
"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
@@ -1156,6 +1142,7 @@ stop_rr_fcf_flogi:
phba->fcf.current_rec.fabric_name[5],
phba->fcf.current_rec.fabric_name[6],
phba->fcf.current_rec.fabric_name[7]);
+
lpfc_nlp_put(ndlp);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
@@ -1179,6 +1166,15 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
+ } else if (vport->port_state > LPFC_FLOGI &&
+ vport->fc_flag & FC_PT2PT) {
+ /*
+ * In a p2p topology, it is possible that discovery has
+ * already progressed, and this completion can be ignored.
+ * Recheck the indicated topology.
+ */
+ if (!sp->cmn.fPort)
+ goto out;
}
flogifail:
@@ -1186,25 +1182,25 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- lpfc_nlp_put(ndlp);
-
- if (!lpfc_error_lost_link(irsp)) {
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ (((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_ABORTED) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_DOWN))) &&
(phba->link_state != LPFC_CLEAR_LA)) {
/* If FLOGI failed enable link interrupt. */
lpfc_issue_clear_la(phba, vport);
}
out:
+ phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -1219,22 +1215,24 @@ static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
uint32_t *pcmd;
uint32_t cmd;
+ u32 ulp_status, ulp_word4;
- pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+ pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
cmd = *pcmd;
- irsp = &rspiocb->iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"6445 ELS completes after LINK_DOWN: "
" Status %x/%x cmd x%x flg x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
- cmdiocb->iocb_flag);
+ ulp_status, ulp_word4, cmd,
+ cmdiocb->cmd_flag);
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
lpfc_els_free_iocb(phba, cmdiocb);
@@ -1253,10 +1251,9 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* function field. The lpfc_issue_fabric_iocb routine is invoked to send
* out FLOGI ELS command with one outstanding fabric IOCB at a time.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the FLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the FLOGI ELS command.
*
* Return code
* 0 - successfully issued flogi iocb for @vport
@@ -1268,10 +1265,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
- IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
+ IOCB_t *icmd = NULL;
struct lpfc_iocbq *elsiocb;
struct lpfc_iocbq defer_flogi_acc;
- uint8_t *pcmd;
+ u8 *pcmd, ct;
uint16_t cmdsize;
uint32_t tmo, did;
int rc;
@@ -1283,8 +1281,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
+ wqe = &elsiocb->wqe;
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
icmd = &elsiocb->iocb;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
@@ -1302,15 +1301,30 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ /* Determine if switch supports priority tagging */
+ if (phba->cfg_vmid_priority_tagging) {
+ sp->cmn.priority_tagging = 1;
+ /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
+ if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
+ memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
+ sizeof(phba->wwpn));
+ memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
+ sizeof(phba->wwnn));
+ }
+ }
+
if (phba->sli_rev == LPFC_SLI_REV4) {
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
- elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
- elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
/* FLOGI needs to be 3 for WQE FCFI */
+ ct = SLI4_CT_FCFI;
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+
/* Set the fcfi to the fcfi we registered with */
- elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->fcf.fcfi);
}
+
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
@@ -1323,13 +1337,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
- } else
+ } else {
sp->cmn.request_multiple_Nport = 0;
- }
+ }
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
}
tmo = phba->fc_ratov;
@@ -1338,26 +1353,55 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->fc_ratov = tmo;
phba->fc_stat.elsXmitFLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FLOGI: opt:x%x",
phba->sli3_options, 0, 0);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
+ phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
- phba->hba_flag |= HBA_FLOGI_ISSUED;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
/* Check for a deferred FLOGI ACC condition */
if (phba->defer_flogi_acc_flag) {
+ /* lookup ndlp for received FLOGI */
+ ndlp = lpfc_findnode_did(vport, 0);
+ if (!ndlp)
+ return 0;
+
did = vport->fc_myDID;
vport->fc_myDID = Fabric_DID;
memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
- defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
- defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
- phba->defer_flogi_acc_ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_ctxt_tag,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_rx_id);
+ bf_set(wqe_rcvoxid,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_ox_id);
+ } else {
+ icmd = &defer_flogi_acc.iocb;
+ icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+ icmd->unsli3.rcvsli3.ox_id =
+ phba->defer_flogi_acc_ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
@@ -1370,14 +1414,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, NULL);
phba->defer_flogi_acc_flag = false;
-
vport->fc_myDID = did;
- }
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ /* Decrement ndlp reference count to indicate the node can be
+ * released when other references are removed.
+ */
+ lpfc_nlp_put(ndlp);
}
+
return 0;
}
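lpfc_issue_els_flogi() now shows the issue-side half of the reference protocol this series standardizes on: attach the node to the iocb via lpfc_nlp_get() before issuing, and on a ring error free the iocb and drop that same reference. The completion handler holds the matching put after lpfc_els_free_iocb(). Both halves as a sketch, stripped of the FLOGI specifics; the example_ names are illustrative:

/* Sketch: issue side takes the node reference, completion side
 * drops it.
 */
static int example_issue_els(struct lpfc_hba *phba,
			     struct lpfc_iocbq *elsiocb,
			     struct lpfc_nodelist *ndlp)
{
	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	if (lpfc_issue_fabric_iocb(phba, elsiocb) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);	/* undo the get above */
		return 1;
	}
	return 0;			/* put happens at completion */
}

static void example_cmpl_els(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmdiocb,
			     struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

	/* ... evaluate get_job_ulpstatus(phba, rspiocb) ... */
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);		/* reference taken at issue time */
}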
@@ -1401,7 +1445,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_nodelist *ndlp;
- IOCB_t *icmd;
+ u32 ulp_command;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
@@ -1418,14 +1462,22 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
- ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_DID == Fabric_DID))
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ ulp_command = get_job_cmnd(phba, iocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
+ ndlp = iocb->ndlp;
+ if (ndlp && ndlp->nlp_DID == Fabric_DID) {
+ if ((phba->pport->fc_flag & FC_PT2PT) &&
+ !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
+ iocb->fabric_cmd_cmpl =
+ lpfc_ignore_els_cmpl;
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
+ }
}
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
spin_unlock_irq(&phba->hbalock);
return 0;
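The abort walk above is the same conversion in miniature: the command code comes from get_job_cmnd() so one loop serves SLI-3 and SLI-4, and the new lpfc_issue_hb_tmo() keeps the heartbeat running while the aborts complete. Trimmed to its core, with the pt2pt completion rerouting elided:

/* Sketch: abort outstanding fabric ELS commands on the txcmplq,
 * condensed from lpfc_els_abort_flogi() above.
 */
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
	if (get_job_cmnd(phba, iocb) != CMD_ELS_REQUEST64_CR)
		continue;
	ndlp = iocb->ndlp;
	if (ndlp && ndlp->nlp_DID == Fabric_DID)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
/* Make sure the HBA is alive while the aborts complete */
lpfc_issue_hb_tmo(phba);
spin_unlock_irq(&phba->hbalock);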
@@ -1464,20 +1516,21 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 0;
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
+
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
+ /* Reset the Fabric flag, topology change may have happened */
+ vport->fc_flag &= ~FC_FABRIC;
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
- /* This decrement of reference count to node shall kick off
- * the release of the node.
+ /* A node reference should be retained while registered with a
+ * transport or dev-loss-evt work is pending.
+ * Otherwise, decrement node reference to trigger release.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ lpfc_nlp_put(ndlp);
return 0;
}
return 1;
@@ -1511,20 +1564,22 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
return 0;
+
+ /* NPIV is only supported in Fabrics. */
+ ndlp->nlp_type |= NLP_FABRIC;
+
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
- /* decrement node reference count to trigger the release of
- * the node.
+ /* A node reference should be retained while registered with a
+ * transport or dev-loss-evt work is pending.
+ * Otherwise, decrement node reference to trigger release.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ lpfc_nlp_put(ndlp);
return 0;
}
return 1;
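lpfc_initial_flogi() and lpfc_initial_fdisc() now share the same guarded drop on a send failure: the node reference is only released when neither a transport registration nor pending dev-loss work still owns it. The predicate, pulled out for clarity; the helper name is hypothetical, the driver open-codes the test:

/* Sketch: decide whether the caller may drop its node reference. */
static bool example_can_drop_node(struct lpfc_nodelist *ndlp)
{
	if (ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))
		return false;	/* SCSI/NVMe transport holds the node */
	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS)
		return false;	/* dev-loss work will finish the teardown */
	return true;
}

On the failure path, both routines then call lpfc_nlp_put(ndlp) only when this test passes.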
@@ -1562,7 +1617,7 @@ lpfc_more_plogi(struct lpfc_vport *vport)
}
/**
- * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
+ * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
* @phba: pointer to lpfc hba data structure.
* @prsp: pointer to response IOCB payload.
* @ndlp: pointer to a node-list data structure.
@@ -1597,19 +1652,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *new_ndlp;
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
- uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+ uint32_t keepDID = 0, keep_nlp_flag = 0;
uint32_t keep_new_nlp_flag = 0;
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
- int put_node;
- int put_rport;
unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
@@ -1627,9 +1677,15 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
- if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
+ if (!new_ndlp || (new_ndlp == ndlp))
return ndlp;
+ /*
+ * Unregister from backend if not done yet. Could have been skipped
+ * due to ADISC
+ */
+ lpfc_nlp_unreg_node(vport, new_ndlp);
+
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@@ -1646,52 +1702,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(new_ndlp ? new_ndlp->nlp_flag : 0),
(new_ndlp ? new_ndlp->nlp_fc4_type : 0));
- if (!new_ndlp) {
- rc = memcmp(&ndlp->nlp_portname, name,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
- if (!new_ndlp) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
- rc = memcmp(&ndlp->nlp_portname, name,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- new_ndlp = lpfc_enable_node(vport, new_ndlp,
- NLP_STE_UNUSED_NODE);
- if (!new_ndlp) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- keepDID = new_ndlp->nlp_DID;
- if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap,
- new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
- } else {
- keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap,
- new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
- }
+ keepDID = new_ndlp->nlp_DID;
+
+ if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
/* At this point in this routine, we know new_ndlp will be
* returned. however, any previous GID_FTs that were done
@@ -1711,7 +1726,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
ndlp->active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- spin_lock_irq(shost->host_lock);
+ /* Lock both ndlps */
+ spin_lock_irq(&ndlp->lock);
+ spin_lock_irq(&new_ndlp->lock);
keep_new_nlp_flag = new_ndlp->nlp_flag;
keep_nlp_flag = ndlp->nlp_flag;
new_ndlp->nlp_flag = ndlp->nlp_flag;
@@ -1728,6 +1745,15 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
else
new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+ /*
+ * Retain the DROPPED flag. This will take care of the init
+ * refcount when effecting the state change
+ */
+ if (keep_new_nlp_flag & NLP_DROPPED)
+ new_ndlp->nlp_flag |= NLP_DROPPED;
+ else
+ new_ndlp->nlp_flag &= ~NLP_DROPPED;
+
ndlp->nlp_flag = keep_new_nlp_flag;
/* if ndlp had NLP_UNREG_INP set, keep it */
@@ -1742,7 +1768,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
else
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ /*
+ * Retain the DROPPED flag. This will take care of the init
+ * refcount when effecting the state change
+ */
+ if (keep_nlp_flag & NLP_DROPPED)
+ ndlp->nlp_flag |= NLP_DROPPED;
+ else
+ ndlp->nlp_flag &= ~NLP_DROPPED;
+
+ spin_unlock_irq(&new_ndlp->lock);
+ spin_unlock_irq(&ndlp->lock);
/* Set nlp_states accordingly */
keep_nlp_state = new_ndlp->nlp_state;
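The flag exchange above moved from the Scsi_Host lock to the two per-node locks, and NLP_UNREG_INP, NLP_RPI_REGISTERED and the new NLP_DROPPED are each pinned back to the node they belonged to, since they track per-node in-flight state (NLP_DROPPED in particular covers the initial kref). The NLP_DROPPED portion, reduced from the hunks above:

/* Sketch: swap nlp_flag between the nodes but keep NLP_DROPPED
 * (and likewise NLP_UNREG_INP / NLP_RPI_REGISTERED) where it was.
 */
spin_lock_irq(&ndlp->lock);
spin_lock_irq(&new_ndlp->lock);

keep_new_nlp_flag = new_ndlp->nlp_flag;
keep_nlp_flag = ndlp->nlp_flag;
new_ndlp->nlp_flag = keep_nlp_flag;
ndlp->nlp_flag = keep_new_nlp_flag;

if (keep_new_nlp_flag & NLP_DROPPED)	/* pin to new_ndlp */
	new_ndlp->nlp_flag |= NLP_DROPPED;
else
	new_ndlp->nlp_flag &= ~NLP_DROPPED;
if (keep_nlp_flag & NLP_DROPPED)	/* pin to ndlp */
	ndlp->nlp_flag |= NLP_DROPPED;
else
	ndlp->nlp_flag &= ~NLP_DROPPED;

spin_unlock_irq(&new_ndlp->lock);
spin_unlock_irq(&ndlp->lock);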
@@ -1754,48 +1790,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
- /* The new_ndlp is replacing ndlp totally, so we need
- * to put ndlp on UNUSED list and try to free it.
+ /* The ndlp doesn't have a portname yet, but does have an
+ * NPort ID. The new_ndlp portname matches the Rport's
+ * portname. Reinstantiate the new_ndlp and reset the ndlp.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
- /* Fix up the rport accordingly */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- if (rdata->pnode == ndlp) {
- /* break the link before dropping the ref */
- ndlp->rport = NULL;
- lpfc_nlp_put(ndlp);
- rdata->pnode = lpfc_nlp_get(new_ndlp);
- new_ndlp->rport = rport;
- }
- new_ndlp->nlp_type = ndlp->nlp_type;
- }
-
- /* Fix up the nvme rport */
- if (ndlp->nrport) {
- ndlp->nrport = NULL;
- lpfc_nlp_put(ndlp);
- }
-
- /* We shall actually free the ndlp with both nlp_DID and
- * nlp_portname fields equals 0 to avoid any ndlp on the
- * nodelist never to be used.
- */
- if (ndlp->nlp_DID == 0) {
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- }
-
/* Two ndlps cannot have the same did on the nodelist.
- * Note: for this case, ndlp has a NULL WWPN so setting
- * the nlp_fc4_type isn't required.
+ * The keepDID and keep_nlp_fc4_type need to be swapped
+ * because ndlp is in flight with no WWPN.
*/
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
@@ -1803,19 +1811,15 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- if (!NLP_CHK_NODE_ACT(ndlp))
- lpfc_drop_node(vport, ndlp);
- }
- else {
+ } else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3180 PLOGI confirm SWAP: %x %x\n",
new_ndlp->nlp_DID, keepDID);
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did and the fc4
- * type must be transferred because the ndlp is in
- * flight.
+ /* The ndlp and new_ndlp both have WWPNs but are swapping
+ * NPort Ids and attributes.
*/
ndlp->nlp_DID = keepDID;
ndlp->nlp_fc4_type = keep_nlp_fc4_type;
@@ -1833,29 +1837,16 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(ndlp->nlp_state == NLP_STE_MAPPED_NODE))
keep_nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
-
- /* Previous ndlp no longer active with nvme host transport.
- * Remove reference from earlier registration unless the
- * nvme host took care of it.
- */
- if (ndlp->nrport)
- lpfc_nlp_put(ndlp);
ndlp->nrport = keep_nrport;
-
- /* Fix up the rport accordingly */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
- }
}
+
+ /*
+ * If the ndlp is not associated with any rport, we can drop it here;
+ * otherwise let dev_loss_tmo_callbk trigger the DEVICE_RM event
+ */
+ if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
@@ -1915,57 +1906,48 @@ lpfc_end_rscn(struct lpfc_vport *vport)
static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_node_rrq *rrq;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* we pass cmdiocb to state machine which needs rspiocb as well */
rrq = cmdiocb->context_un.rrq;
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"RRQ cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4,
+ get_job_els_rsp64_did(phba, cmdiocb));
- ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2882 RRQ completes to NPort x%x "
- "with no ndlp. Data: x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpIoTag);
- goto out;
- }
/* rrq completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "2880 RRQ completes to NPort x%x "
+ "2880 RRQ completes to DID x%x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
/* RRQ failed. Don't print the vport-to-vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
- }
-out:
- if (rrq)
- lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2881 RRQ failure DID:%06X Status:"
+ "x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+ }
+
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
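lpfc_cmpl_els_rrq() above is typical of every completion handler in this patch: the raw IOCB fields give way to accessors that hide whether the response arrived as an SLI-3 IOCB or an SLI-4 WQE completion. The recurring prologue as a sketch, assuming only the accessors used elsewhere in this series:

/* Sketch: the SLI-revision-neutral prologue the converted
 * completion handlers share.
 */
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
u32 ulp_word4  = get_job_word4(phba, rspiocb);
u32 did        = get_job_els_rsp64_did(phba, cmdiocb);
u32 tmo, iotag;

if (phba->sli_rev == LPFC_SLI_REV4) {
	tmo   = get_wqe_tmo(cmdiocb);	/* fields live in the WQE */
	iotag = get_wqe_reqtag(cmdiocb);
} else {
	IOCB_t *irsp = &rspiocb->iocb;	/* legacy SLI-3 layout */

	tmo   = irsp->ulpTimeout;
	iotag = irsp->ulpIoTag;
}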
/**
@@ -1979,7 +1961,7 @@ out:
* ndlp on the vport node list that matches the remote node ID from the
* PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
* ignored and command IOCB released. The PLOGI response IOCB status is
- * checked for error conditons. If there is error status reported, PLOGI
+ * checked for error conditions. If there is error status reported, PLOGI
* retry shall be attempted by invoking the lpfc_els_retry() routine.
* Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
* the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
@@ -1995,86 +1977,139 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
int disc;
+ struct serv_parm *sp = NULL;
+ u32 ulp_status, ulp_word4, did, iotag;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ }
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PLOGI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
- ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpIoTag);
- goto out;
+ did, ulp_status, ulp_word4, iotag);
+ goto out_freeiocb;
}
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0102 PLOGI completes to NPort x%06x "
"Data: x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_fc4_type,
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
goto out;
}
/* PLOGI failed. Don't print the vport-to-vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2753 PLOGI failure DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
+
+ /* If a PLOGI collision occurred, the node needs to continue
+ * with the reglogin process.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
+ ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ spin_unlock_irq(&ndlp->lock);
+ goto out;
+ }
+
+ /* No PLOGI collision and the node is not registered with the
+ * scsi or nvme transport. It is no longer an active node. Just
+ * start the device remove process.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
} else {
/* Good status, call state machine */
- prsp = list_entry(((struct lpfc_dmabuf *)
- cmdiocb->context2)->list.next,
+ prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+
+ sp = (struct serv_parm *)((u8 *)prsp->virt +
+ sizeof(u32));
+
+ ndlp->vmid_support = 0;
+ if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
+ (phba->cfg_vmid_priority_tagging &&
+ sp->cmn.priority_tagging)) {
+ lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
+ "4018 app_hdr_support %d tagging %d DID x%x\n",
+ sp->cmn.app_hdr_support,
+ sp->cmn.priority_tagging,
+ ndlp->nlp_DID);
+ /* if the dest port supports VMID, mark it in ndlp */
+ ndlp->vmid_support = 1;
+ }
+
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ NLP_EVT_CMPL_PLOGI);
}
if (disc && vport->num_disc_nodes) {
@@ -2092,7 +2127,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "PLOGI Cmpl PUT: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+out_freeiocb:
+ /* Release the reference on the original I/O request. */
+ free_ndlp = cmdiocb->ndlp;
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
@@ -2105,13 +2149,12 @@ out:
* This routine issues a Port Login (PLOGI) command to a remote N_Port
* (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
* the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
- * This routine constructs the proper feilds of the PLOGI IOCB and invokes
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
* the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding
+ * the ndlp and the reference to ndlp will be stored into the ndlp field
+ * of the IOCB for the completion callback function to the PLOGI ELS command.
*
* Return code
* 0 - Successfully issued a plogi for @vport
@@ -2121,7 +2164,6 @@ int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost;
struct serv_parm *sp;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
@@ -2130,43 +2172,35 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
int ret;
ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp)
+ return 1;
- if (ndlp) {
- /* Defer the processing of the issue PLOGI until after the
- * outstanding UNREG_RPI mbox command completes, unless we
- * are going offline. This logic does not apply for Fabric DIDs
- */
- if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
- ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- !(vport->fc_flag & FC_OFFLINE_MODE)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4110 Issue PLOGI x%x deferred "
- "on NPort x%x rpi x%x Data: x%px\n",
- ndlp->nlp_defer_did, ndlp->nlp_DID,
- ndlp->nlp_rpi, ndlp);
-
- /* We can only defer 1st PLOGI */
- if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
- ndlp->nlp_defer_did = did;
- return 0;
- }
- if (!NLP_CHK_NODE_ACT(ndlp))
- ndlp = NULL;
+ /* Defer the processing of the issue PLOGI until after the
+ * outstanding UNREG_RPI mbox command completes, unless we
+ * are going offline. This logic does not apply for Fabric DIDs
+ */
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ !(vport->fc_flag & FC_OFFLINE_MODE)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4110 Issue PLOGI x%x deferred "
+ "on NPort x%x rpi x%x Data: x%px\n",
+ ndlp->nlp_defer_did, ndlp->nlp_DID,
+ ndlp->nlp_rpi, ndlp);
+
+ /* We can only defer 1st PLOGI */
+ if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+ ndlp->nlp_defer_did = did;
+ return 0;
}
- /* If ndlp is not NULL, we will bump the reference count on it */
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_PLOGI);
if (!elsiocb)
return 1;
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(shost->host_lock);
-
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
@@ -2191,6 +2225,14 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
sp->cmn.bbRcvSizeMsb &= 0xF;
+ /* Check if the destination port supports VMID */
+ ndlp->vmid_support = 0;
+ if (vport->vmid_priority_tagging)
+ sp->cmn.priority_tagging = 1;
+ else if (phba->cfg_vmid_app_header &&
+ bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
+ sp->cmn.app_hdr_support = 1;
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
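The PLOGI payload now negotiates VMID: priority tagging is requested when the vport enables it, otherwise the application-header bit is set when the adapter reports the lpfc_ftr_ashdr feature, and the completion path (earlier hunk) marks ndlp->vmid_support only if the peer echoed support back. Both halves, condensed from the hunks in this file:

/* Sketch: VMID negotiation over PLOGI service parameters. */

/* request side, from lpfc_issue_els_plogi() above */
ndlp->vmid_support = 0;
if (vport->vmid_priority_tagging)
	sp->cmn.priority_tagging = 1;
else if (phba->cfg_vmid_app_header &&
	 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
	sp->cmn.app_hdr_support = 1;

/* response side, from lpfc_cmpl_els_plogi() */
if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
    (phba->cfg_vmid_priority_tagging && sp->cmn.priority_tagging))
	ndlp->vmid_support = 1;	/* peer granted VMID */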
@@ -2205,13 +2247,24 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
}
phba->fc_stat.elsXmitPLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
- ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
- if (ret == IOCB_ERROR) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PLOGI: did:x%x refcnt %d",
+ did, kref_read(&ndlp->kref), 0);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
return 0;
}
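Earlier in this function, the deferral guard survives the restructuring unchanged in substance: a PLOGI aimed at a node with an UNREG_RPI mailbox still outstanding is parked in nlp_defer_did and replayed later, except for fabric DIDs and when the port is going offline. Isolated as a sketch:

/* Sketch: defer a PLOGI while UNREG_RPI is in flight; only the
 * first deferred DID is remembered.
 */
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
    !(vport->fc_flag & FC_OFFLINE_MODE)) {
	if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
		ndlp->nlp_defer_did = did;	/* replayed after UNREG_RPI */
	return 0;
}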
@@ -2233,41 +2286,46 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
+ u32 loglevel;
+ u32 ulp_status;
+ u32 ulp_word4;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(shost->host_lock);
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
/* Driver supports multiple FC4 types. Counters matter. */
vport->fc_prli_sent--;
ndlp->fc4_prli_sent--;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PRLI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
"Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID, ulp_status, ulp_word4,
vport->num_disc_nodes, ndlp->fc4_prli_sent);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2278,24 +2336,50 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* could be expected.
*/
if ((vport->fc_flag & FC_FABRIC) ||
- (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
mode = KERN_ERR;
- else
+ loglevel = LOG_TRACE_EVENT;
+ } else {
mode = KERN_INFO;
+ loglevel = LOG_ELS;
+ }
/* PRLI failed */
- lpfc_printf_vlog(vport, mode, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- goto out;
- else
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+
+ /*
+ * For P2P topology, retain the node so that PLOGI can be
+ * attempted on it again.
+ */
+ if (vport->fc_flag & FC_PT2PT)
+ goto out;
+
+ /* As long as this node is not registered with the SCSI
+ * or NVMe transport and no other PRLIs are outstanding,
+ * it is no longer an active node. Otherwise devloss
+ * handles the final cleanup.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !ndlp->fc4_prli_sent) {
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
} else {
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
@@ -2308,6 +2392,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
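Note how the release decision above is computed entirely under ndlp->lock but acted on only after the lock is dropped, so the state machine never runs with the spinlock held. A standalone sketch of that shape, with a pthread mutex standing in for the spinlock and hypothetical flag names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SCSI_XPT_REGD	0x1
#define NVME_XPT_REGD	0x2
#define IN_DEV_LOSS	0x4

struct node {
	pthread_mutex_t lock;
	unsigned int xpt_flags;		/* transport registrations */
	unsigned int nlp_flag;
	int prli_sent;			/* outstanding PRLIs */
};

static bool node_should_release(struct node *n)
{
	bool release = false;

	pthread_mutex_lock(&n->lock);
	if (!(n->xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
	    !n->prli_sent && !(n->nlp_flag & IN_DEV_LOSS))
		release = true;
	pthread_mutex_unlock(&n->lock);

	return release;		/* caller runs the removal outside the lock */
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

	printf("release=%d\n", node_should_release(&n));
	return 0;
}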
@@ -2323,10 +2408,9 @@ out:
* is put to the IOCB completion callback func field before invoking the
* routine lpfc_sli_issue_iocb() to send out PRLI command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PRLI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the PRLI ELS command.
*
* Return code
* 0 - successfully issued prli iocb command for @vport
@@ -2336,7 +2420,7 @@ int
lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
struct lpfc_nvme_prli *npr_nvme;
@@ -2399,7 +2483,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PRLI request, remainder of payload is service parameters */
memset(pcmd, 0, cmdsize);
@@ -2431,7 +2515,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
/* Remove FCP type - processed. */
local_nlp_type &= ~NLP_FC4_FCP;
@@ -2465,37 +2549,40 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
- elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
/* Remove NVME type - processed. */
local_nlp_type &= ~NLP_FC4_NVME;
}
+ phba->fc_stat.elsXmitPRLI++;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue PRLI: did:x%x",
- ndlp->nlp_DID, 0, 0);
+ "Issue PRLI: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
- phba->fc_stat.elsXmitPRLI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_PRLI_SND;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
/* The vport counters are used for lpfc_scan_finished, but
* the ndlp is used to track outstanding PRLIs for different
* FC4 types.
*/
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_PRLI_SND;
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
-
+ spin_unlock_irq(&ndlp->lock);
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2503,8 +2590,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4 &&
local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
goto send_next_prli;
-
- return 0;
+ else
+ return 0;
}
/**
@@ -2556,6 +2643,14 @@ lpfc_adisc_done(struct lpfc_vport *vport)
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
+
+ /*
+ * If link is down, clear_la and reg_vpi will be done after
+ * flogi following a link up event
+ */
+ if (!lpfc_is_link_up(phba))
+ return;
+
/* The ADISCs are complete. Doesn't matter if they
* succeeded or failed because the ADISC completion
* routine guarantees to call the state machine and
@@ -2645,64 +2740,88 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
+ u32 ulp_status, ulp_word4, tmo;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ADISC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_set_disctmo(vport);
}
goto out;
}
/* ADISC failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+
+ /* As long as this node is not registered with the SCSI or NVMe
+ * transport, it is no longer an active node. Otherwise
+ * devloss handles the final cleanup.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+ NLP_EVT_DEVICE_RM);
} else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2713,6 +2832,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_more_adisc(vport);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -2727,10 +2847,9 @@ out:
* and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
* to issue the ADISC ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the ADISC ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the ADISC ELS command.
*
* Return code
* 0 - successfully issued adisc
@@ -2740,7 +2859,7 @@ int
lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
struct lpfc_iocbq *elsiocb;
@@ -2753,7 +2872,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For ADISC request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
@@ -2766,24 +2885,35 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ap->DID = be32_to_cpu(vport->fc_myDID);
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue ADISC: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitADISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
- spin_lock_irq(shost->host_lock);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_ADISC_SND;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ goto err;
}
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue ADISC: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto err;
+ }
+
return 0;
+
+err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ spin_unlock_irq(&ndlp->lock);
+ return 1;
}
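The function now funnels every failure through a single err label so that NLP_ADISC_SND is always cleared under the same lock it was set under. A compact sketch of that unwind, again with pthread primitives and invented names:

#include <pthread.h>
#include <stdio.h>

#define ADISC_SND 0x1

struct node {
	pthread_mutex_t lock;
	unsigned int flags;
};

/* models the submit call; nonzero forces the error path */
static int submit(struct node *n) { (void)n; return 1; }

static int issue_adisc(struct node *n)
{
	pthread_mutex_lock(&n->lock);
	n->flags |= ADISC_SND;		/* command now outstanding */
	pthread_mutex_unlock(&n->lock);

	if (submit(n))
		goto err;
	return 0;

err:
	pthread_mutex_lock(&n->lock);
	n->flags &= ~ADISC_SND;		/* undo on any failure path */
	pthread_mutex_unlock(&n->lock);
	return 1;
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("rc=%d flags=%#x\n", issue_adisc(&n), n.flags);
	return 0;
}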
/**
@@ -2795,70 +2925,75 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This routine is the completion function for issuing the ELS Logout (LOGO)
* command. If no error status was reported from the LOGO response, the
* state machine of the associated ndlp shall be invoked for transition with
- * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
- * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ * respect to NLP_EVT_CMPL_LOGO event.
**/
static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfcMboxq *mbox;
unsigned long flags;
uint32_t skip_recovery = 0;
+ int wake_up_waiter = 0;
+ u32 ulp_status;
+ u32 ulp_word4;
+ u32 tmo;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
- spin_lock_irq(shost->host_lock);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
+
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ wake_up_waiter = 1;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ }
+ spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0105 LOGO completes to NPort x%x "
- "Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
skip_recovery = 1;
goto out;
}
- /* Check to see if link went down during discovery */
- if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
- /* NLP_EVT_DEVICE_RM should unregister the RPI
- * which should abort all outstanding IOs.
- */
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
- skip_recovery = 1;
- goto out;
- }
-
/* The LOGO will not be retried on failure. A LOGO was
 * issued to the remote rport and an ACC or RJT or no answer are
* all acceptable. Note the failure and move forward with
* discovery. The PLOGI will retry.
*/
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* LOGO failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2756 LOGO failure, No Retry DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
skip_recovery = 1;
goto out;
}
@@ -2867,34 +3002,29 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
-out:
- lpfc_els_free_iocb(phba, cmdiocb);
- /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
- if ((vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_PT2PT_PLOGI)) {
- phba->pport->fc_myDID = 0;
-
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
- if (phba->nvmet_support)
- lpfc_nvmet_update_targetport(phba);
- else
- lpfc_nvme_update_localport(phba->pport);
- }
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox) {
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
- MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- skip_recovery = 1;
- }
- }
+ /* The driver sets this flag for an NPIV instance that doesn't want to
+ * log into the remote port.
+ */
+ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ spin_lock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ goto out_rsrc_free;
}
+out:
+ /* At this point, the LOGO processing is complete. NOTE: For a
+ * pt2pt topology, we are assuming the NPortID will only change
+ * on link up processing. For a LOGO / PLOGI initiated by the
+ * Initiator, we are assuming the NPortID is not going to change.
+ */
+
+ if (wake_up_waiter && ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
/*
* If the node is a target, the handling attempts to recover the port.
* For any other port type, the rpi is unregistered as an implicit
@@ -2903,19 +3033,40 @@ out:
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
skip_recovery == 0) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irqsave(shost->host_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
"Recovery Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, tmo,
vport->num_disc_nodes);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
lpfc_disc_start(vport);
+ return;
}
- return;
+
+	/* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
+	 * driver sends a LOGO to the rport to clean up. For fabric and
+	 * initiator ports, clean up the node as long as the node is not
+	 * registered with the transport.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
+out_rsrc_free:
+ /* Driver is done with the I/O. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
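The NLP_WAIT_FOR_LOGO handling above is a flag-plus-waitqueue handshake: the completion clears the flag under the node lock and wakes whoever blocked on logo_waitq. A userspace sketch with a condition variable in place of the waitqueue (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct logo_sync {
	pthread_mutex_t lock;
	pthread_cond_t cv;		/* stands in for ndlp->logo_waitq */
	bool wait_for_logo;		/* stands in for NLP_WAIT_FOR_LOGO */
};

static void *logo_complete(void *arg)
{
	struct logo_sync *s = arg;

	pthread_mutex_lock(&s->lock);
	if (s->wait_for_logo) {
		s->wait_for_logo = false;	/* clear under the lock */
		pthread_cond_signal(&s->cv);	/* the wake_up() call */
	}
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

int main(void)
{
	struct logo_sync s = { PTHREAD_MUTEX_INITIALIZER,
			       PTHREAD_COND_INITIALIZER, true };
	pthread_t t;

	pthread_create(&t, NULL, logo_complete, &s);

	pthread_mutex_lock(&s.lock);
	while (s.wait_for_logo)			/* wait for the completion */
		pthread_cond_wait(&s.cv, &s.lock);
	pthread_mutex_unlock(&s.lock);

	pthread_join(t, NULL);
	printf("LOGO completed\n");
	return 0;
}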
/**
@@ -2929,10 +3080,9 @@ out:
* payload of the IOCB, properly sets up the @ndlp state, and invokes the
* lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the LOGO ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
*
* Callers of this routine are expected to unregister the RPI first
*
@@ -2944,19 +3094,18 @@ int
lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_LOGO_SND) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 0;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
@@ -2964,7 +3113,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -2973,30 +3122,40 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue LOGO: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitLOGO++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
- spin_lock_irq(shost->host_lock);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto err;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ lpfc_nlp_put(ndlp);
+ goto err;
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
return 0;
+
+err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(&ndlp->lock);
+ return 1;
}
/**
@@ -3008,10 +3167,9 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This routine is a generic completion callback function for ELS commands.
* Specifically, it is the callback function which does not need to perform
* any command specific operations. It is currently used by the ELS command
- * issuing routines for the ELS State Change Request (SCR),
- * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
- * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
- * certain debug loggings, this callback function simply invokes the
+ * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
+ * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
+ * Other than certain debug loggings, this callback function simply invokes the
* lpfc_els_chk_latt() routine to check whether link went down during the
* discovery process.
**/
@@ -3020,50 +3178,270 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_nodelist *free_ndlp;
IOCB_t *irsp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
- irsp = &rspiocb->iocb;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ ulp_status, ulp_word4, did);
+
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
+
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
+
+ free_ndlp = cmdiocb->ndlp;
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
+}
+
+/**
+ * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
+ * @vport: pointer to lpfc_vport data structure.
+ * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
+ *
+ * This routine registers the rpi assigned to the fabric controller
+ * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
+ * state triggering a registration with the SCSI transport.
+ *
+ * This routine is single out because the fabric controller node
+ * does not receive a PLOGI. This routine is consumed by the
+ * SCR and RDF ELS commands. Callers are expected to qualify
+ * with SLI4 first.
+ **/
+static int
+lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+{
+ int rc = 0;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ns_ndlp;
+ LPFC_MBOXQ_t *mbox;
+
+ if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ return rc;
+
+ ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ns_ndlp)
+ return -ENODEV;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
+ __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
+ ns_ndlp->nlp_state);
+ if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENODEV;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0936 %s: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n", __func__,
+ fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ return -ENOMEM;
+ }
+ rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
+ (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+ if (rc) {
+ rc = -EACCES;
+ goto out;
+ }
+
+ fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
+ mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
+ if (!mbox->ctx_ndlp) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = -ENODEV;
+ lpfc_nlp_put(fc_ndlp);
+ goto out;
+ }
+ /* Success path. Exit. */
+ lpfc_nlp_set_state(vport, fc_ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ return 0;
+
+ out:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0938 %s: failed to format reg_login "
+ "Data: x%x x%x x%x x%x\n", __func__,
+ fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ return rc;
+}
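The routine maps each failure to a distinct errno value and funnels all of them through one cleanup label. A minimal model of that unwind shape; the helpers below are stand-ins, not driver calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mbox { int unused; };

static int format_reg(struct mbox *m) { (void)m; return 0; }
static int post_mbox(struct mbox *m)  { (void)m; return 0; }

static int reg_node(void)
{
	struct mbox *m = malloc(sizeof(*m));
	int rc;

	if (!m)
		return -ENOMEM;

	rc = format_reg(m);
	if (rc) {
		rc = -EACCES;	/* could not format the request */
		goto out;
	}

	rc = post_mbox(m);
	if (rc) {
		rc = -ENODEV;	/* submission refused */
		goto out;
	}

	/* success: in the driver the completion owns the mailbox;
	 * the demo just frees it here
	 */
	free(m);
	return 0;

out:
	free(m);		/* one cleanup path for every failure */
	return rc;
}

int main(void)
{
	printf("rc=%d\n", reg_node());
	return 0;
}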
+
+/**
+ * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for Discovery ELS cmd.
+ * Currently used by the ELS command issuing routines for the ELS State Change
+ * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * These commands will be retried once only for ELS timeout errors.
+ **/
+static void
+lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+ struct lpfc_els_rdf_rsp *prdf;
+ struct lpfc_dmabuf *pcmd, *prsp;
+ u32 *pdata;
+ u32 cmd;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ ulp_status, ulp_word4, did);
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
+ iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
+
+ pcmd = cmdiocb->cmd_dmabuf;
+ if (!pcmd)
+ goto out;
+
+ pdata = (u32 *)pcmd->virt;
+ if (!pdata)
+ goto out;
+ cmd = *pdata;
+
+ /* Only 1 retry for ELS Timeout only */
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT)) {
+ cmdiocb->retry++;
+ if (cmdiocb->retry <= 1) {
+ switch (cmd) {
+ case ELS_CMD_SCR:
+ lpfc_issue_els_scr(vport, cmdiocb->retry);
+ break;
+ case ELS_CMD_EDC:
+ lpfc_issue_els_edc(vport, cmdiocb->retry);
+ break;
+ case ELS_CMD_RDF:
+ lpfc_issue_els_rdf(vport, cmdiocb->retry);
+ break;
+ }
+ goto out;
+ }
+ phba->fc_stat.elsRetryExceeded++;
+ }
+ if (cmd == ELS_CMD_EDC) {
+	if (cmd == ELS_CMD_EDC) {
+		/* must be called before checking ulp_status and returning */
+ lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
+ return;
+ }
+ if (ulp_status) {
+ /* ELS discovery cmd completes with error */
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
+ "4203 ELS cmd x%x error: x%x x%X\n", cmd,
+ ulp_status, ulp_word4);
+ goto out;
+ }
+
+ /* The RDF response doesn't have any impact on the running driver
+ * but the notification descriptors are dumped here for support.
+ */
+ if (cmd == ELS_CMD_RDF) {
+ int i;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
+ if (!prdf)
+ goto out;
+
+ for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
+ i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT,
+ "4677 Fabric RDF Notification Grant "
+ "Data: 0x%08x Reg: %x %x\n",
+ be32_to_cpu(
+ prdf->reg_d1.desc_tags[i]),
+ phba->cgn_reg_signal,
+ phba->cgn_reg_fpin);
+ }
+
+out:
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(vport);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
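The retry logic above permits exactly one reissue, and only for a local-reject/sequence-timeout completion; anything else falls through to the error print. A small standalone model of that dispatch (status and command codes are invented):

#include <stdio.h>

enum { CMD_SCR = 1, CMD_RDF, CMD_EDC };
enum { ST_OK = 0, ST_SEQ_TIMEOUT, ST_OTHER };

struct cmd {
	int code;
	int retry;
};

static void reissue(struct cmd *c)
{
	printf("reissue cmd %d (retry %d)\n", c->code, c->retry);
}

static void complete(struct cmd *c, int status)
{
	/* one retry, and only for a sequence timeout */
	if (status == ST_SEQ_TIMEOUT && ++c->retry <= 1) {
		switch (c->code) {
		case CMD_SCR:
		case CMD_EDC:
		case CMD_RDF:
			reissue(c);
			return;
		}
	}
	if (status != ST_OK)
		printf("cmd %d done, status %d\n", c->code, status);
}

int main(void)
{
	struct cmd c = { CMD_RDF, 0 };

	complete(&c, ST_SEQ_TIMEOUT);	/* retried once */
	complete(&c, ST_SEQ_TIMEOUT);	/* retries exhausted */
	return 0;
}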
/**
* lpfc_issue_els_scr - Issue a scr to an node on a vport
* @vport: pointer to a host virtual N_Port data structure.
- * @nportid: N_Port identifier to the remote node.
- * @retry: number of retries to the command IOCB.
+ * @retry: retry counter for the command IOCB.
*
* This routine issues a State Change Request (SCR) to a fabric node
- * on a @vport. The remote node @nportid is passed into the function. It
+ * on a @vport. The remote node is Fabric Controller (0xfffffd). It
* first search the @vport node list to find the matching ndlp. If no such
* ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
* IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
* routine is invoked to send the SCR IOCB.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the SCR ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the SCR ELS command.
*
* Return code
* 0 - Successfully issued scr command
* 1 - Failed to issue scr command
**/
int
-lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -3072,30 +3450,30 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
- ndlp = lpfc_findnode_did(vport, nportid);
+ ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
if (!ndlp) {
- ndlp = lpfc_nlp_init(vport, nportid);
+ ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
-
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
+ if (rc) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0937 %s: Failed to reg fc node, rc %d\n",
+ __func__, rc);
+ return 1;
+ }
+ }
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
pcmd += sizeof(uint32_t);
@@ -3109,22 +3487,24 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitSCR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue SCR: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
return 0;
}
@@ -3139,10 +3519,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
* in point-to-point mode. When sent to the Fabric Controller, it will
* replay the RSCN to registered recipients.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RSCN ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the RSCN ELS command.
*
* Return code
* 0 - Successfully issued RSCN command
@@ -3151,6 +3530,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
int
lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
@@ -3181,26 +3561,16 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
- event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ event = elsiocb->cmd_dmabuf->virt;
event->rscn.rscn_cmd = ELS_RSCN;
event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
@@ -3213,27 +3583,24 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
event->portid.rscn_fid[2] = nportid & 0x000000FF;
+ phba->fc_stat.elsXmitRSCN++;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RSCN: did:x%x",
ndlp->nlp_DID, 0, 0);
- phba->fc_stat.elsXmitRSCN++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
return 0;
}
@@ -3251,10 +3618,9 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
* for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
* lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PARPR ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the FARPR ELS command.
*
* Return code
* 0 - Successfully issued farpr command
@@ -3263,6 +3629,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
FARP *fp;
@@ -3280,23 +3647,14 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_RNID);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ ndlp->nlp_DID, ELS_CMD_FARPR);
+ if (!elsiocb)
return 1;
- }
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
pcmd += sizeof(uint32_t);
@@ -3313,7 +3671,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ondlp = lpfc_findnode_did(vport, nportid);
- if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
+ if (ondlp) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
@@ -3325,22 +3683,627 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitFARPR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
*/
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of the node.
*/
+ /* Don't release reference count as RDF is likely outstanding */
+ return 0;
+}
+
+/**
+ * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: retry counter for the command IOCB.
+ *
+ * This routine issues an ELS RDF to the Fabric Controller to register
+ * for diagnostic functions.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the RDF ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued rdf command
+ * 1 - Failed to issue rdf command
+ **/
+int
+lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_els_rdf_req *prdf;
+ struct lpfc_nodelist *ndlp;
+ uint16_t cmdsize;
+ int rc;
+
+ cmdsize = sizeof(*prdf);
+
+ ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
+ if (!ndlp) {
+ ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
+ if (!ndlp)
+ return -ENODEV;
+ lpfc_enqueue_node(vport, ndlp);
+ }
+
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return -EACCES;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RDF);
+ if (!elsiocb)
+ return -ENOMEM;
+
+ /* Configure the payload for the supported FPIN events. */
+ prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
+ memset(prdf, 0, cmdsize);
+ prdf->rdf.fpin_cmd = ELS_RDF;
+ prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
+ sizeof(struct fc_els_rdf));
+ prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
+ prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
+ prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
+ prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
+ prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
+ prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
+ prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
+ ndlp->nlp_DID, phba->cgn_reg_signal,
+ phba->cgn_reg_fpin);
+
+ phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RDF: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return -EIO;
+ }
+ return 0;
+}
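The RDF payload is a fixed TLV: a registration descriptor whose length field excludes the tag and length words, followed by big-endian event tags. A self-contained sketch of building such a descriptor with the standard byte-order helpers; the tag values here are placeholders, not the FC-LS ones:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reg_desc {
	uint32_t tag;		/* descriptor tag, big-endian on the wire */
	uint32_t len;		/* value length, excludes tag and len */
	uint32_t count;		/* number of event tags that follow */
	uint32_t tags[4];	/* registered FPIN event tags */
};

int main(void)
{
	struct reg_desc d;

	memset(&d, 0, sizeof(d));
	d.tag = htonl(0x1);			/* placeholder register tag */
	d.len = htonl(sizeof(d) - 2 * sizeof(uint32_t));
	d.count = htonl(4);
	d.tags[0] = htonl(0x20001);		/* placeholder event tags */
	d.tags[1] = htonl(0x20002);
	d.tags[2] = htonl(0x20003);
	d.tags[3] = htonl(0x20004);

	printf("descriptor value length: %u bytes\n", ntohl(d.len));
	return 0;
}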
+
+/**
+ * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * A received RDF implies a possible change to fabric supported diagnostic
+ * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
+ * RDF request to reregister for supported diagnostic functions.
+ *
+ * Return code
+ * 0 - Success
+ * -EIO - Failed to process received RDF
+ **/
+static int
+lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ /* Send LS_ACC */
+ if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "1623 Failed to RDF_ACC from x%x for x%x\n",
+ ndlp->nlp_DID, vport->fc_myDID);
+ return -EIO;
+ }
+
+ /* Issue new RDF for reregistering */
+ if (lpfc_issue_els_rdf(vport, 0)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "2623 Failed to re register RDF for x%x\n",
+ vport->fc_myDID);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_least_capable_settings - helper function for EDC rsp processing
+ * @phba: pointer to lpfc hba data structure.
+ * @pcgd: pointer to congestion detection descriptor in EDC rsp.
+ *
+ * This helper routine determines the least capable setting for
+ * congestion signals, signal freq, including scale, from the
+ * congestion detection descriptor in the EDC rsp. The routine
+ * sets @phba values in preparation for a set_featues mailbox.
+ **/
+static void
+lpfc_least_capable_settings(struct lpfc_hba *phba,
+ struct fc_diag_cg_sig_desc *pcgd)
+{
+ u32 rsp_sig_cap = 0, drv_sig_cap = 0;
+ u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
+
+ /* Get rsp signal and frequency capabilities. */
+ rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
+ rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
+ rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
+
+	/* If the Fport does not support signals, set FPIN only */
+ if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
+ goto out_no_support;
+
+ /* Apply the xmt scale to the xmt cycle to get the correct frequency.
+	 * Adapter default is 100 milliseconds. Convert all xmt cycle values
+	 * to milliseconds.
+ */
+ switch (rsp_sig_freq_scale) {
+ case EDC_CG_SIGFREQ_SEC:
+ rsp_sig_freq_cyc *= MSEC_PER_SEC;
+ break;
+ case EDC_CG_SIGFREQ_MSEC:
+ rsp_sig_freq_cyc = 1;
+ break;
+ default:
+ goto out_no_support;
+ }
+
+ /* Convenient shorthand. */
+ drv_sig_cap = phba->cgn_reg_signal;
+
+ /* Choose the least capable frequency. */
+ if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
+ phba->cgn_sig_freq = rsp_sig_freq_cyc;
+
+	/* There should be some common signal support. Settle on the least
+	 * capable signal and adjust FPIN values. Initialize defaults to
+	 * ease the decision.
+ */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
+ (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
+ drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+ phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+ }
+ if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+ if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
+ }
+ if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+ phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+ }
+ }
+
+ /* We are NOT recording signal frequency in congestion info buffer */
+ return;
+
+out_no_support:
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = 0;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+}
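Before the two sides can be compared, the response's transmit cycle is normalized to the adapter's milliseconds base, and an unknown scale is treated as no signal support at all. A tiny model of that normalization plus the least-capable frequency choice (the unit codes below are stand-ins for the EDC_CG_SIGFREQ_* values):

#include <stdio.h>

enum { SIGFREQ_SEC = 1, SIGFREQ_MSEC = 2 };

/* Returns the cycle time in ms, or 0 when the scale is unsupported. */
static unsigned int cycle_to_msec(unsigned int cyc, unsigned int scale)
{
	switch (scale) {
	case SIGFREQ_SEC:
		return cyc * 1000u;
	case SIGFREQ_MSEC:
		return cyc;
	default:
		return 0;	/* unknown scale: no signal support */
	}
}

int main(void)
{
	unsigned int drv_freq = 100;	/* adapter default: 100 ms */
	unsigned int rsp_freq = cycle_to_msec(1, SIGFREQ_SEC);

	/* least capable wins: keep the slower (larger) cycle */
	if (rsp_freq > drv_freq)
		drv_freq = rsp_freq;
	printf("negotiated cycle: %u ms\n", drv_freq);
	return 0;
}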
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
+ FC_LS_TLV_DTAG_INIT);
+
+/**
+ * lpfc_cmpl_els_edc - Completion callback function for EDC
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Exchange
+ * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
+ * notify the FPort of its Congestion and Link Fault capabilities. This
+ * routine parses the FPort's response and decides on the least common
+ * values applicable to both FPort and NPort for Warnings and Alarms that
+ * are communicated via hardware signals.
+ **/
+static void
+lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp_iocb;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
+ struct fc_diag_cg_sig_desc *pcgd;
+ struct fc_diag_lnkflt_desc *plnkflt;
+ struct lpfc_dmabuf *pcmd, *prsp;
+ const char *dtag_nm;
+ u32 *pdata, dtag;
+ int desc_cnt = 0, bytes_remain;
+ bool rcv_cap_desc = false;
+ struct lpfc_nodelist *ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(rspiocb);
+ iotag = get_wqe_reqtag(rspiocb);
+ } else {
+ irsp_iocb = &rspiocb->iocb;
+ tmo = irsp_iocb->ulpTimeout;
+ iotag = irsp_iocb->ulpIoTag;
+ }
+
+ lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+ "EDC cmpl: status:x%x/x%x did:x%x",
+ ulp_status, ulp_word4, did);
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
+ iotag, ulp_status, ulp_word4, tmo);
+
+ pcmd = cmdiocb->cmd_dmabuf;
+ if (!pcmd)
+ goto out;
+
+ pdata = (u32 *)pcmd->virt;
+ if (!pdata)
+ goto out;
+
+ /* Need to clear signal values, send features MB and RDF with FPIN. */
+ if (ulp_status)
+ goto out;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ edc_rsp = prsp->virt;
+ if (!edc_rsp)
+ goto out;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
+ "4676 Fabric EDC Rsp: "
+ "0x%02x, 0x%08x\n",
+ edc_rsp->acc_hdr.la_cmd,
+ be32_to_cpu(edc_rsp->desc_list_len));
+
+ /*
+ * Payload length in bytes is the response descriptor list
+ * length minus the 12 bytes of Link Service Request
+ * Information descriptor in the reply.
+ */
+ bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
+ sizeof(struct fc_els_lsri_desc);
+ if (bytes_remain <= 0)
+ goto out;
+
+ tlv = edc_rsp->desc;
+
+ /*
+ * cycle through EDC diagnostic descriptors to find the
+ * congestion signaling capability descriptor
+ */
+ while (bytes_remain) {
+ if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6461 Truncated TLV hdr on "
+ "Diagnostic descriptor[%d]\n",
+ desc_cnt);
+ goto out;
+ }
+
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_FAULT_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_lnkflt_desc)) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
+ "6462 Truncated Link Fault Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_lnkflt_desc));
+ goto out;
+ }
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
+ "4617 Link Fault Desc Data: 0x%08x 0x%08x "
+ "0x%08x 0x%08x 0x%08x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
+ break;
+ case ELS_DTAG_CG_SIGNAL_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_cg_sig_desc)) {
+ lpfc_printf_log(
+ phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6463 Truncated Cgn Signal Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_cg_sig_desc));
+ goto out;
+ }
+
+ pcgd = (struct fc_diag_cg_sig_desc *)tlv;
+ lpfc_printf_log(
+ phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4616 CGN Desc Data: 0x%08x 0x%08x "
+ "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
+ be32_to_cpu(pcgd->desc_tag),
+ be32_to_cpu(pcgd->desc_len),
+ be32_to_cpu(pcgd->xmt_signal_capability),
+ be16_to_cpu(pcgd->xmt_signal_frequency.count),
+ be16_to_cpu(pcgd->xmt_signal_frequency.units),
+ be32_to_cpu(pcgd->rcv_signal_capability),
+ be16_to_cpu(pcgd->rcv_signal_frequency.count),
+ be16_to_cpu(pcgd->rcv_signal_frequency.units));
+
+ /* Compare driver and Fport capabilities and choose
+ * least common.
+ */
+ lpfc_least_capable_settings(phba, pcgd);
+ rcv_cap_desc = true;
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4919 unknown Diagnostic "
+ "Descriptor[%d]: tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+ }
+
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ desc_cnt++;
+ }
+
+out:
+ if (!rcv_cap_desc) {
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = 0;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
+ "4202 EDC rsp error - sending RDF "
+ "for FPIN only.\n");
+ }
+
+ lpfc_config_cgn_signal(phba);
+
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(phba->pport);
+ lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+ "EDC Cmpl: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
+}
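The descriptor loop above is a bounds-checked TLV walk: validate the header, validate the full descriptor size, then advance by that size. A generic runnable version of the same walk over a byte buffer; the 8-byte tag/length header is a simplification of the FC TLV layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint32_t tag;
	uint32_t len;	/* value bytes, excludes this header */
};

static void walk_tlvs(const uint8_t *buf, size_t remain)
{
	int idx = 0;

	while (remain) {
		struct tlv_hdr h;
		size_t sz;

		if (remain < sizeof(h)) {
			printf("truncated header at desc %d\n", idx);
			return;
		}
		memcpy(&h, buf, sizeof(h));	/* avoid unaligned access */
		sz = sizeof(h) + h.len;
		if (remain < sz) {
			printf("truncated value at desc %d\n", idx);
			return;
		}
		printf("desc %d: tag %u, %u value bytes\n", idx, h.tag, h.len);
		buf += sz;
		remain -= sz;
		idx++;
	}
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct tlv_hdr h = { 7, 8 };	/* one descriptor, 8 value bytes */

	memcpy(buf, &h, sizeof(h));
	walk_tlvs(buf, sizeof(buf));
	return 0;
}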
+
+static void
+lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
+
+ lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
+ lft->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
+
+ lft->degrade_activate_threshold =
+ cpu_to_be32(phba->degrade_activate_threshold);
+ lft->degrade_deactivate_threshold =
+ cpu_to_be32(phba->degrade_deactivate_threshold);
+ lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
+
+	/* We are assuming cgd was zeroed before calling this routine */
+
+ /* Configure the congestion detection capability */
+ cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
+
+ /* Descriptor len doesn't include the tag or len fields. */
+ cgd->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
+
+ /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+ * xmt_signal_frequency.count already set to 0.
+ * xmt_signal_frequency.units already set to 0.
+ */
+
+ if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+ /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+ * rcv_signal_frequency.count already set to 0.
+ * rcv_signal_frequency.units already set to 0.
+ */
+ phba->cgn_sig_freq = 0;
+ return;
+ }
+ switch (phba->cgn_reg_signal) {
+ case EDC_CG_SIG_WARN_ONLY:
+ cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
+ break;
+ case EDC_CG_SIG_WARN_ALARM:
+ cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
+ break;
+ default:
+ /* rcv_signal_capability left 0 thus no support */
+ break;
+ }
+
+	/* We start negotiation with lpfc_fabric_cgn_frequency; after
+	 * the completion we settle on the higher frequency.
+ */
+ cgd->rcv_signal_frequency.count =
+ cpu_to_be16(lpfc_fabric_cgn_frequency);
+ cgd->rcv_signal_frequency.units =
+ cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
+}
+
+static bool
+lpfc_link_is_lds_capable(struct lpfc_hba *phba)
+{
+ if (!(phba->lmt & LMT_64Gb))
+ return false;
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return false;
+
+ if (phba->sli4_hba.conf_trunk) {
+ if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
+ return true;
+ } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: retry counter for the command iocb.
+ *
+ * This routine issues an ELS EDC to the F-Port Controller to communicate
+ * this N_Port's support of hardware signals in its Congestion
+ * Capabilities Descriptor.
+ *
+ * Note: This routine does not check if one or more signals are
+ * set in the cgn_reg_signal parameter. The caller makes the
+ * decision to enforce cgn_reg_signal as nonzero or zero depending
+ * on the conditions. During Fabric requests, the driver
+ * requires cgn_reg_signal to be nonzero. But a dynamic request
+ * to set the congestion mode to OFF from Monitor or Manage
+ * would correctly issue an EDC with no signals enabled to
+ * turn off switch functionality and then update the FW.
+ *
+ * Return code
+ * 0 - Successfully issued edc command
+ * 1 - Failed to issue edc command
+ **/
+int
+lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
+ u16 cmdsize;
+ struct lpfc_nodelist *ndlp;
+ u8 *pcmd = NULL;
+ u32 cgn_desc_size, lft_desc_size;
+ int rc;
+
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return -EACCES;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENODEV;
+
+ cgn_desc_size = (phba->cgn_init_reg_signal) ?
+ sizeof(struct fc_diag_cg_sig_desc) : 0;
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize = cgn_desc_size + lft_desc_size;
+
+ /* Skip EDC if no applicable descriptors */
+ if (!cmdsize)
+ goto try_rdf;
+
+ cmdsize += sizeof(struct fc_els_edc);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_EDC);
+ if (!elsiocb)
+ goto try_rdf;
+
+ /* Configure the payload for the supported Diagnostics capabilities. */
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
+ memset(pcmd, 0, cmdsize);
+ edc_req = (struct fc_els_edc *)pcmd;
+ edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
+ edc_req->edc_cmd = ELS_EDC;
+ tlv = edc_req->desc;
+
+ if (cgn_desc_size) {
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ tlv = fc_tlv_next_desc(tlv);
+ }
+
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ "4623 Xmit EDC to remote "
+ "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
+ ndlp->nlp_DID, phba->cgn_reg_signal,
+ phba->cgn_reg_fpin);
+
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue EDC: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto try_rdf;
+ }
return 0;
+try_rdf:
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ rc = lpfc_issue_els_rdf(vport, 0);
+ return rc;
}
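Taken together, the payload this routine hands to lpfc_sli_issue_iocb()
lays out as follows (a sketch of the cases visible above; both
descriptors are optional):

/*
 * EDC request payload, as assembled in cmd_dmabuf (zeroed first):
 *
 *   struct fc_els_edc          hdr   edc_cmd = ELS_EDC;
 *                                    desc_len = cgn_desc_size +
 *                                               lft_desc_size
 *   struct fc_diag_cg_sig_desc cgn   only if cgn_init_reg_signal
 *   struct fc_diag_lnkflt_desc lft   only if lpfc_link_is_lds_capable()
 *
 * With neither descriptor applicable, no EDC is sent at all; the
 * try_rdf tail registers for FPIN-only congestion notification via
 * lpfc_issue_els_rdf() instead.
 */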
/**
@@ -3363,9 +4326,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
if (!(nlp->nlp_flag & NLP_DELAY_TMO))
return;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&nlp->lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&nlp->lock);
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
@@ -3375,9 +4338,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
}
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&nlp->lock);
nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&nlp->lock);
if (vport->num_disc_nodes) {
if (vport->port_state < LPFC_VPORT_READY) {
/* Check if there are more ADISCs to be sent */
@@ -3400,7 +4363,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
/**
* lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
- * @ptr: holder for the pointer to the timer function associated data (ndlp).
+ * @t: pointer to the timer function associated data (ndlp).
*
* This routine is invoked by the ndlp delayed-function timer to check
* whether there is any pending ELS retry event(s) with the node. If not, it
@@ -3453,20 +4416,19 @@ void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
uint32_t cmd, retry;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
cmd = ndlp->nlp_last_elscmd;
ndlp->nlp_last_elscmd = 0;
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return;
}
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* If a discovery event readded nlp_delayfunc after timer
* firing and before processing the timer, cancel the
@@ -3537,7 +4499,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
"2851 Attempt link reset\n");
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2852 Failed to allocate mbox memory");
return 1;
}
@@ -3559,7 +4521,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2853 Failed to issue INIT_LINK "
"mbox command, rc:x%x\n", rc);
mempool_free(mbox, phba->mbox_mem_pool);
@@ -3578,7 +4540,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
* This routine makes a retry decision on an ELS command IOCB, which has
* failed. The following ELS IOCBs use this function for retrying the command
* when previously issued command responsed with error status: FLOGI, PLOGI,
- * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * PRLI, ADISC and FDISC. Based on the ELS command type and the
* returned error status, it makes the decision whether a retry shall be
* issued for the command, and whether a retry shall be made immediately or
* delayed. In the former case, the corresponding ELS command issuing-function
@@ -3595,10 +4557,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ union lpfc_wqe128 *irsp = &rspiocb->wqe;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *elscmd;
struct ls_rjt stat;
int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
@@ -3606,9 +4567,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint32_t cmd = 0;
uint32_t did;
int link_reset = 0, rc;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- /* Note: context2 may be 0 for internal driver abort
+ /* Note: cmd_dmabuf may be 0 for internal driver abort
* of delayed ELS commands.
*/
@@ -3617,22 +4580,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = *elscmd++;
}
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
- did = irsp->un.elsreq64.remoteID;
+ did = get_job_els_rsp64_did(phba, rspiocb);
ndlp = lpfc_findnode_did(vport, did);
- if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- && (cmd != ELS_CMD_PLOGI))
- return 1;
+ if (!ndlp && (cmd != ELS_CMD_PLOGI))
+ return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Retry ELS: wd7:x%x wd4:x%x did:x%x",
- *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+ *(((uint32_t *)irsp) + 7), ulp_word4, did);
- switch (irsp->ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_FCP_RSP_ERROR:
break;
case IOSTAT_REMOTE_STOP:
@@ -3646,24 +4608,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
break;
case IOSTAT_LOCAL_REJECT:
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_FLOGI) {
- if (PCI_DEVICE_ID_HORNET ==
- phba->pcidev->device) {
- phba->fc_topology = LPFC_TOPOLOGY_LOOP;
- phba->pport->fc_myDID = 0;
- phba->alpa_map[0] = 0;
- phba->alpa_map[1] = 0;
- }
- }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
break;
case IOERR_ILLEGAL_COMMAND:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0124 Retry illegal cmd x%x "
"retry:x%x delay:x%x\n",
cmd, cmdiocb->retry, delay);
@@ -3709,12 +4662,25 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
delay = 100;
break;
+ case IOERR_SLI_ABORTED:
+ /* Retry ELS PLOGI command?
+ * Possibly the rport just wasn't ready.
+ */
+ if (cmd == ELS_CMD_PLOGI) {
+ /* No retry if state change */
+ if (ndlp &&
+ ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
+ goto out_retry;
+ retry = 1;
+ maxretry = 2;
+ }
+ break;
}
break;
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP) {
retry = 1;
break;
}
@@ -3727,53 +4693,80 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
/* Added for Vendor specific support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
- /* The driver has a VALID PLOGI but the rport has
- * rejected the PRLI - can't do it now. Delay
- * for 1 second and try again - don't care about
- * the explanation.
+ /* Special case for PRLI LS_RJTs. Recall that lpfc
+ * uses a single routine to issue both PRLI FC4 types.
+ * If the PRLI is rejected because that FC4 type
+ * isn't really supported, don't retry and cause
+ * multiple transport registrations. Otherwise, parse
+ * the reason code/reason code explanation and take the
+ * appropriate action.
*/
- if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
- break;
- }
-
- /* Legacy bug fix code for targets with PLOGI delays. */
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CMD_IN_PROGRESS) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS | LOG_NODE,
+ "0153 ELS cmd x%x LS_RJT by x%x. "
+ "RsnCode x%x RsnCodeExp x%x\n",
+ cmd, did, stat.un.b.lsRjtRsnCode,
+ stat.un.b.lsRjtRsnCodeExp);
+
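+ /* Decision summary for the explanation-code switch below:
+ * CANT_GIVE_DATA / CMD_IN_PROGRESS - always retry; PLOGI gets a
+ * 1 second delay and up to 48 tries.
+ * REQ_UNSUPPORTED / NO_RSRC_ASSIGN - no retry for PRLI/NVMEPRLI,
+ * the FC4 type is simply not supported.
+ * all other explanations - PLOGI/PRLI/NVMEPRLI retry after
+ * 1 second; other cmds do not retry.
+ */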
+ switch (stat.un.b.lsRjtRsnCodeExp) {
+ case LSEXP_CANT_GIVE_DATA:
+ case LSEXP_CMD_IN_PROGRESS:
if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = 48;
}
retry = 1;
break;
- }
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CANT_GIVE_DATA) {
- if (cmd == ELS_CMD_PLOGI) {
+ case LSEXP_REQ_UNSUPPORTED:
+ case LSEXP_NO_RSRC_ASSIGN:
+ /* These explanation codes get no retry. */
+ if (cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI)
+ break;
+ fallthrough;
+ default:
+ /* Restrict the delay and retry action to a limited
+ * cmd set. There are other ELS commands where
+ * a retry is not expected.
+ */
+ if (cmd == ELS_CMD_PLOGI ||
+ cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI) {
delay = 1000;
- maxretry = 48;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
}
- retry = 1;
- break;
- }
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
break;
}
+
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0125 FDISC Failed (x%x). "
"Fabric out of resources\n",
stat.un.lsRjtError);
@@ -3812,7 +4805,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
LSEXP_NOTHING_MORE) {
vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
retry = 1;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0820 FLOGI Failed (x%x). "
"BBCredit Not Supported\n",
stat.un.lsRjtError);
@@ -3825,7 +4819,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0122 FDISC Failed (x%x). "
"Fabric Detected Bad WWN\n",
stat.un.lsRjtError);
@@ -3846,12 +4841,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* on this rport.
*/
if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(shost->host_lock);
- retry = 0;
- goto out_retry;
+ LSEXP_REQ_UNSUPPORTED) {
+ if (cmd == ELS_CMD_PRLI)
+ goto out_retry;
}
break;
}
@@ -3883,7 +4875,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
- !lpfc_error_lost_link(irsp)) {
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
@@ -3896,7 +4888,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
- } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ } else if ((cmd == ELS_CMD_FDISC) &&
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* retry FDISCs every second up to devloss */
retry = 1;
maxretry = vport->cfg_devloss_tmo;
@@ -3933,8 +4926,8 @@ out_retry:
cmd, did, cmdiocb->retry, delay);
if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
- ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES))) {
/* Don't reset timer for no resources */
@@ -3945,23 +4938,23 @@ out_retry:
}
phba->fc_stat.elsXmitRetry++;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
+ if (ndlp && delay) {
phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry;
/* delay is specified in milliseconds */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(delay));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
if ((cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
- else
+ else if (cmd != ELS_CMD_ADISC)
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_NPR_NODE);
ndlp->nlp_last_elscmd = cmd;
@@ -3976,7 +4969,7 @@ out_retry:
lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PLOGI:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
@@ -4003,18 +4996,18 @@ out_retry:
}
/* No retry ELS command <elsCmd> to remote NPORT <did> */
if (logerr) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0137 No retry ELS command x%x to remote "
"NPORT x%x: Out of Resources: Error:x%x/%x\n",
- cmd, did, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, ulp_status,
+ ulp_word4);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0108 No retry ELS command x%x to remote "
"NPORT x%x Retried:%d Error:x%x/%x\n",
- cmd, did, cmdiocb->retry, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, cmdiocb->retry, ulp_status,
+ ulp_word4);
}
return 0;
}
@@ -4080,10 +5073,10 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
* command IOCB data structure contains the reference to various associated
* resources, these fields must be set to NULL if the associated reference
* not present:
- * context1 - reference to ndlp
- * context2 - reference to cmd
- * context2->next - reference to rsp
- * context3 - reference to bpl
+ * cmd_dmabuf - reference to cmd.
+ * cmd_dmabuf->next - reference to rsp
+ * rsp_dmabuf - unused
+ * bpl_dmabuf - reference to bpl
*
* It first properly decrements the reference count held on ndlp for the
* IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
@@ -4102,37 +5095,20 @@ int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)elsiocb->context1;
- if (ndlp) {
- if (ndlp->nlp_flag & NLP_DEFER_RM) {
- lpfc_nlp_put(ndlp);
+ /* The I/O iocb is complete. Clear the node and first dmabuf */
+ elsiocb->ndlp = NULL;
- /* If the ndlp is not being used by another discovery
- * thread, free it.
- */
- if (!lpfc_nlp_not_used(ndlp)) {
- /* If ndlp is being used by another discovery
- * thread, just clear NLP_DEFER_RM
- */
- ndlp->nlp_flag &= ~NLP_DEFER_RM;
- }
- }
- else
- lpfc_nlp_put(ndlp);
- elsiocb->context1 = NULL;
- }
- /* context2 = cmd, context2->next = rsp, context3 = bpl */
- if (elsiocb->context2) {
- if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+ /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
+ if (elsiocb->cmd_dmabuf) {
+ if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
- elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
- buf_ptr = elsiocb->context2;
- elsiocb->context2 = NULL;
+ elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
+ buf_ptr = elsiocb->cmd_dmabuf;
+ elsiocb->cmd_dmabuf = NULL;
if (buf_ptr) {
buf_ptr1 = NULL;
spin_lock_irq(&phba->hbalock);
@@ -4151,16 +5127,16 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
spin_unlock_irq(&phba->hbalock);
}
} else {
- buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ buf_ptr1 = elsiocb->cmd_dmabuf;
lpfc_els_free_data(phba, buf_ptr1);
- elsiocb->context2 = NULL;
+ elsiocb->cmd_dmabuf = NULL;
}
}
- if (elsiocb->context3) {
- buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ if (elsiocb->bpl_dmabuf) {
+ buf_ptr = elsiocb->bpl_dmabuf;
lpfc_els_free_bpl(phba, buf_ptr);
- elsiocb->context3 = NULL;
+ elsiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -4176,7 +5152,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
* the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
* release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
+ * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
* field to NULL to inform the following lpfc_els_free_iocb() routine no
* ndlp reference count needs to be decremented. Otherwise, the ndlp
* reference use-count shall be decremented by the lpfc_els_free_iocb()
@@ -4187,41 +5163,62 @@ static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ACC LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+ ulp_status, ulp_word4, ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0109 ACC to LOGO completes to NPort x%x "
+ "0109 ACC to LOGO completes to NPort x%x refcnt %d "
"Data: x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+
+ /* This clause allows the LOGO ACC to complete and free resources
+ * for the Fabric Domain Controller. It deliberately skips the
+ * unreg_rpi and rpi release because some fabrics send RDP
+ * requests after logging out from the initiator.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
+ goto out;
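+ /* Worked example with WELL_KNOWN_DID_MASK (0xfffff0 in
+ * lpfc_hw.h): a Domain Controller at DID 0xfffc01 yields
+ * 0xfffc01 & 0xfffff0 = 0xfffc00 != 0xfffff0 and takes this
+ * early out; the Fabric F_Port at 0xfffffe yields 0xfffff0,
+ * which matches, so it falls through to the NPR handling.
+ */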
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+ /* If PLOGI is being retried, PLOGI completion will cleanup the
+ * node. The NLP_NPR_2B_DISC flag needs to be retained to make
+ * progress on nodes discovered from last RSCN.
+ */
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
+ goto out;
+
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
- /* If the ndlp is being used by another discovery
- * thread, just unregister the RPI.
+ /* A LOGO is completing and the node is in NPR state.
+ * Just unregister the RPI because the node is still
+ * required.
*/
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
* not reference to it from within lpfc_els_free_iocb.
*/
- cmdiocb->context1 = NULL;
+ cmdiocb->ndlp = NULL;
}
}
-
+ out:
/*
* The driver received a LOGO from the rport and has ACK'd it.
* At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -4240,34 +5237,33 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
-
- pmb->ctx_buf = NULL;
- pmb->ctx_ndlp = NULL;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+ u32 mbx_flag = pmb->mbox_flag;
+ u32 mbx_cmd = pmb->u.mb.mbxCommand;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
+ "0006 rpi x%x DID:%x flg:%x %d x%px "
+ "mbx_cmd x%x mbx_flag x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- if (NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_put(ndlp);
- /* This is the end of the default RPI cleanup logic for
- * this ndlp. If no other discovery threads are using
- * this ndlp, free all resources associated with it.
- */
- lpfc_nlp_not_used(ndlp);
- } else {
- lpfc_drop_node(ndlp->vport, ndlp);
- }
+ kref_read(&ndlp->kref), ndlp, mbx_cmd,
+ mbx_flag, pmb);
+
+ /* This ends the default/temporary RPI cleanup logic for this
+ * ndlp and the node and rpi needs to be released. Free the rpi
+ * first on an UNREG_LOGIN and then release the final
+ * references.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ if (mbx_cmd == MBX_UNREG_LOGIN)
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ lpfc_drop_node(ndlp->vport, ndlp);
}
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
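lpfc_mbox_rsrc_cleanup() replaces the open-coded teardown deleted above.
A plausible sketch of its unlocked path, reconstructed from those
removed lines (the real helper also accepts MBOX_THD_LOCKED for callers
that already hold the hbalock):

static void mbox_rsrc_cleanup_sketch(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mbox)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;

	/* Free the DMA buffer the mailbox tracked, if any */
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;

	/* Return the mailbox itself to its mempool */
	mempool_free(mbox, phba->mbox_mem_pool);
}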
/**
@@ -4281,109 +5277,85 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* nlp_flag bitmap in the ndlp data structure, if the mbox command reference
* field in the command IOCB is not NULL, the referred mailbox command will
* be send out, and then invokes the lpfc_els_free_iocb() routine to release
- * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
- * link down event occurred during the discovery, the lpfc_nlp_not_used()
- * routine shall be invoked trying to release the ndlp if no other threads
- * are currently referring it.
+ * the IOCB.
**/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
IOCB_t *irsp;
- uint8_t *pcmd;
LPFC_MBOXQ_t *mbox = NULL;
- struct lpfc_dmabuf *mp = NULL;
- uint32_t ls_rjt = 0;
-
- irsp = &rspiocb->iocb;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
if (!vport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3177 ELS response failed\n");
goto out;
}
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
- /* First determine if this is a LS_RJT cmpl. Note, this callback
- * function can have cmdiocb->contest1 (ndlp) field set to NULL.
- */
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
- /* A LS_RJT associated with Default RPI cleanup has its own
- * separate code path.
- */
- if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
- ls_rjt = 1;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
}
/* Check to see if link went down during discovery */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
- if (mbox) {
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
- if (lpfc_nlp_not_used(ndlp)) {
- ndlp = NULL;
- /* Indicate the node has already released,
- * should not reference to it from within
- * the routine lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
- }
+ if (!ndlp || lpfc_els_chk_latt(vport)) {
+ if (mbox)
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
goto out;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ELS rsp cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->iocb.un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
- "Data: x%x x%x x%x x%x x%x x%x x%x\n",
- cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
+ iotag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
if (mbox) {
- if ((rspiocb->iocb.ulpStatus == 0)
+ if (ulp_status == 0
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
- (!(vport->fc_flag & FC_PT2PT)) &&
- (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
- ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY,
- "0314 PLOGI recov DID x%x "
- "Data: x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_state,
- ndlp->nlp_rpi, ndlp->nlp_flag);
- mp = mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt,
- mp->phys);
- kfree(mp);
+ (!(vport->fc_flag & FC_PT2PT))) {
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_state ==
+ NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0314 PLOGI recov "
+ "DID x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID,
+ ndlp->nlp_state,
+ ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ goto out_free_mbox;
}
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
}
/* Increment reference count to ndlp to hold the
* reference to ndlp for the callback function.
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto out_free_mbox;
+
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
@@ -4408,64 +5380,45 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
/* ELS rsp: Cannot issue reg_login for <NPortid> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0138 ELS rsp: Cannot issue reg_login for x%x "
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
-
- if (lpfc_nlp_not_used(ndlp)) {
- ndlp = NULL;
- /* Indicate node has already been released,
- * should not reference to it from within
- * the routine lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
- }
- } else {
- /* Do not drop node for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp) &&
- ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- if (lpfc_nlp_not_used(ndlp)) {
- ndlp = NULL;
- /* Indicate node has already been
- * released, should not reference
- * to it from within the routine
- * lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
- }
- }
- }
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
}
- mempool_free(mbox, phba->mbox_mem_pool);
+out_free_mbox:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
- spin_unlock_irq(shost->host_lock);
-
- /* If the node is not being used by another discovery thread,
- * and we are sending a reject, we are done with it.
- * Release driver reference count here and free associated
- * resources.
- */
- if (ls_rjt)
- if (lpfc_nlp_not_used(ndlp))
- /* Indicate node has already been released,
- * should not reference to it from within
- * the routine lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
+ if (ndlp && shost) {
+ spin_lock_irq(&ndlp->lock);
+ if (mbox)
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ }
+ /* An SLI4 NPIV instance wants to drop the node at this point under
+ * these conditions and release the RPI.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (vport && vport->port_type == LPFC_NPIV_PORT) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
+ ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_drop_node(vport, ndlp);
+ }
}
+ /* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -4485,10 +5438,10 @@ out:
* field of the IOCB for the completion callback function to issue the
* mailbox command to the HBA later when callback is invoked.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the corresponding response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the corresponding
+ * response ELS IOCB command.
*
* Return code
* 0 - Successfully issued acc response
@@ -4499,18 +5452,18 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
LPFC_MBOXQ_t *mbox)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
+ union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
struct serv_parm *sp;
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
-
- oldcmd = &oldiocb->iocb;
+ struct fc_els_rdf_resp *rdf_resp;
switch (flag) {
case ELS_CMD_ACC:
@@ -4518,16 +5471,32 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -4543,10 +5512,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
if (mbox)
elsiocb->context_un.mbox = mbox;
@@ -4605,12 +5590,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
- memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
+
+ memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
sizeof(uint32_t) + sizeof(PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
@@ -4620,33 +5621,88 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
"Issue ACC PRLO: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
+ case ELS_CMD_RDF:
+ cmdsize = sizeof(*rdf_resp);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
+ rdf_resp = (struct fc_els_rdf_resp *)pcmd;
+ memset(rdf_resp, 0, sizeof(*rdf_resp));
+ rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
+
+ /* FC-LS-5 specifies desc_list_len shall be set to 12 */
+ rdf_resp->desc_list_len = cpu_to_be32(12);
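+ /* 12 bytes is exactly one LS Request Information descriptor:
+ * 4 (desc_tag) + 4 (desc_len) + 4 (rqst_w0), i.e.
+ * sizeof(struct fc_els_lsri_desc).
+ */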
+
+ /* FC-LS-5 specifies LS REQ Information descriptor */
+ rdf_resp->lsri.desc_tag = cpu_to_be32(1);
+ rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
+ rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
+ break;
default:
return 1;
}
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
} else {
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+ "RPI: x%x, fc_flag x%x refcnt %d\n",
+ rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
return 0;
}
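Every case in the switch above repeats the same SLI4/SLI3 copy of the
XRI (rx_id) and OX_ID from the unsolicited request into the response. A
hypothetical helper (name and factoring invented here; not in the
driver) that the pattern reduces to:

/* Hypothetical: propagate XRI and OX_ID from the unsolicited
 * request into the response job, for SLI4 WQEs and SLI3 IOCBs.
 */
static void lpfc_rsp_copy_xri_oxid(struct lpfc_hba *phba,
				   struct lpfc_iocbq *rspiocb,
				   struct lpfc_iocbq *oldiocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		union lpfc_wqe128 *wqe = &rspiocb->wqe;

		bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
		       get_job_ulpcontext(phba, oldiocb));
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       get_job_rcvoxid(phba, oldiocb));
	} else {
		rspiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;
		rspiocb->iocb.unsli3.rcvsli3.ox_id =
			oldiocb->iocb.unsli3.rcvsli3.ox_id;
	}
}

Each arm of the switch would then collapse to a single call after the
lpfc_prep_els_iocb() allocation succeeds.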
/**
- * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
+ * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
* @vport: pointer to a virtual N_Port data structure.
- * @rejectError:
+ * @rejectError: reject response to issue
* @oldiocb: pointer to the original lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
* @mbox: pointer to the driver internal queue element for mailbox command.
@@ -4656,10 +5712,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
* context_un.mbox field of the IOCB for the completion callback function
* to issue to the HBA later.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the reject response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the reject response
+ * ELS IOCB command.
*
* Return code
* 0 - Successfully issued reject response
@@ -4670,13 +5726,14 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
LPFC_MBOXQ_t *mbox)
{
+ int rc;
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
- int rc;
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4684,11 +5741,20 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
pcmd += sizeof(uint32_t);
@@ -4703,20 +5769,140 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
rejectError, elsiocb->iotag,
- elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue LS_RJT: did:x%x flg:x%x err:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
+ * node's assigned RPI gets released provided this node is not already
+ * registered with the transport.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_NPIV_PORT &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
+ return 0;
+}
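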
+
+/**
+ * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: NPort to where rsp is directed
+ *
+ * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
+ * this N_Port's support of hardware signals in its Congestion
+ * Capabilities Descriptor.
+ *
+ * Return code
+ * 0 - Successfully issued edc rsp command
+ * 1 - Failed to issue edc rsp command
+ **/
+static int
+lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
+ struct lpfc_iocbq *elsiocb;
+ IOCB_t *icmd, *cmd;
+ union lpfc_wqe128 *wqe;
+ u32 cgn_desc_size, lft_desc_size;
+ u16 cmdsize;
+ uint8_t *pcmd;
+ int rc;
+ cmdsize = sizeof(struct fc_els_edc_resp);
+ cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize += cgn_desc_size + lft_desc_size;
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ cmd = &cmdiocb->iocb;
+ icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = elsiocb->cmd_dmabuf->virt;
+ memset(pcmd, 0, cmdsize);
+
+ edc_rsp = (struct fc_els_edc_resp *)pcmd;
+ edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
+ edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
+ cgn_desc_size + lft_desc_size);
+ edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+ edc_rsp->lsri.desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
+ edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
+ tlv = edc_rsp->desc;
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue EDC ACC: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
+ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+ "RPI: x%x, fc_flag x%x\n",
+ rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag);
+
return 0;
}
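The accept built here mirrors the request-side layout, with an LS
Request Information descriptor inserted after the header. A sketch of
what lands in cmd_dmabuf:

/*
 * EDC LS_ACC payload (zeroed first):
 *
 *   struct fc_els_edc_resp     hdr   la_cmd = ELS_LS_ACC;
 *                                    desc_list_len = sizeof(lsri) +
 *                                    cgn_desc_size + lft_desc_size;
 *                                    embedded lsri echoes ELS_EDC
 *   struct fc_diag_cg_sig_desc cgn   always present in the ACC
 *   struct fc_diag_lnkflt_desc lft   only if link is LDS-capable
 */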
@@ -4730,10 +5916,10 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
* Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the ADISC Accept response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the ADISC Accept response
+ * ELS IOCB command.
*
* Return code
* 0 - Successfully issued acc adisc response
@@ -4746,10 +5932,12 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(ADISC);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4757,19 +5945,32 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -4781,25 +5982,24 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ap->DID = be32_to_cpu(vport->fc_myDID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC ADISC: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
- /* Xmit ELS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
- "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
- rc, elsiocb->iotag, elsiocb->sli4_xritag,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
return 0;
}
@@ -4813,10 +6013,10 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* Login (PRLI) ELS command. It simply prepares the payload of the IOCB
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PRLI Accept response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the PRLI Accept response
+ * ELS IOCB command.
*
* Return code
* 0 - Successfully issued acc prli response
@@ -4832,18 +6032,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
lpfc_vpd_t *vpd;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
uint32_t prli_fc4_req, *req_payload;
struct lpfc_dmabuf *req_buf;
int rc;
- u32 elsrspcmd;
+ u32 elsrspcmd, ulp_context;
/* Need the incoming PRLI payload to determine if the ACC is for an
* FC4 or NVME PRLI type. The PRLI type is at word 1.
*/
- req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
+ req_buf = oldiocb->cmd_dmabuf;
req_payload = (((uint32_t *)req_buf->virt) + 1);
/* PRLI type payload is at byte 3 for FCP or NVME. */
@@ -4856,7 +6057,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (prli_fc4_req == PRLI_FCP_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
} else {
@@ -4864,23 +6065,34 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
}
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
- ndlp->nlp_DID, elsrspcmd);
+ ndlp->nlp_DID, elsrspcmd);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
*((uint32_t *)(pcmd)) = elsrspcmd;
@@ -4908,7 +6120,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
@@ -4948,17 +6160,24 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC PRLI: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC PRLI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
+
return 0;
}
@@ -4972,17 +6191,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* This routine issues a Request Node Identification Data (RNID) Accept
* (ACC) response. It constructs the RNID ACC response command according to
* the proper @format and then calls the lpfc_sli_issue_iocb() routine to
- * issue the response. Note that this command does not need to hold the ndlp
- * reference count for the callback. So, the ndlp reference count taken by
- * the lpfc_prep_els_iocb() routine is put back and the context1 field of
- * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
- * there is no ndlp reference available.
- *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function. However, for the RNID Accept Response ELS command,
- * this is undone later by this routine after the IOCB is allocated.
+ * issue the response.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function.
*
* Return code
* 0 - Successfully issued acc rnid response
@@ -4995,10 +6208,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_hba *phba = vport->phba;
RNID *rn;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
@@ -5010,16 +6225,27 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ elsiocb->iotag, ulp_context);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -5048,17 +6274,24 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC RNID: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
+
return 0;
}
@@ -5082,7 +6315,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
struct lpfc_node_rrq *prrq;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+ pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
@@ -5094,7 +6327,8 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
be32_to_cpu(bf_get(rrq_did, rrq)),
bf_get(rrq_oxid, rrq),
rxid,
- iocb->iotag, iocb->iocb.ulpContext);
+ get_wqe_reqtag(iocb),
+ get_job_ulpcontext(phba, iocb));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
@@ -5125,12 +6359,18 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
- cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
+ else
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
/* The accumulated length can exceed the BPL_SIZE. For
* now, use this as the limit
@@ -5142,30 +6382,50 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
if (!elsiocb)
return 1;
- elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ECHO ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ elsiocb->iotag, ulp_context);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC ECHO: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
+
return 0;
}
@@ -5197,27 +6457,41 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
+
+ if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
+ !(ndlp->nlp_flag & NLP_NPR_ADISC))
continue;
- if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
- (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(&ndlp->lock);
+
+ if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ /* This node was marked for ADISC but was not picked
+ * for discovery. This is possible if the node was
+ * missing from the GID_FT response.
+ *
+ * At the time the node was marked for ADISC, we
+ * skipped unregistering it from the backend.
+ */
+ lpfc_nlp_unreg_node(vport, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
+ continue;
+ }
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
+ sentadisc++;
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->cfg_discovery_threads) {
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ vport->fc_flag |= FC_NLP_MORE;
spin_unlock_irq(shost->host_lock);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- sentadisc++;
- vport->num_disc_nodes++;
- if (vport->num_disc_nodes >=
- vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- break;
- }
+ break;
}
+
}
if (sentadisc == 0) {
spin_lock_irq(shost->host_lock);
@@ -5255,8 +6529,6 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
@@ -5631,6 +6903,12 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
case LPFC_LINK_SPEED_64GHZ:
rdp_speed = RDP_PS_64GB;
break;
+ case LPFC_LINK_SPEED_128GHZ:
+ rdp_speed = RDP_PS_128GB;
+ break;
+ case LPFC_LINK_SPEED_256GHZ:
+ rdp_speed = RDP_PS_256GB;
+ break;
default:
rdp_speed = RDP_PS_UNKNOWN;
break;
@@ -5638,6 +6916,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+ if (phba->lmt & LMT_256Gb)
+ rdp_cap |= RDP_PS_256GB;
if (phba->lmt & LMT_128Gb)
rdp_cap |= RDP_PS_128GB;
if (phba->lmt & LMT_64Gb)
@@ -5717,12 +6997,14 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
struct lpfc_iocbq *elsiocb;
struct ulp_bde64 *bpl;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct ls_rjt *stat;
struct fc_rdp_res_frame *rdp_res;
uint32_t cmdsize, len;
uint16_t *flag_ptr;
int rc;
+ u32 ulp_context;
if (status != SUCCESS)
goto error;
@@ -5731,25 +7013,33 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
cmdsize = sizeof(struct fc_rdp_res_frame);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
- lpfc_max_els_tries, rdp_context->ndlp,
- rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
- lpfc_nlp_put(ndlp);
+ lpfc_max_els_tries, rdp_context->ndlp,
+ rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2171 Xmit RDP response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- rdp_res = (struct fc_rdp_res_frame *)
- (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5797,47 +7087,75 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
rdp_context->page_a0, vport);
rdp_res->length = cpu_to_be32(len - 8);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
/* Now that we know the true size of the payload, update the BPL */
- bpl = (struct ulp_bde64 *)
- (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
+ bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
bpl->tus.f.bdeSize = len;
bpl->tus.f.bdeFlags = 0;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
phba->fc_stat.elsXmitACC++;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_rdp_context;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
- kfree(rdp_context);
+ goto free_rdp_context;
- return;
error:
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
- lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_rdp_context;
+ }
- if (rc == IOCB_ERROR)
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
+
free_rdp_context:
+ /* This reference put is for the original unsolicited RDP. If the
+ * prep failed, there is no reference to remove.
+ */
+ lpfc_nlp_put(ndlp);
kfree(rdp_context);
}
@@ -5855,18 +7173,19 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
}
if (lpfc_sli4_dump_page_a0(phba, mbox))
- goto prep_mbox_fail;
+ goto rdp_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
- goto issue_mbox_fail;
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ return 1;
+ }
return 0;
-prep_mbox_fail:
-issue_mbox_fail:
+rdp_fail:
mempool_free(mbox, phba->mbox_mem_pool);
return 1;
}
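lpfc_get_rdp_info() above now distinguishes two failure points: a failed dump-page prep frees only the pool element, while a mailbox that was fully set up but could not be submitted goes through lpfc_mbox_rsrc_cleanup() to release whatever was attached to it. A hedged userspace sketch of that two-tier unwind (mbox_free() and mbox_rsrc_cleanup() are stand-ins, not the driver helpers):

#include <stdio.h>
#include <stdlib.h>

struct mbox { void *extra; };                 /* stand-in for LPFC_MBOXQ_t */

static void mbox_free(struct mbox *m)         /* cf. mempool_free() */
{
	free(m);
}

static void mbox_rsrc_cleanup(struct mbox *m) /* cf. lpfc_mbox_rsrc_cleanup() */
{
	free(m->extra);                       /* release attached resources */
	free(m);
}

static int get_info(int prep_ok, int submit_ok)
{
	struct mbox *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	if (!prep_ok)
		goto prep_fail;               /* nothing attached yet */
	m->extra = malloc(64);                /* resources now bound to mbox */
	if (!submit_ok) {
		mbox_rsrc_cleanup(m);         /* full unwind */
		return 1;
	}
	return 0;   /* success: completion path owns m (the demo leaks it) */
prep_fail:
	mbox_free(m);
	return 1;
}

int main(void)
{
	printf("ok=%d prep_fail=%d submit_fail=%d\n",
	       get_info(1, 1), get_info(0, 0), get_info(1, 0));
	return 0;
}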
@@ -5897,7 +7216,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
struct fc_rdp_req_frame *rdp_req;
struct lpfc_rdp_context *rdp_context;
- IOCB_t *cmd = NULL;
+ union lpfc_wqe128 *cmd = NULL;
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
@@ -5914,7 +7233,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -5939,10 +7258,17 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- cmd = &cmdiocb->iocb;
+ cmd = &cmdiocb->wqe;
rdp_context->ndlp = lpfc_nlp_get(ndlp);
- rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
- rdp_context->rx_id = cmd->ulpContext;
+ if (!rdp_context->ndlp) {
+ kfree(rdp_context);
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto error;
+ }
+ rdp_context->ox_id = bf_get(wqe_rcvoxid,
+ &cmd->xmit_els_rsp.wqe_com);
+ rdp_context->rx_id = bf_get(wqe_ctxt_tag,
+ &cmd->xmit_els_rsp.wqe_com);
rdp_context->cmpl = lpfc_els_rdp_cmpl;
if (lpfc_get_rdp_info(phba, rdp_context)) {
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
@@ -5972,6 +7298,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
@@ -6018,43 +7345,67 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!elsiocb)
goto free_lcb_context;
- lcb_res = (struct fc_lcb_res_frame *)
- (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
- pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
lcb_res->lcb_duration = lcb_context->duration;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
+ goto out;
+ }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
+ out:
kfree(lcb_context);
return;
error:
cmdsize = sizeof(struct fc_lcb_res_frame);
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_lcb_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
- pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
@@ -6063,11 +7414,19 @@ error:
if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_lcb_context;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
free_lcb_context:
kfree(lcb_context);
}
@@ -6167,10 +7526,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint8_t *lp;
struct fc_lcb_request_frame *beacon;
struct lpfc_lcb_context *lcb_context;
- uint8_t state, rjt_err;
+ u8 state, rjt_err = 0;
struct ls_rjt stat;
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint8_t *)pcmd->virt;
beacon = (struct fc_lcb_request_frame *)pcmd->virt;
@@ -6210,18 +7569,25 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
lcb_context->duration = beacon->lcb_duration;
- lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
- lcb_context->rx_id = cmdiocb->iocb.ulpContext;
+ lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
+ lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
lcb_context->ndlp = lpfc_nlp_get(ndlp);
+ if (!lcb_context->ndlp) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto rjt_free;
+ }
+
if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
- lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_ELS, "0193 failed to send mail box");
- kfree(lcb_context);
+ lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0193 failed to send mail box");
lpfc_nlp_put(ndlp);
rjt_err = LSRJT_UNABLE_TPC;
- goto rjt;
+ goto rjt_free;
}
return 0;
+
+rjt_free:
+ kfree(lcb_context);
rjt:
memset(&stat, 0, sizeof(stat));
stat.un.b.lsRjtRsnCode = rjt_err;
@@ -6360,12 +7726,11 @@ return_did_out:
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
- struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *ndlp = NULL, *n;
/* Move all affected nodes by pending RSCNs to NPR state. */
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+ list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
+ if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
@@ -6385,13 +7750,6 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
- /* Check to see if we need to NVME rescan this target
- * remoteport.
- */
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
- ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
- lpfc_nvme_rescan_port(vport, ndlp);
-
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -6417,14 +7775,14 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
uint32_t payload_len;
struct lpfc_rscn_event_header *rscn_event_data;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
payload_ptr = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
payload_len, GFP_KERNEL);
if (!rscn_event_data) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0147 Failed to allocate memory for RSCN event\n");
return;
}
@@ -6477,7 +7835,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
int rscn_id = 0, hba_id = 0;
int i, tmo;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
@@ -6553,6 +7911,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL);
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies +
+ msecs_to_jiffies(1000 * tmo));
+ }
return 0;
}
}
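For reference, the restart above re-arms the discovery timer for (3 * R_A_TOV + 3) seconds, converted to jiffies via msecs_to_jiffies(). A small userspace sketch of the same arithmetic (HZ and msecs_to_jiffies_approx() are assumptions for illustration; the kernel derives the conversion from its configured tick rate):

#include <stdio.h>

#define HZ 250                                 /* assumed tick rate */

static unsigned long msecs_to_jiffies_approx(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;  /* round up */
}

int main(void)
{
	unsigned int fc_ratov = 10;            /* typical R_A_TOV, seconds */
	unsigned int tmo = fc_ratov * 3 + 3;   /* same formula as above */

	printf("disctmo = %u s = %lu jiffies at HZ=%d\n",
	       tmo, msecs_to_jiffies_approx(1000 * tmo), HZ);
	return 0;
}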
@@ -6572,7 +7937,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Get the array count after successfully have the token */
rscn_cnt = vport->fc_rscn_id_cnt;
/* If we are already processing an RSCN, save the received
- * RSCN payload buffer, cmdiocb->context2 to process later.
+ * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
*/
if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6605,10 +7970,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
} else {
vport->fc_rscn_id_list[rscn_cnt] = pcmd;
vport->fc_rscn_id_cnt++;
- /* If we zero, cmdiocb->context2, the calling
+ /* If we zero, cmdiocb->cmd_dmabuf, the calling
* routine will not try to free it.
*/
- cmdiocb->context2 = NULL;
+ cmdiocb->cmd_dmabuf = NULL;
}
/* Deferred RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -6645,10 +8010,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Indicate we are done walking fc_rscn_id_list on this vport */
vport->fc_rscn_flush = 0;
/*
- * If we zero, cmdiocb->context2, the calling routine will
+ * If we zero, cmdiocb->cmd_dmabuf, the calling routine will
* not try to free it.
*/
- cmdiocb->context2 = NULL;
+ cmdiocb->cmd_dmabuf = NULL;
lpfc_set_disctmo(vport);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
@@ -6700,8 +8065,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
vport->num_disc_nodes = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer. Need to
* know how many gidfts were issued. If none, then just
* flush the RSCN. Otherwise, the outstanding requests
@@ -6719,13 +8083,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
} else {
/* Nameserver login in question. Revalidate. */
if (ndlp) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_PLOGI_ISSUE);
- if (!ndlp) {
- lpfc_els_flush_rscn(vport);
- return 0;
- }
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
} else {
ndlp = lpfc_nlp_init(vport, NameServer_DID);
if (!ndlp) {
@@ -6778,9 +8137,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
- IOCB_t *icmd = &cmdiocb->iocb;
+ union lpfc_wqe128 *wqe = &cmdiocb->wqe;
struct serv_parm *sp;
LPFC_MBOXQ_t *mbox;
uint32_t cmd, did;
@@ -6788,6 +8147,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t fc_flag = 0;
uint32_t port_state = 0;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
cmd = *lp++;
sp = (struct serv_parm *) lp;
@@ -6797,11 +8159,11 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* We should never receive a FLOGI in loop mode, ignore it */
- did = icmd->un.elsreq64.remoteID;
+ did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0113 An FLOGI ELS command x%x was "
"received from DID x%x in Loop Mode\n",
cmd, did);
@@ -6839,6 +8201,12 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
+ /* External loopback plug insertion detected */
+ phba->link_flag |= LS_EXTERNAL_LOOPBACK;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
+ "1119 External Loopback plug detected\n");
+
/* abort the flogi coming back to ourselves
* due to external loopback on the port.
*/
@@ -6893,9 +8261,10 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Defer ACC response until AFTER we issue a FLOGI */
if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
- phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
- phba->defer_flogi_acc_ox_id =
- cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com);
+ phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
+ &wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
@@ -6944,7 +8313,7 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
RNID *rn;
struct ls_rjt stat;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
lp++;
@@ -6985,7 +8354,7 @@ lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
uint8_t *pcmd;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+ pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
/* skip over first word of echo command to find echo data */
pcmd += sizeof(uint32_t);
@@ -7054,23 +8423,25 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*
* This routine is the completion callback function for the MBX_READ_LNK_STAT
* mailbox command. This callback function is to actually send the Accept
- * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
* collects the link statistics from the completion of the MBX_READ_LNK_STAT
- * mailbox command, constructs the RPS response with the link statistics
+ * mailbox command, constructs the RLS response with the link statistics
* collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
- * response to the RPS.
+ * response to the RLS.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the RLS Accept Response
+ * ELS IOCB command.
*
**/
static void
lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ int rc = 0;
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct RLS_RSP *rls_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
@@ -7078,10 +8449,11 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t oxid;
uint16_t rxid;
uint32_t cmdsize;
+ u32 ulp_context;
mb = &pmb->u.mb;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
pmb->ctx_buf = NULL;
@@ -7105,11 +8477,19 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
+ }
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
rls_rsp = (struct RLS_RSP *)pcmd;
@@ -7125,108 +8505,22 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
"2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
-}
-
-/**
- * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
- * @phba: pointer to lpfc hba data structure.
- * @pmb: pointer to the driver internal queue element for mailbox command.
- *
- * This routine is the completion callback function for the MBX_READ_LNK_STAT
- * mailbox command. This callback function is to actually send the Accept
- * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
- * collects the link statistics from the completion of the MBX_READ_LNK_STAT
- * mailbox command, constructs the RPS response with the link statistics
- * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
- * response to the RPS.
- *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
- *
- **/
-static void
-lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
-{
- MAILBOX_t *mb;
- IOCB_t *icmd;
- RPS_RSP *rps_rsp;
- uint8_t *pcmd;
- struct lpfc_iocbq *elsiocb;
- struct lpfc_nodelist *ndlp;
- uint16_t status;
- uint16_t oxid;
- uint16_t rxid;
- uint32_t cmdsize;
-
- mb = &pmb->u.mb;
-
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
- oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
- pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
-
- if (mb->mbxStatus) {
- mempool_free(pmb, phba->mbox_mem_pool);
return;
}
- cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
- mempool_free(pmb, phba->mbox_mem_pool);
- elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_ACC);
-
- /* Decrement the ndlp reference count from previous mbox command */
- lpfc_nlp_put(ndlp);
-
- if (!elsiocb)
- return;
-
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
-
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof(uint32_t); /* Skip past command */
- rps_rsp = (RPS_RSP *)pcmd;
-
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
- status = 0x10;
- else
- status = 0x8;
- if (phba->pport->fc_flag & FC_FABRIC)
- status |= 0x4;
-
- rps_rsp->rsvd1 = 0;
- rps_rsp->portStatus = cpu_to_be16(status);
- rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
- rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
- rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
- rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
- rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
- rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
- /* Xmit ELS RPS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
- "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
return;
}
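Worth noting: the RLS path squeezes both 16-bit exchange ids through the single mailbox context pointer, packing ox_id into the high half and rx_id into the low half of an unsigned long, as seen in the ctx_buf assignments above and below. A standalone sketch of that packing (pack_ids() and unpack_ids() are illustrative names):

#include <stdio.h>
#include <stdint.h>

static void *pack_ids(uint16_t ox_id, uint16_t rx_id)
{
	return (void *)(unsigned long)(((unsigned long)ox_id << 16) | rx_id);
}

static void unpack_ids(void *ctx, uint16_t *ox_id, uint16_t *rx_id)
{
	unsigned long v = (unsigned long)ctx;

	*rx_id = (uint16_t)(v & 0xffff);
	*ox_id = (uint16_t)((v >> 16) & 0xffff);
}

int main(void)
{
	uint16_t ox, rx;

	unpack_ids(pack_ids(0x1234, 0xabcd), &ox, &rx);
	printf("ox_id x%x rx_id x%x\n", (unsigned)ox, (unsigned)rx);
	return 0;
}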
@@ -7236,7 +8530,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
*
- * This routine processes Read Port Status (RPL) IOCB received as an
+ * This routine processes Read Link Status (RLS) IOCB received as an
* ELS unsolicited event. It first checks the remote port state. If the
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -7255,19 +8549,22 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
+ u32 ctx = get_job_ulpcontext(phba, cmdiocb);
+ u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
+ /* reject the unsolicited RLS request and done with it */
goto reject_out;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
+ (ox_id << 16 | ctx));
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto node_err;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -7278,6 +8575,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* command.
*/
lpfc_nlp_put(ndlp);
+node_err:
mempool_free(mbox, phba->mbox_mem_pool);
}
reject_out:
@@ -7303,10 +8601,10 @@ reject_out:
* response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
* Value (RTV) unsolicited IOCB event.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the RTV Accept Response
+ * ELS IOCB command.
*
* Return codes
 * 0 - Successfully processed rtv iocb (currently always returns 0)
@@ -7315,17 +8613,20 @@ static int
lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
+ int rc = 0;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_hba *phba = vport->phba;
struct ls_rjt stat;
struct RTV_RSP *rtv_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
uint32_t cmdsize;
-
+ u32 ulp_context;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
+ /* reject the unsolicited RTV request and done with it */
goto reject_out;
cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
@@ -7336,13 +8637,23 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
/* use the command's xri in the response */
- elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
+ }
rtv_rsp = (struct RTV_RSP *)pcmd;
@@ -7358,92 +8669,24 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
"Data: x%x x%x x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi,
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
- return 0;
-
-reject_out:
- /* issue rejection response */
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
- return 0;
-}
-
-/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
- * @vport: pointer to a host virtual N_Port data structure.
- * @cmdiocb: pointer to lpfc command iocb data structure.
- * @ndlp: pointer to a node-list data structure.
- *
- * This routine processes Read Port Status (RPS) IOCB received as an
- * ELS unsolicited event. It first checks the remote port state. If the
- * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
- * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
- * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
- * for reading the HBA link statistics. It is for the callback function,
- * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
- * to actually sending out RPS Accept (ACC) response.
- *
- * Return codes
- * 0 - Successfully processed rps iocb (currently always return 0)
- **/
-static int
-lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
- struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t *lp;
- uint8_t flag;
- LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *pcmd;
- RPS *rps;
- struct ls_rjt stat;
-
- if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
- goto reject_out;
+ return 0;
+ }
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- flag = (be32_to_cpu(*lp++) & 0xf);
- rps = (RPS *) lp;
-
- if ((flag == 0) ||
- ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
- ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
- sizeof(struct lpfc_name)) == 0))) {
-
- printk("Fix me....\n");
- dump_stack();
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
- if (mbox) {
- lpfc_read_lnk_stat(phba, mbox);
- mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->vport = vport;
- mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
- if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
- != MBX_NOT_FINISHED)
- /* Mbox completion will send ELS Response */
- return 0;
- /* Decrement reference count used for the failed mbox
- * command.
- */
- lpfc_nlp_put(ndlp);
- mempool_free(mbox, phba->mbox_mem_pool);
- }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
}
+ return 0;
reject_out:
/* issue rejection response */
@@ -7455,7 +8698,7 @@ reject_out:
return 0;
}
-/* lpfc_issue_els_rrq - Process an unsolicited rps iocb
+/* lpfc_issue_els_rrq - Issue an ELS RRQ command
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @did: DID of the target.
@@ -7479,10 +8722,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint16_t cmdsize;
int ret;
-
- if (ndlp != rrq->ndlp)
- ndlp = rrq->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return 1;
/* If ndlp is not NULL, we will bump the reference count on it */
@@ -7492,7 +8732,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For RRQ request, remainder of payload is Exchange IDs */
*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
@@ -7510,14 +8750,22 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue RRQ: did:x%x",
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
- ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
+
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
+ goto io_err;
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (ret == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ lpfc_nlp_put(ndlp);
+ goto io_err;
}
return 0;
+
+ io_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -7557,10 +8805,10 @@ lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
* It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPL Accept Response ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the RPL Accept Response
+ * ELS command.
*
* Return code
* 0 - Successfully issued ACC RPL ELS command
@@ -7570,11 +8818,14 @@ static int
lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd, *oldcmd;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
+ u32 ulp_context;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
@@ -7582,12 +8833,21 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
+ }
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint16_t);
*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
@@ -7606,16 +8866,24 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
"0120 Xmit ELS RPL ACC response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
+
return 0;
}
@@ -7659,7 +8927,7 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
rpl = (RPL *) (lp + 1);
maxsize = be32_to_cpu(rpl->maxsize);
@@ -7707,13 +8975,11 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
FARP *fp;
uint32_t cnt, did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
lp++;
@@ -7780,13 +9046,11 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
uint32_t did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
+ pcmd = cmdiocb->cmd_dmabuf;
+ lp = (uint32_t *)pcmd->virt;
lp++;
/* FARP-RSP received from DID <did> */
@@ -7826,7 +9090,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
FAN *fp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
- lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
fp = (FAN *) ++lp;
/* FAN received; Fan does not have a reply sequence */
if ((vport == phba->pport) &&
@@ -7854,8 +9118,140 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully processed edc iocb (currently always returns 0)
+ **/
+static int
+lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
+ uint8_t *payload;
+ uint32_t *ptr, dtag;
+ const char *dtag_nm;
+ int desc_cnt = 0, bytes_remain;
+ struct fc_diag_lnkflt_desc *plnkflt;
+
+ payload = cmdiocb->cmd_dmabuf->virt;
+
+ edc_req = (struct fc_els_edc *)payload;
+ bytes_remain = be32_to_cpu(edc_req->desc_len);
+
+ ptr = (uint32_t *)payload;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
+ "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
+ bytes_remain, be32_to_cpu(*ptr),
+ be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
+
+ /* No signal support unless there is a congestion descriptor */
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = 0;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+
+ if (bytes_remain <= 0)
+ goto out;
+
+ tlv = edc_req->desc;
+
+ /*
+ * cycle through EDC diagnostic descriptors to find the
+ * congestion signaling capability descriptor
+ */
+ while (bytes_remain) {
+ if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
+ "6464 Truncated TLV hdr on "
+ "Diagnostic descriptor[%d]\n",
+ desc_cnt);
+ goto out;
+ }
+
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_FAULT_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_lnkflt_desc)) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
+ "6465 Truncated Link Fault Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_lnkflt_desc));
+ goto out;
+ }
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
+ "4626 Link Fault Desc Data: x%08x len x%x "
+ "da x%x dd x%x interval x%x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
+ break;
+ case ELS_DTAG_CG_SIGNAL_CAP:
+ if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+ sizeof(struct fc_diag_cg_sig_desc)) {
+ lpfc_printf_log(
+ phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6466 Truncated cgn signal Diagnostic "
+ "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+ desc_cnt, bytes_remain,
+ FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+ sizeof(struct fc_diag_cg_sig_desc));
+ goto out;
+ }
+
+ phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+
+ /* We start negotiation with lpfc_fabric_cgn_frequency.
+ * When we process the EDC, we will settle on the
+ * higher frequency.
+ */
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+ lpfc_least_capable_settings(
+ phba, (struct fc_diag_cg_sig_desc *)tlv);
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
+ "6467 unknown Diagnostic "
+ "Descriptor[%d]: tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+ }
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ desc_cnt++;
+ }
+out:
+ /* Need to send back an ACC */
+ lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
+
+ lpfc_config_cgn_signal(phba);
+ return 0;
+}
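The descriptor loop above is a standard TLV walk: verify the remaining bytes cover at least a header, read the big-endian tag, dispatch on it, then advance by the full descriptor size. A minimal userspace version under simplified assumptions (the 8-byte header and the tag value are illustrative, not the fc_els layout):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define TLV_HDR_SZ 8u        /* assumed: 4-byte tag + 4-byte length */

static uint32_t get_be32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return ntohl(v);
}

static void walk(const uint8_t *buf, size_t remain)
{
	int idx = 0;

	while (remain) {
		size_t full;

		if (remain < TLV_HDR_SZ) {
			printf("desc[%d]: truncated header\n", idx);
			return;
		}
		full = TLV_HDR_SZ + get_be32(buf + 4);
		if (remain < full) {
			printf("desc[%d]: truncated payload\n", idx);
			return;
		}
		printf("desc[%d]: tag x%x len %u\n", idx,
		       (unsigned)get_be32(buf), (unsigned)get_be32(buf + 4));
		buf += full;
		remain -= full;
		idx++;
	}
}

int main(void)
{
	/* one descriptor: illustrative tag 0x1, 4 payload bytes */
	const uint8_t buf[] = { 0, 0, 0, 1, 0, 0, 0, 4,
				0xde, 0xad, 0xbe, 0xef };

	walk(buf, sizeof(buf));
	return 0;
}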
+
+/**
 * lpfc_els_timeout - Handler function for the els timer
- * @ptr: holder for the timer function associated data.
+ * @t: timer context used to obtain the vport.
*
* This routine is invoked by the ELS timer after timeout. It posts the ELS
* timer timeout event by setting the WORKER_ELS_TMO bit to the work port
@@ -7904,6 +9300,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
LIST_HEAD(abort_list);
+ u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
timeout = (uint32_t)(phba->fc_ratov << 1);
@@ -7912,31 +9309,35 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (unlikely(!pring))
return;
- if ((phba->pport->load_flag & FC_UNLOADING))
+ if (phba->pport->load_flag & FC_UNLOADING)
return;
+
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
- if ((phba->pport->load_flag & FC_UNLOADING)) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
- return;
- }
-
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
+ ulp_context = get_job_ulpcontext(phba, piocb);
+ did = get_job_els_rsp64_did(phba, piocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(piocb);
+ } else {
+ cmd = &piocb->iocb;
+ iotag = cmd->ulpIoTag;
+ }
- if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
- piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
+ ulp_command == CMD_ABORT_XRI_CX ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN)
continue;
if (piocb->vport != vport)
continue;
- pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ pcmd = piocb->cmd_dmabuf;
if (pcmd)
els_command = *(uint32_t *) (pcmd->virt);
@@ -7954,12 +9355,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
remote_ID = 0xffffffff;
- if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
- remote_ID = cmd->un.elsreq64.remoteID;
- else {
+ if (ulp_command != CMD_GEN_REQUEST64_CR) {
+ remote_ID = did;
+ } else {
struct lpfc_nodelist *ndlp;
- ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ ndlp = __lpfc_findnode_rpi(vport, ulp_context);
+ if (ndlp)
remote_ID = ndlp->nlp_DID;
}
list_add_tail(&piocb->dlist, &abort_list);
@@ -7969,17 +9370,20 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- cmd = &piocb->iocb;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
- remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+ remote_ID, ulp_command, iotag);
+
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
if (!list_empty(&pring->txcmplq))
if (!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&vport->els_tmofunc,
@@ -8013,7 +9417,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
+ u32 ulp_command;
unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
@@ -8038,20 +9442,20 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
- cmd = &piocb->iocb;
- if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ulp_command = get_job_cmnd(phba, piocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
list_add_tail(&piocb->dlist, &abort_list);
/* If the link is down when flushing ELS commands
@@ -8062,9 +9466,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* and avoid any retry logic.
*/
if (phba->link_state == LPFC_LINK_DOWN)
- piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
}
- if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
list_add_tail(&piocb->dlist, &abort_list);
}
@@ -8076,11 +9480,14 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
if (!list_empty(&abort_list))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
@@ -8092,17 +9499,17 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* just queue them up for lpfc_sli_cancel_iocbs
*/
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
- }
/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
+ if (ulp_command == CMD_QUE_RING_BUF_CN ||
+ ulp_command == CMD_QUE_RING_BUF64_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CX)
continue;
if (piocb->vport != vport)
@@ -8116,7 +9523,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (vport == phba->pport) {
list_for_each_entry_safe(piocb, tmp_iocb,
&phba->fabric_iocb_list, list) {
- cmd = &piocb->iocb;
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
@@ -8184,22 +9590,25 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
struct ls_rjt stat;
struct lpfc_nodelist *ndlp;
uint32_t *pcmd;
+ u32 ulp_status, ulp_word4;
- ndlp = cmdiocbp->context1;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = cmdiocbp->ndlp;
+ if (!ndlp)
return;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ ulp_word4 = get_job_word4(phba, rspiocbp);
+
+ if (ulp_status == IOSTAT_LS_RJT) {
lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- cmdiocbp->context2)->virt);
+ pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
- stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
fc_host_post_vendor_event(shost,
@@ -8209,10 +9618,10 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
LPFC_NL_VENDOR_ID);
return;
}
- if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
- (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ if (ulp_status == IOSTAT_NPORT_BSY ||
+ ulp_status == IOSTAT_FABRIC_BSY) {
fabric_event.event_type = FC_REG_FABRIC_EVENT;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ if (ulp_status == IOSTAT_NPORT_BSY)
fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
else
fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
@@ -8234,7 +9643,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
* lpfc_send_els_event - Posts unsolicited els event
* @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node object.
- * @cmd: ELS command code.
+ * @payload: Pointer to the ELS command payload.
*
* This function posts an event when there is an incoming
* unsolicited ELS command.
@@ -8251,7 +9660,7 @@ lpfc_send_els_event(struct lpfc_vport *vport,
if (*payload == ELS_CMD_LOGO) {
logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
if (!logo_data) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0148 Failed to allocate memory "
"for LOGO event\n");
return;
@@ -8261,7 +9670,7 @@ lpfc_send_els_event(struct lpfc_vport *vport,
els_data = kmalloc(sizeof(struct lpfc_els_event_header),
GFP_KERNEL);
if (!els_data) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0149 Failed to allocate memory "
"for ELS event\n");
return;
@@ -8310,6 +9719,403 @@ lpfc_send_els_event(struct lpfc_vport *vport,
}
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
+ FC_FPIN_LI_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
+ FC_FPIN_DELI_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
+ FC_FPIN_CONGN_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
+ fc_fpin_congn_severity_types,
+ FC_FPIN_CONGN_SEVERITY_INIT);
+
+
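Each DECLARE_ENUM2STR_LOOKUP() use above generates a small name-lookup routine (lpfc_get_fpin_li_event_nm() and friends) from a value/name table such as FC_FPIN_LI_EVT_TYPES_INIT. A hand-expanded sketch of the likely generated shape, with all names here hypothetical:

	struct sketch_nm_entry {
		u32		value;
		const char	*name;
	};

	/* Linear-scan the table; fall back to "unknown" for unlisted values */
	static const char *sketch_get_event_nm(const struct sketch_nm_entry *tbl,
					       int cnt, u32 value)
	{
		int i;

		for (i = 0; i < cnt; i++)
			if (tbl[i].value == value)
				return tbl[i].name;
		return "unknown";
	}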
+/**
+ * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
+ * @phba: Pointer to phba object.
+ * @wwnlist: Pointer to list of WWPNs in FPIN payload
+ * @cnt: count of WWPNs in FPIN payload
+ *
+ * This routine is called by the LI and PC descriptor handlers.
+ * Output is limited to 6 log messages, with up to 6 WWPNs per message.
+ */
+static void
+lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
+{
+ char buf[LPFC_FPIN_WWPN_LINE_SZ];
+ __be64 wwn;
+ u64 wwpn;
+ int i, len;
+ int line = 0;
+ int wcnt = 0;
+ bool endit = false;
+
+ len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
+ for (i = 0; i < cnt; i++) {
+ /* Are we on the last WWPN */
+ if (i == (cnt - 1))
+ endit = true;
+
+ /* Extract the next WWPN from the payload */
+ wwn = *wwnlist++;
+ wwpn = be64_to_cpu(wwn);
+ len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
+ " %016llx", wwpn);
+
+ /* Log a message if we are on the last WWPN
+ * or if we hit the max allowed per message.
+ */
+ wcnt++;
+ if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
+ buf[len] = 0;
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4686 %s\n", buf);
+
+ /* Check if we reached the last WWPN */
+ if (endit)
+ return;
+
+ /* Limit the number of log messages displayed per FPIN */
+ line++;
+ if (line == LPFC_FPIN_WWPN_NUM_LINE) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4687 %d WWPNs Truncated\n",
+ cnt - i - 1);
+ return;
+ }
+
+ /* Start over with next log message */
+ wcnt = 0;
+ len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
+ "Additional WWPNs:");
+ }
+ }
+}
+
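lpfc_display_fpin_wwpn() above is an instance of a reusable accumulate-and-flush idiom: build up a line with scnprintf(), emit it once enough items have been appended, and cap the total number of lines. A generic sketch with the limits hard-coded for illustration:

	/* Sketch: log @cnt 64-bit values, 6 per line, at most 6 lines */
	static void sketch_log_chunked(struct lpfc_hba *phba,
				       const u64 *vals, u32 cnt)
	{
		char buf[128];
		u32 i, wcnt = 0, lines = 0;
		int len;

		len = scnprintf(buf, sizeof(buf), "Values:");
		for (i = 0; i < cnt; i++) {
			len += scnprintf(buf + len, sizeof(buf) - len,
					 " %016llx", vals[i]);
			if (++wcnt < 6 && i != cnt - 1)
				continue;
			lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%s\n", buf);
			if (i == cnt - 1)
				return;
			if (++lines == 6) {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
						"%u values truncated\n",
						cnt - i - 1);
				return;
			}
			wcnt = 0;
			len = scnprintf(buf, sizeof(buf), "More values:");
		}
	}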
+/**
+ * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
+ * @phba: Pointer to phba object.
+ * @tlv: Pointer to the Link Integrity Notification Descriptor.
+ *
+ * This function processes a Link Integrity FPIN event by logging a message.
+ **/
+static void
+lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
+ const char *li_evt_str;
+ u32 li_evt, cnt;
+
+ li_evt = be16_to_cpu(li->event_type);
+ li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
+ cnt = be32_to_cpu(li->pname_count);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4680 FPIN Link Integrity %s (x%x) "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "Duration %d mSecs Count %d Port Cnt %d\n",
+ li_evt_str, li_evt,
+ be64_to_cpu(li->detecting_wwpn),
+ be64_to_cpu(li->attached_wwpn),
+ be32_to_cpu(li->event_threshold),
+ be32_to_cpu(li->event_count), cnt);
+
+ lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
+}
+
+/**
+ * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
+ * @phba: Pointer to hba object.
+ * @tlv: Pointer to the Delivery Notification Descriptor TLV
+ *
+ * This function processes a Delivery FPIN event by logging a message.
+ **/
+static void
+lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
+ const char *del_rsn_str;
+ u32 del_rsn;
+ __be32 *frame;
+
+ del_rsn = be16_to_cpu(del->deli_reason_code);
+ del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
+
+ /* Skip over desc_tag/desc_len header to payload */
+ frame = (__be32 *)(del + 1);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "4681 FPIN Delivery %s (x%x) "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "DiscHdr0 x%08x "
+ "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
+ "DiscHdr4 x%08x DiscHdr5 x%08x\n",
+ del_rsn_str, del_rsn,
+ be64_to_cpu(del->detecting_wwpn),
+ be64_to_cpu(del->attached_wwpn),
+ be32_to_cpu(frame[0]),
+ be32_to_cpu(frame[1]),
+ be32_to_cpu(frame[2]),
+ be32_to_cpu(frame[3]),
+ be32_to_cpu(frame[4]),
+ be32_to_cpu(frame[5]));
+}
+
+/**
+ * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event.
+ * @phba: Pointer to hba object.
+ * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
+ *
+ * This function processes a Peer Congestion FPIN event by logging a message.
+ **/
+static void
+lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
+ const char *pc_evt_str;
+ u32 pc_evt, cnt;
+
+ pc_evt = be16_to_cpu(pc->event_type);
+ pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
+ cnt = be32_to_cpu(pc->pname_count);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
+ "4684 FPIN Peer Congestion %s (x%x) "
+ "Duration %d mSecs "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "Impacted Port Cnt %d\n",
+ pc_evt_str, pc_evt,
+ be32_to_cpu(pc->event_period),
+ be64_to_cpu(pc->detecting_wwpn),
+ be64_to_cpu(pc->attached_wwpn),
+ cnt);
+
+ lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
+}
+
+/**
+ * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
+ * @phba: Pointer to hba object.
+ * @tlv: Pointer to the Congestion Notification Descriptor TLV
+ *
+ * This function processes an FPIN Congestion Notification. The notification
+ * could be an Alarm or a Warning. This routine feeds that data into the
+ * driver's running congestion algorithm. It also processes the FPIN by
+ * logging a message. It returns 1 if the message should be delivered to
+ * the upper layer, or 0 if it should not.
+ **/
+static int
+lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct lpfc_cgn_info *cp;
+ struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
+ const char *cgn_evt_str;
+ u32 cgn_evt;
+ const char *cgn_sev_str;
+ u32 cgn_sev;
+ uint16_t value;
+ u32 crc;
+ bool nm_log = false;
+ int rc = 1;
+
+ cgn_evt = be16_to_cpu(cgn->event_type);
+ cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
+ cgn_sev = cgn->severity;
+ cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
+
+ /* The driver only takes action on a Credit Stall or Oversubscription
+ * event type to engage the IO algorithm. The driver prints an
+ * unmaskable message only for Lost Credit and Credit Stall.
+ * TODO: Host actions on Clear, Lost Credit, and device-specific
+ * event types still need to be defined.
+ */
+ switch (cgn_evt) {
+ case FPIN_CONGN_LOST_CREDIT:
+ nm_log = true;
+ break;
+ case FPIN_CONGN_CREDIT_STALL:
+ nm_log = true;
+ fallthrough;
+ case FPIN_CONGN_OVERSUBSCRIPTION:
+ if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
+ nm_log = false;
+ switch (cgn_sev) {
+ case FPIN_CONGN_SEVERITY_ERROR:
+ /* Take action here for an Alarm event */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
+ /* Keep track of the alarm cnt for SYNC_WQE */
+ atomic_inc(&phba->cgn_sync_alarm_cnt);
+ }
+ /* Track alarm cnt for cgn_info regardless
+ * of whether CMF is configured for Signals
+ * or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_alarm_cnt);
+ goto cleanup;
+ }
+ break;
+ case FPIN_CONGN_SEVERITY_WARNING:
+ /* Take action here for a Warning event */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
+ /* Keep track of the warning cnt for SYNC_WQE */
+ atomic_inc(&phba->cgn_sync_warn_cnt);
+ }
+ /* Track warning cnt and freq for cgn_info
+ * regardless of whether CMF is configured for
+ * Signals or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_warn_cnt);
+cleanup:
+ /* Save frequency in ms */
+ phba->cgn_fpin_frequency =
+ be32_to_cpu(cgn->event_period);
+ value = phba->cgn_fpin_frequency;
+ if (phba->cgn_i) {
+ cp = (struct lpfc_cgn_info *)
+ phba->cgn_i->virt;
+ cp->cgn_alarm_freq =
+ cpu_to_le16(value);
+ cp->cgn_warn_freq =
+ cpu_to_le16(value);
+ crc = lpfc_cgn_calc_crc32
+ (cp,
+ LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+ }
+
+ /* Don't deliver to upper layer since
+ * driver took action on this tlv.
+ */
+ rc = 0;
+ }
+ break;
+ }
+ break;
+ }
+
+ /* Change the log level to unmaskable for the following event types. */
+ lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
+ LOG_CGN_MGMT | LOG_ELS,
+ "4683 FPIN CONGESTION %s type %s (x%x) Event "
+ "Duration %d mSecs\n",
+ cgn_sev_str, cgn_evt_str, cgn_evt,
+ be32_to_cpu(cgn->event_period));
+ return rc;
+}
+
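The cleanup label above shows a pattern used throughout the congestion code: mutate fields in the shared cgn_info block, then recompute the block's CRC (lpfc_cgn_calc_crc32() with LPFC_CGN_CRC32_SEED) so readers can validate it. A minimal generic sketch of the same update-then-reseal idea, using the stock kernel crc32_le() and a hypothetical block layout:

	#include <linux/crc32.h>

	/* Hypothetical info block with a trailing CRC over the payload */
	struct sketch_info_blk {
		__le16	warn_freq;
		__le16	alarm_freq;
		__le32	crc;		/* covers the fields above it */
	};

	static void sketch_update_freq(struct sketch_info_blk *blk, u16 freq_ms)
	{
		u32 crc;

		blk->warn_freq  = cpu_to_le16(freq_ms);
		blk->alarm_freq = cpu_to_le16(freq_ms);

		/* Reseal: recompute the CRC after every field update */
		crc = crc32_le(~0, (const u8 *)blk,
			       offsetof(struct sketch_info_blk, crc));
		blk->crc = cpu_to_le32(crc);
	}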
+void
+lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
+ struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
+ const char *dtag_nm;
+ int desc_cnt = 0, bytes_remain, cnt;
+ u32 dtag, deliver = 0;
+ int len;
+
+ /* FPINs handled only if we are in the right discovery state */
+ if (vport->port_state < LPFC_DISC_AUTH)
+ return;
+
+ /* Make sure the full FPIN header is present */
+ if (fpin_length < sizeof(struct fc_els_fpin))
+ return;
+
+ /* Sanity check descriptor length. The desc_len value does not
+ * include space for the ELS command and the desc_len fields.
+ */
+ len = be32_to_cpu(fpin->desc_len);
+ if (fpin_length < len + sizeof(struct fc_els_fpin)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4671 Bad ELS FPIN length %d: %d\n",
+ len, fpin_length);
+ return;
+ }
+
+ tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+ first_tlv = tlv;
+ bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
+ bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
+
+ /* process each descriptor separately */
+ while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+ bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ lpfc_els_rcv_fpin_li(phba, tlv);
+ deliver = 1;
+ break;
+ case ELS_DTAG_DELIVERY:
+ lpfc_els_rcv_fpin_del(phba, tlv);
+ deliver = 1;
+ break;
+ case ELS_DTAG_PEER_CONGEST:
+ lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
+ deliver = 1;
+ break;
+ case ELS_DTAG_CONGESTION:
+ deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4678 unknown FPIN descriptor[%d]: "
+ "tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+
+ /* If descriptor is bad, drop the rest of the data */
+ return;
+ }
+ lpfc_cgn_update_stat(phba, dtag);
+ cnt = be32_to_cpu(tlv->desc_len);
+
+ /* Sanity check descriptor length. The desc_len value does not
+ * include space for the desc_tag and the desc_len fields.
+ */
+ len -= (cnt + sizeof(struct fc_tlv_desc));
+ if (len < 0) {
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "4672 Bad FPIN descriptor TLV length "
+ "%d: %d %d %s\n",
+ cnt, len, fpin_length, dtag_nm);
+ return;
+ }
+
+ current_tlv = tlv;
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+
+ /* Format payload such that the FPIN delivered to the
+ * upper layer is a single descriptor FPIN.
+ */
+ if (desc_cnt)
+ memcpy(first_tlv, current_tlv,
+ (cnt + sizeof(struct fc_els_fpin)));
+
+ /* Adjust the length so that it only reflects a
+ * single descriptor FPIN.
+ */
+ fpin_length = cnt + sizeof(struct fc_els_fpin);
+ fpin->desc_len = cpu_to_be32(fpin_length);
+ fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
+
+ /* Send every descriptor individually to the upper layer */
+ if (deliver)
+ fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
+ fpin_length, (char *)fpin);
+ desc_cnt++;
+ }
+}
+
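The tail of the loop above repackages each descriptor as its own single-descriptor FPIN before handing it to fc_host_fpin_rcv(). A worked example of the length bookkeeping, with descriptor sizes assumed for illustration and relying on sizeof(struct fc_els_fpin) matching the 8-byte TLV header, which is what makes the two "+ sizeof(struct fc_els_fpin)" adjustments line up:

	/* descriptor A: desc_len = 16  ->  TLV size 16 + 8 = 24
	 * descriptor B: desc_len = 32  ->  TLV size 32 + 8 = 40
	 *
	 * Pass 1 (desc_cnt == 0): A already sits at fpin_desc[0];
	 *   fpin->desc_len = 16 + 8 = 24; length sent up = 24 + 8 = 32.
	 * Pass 2: B is memcpy'd down over fpin_desc[0] (40 bytes);
	 *   fpin->desc_len = 32 + 8 = 40; length sent up = 40 + 8 = 48.
	 */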
/**
* lpfc_els_unsol_buffer - Process an unsolicited event data buffer
* @phba: pointer to lpfc hba data structure.
@@ -8328,29 +10134,34 @@ static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
- struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
- uint32_t *payload;
- uint32_t cmd, did, newnode;
+ u32 *payload, payload_len;
+ u32 cmd = 0, did = 0, newnode, status = 0;
uint8_t rjt_exp, rjt_err = 0, init_link = 0;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
LPFC_MBOXQ_t *mbox;
- if (!vport || !(elsiocb->context2))
+ if (!vport || !elsiocb->cmd_dmabuf)
goto dropit;
newnode = 0;
- payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ payload = elsiocb->cmd_dmabuf->virt;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ payload_len = wcqe_cmpl->total_data_placed;
+ else
+ payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ status = get_job_ulpstatus(phba, elsiocb);
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
- lpfc_post_buffer(phba, pring, 1);
+ lpfc_sli3_post_buffer(phba, pring, 1);
- did = icmd->un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ did = get_job_els_rsp64_did(phba, elsiocb);
+ if (status) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV Unsol ELS: status:x%x/x%x did:x%x",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ status, get_job_word4(phba, elsiocb), did);
goto dropit;
}
@@ -8377,20 +10188,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
ndlp->nlp_type |= NLP_FABRIC;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto dropit;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- newnode = 1;
- if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
- ndlp->nlp_type |= NLP_FABRIC;
} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* This is similar to the new node path */
- ndlp = lpfc_nlp_get(ndlp);
- if (!ndlp)
- goto dropit;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
}
@@ -8401,15 +10199,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
goto dropit;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
+ goto dropit;
elsiocb->vport = vport;
if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
@@ -8418,9 +10219,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0112 ELS command x%x received from NPORT x%x "
- "Data: x%x x%x x%x x%x\n",
- cmd, did, vport->port_state, vport->fc_flag,
- vport->fc_myDID, vport->fc_prevDID);
+ "refcnt %d Data: x%x x%x x%x x%x\n",
+ cmd, did, kref_read(&ndlp->kref), vport->port_state,
+ vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
@@ -8446,7 +10247,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* the vfi. This is done in lpfc_rcv_plogi but
* that is called after the reg_vfi.
*/
- vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+ vport->fc_myDID =
+ bf_get(els_rsp64_sid,
+ &elsiocb->wqe.xmit_els_rsp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3312 Remote port assigned DID x%x "
"%x\n", vport->fc_myDID,
@@ -8471,9 +10274,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -8499,8 +10302,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+ /* retain node if our response is deferred */
+ if (phba->defer_flogi_acc_flag)
+ break;
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_LOGO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8515,6 +10322,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ if (newnode)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_PRLO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8542,7 +10352,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRSCN++;
lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_ADISC:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8620,7 +10431,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvLIRR++;
lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RLS:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8630,17 +10442,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRLS++;
lpfc_els_rcv_rls(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
- break;
- case ELS_CMD_RPS:
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
- "RCV RPS: did:x%x/ste:x%x flg:x%x",
- did, vport->port_state, ndlp->nlp_flag);
-
- phba->fc_stat.elsRcvRPS++;
- lpfc_els_rcv_rps(vport, elsiocb, ndlp);
- if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RPL:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8650,7 +10453,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRPL++;
lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RNID:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8660,7 +10464,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRNID++;
lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RTV:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8669,7 +10474,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRTV++;
lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RRQ:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8679,7 +10485,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRRQ++;
lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_ECHO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8689,7 +10496,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvECHO++;
lpfc_els_rcv_echo(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_REC:
/* receive this due to exchange closed */
@@ -8697,12 +10505,31 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_INVALID_OX_RX;
break;
case ELS_CMD_FPIN:
- /*
- * Received FPIN from fabric - pass it to the
- * transport FPIN handler.
- */
- fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
- (char *)payload);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FPIN: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
+ payload_len);
+
+ /* There are no replies, so no rjt codes */
+ break;
+ case ELS_CMD_EDC:
+ lpfc_els_rcv_edc(vport, elsiocb, ndlp);
+ break;
+ case ELS_CMD_RDF:
+ phba->fc_stat.elsRcvRDF++;
+ /* Accept RDF only from fabric controller */
+ if (did != Fabric_Cntl_DID) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1115 Received RDF from invalid DID "
+ "x%x\n", did);
+ rjt_err = LSRJT_PROTOCOL_ERR;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ goto lsrjt;
+ }
+
+ lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8714,11 +10541,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
}
@@ -8729,11 +10557,16 @@ lsrjt:
stat.un.b.lsRjtRsnCode = rjt_err;
stat.un.b.lsRjtRsnCodeExp = rjt_exp;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
- NULL);
+ NULL);
+ /* Remove the reference from above for new nodes. */
+ if (newnode)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
}
- lpfc_nlp_put(elsiocb->context1);
- elsiocb->context1 = NULL;
+ /* Release the reference on this elsiocb, not the ndlp. */
+ lpfc_nlp_put(elsiocb->ndlp);
+ elsiocb->ndlp = NULL;
/* Special case. Driver received an unsolicited command that is
* unsupportable given the driver's current state. Reset the
@@ -8759,10 +10592,11 @@ lsrjt:
dropit:
if (vport && !(vport->load_flag & FC_UNLOADING))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
- "Data: x%x x%x x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+ "Data: x%x x%x x%x x%x\n",
+ cmd, status, get_job_word4(phba, elsiocb), did);
+
phba->fc_stat.elsRcvDrop++;
}
@@ -8782,77 +10616,93 @@ void
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
{
- struct lpfc_vport *vport = phba->pport;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_vport *vport = elsiocb->vport;
+ u32 ulp_command, status, parameter, bde_count = 0;
+ IOCB_t *icmd;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
+ struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
dma_addr_t paddr;
- struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
- struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
- elsiocb->context1 = NULL;
- elsiocb->context2 = NULL;
- elsiocb->context3 = NULL;
+ elsiocb->cmd_dmabuf = NULL;
+ elsiocb->rsp_dmabuf = NULL;
+ elsiocb->bpl_dmabuf = NULL;
- if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ ulp_command = get_job_cmnd(phba, elsiocb);
+ status = get_job_ulpstatus(phba, elsiocb);
+ parameter = get_job_word4(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = elsiocb->iocb.ulpBdeCount;
+
+ if (status == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if (status == IOSTAT_LOCAL_REJECT &&
+ (parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 0);
+ lpfc_sli3_post_buffer(phba, pring, 0);
return;
}
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
- icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
- if (icmd->unsli3.rcvsli3.vpi == 0xffff)
- vport = phba->pport;
- else
- vport = lpfc_find_vport_by_vpid(phba,
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ icmd = &elsiocb->iocb;
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
+ ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else
+ vport = lpfc_find_vport_by_vpid(phba,
icmd->unsli3.rcvsli3.vpi);
+ }
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- /* type of ELS cmd is first 32bit word
- * in packet
- */
+ /* Account for SLI2 or SLI3 and later unsolicited buffering */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- elsiocb->context2 = bdeBuf1;
+ elsiocb->cmd_dmabuf = bdeBuf1;
+ if (bde_count == 2)
+ elsiocb->bpl_dmabuf = bdeBuf2;
} else {
+ icmd = &elsiocb->iocb;
paddr = getPaddr(icmd->un.cont64[0].addrHigh,
icmd->un.cont64[0].addrLow);
- elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
- paddr);
+ elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (bde_count == 2) {
+ paddr = getPaddr(icmd->un.cont64[1].addrHigh,
+ icmd->un.cont64[1].addrLow);
+ elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
- * if they are done with "mp" by setting context2 to NULL.
+ * if they are done with "mp" by setting cmd_dmabuf to NULL.
*/
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
- elsiocb->context2 = NULL;
- }
-
- /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
- if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
- icmd->ulpBdeCount == 2) {
- elsiocb->context2 = bdeBuf2;
- lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
- /* free mp if we are done with it */
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, elsiocb->context2);
- elsiocb->context2 = NULL;
- }
+ if (elsiocb->cmd_dmabuf) {
+ lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
+ elsiocb->cmd_dmabuf = NULL;
+ }
+
+ if (elsiocb->bpl_dmabuf) {
+ lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
+ elsiocb->bpl_dmabuf = NULL;
}
+
}
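Throughout this patch, direct IOCB field reads (iocb.ulpStatus, un.ulpWord[4], ulpBdeCount) give way to get_job_*() accessors that hide the SLI-3 IOCB vs SLI-4 WQE/WCQE layout split. The helpers live elsewhere in the driver; their shape is likely along these lines:

	/* Sketch of the accessor pattern; the real helper may differ */
	static inline u32 sketch_get_job_ulpstatus(struct lpfc_hba *phba,
						   struct lpfc_iocbq *iocbq)
	{
		/* SLI-4 completions arrive as WCQEs; SLI-3 keeps the IOCB */
		if (phba->sli_rev == LPFC_SLI_REV4)
			return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
		return iocbq->iocb.ulpStatus;
	}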
static void
@@ -8873,13 +10723,9 @@ lpfc_start_fdmi(struct lpfc_vport *vport)
return;
}
}
- if (!NLP_CHK_NODE_ACT(ndlp))
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (ndlp) {
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
- }
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
/**
@@ -8909,9 +10755,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_DISC_DELAYED) {
spin_unlock_irq(shost->host_lock);
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "3334 Delay fc port discovery for %d seconds\n",
- phba->fc_ratov);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "3334 Delay fc port discovery for %d secs\n",
+ phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
return;
@@ -8927,30 +10773,19 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0251 NameServer login: no memory\n");
return;
}
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp) {
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- lpfc_disc_start(vport);
- return;
- }
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0348 NameServer login: node freed\n");
- return;
- }
}
+
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0252 Cannot issue NameServer login\n");
return;
}
@@ -8978,7 +10813,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
@@ -8987,7 +10822,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
if (mb->mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0915 Register VPI failed : Status: x%x"
" upd bit: x%x \n", mb->mbxStatus,
mb->un.varRegVpi.upd);
@@ -9017,15 +10852,15 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
rc = lpfc_sli_issue_mbox(phba, pmb,
MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport,
- KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"2732 Failed to issue INIT_VPI"
" mailbox command\n");
} else {
lpfc_nlp_put(ndlp);
return;
}
- /* fall through */
+ fallthrough;
default:
/* Try to recover from this error */
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -9063,8 +10898,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
}
- } else
+ } else {
lpfc_do_scr_ns_plogi(phba, vport);
+ }
}
mbox_err_exit:
/* Now, we decrement the ndlp reference count held for this
@@ -9097,6 +10933,11 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_reg_vpi(vport, mbox);
mbox->vport = vport;
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto mbox_err_exit;
+ }
+
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
@@ -9106,12 +10947,12 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_nlp_put(ndlp);
mempool_free(mbox, phba->mbox_mem_pool);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0253 Register VPI: Can't send mbox\n");
goto mbox_err_exit;
}
} else {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0254 Register VPI: no memory\n");
goto mbox_err_exit;
}
@@ -9169,7 +11010,6 @@ void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
/* Cancel the all vports retry delay retry timers */
lpfc_cancel_all_vport_retry_delay_timer(phba);
@@ -9179,11 +11019,10 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- shost = lpfc_shost_from_vport(phba->pport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
return;
@@ -9203,9 +11042,11 @@ lpfc_fabric_login_reqd(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
- (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+ if (ulp_status != IOSTAT_FABRIC_RJT ||
+ ulp_word4 != RJT_LOGIN_REQUIRED)
return 0;
else
return 1;
@@ -9237,18 +11078,21 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
- struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
struct serv_parm *sp;
uint8_t fabric_param_changed;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_prevDID);
/* Since all FDISCs are being single threaded, we
* must reset the discovery timer for ALL vports
@@ -9260,9 +11104,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FDISC cmpl: status:x%x/x%x prevdid:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+ ulp_status, ulp_word4, vport->fc_prevDID);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
lpfc_retry_pport_discovery(phba);
@@ -9273,11 +11117,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
/* FDISC failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0126 FDISC failed. (x%x/x%x)\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto fdisc_failed;
}
+
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
@@ -9286,7 +11133,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -9305,13 +11152,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (np->nlp_state != NLP_STE_NPR_NODE) ||
+ if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
@@ -9334,6 +11180,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
goto out;
}
@@ -9343,16 +11190,22 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
+
+ /* The FDISC completed successfully. Move the fabric ndlp to
+ * UNMAPPED state and register with the transport.
+ */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
goto out;
+
fdisc_failed:
if (vport->fc_vport &&
(vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
/* Cancel discovery timer */
lpfc_can_disctmo(vport);
- lpfc_nlp_put(ndlp);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -9366,10 +11219,9 @@ out:
* routine to issue the IOCB, which makes sure only one outstanding fabric
* IOCB will be sent off HBA at any given time.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the FDISC ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the FDISC ELS command.
*
* Return code
* 0 - Successfully issued fdisc iocb command
@@ -9381,6 +11233,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
struct lpfc_iocbq *elsiocb;
struct serv_parm *sp;
uint8_t *pcmd;
@@ -9395,25 +11248,24 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ELS_CMD_FDISC);
if (!elsiocb) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0255 Issue FDISC: no IOCB\n");
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
-
- /*
- * SLI3 ports require a different context type value than SLI4.
- * Catch SLI3 ports here and override the prep.
- */
- if (phba->sli_rev == LPFC_SLI_REV3) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
pcmd += sizeof(uint32_t); /* CSP Word 1 */
memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
@@ -9439,22 +11291,31 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_set_disctmo(vport);
phba->fc_stat.elsXmitFDISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FDISC: did:x%x",
did, 0, 0);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
+ goto err_out;
+
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0256 Issue FDISC: Cannot send IOCB\n");
- return 1;
+ lpfc_nlp_put(ndlp);
+ goto err_out;
}
+
lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
return 0;
+
+ err_out:
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0256 Issue FDISC: Cannot send IOCB\n");
+ return 1;
}
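The err_out rework above codifies the reference discipline used across the converted issue paths: take an ndlp reference into elsiocb->ndlp before issuing, drop it yourself only if the issue fails (the completion will never run), and otherwise leave the put to the completion handler. Condensed into a sketch:

	/* Sketch of the issue-side ndlp reference discipline */
	static int sketch_issue_els(struct lpfc_vport *vport,
				    struct lpfc_nodelist *ndlp,
				    struct lpfc_iocbq *elsiocb)
	{
		elsiocb->ndlp = lpfc_nlp_get(ndlp);	/* ref for the cmpl */
		if (!elsiocb->ndlp)
			goto err_free;		/* node is being freed */

		if (lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING,
					elsiocb, 0) == IOCB_ERROR) {
			lpfc_nlp_put(ndlp);	/* cmpl will never run */
			goto err_free;
		}
		return 0;			/* cmpl handler puts the ref */

	err_free:
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return 1;
	}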
/**
@@ -9479,33 +11340,56 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ u32 ulp_status, ulp_word4, did, tmo;
- ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
- irsp = &rspiocb->iocb;
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "LOGO npiv cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+ ndlp = cmdiocb->ndlp;
- lpfc_els_free_iocb(phba, cmdiocb);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- /* Trigger the release of the ndlp after logo */
- lpfc_nlp_put(ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ did = get_job_els_rsp64_did(phba, rspiocb);
+ tmo = irsp->ulpTimeout;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO npiv cmpl: status:x%x/x%x did:x%x",
+ ulp_status, ulp_word4, did);
/* NPIV LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2928 NPIV LOGO completes to NPort x%x "
- "Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ "Data: x%x x%x x%x x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes,
+ kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
vport->fc_flag &= ~FC_FABRIC;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
}
+
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ /* Wake up lpfc_vport_delete if waiting...*/
+ if (ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
+ /* Safe to release resources now. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -9515,10 +11399,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*
* This routine issues a LOGO ELS command to an @ndlp off a @vport.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the LOGO ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
*
* Return codes
* 0 - Successfully issued logo off the @vport
@@ -9527,7 +11410,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -9539,7 +11422,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -9552,24 +11435,34 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Issue LOGO npiv did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
- spin_lock_irq(shost->host_lock);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ goto err;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto err;
}
return 0;
+
+err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(&ndlp->lock);
+ return 1;
}
/**
* lpfc_fabric_block_timeout - Handler function to the fabric block timer
- * @ptr: holder for the timer function associated data.
+ * @t: timer context used to obtain the lpfc hba.
*
* This routine is invoked by the fabric iocb block timer after
* timeout. It posts the fabric iocb block timeout event by setting the
@@ -9612,7 +11505,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
- IOCB_t *cmd;
repeat:
iocb = NULL;
@@ -9627,31 +11519,28 @@ repeat:
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched1: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmd = &iocb->iocb;
- cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
- cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- iocb->iocb_cmpl(phba, iocb, iocb);
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
+ iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ iocb->cmd_cmpl(phba, iocb, iocb);
atomic_dec(&phba->fabric_iocb_count);
goto repeat;
}
}
-
- return;
}
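lpfc_resume_fabric_iocbs() and lpfc_issue_fabric_iocb() jointly implement a one-in-flight scheduler: fabric_iocb_count gates issue, excess commands queue on fabric_iocb_list, and each completion re-arms the next queued command. A stripped-down sketch of the gate:

	/* Sketch: admit one fabric command at a time, queue the rest */
	static int sketch_fabric_submit(struct lpfc_hba *phba,
					struct lpfc_iocbq *iocb)
	{
		unsigned long flags;
		bool ready;

		spin_lock_irqsave(&phba->hbalock, flags);
		ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
			!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
		if (ready)
			atomic_inc(&phba->fabric_iocb_count); /* claim slot */
		else
			list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, flags);

		if (ready &&
		    lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0) ==
		    IOCB_ERROR)
			atomic_dec(&phba->fabric_iocb_count); /* release */
		return 0;
	}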
/**
@@ -9702,26 +11591,27 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
* @rspiocb: pointer to lpfc response iocb data structure.
*
* This routine is the callback function that is put to the fabric iocb's
- * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
- * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
* function first restores and invokes the original iocb's callback function
* and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
* fabric bound iocb from the driver internal fabric iocb list onto the wire.
**/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct ls_rjt stat;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
+ WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
- switch (rspiocb->iocb.ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP)
lpfc_block_fabric_iocbs(phba);
- }
break;
case IOSTAT_NPORT_BSY:
@@ -9730,8 +11620,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError =
- be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be =
+ cpu_to_be32(ulp_word4);
if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
lpfc_block_fabric_iocbs(phba);
@@ -9740,10 +11630,10 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
- cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
- cmdiocb->fabric_iocb_cmpl = NULL;
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+ cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
+ cmdiocb->fabric_cmd_cmpl = NULL;
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
atomic_dec(&phba->fabric_iocb_count);
if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
@@ -9794,20 +11684,20 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
atomic_inc(&phba->fabric_iocb_count);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ready) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched2: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
} else {
@@ -9827,7 +11717,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
* driver internal fabric IOCB list. The list contains fabric IOCBs to be
* issued to the ELS IOCB ring. This abort function walks the fabric IOCB
* list, removes each IOCB associated with the @vport off the list, set the
- * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
* associated with the IOCB.
**/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
@@ -9860,7 +11750,7 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
* driver internal fabric IOCB list. The list contains fabric IOCBs to be
* issued to the ELS IOCB ring. This abort function walks the fabric IOCB
* list, removes each IOCB associated with the @ndlp off the list, set the
- * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
* associated with the IOCB.
**/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
@@ -9897,7 +11787,7 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
* This routine aborts all the IOCBs currently on the driver internal
* fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
* IOCB ring. This function takes the entire IOCB list off the fabric IOCB
- * list, removes IOCBs off the list, set the status feild to
+ * list, removes IOCBs off the list, set the status field to
* IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
* the IOCB.
**/
@@ -9926,17 +11816,31 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ struct lpfc_nodelist *ndlp = NULL;
unsigned long iflag = 0;
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
- if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
+ lpfc_nlp_put(sglq_entry->ndlp);
+ ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL;
+
+ /* If the xri on the abts_els_sgl list is for the Fport
+ * node and the vport is unloading, the xri aborted wcqe
+ * likely isn't coming back. Just release the sgl.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ ndlp->nlp_DID == Fabric_DID) {
+ list_del(&sglq_entry->list);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ }
+ }
}
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
return;
}
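The locking change above drops the nested hbalock/sgl_list_lock pair in favor of taking sgl_list_lock alone, since that lock is what actually protects the two sgl lists being walked and spliced. The move-under-one-lock idiom, reduced to a sketch:

	/* Sketch: migrate matching entries between two lists guarded by
	 * the same lock; _safe iteration because entries are unlinked.
	 */
	static void sketch_release_aborted(spinlock_t *lock,
					   struct list_head *abts_list,
					   struct list_head *free_list)
	{
		struct lpfc_sglq *sglq, *next;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(sglq, next, abts_list, list) {
			if (sglq->state != SGL_XRI_ABORTED)
				continue;
			list_del(&sglq->list);
			sglq->state = SGL_FREED;
			list_add_tail(&sglq->list, free_list);
		}
		spin_unlock_irqrestore(lock, flags);
	}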
@@ -9963,8 +11867,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
pring = lpfc_phba_elsring(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) {
@@ -9974,11 +11877,15 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_els_sgl_list);
sglq_entry->state = SGL_FREED;
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_set_rrq_active(phba, ndlp,
- sglq_entry->sli4_lxritag,
- rxid, 1);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+ iflag);
+
+ if (ndlp) {
+ lpfc_set_rrq_active(phba, ndlp,
+ sglq_entry->sli4_lxritag,
+ rxid, 1);
+ lpfc_nlp_put(ndlp);
+ }
/* Check if TXQ queue needs to be serviced */
if (pring && !list_empty(&pring->txq))
@@ -9986,21 +11893,18 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
lxri = lpfc_sli4_xri_inrange(phba, xri);
- if (lxri == NO_XRI) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (lxri == NO_XRI)
return;
- }
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
sglq_entry->state = SGL_XRI_ABORTED;
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
@@ -10030,8 +11934,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
"rport in state 0x%x\n", ndlp->nlp_state);
return;
}
- lpfc_printf_log(phba, KERN_ERR,
- LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3094 Start rport recovery on shost id 0x%x "
"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
"flags 0x%x\n",
@@ -10042,10 +11945,321 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
* The rport is not responding. Remove the FCP-2 flag to prevent
* an ADISC in the follow-up recovery code.
*/
- spin_lock_irqsave(shost->host_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag |= NLP_ISSUE_LOGO;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_unreg_rpi(vport, ndlp);
}
+static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
+{
+ bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
+}
+
+static void
+lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
+{
+ u32 i;
+
+ if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
+ return;
+
+ for (i = min; i <= max; i++)
+ set_bit(i, vport->vmid_priority_range);
+}
+
+static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
+{
+ set_bit(ctcl_vmid, vport->vmid_priority_range);
+}
+
+u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
+{
+ u32 i;
+
+ i = find_first_bit(vport->vmid_priority_range,
+ LPFC_VMID_MAX_PRIORITY_RANGE);
+
+ if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
+ return 0;
+
+ clear_bit(i, vport->vmid_priority_range);
+ return i;
+}
+
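The three helpers above manage CS_CTL priority values as a bitmap in which a set bit means the value is free: lpfc_vmid_get_cs_ctl() claims the lowest free value and lpfc_vmid_put_cs_ctl() returns one. A short usage sketch; note that a return of 0 is also what you get when no bit is free, so callers must avoid handing out value 0 or must check emptiness separately:

	/* Usage sketch for the CS_CTL allocator above */
	static void sketch_cs_ctl_roundtrip(struct lpfc_vport *vport)
	{
		u32 ctl;

		lpfc_init_cs_ctl_bitmap(vport);	     /* nothing available */
		lpfc_vmid_set_cs_ctl_range(vport, 2, 10); /* free 2..10 */

		ctl = lpfc_vmid_get_cs_ctl(vport);   /* claims 2 */
		/* ... tag traffic with ctl ... */
		lpfc_vmid_put_cs_ctl(vport, ctl);    /* 2 is free again */
	}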
+#define MAX_PRIORITY_DESC 255
+
+static void
+lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct priority_range_desc *desc;
+ struct lpfc_dmabuf *prsp = NULL;
+ struct lpfc_vmid_priority_range *vmid_range = NULL;
+ u32 *data;
+ struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 *pcmd, max_desc;
+ u32 len, i;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+
+ prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ pcmd = prsp->virt;
+ data = (u32 *)pcmd;
+ if (data[0] == ELS_CMD_LS_RJT) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "3277 QFPA LS_RJT x%x x%x\n",
+ data[0], data[1]);
+ goto out;
+ }
+ if (ulp_status) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "6529 QFPA failed with status x%x x%x\n",
+ ulp_status, ulp_word4);
+ goto out;
+ }
+
+ if (!vport->qfpa_res) {
+ max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
+ vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
+ GFP_KERNEL);
+ if (!vport->qfpa_res)
+ goto out;
+ }
+
+ len = *((u32 *)(pcmd + 4));
+ len = be32_to_cpu(len);
+ memcpy(vport->qfpa_res, pcmd, len + 8);
+ len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
+
+ desc = (struct priority_range_desc *)(pcmd + 8);
+ vmid_range = vport->vmid_priority.vmid_range;
+ if (!vmid_range) {
+ vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
+ GFP_KERNEL);
+ if (!vmid_range) {
+ kfree(vport->qfpa_res);
+ goto out;
+ }
+ vport->vmid_priority.vmid_range = vmid_range;
+ }
+ vport->vmid_priority.num_descriptors = len;
+
+ for (i = 0; i < len; i++, vmid_range++, desc++) {
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
+ "6539 vmid values low=%d, high=%d, qos=%d, "
+ "local ve id=%d\n", desc->lo_range,
+ desc->hi_range, desc->qos_priority,
+ desc->local_ve_id);
+
+ vmid_range->low = desc->lo_range << 1;
+ if (desc->local_ve_id == QFPA_ODD_ONLY)
+ vmid_range->low++;
+ if (desc->qos_priority)
+ vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
+ vmid_range->qos = desc->qos_priority;
+
+ vmid_range->high = desc->hi_range << 1;
+ if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
+ (desc->local_ve_id == QFPA_EVEN_ODD))
+ vmid_range->high++;
+ }
+ lpfc_init_cs_ctl_bitmap(vport);
+ for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
+ lpfc_vmid_set_cs_ctl_range(vport,
+ vport->vmid_priority.vmid_range[i].low,
+ vport->vmid_priority.vmid_range[i].high);
+ }
+
+ vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
+ out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+}
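
The per-descriptor loop above is mostly shift arithmetic: each priority range descriptor is widened by one bit (lo_range and hi_range are doubled) and the local_ve_id field then nudges the endpoints to select odd values, even values, or both. A small sketch of just that decode, mirroring how the loop treats QFPA_ODD_ONLY and QFPA_EVEN_ODD; the struct and names here are illustrative.

#include <stdio.h>

enum ve_sel { SEL_EVEN_ONLY, SEL_ODD_ONLY, SEL_EVEN_ODD };  /* illustrative */

struct prio_desc { unsigned int lo, hi; enum ve_sel sel; };

static void decode_range(const struct prio_desc *d,
			 unsigned int *low, unsigned int *high)
{
	*low = d->lo << 1;                  /* double, as the loop does */
	if (d->sel == SEL_ODD_ONLY)
		(*low)++;                   /* first odd value in range */
	*high = d->hi << 1;
	if (d->sel == SEL_ODD_ONLY || d->sel == SEL_EVEN_ODD)
		(*high)++;                  /* include the top odd value */
}

int main(void)
{
	struct prio_desc d = { .lo = 1, .hi = 3, .sel = SEL_ODD_ONLY };
	unsigned int lo, hi;

	decode_range(&d, &lo, &hi);
	printf("%u..%u\n", lo, hi);         /* prints "3..7" */
	return 0;
}
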
+
+int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ u8 *pcmd;
+ int ret;
+
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENXIO;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
+ ndlp->nlp_DID, ELS_CMD_QFPA);
+ if (!elsiocb)
+ return -ENOMEM;
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
+
+ *((u32 *)(pcmd)) = ELS_CMD_QFPA;
+ pcmd += 4;
+
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
+
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ return -ENXIO;
+ }
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return -EIO;
+ }
+ vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
+ return 0;
+}
+
+int
+lpfc_vmid_uvem(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid, bool instantiated)
+{
+ struct lpfc_vem_id_desc *vem_id_desc;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ struct instantiated_ve_desc *inst_desc;
+ struct lpfc_vmid_context *vmid_context;
+ u8 *pcmd;
+ u32 *len;
+ int ret = 0;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENXIO;
+
+ vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
+ if (!vmid_context)
+ return -ENOMEM;
+ elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
+ ndlp, Fabric_DID, ELS_CMD_UVEM);
+ if (!elsiocb)
+ goto out;
+
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
+ "3427 Host vmid %s %d\n",
+ vmid->host_vmid, instantiated);
+ vmid_context->vmp = vmid;
+ vmid_context->nlp = ndlp;
+ vmid_context->instantiated = instantiated;
+ elsiocb->vmid_tag.vmid_context = vmid_context;
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
+
+ if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
+ memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ *((u32 *)(pcmd)) = ELS_CMD_UVEM;
+ len = (u32 *)(pcmd + 4);
+ *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
+
+ vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
+ vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
+ vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
+ memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
+ inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
+ inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
+ memcpy(inst_desc->global_vem_id, vmid->host_vmid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
+ bf_set(lpfc_instantiated_local_id, inst_desc,
+ vmid->un.cs_ctl_vmid);
+ if (instantiated) {
+ inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
+ } else {
+ inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
+ lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
+ }
+ inst_desc->word6 = cpu_to_be32(inst_desc->word6);
+
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
+
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ goto out;
+ }
+
+ ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto out;
+ }
+
+ return 0;
+ out:
+ kfree(vmid_context);
+ return -EIO;
+}
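
The payload assembled above has a fixed layout: a 4-byte ELS_CMD_UVEM word, a 4-byte big-endian length, a VEM identification descriptor at offset 8, and the (de)instantiated-VE descriptor at offset 32, which implies the first descriptor spans 24 bytes (tag, length, 16-byte UUID). A struct overlay confirming those offsets is sketched below; the field names are illustrative and the real definitions live in the lpfc headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative overlay of the UVEM payload built above; all members are
 * 4-byte aligned, so no padding lands between descriptors.
 */
struct uvem_payload {
	uint32_t cmd;                      /* ELS_CMD_UVEM, offset 0 */
	uint32_t len;                      /* big-endian size - 8, offset 4 */
	struct {
		uint32_t tag;              /* VEM_ID_DESC_TAG, offset 8 */
		uint32_t length;
		uint8_t  vem_id[16];       /* host UUID */
	} vem_id_desc;
	struct {
		uint32_t tag;              /* (DE)INSTANTIATED_VE_DESC_TAG */
		uint32_t length;
		uint8_t  global_vem_id[16];
		uint32_t word6;            /* nport id / local id fields */
	} inst_desc;
};

int main(void)
{
	/* matches the (pcmd + 32) arithmetic in lpfc_vmid_uvem() */
	printf("%zu\n", offsetof(struct uvem_payload, inst_desc));
	return 0;
}
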
+
+static void
+lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = icmdiocb->vport;
+ struct lpfc_dmabuf *prsp = NULL;
+ struct lpfc_vmid_context *vmid_context =
+ icmdiocb->vmid_tag.vmid_context;
+ struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
+ u8 *pcmd;
+ u32 *data;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
+ struct lpfc_vmid *vmid;
+
+ vmid = vmid_context->vmp;
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ ndlp = NULL;
+
+ prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+ pcmd = prsp->virt;
+ data = (u32 *)pcmd;
+ if (data[0] == ELS_CMD_LS_RJT) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
+ goto out;
+ }
+ if (ulp_status) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "4533 UVEM error status %x: %x\n",
+ ulp_status, ulp_word4);
+ goto out;
+ }
+ spin_lock(&phba->hbalock);
+ /* Set IN USE flag */
+ vport->vmid_flag |= LPFC_VMID_IN_USE;
+ phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
+ spin_unlock(&phba->hbalock);
+
+ if (vmid_context->instantiated) {
+ write_lock(&vport->vmid_lock);
+ vmid->flag |= LPFC_VMID_REGISTERED;
+ vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ }
+
+ out:
+ kfree(vmid_context);
+ lpfc_els_free_iocb(phba, icmdiocb);
+ lpfc_nlp_put(ndlp);
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dcc8999c6a68..d38ebd7281b9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -36,8 +36,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
@@ -73,36 +71,77 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
+static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
-void
-lpfc_terminate_rport_io(struct fc_rport *rport)
+static int
+lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
+{
+ if (ndlp->nlp_fc4_type ||
+ ndlp->nlp_type & NLP_FABRIC)
+ return 1;
+ return 0;
+}
+/* The source of a terminate rport I/O is either a dev_loss_tmo
+ * event or a call to fc_remove_host. While the rport should be
+ * valid during these downcalls, the transport can call twice
+ * in a single event. This routine provides some protection
+ * as the NDLP isn't really free, just released to the pool.
+ */
+static int
+lpfc_rport_invalid(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
- struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+
+ if (!rport) {
+ pr_err("**** %s: NULL rport, exit.\n", __func__);
+ return -EINVAL;
+ }
rdata = rport->dd_data;
+ if (!rdata) {
+ pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
+
ndlp = rdata->pnode;
+ if (!rdata->pnode) {
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
- printk(KERN_ERR "Cannot find remote node"
- " to terminate I/O Data x%x\n",
- rport->port_id);
- return;
+ if (!ndlp->vport) {
+ pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
+ "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
+ rport->scsi_target_id);
+ return -EINVAL;
}
+ return 0;
+}
- phba = ndlp->phba;
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport;
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
- "rport terminate: sid:x%x did:x%x flg:x%x",
- ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ if (lpfc_rport_invalid(rport))
+ return;
- if (ndlp->nlp_sid != NLP_NO_SID) {
- lpfc_sli_abort_iocb(ndlp->vport,
- &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
+ rdata = rport->dd_data;
+ ndlp = rdata->pnode;
+ vport = ndlp->vport;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ if (ndlp->nlp_sid != NLP_NO_SID)
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
/*
@@ -111,19 +150,14 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
+ struct lpfc_nodelist *ndlp;
struct lpfc_vport *vport;
- struct Scsi_Host *shost;
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
- int put_node;
- int put_rport;
unsigned long iflags;
- rdata = rport->dd_data;
- ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
+ if (!ndlp)
return;
vport = ndlp->vport;
@@ -134,22 +168,30 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
-
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
+ "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
+ "load_flag x%x refcnt %d state %d xpt x%x\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
+
+ /* Don't schedule a worker thread event if the vport is going down.
+ * The teardown process cleans up the node via lpfc_drop_node.
*/
if (vport->load_flag & FC_UNLOADING) {
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
+
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ /* clear the NLP_XPT_REGD if the node is not registered
+ * with nvme-fc
+ */
+ if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+
+ /* Remove the node reference from remote_port_add now.
+ * The driver will not call remote_port_delete.
+ */
+ lpfc_nlp_put(ndlp);
return;
}
@@ -157,42 +199,208 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "6789 rport name %llx != node port name %llx",
- rport->port_name,
- wwn_to_u64(ndlp->nlp_portname.u.wwn));
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6789 rport name %llx != node port name %llx",
+ rport->port_name,
+ wwn_to_u64(ndlp->nlp_portname.u.wwn));
evtp = &ndlp->dev_loss_evt;
if (!list_empty(&evtp->evt_listp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "6790 rport name %llx dev_loss_evt pending",
- rport->port_name);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6790 rport name %llx dev_loss_evt pending\n",
+ rport->port_name);
return;
}
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(shost->host_lock, iflags);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
+ /* If there is a PLOGI in progress, and we are in a
+ * NLP_NPR_2B_DISC state, don't turn off the flag.
*/
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ /*
+ * The backend does not expect any more calls associated with this
+ * rport. Remove the association between rport and ndlp.
+ */
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ ndlp->rport = NULL;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%x load_flag x%x refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* Node is in dev loss. No further transaction. */
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
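
The worker-thread branch above follows the standard deferred-work rule: pin the node with lpfc_nlp_get() before queueing the event, and let the work-list consumer drop that reference once the event is handled. A generic sketch of the discipline, with invented types standing in for the ndlp kref and the work event:

#include <stdio.h>

struct node { int refs; };                 /* stand-in for the ndlp kref */
struct evt  { struct node *arg; };

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n)         { n->refs--; }

/* producer: pin the node for the lifetime of the queued event */
static void queue_dev_loss(struct evt *e, struct node *n)
{
	e->arg = node_get(n);
}

/* consumer: do the handler work, then drop the pinned reference */
static void handle_dev_loss(struct evt *e)
{
	node_put(e->arg);
}

int main(void)
{
	struct node n = { .refs = 1 };
	struct evt e;

	queue_dev_loss(&e, &n);            /* refs == 2 while queued */
	handle_dev_loss(&e);               /* back to 1; node outlives work */
	printf("%d\n", n.refs);
	return 0;
}
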
/**
+ * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
+ * @vport: Pointer to vport context object.
+ *
+ * This function checks for idle VMID entries related to a particular vport. If
+ * found unused/idle, they are freed accordingly.
+ **/
+static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
+{
+ u16 keep;
+ u32 difftime = 0, r, bucket;
+ u64 *lta;
+ int cpu;
+ struct lpfc_vmid *vmp;
+
+ write_lock(&vport->vmid_lock);
+
+ if (!vport->cur_vmid_cnt)
+ goto out;
+
+ /* iterate through the table */
+ hash_for_each(vport->hash_table, bucket, vmp, hnode) {
+ keep = 0;
+ if (vmp->flag & LPFC_VMID_REGISTERED) {
+ /* check if the particular VMID is in use */
+ /* for all available per cpu variable */
+ for_each_possible_cpu(cpu) {
+ /* if last access time is less than timeout */
+ lta = per_cpu_ptr(vmp->last_io_time, cpu);
+ if (!lta)
+ continue;
+ difftime = (jiffies) - (*lta);
+ if ((vport->vmid_inactivity_timeout *
+ JIFFIES_PER_HR) > difftime) {
+ keep = 1;
+ break;
+ }
+ }
+
+ /* if none of the cpus have been used by the vm, */
+ /* remove the entry if already registered */
+ if (!keep) {
+ /* mark the entry for deregistration */
+ vmp->flag = LPFC_VMID_DE_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ if (vport->vmid_priority_tagging)
+ r = lpfc_vmid_uvem(vport, vmp, false);
+ else
+ r = lpfc_vmid_cmd(vport,
+ SLI_CTAS_DAPP_IDENT,
+ vmp);
+
+ /* decrement number of active vms and mark */
+ /* entry in slot as free */
+ write_lock(&vport->vmid_lock);
+ if (!r) {
+ struct lpfc_vmid *ht = vmp;
+
+ vport->cur_vmid_cnt--;
+ ht->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(ht->last_io_time);
+ ht->last_io_time = NULL;
+ hash_del(&ht->hnode);
+ }
+ }
+ }
+ }
+ out:
+ write_unlock(&vport->vmid_lock);
+}
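
The inactivity test above keeps an entry alive if any CPU recorded I/O for the VMID inside the timeout window, since last_io_time is a per-CPU counter and a quiet CPU says nothing about the others. A compact sketch of that keep/expire decision, with a fixed CPU count and plain counters standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* keep the VMID if any CPU saw I/O within the inactivity window */
static bool vmid_still_active(const unsigned long last_io[NR_CPUS],
			      unsigned long now, unsigned long window)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (now - last_io[cpu] < window)
			return true;
	}
	return false;
}

int main(void)
{
	unsigned long last[NR_CPUS] = { 100, 250, 90, 10 };

	printf("%d\n", vmid_still_active(last, 300, 100)); /* 1: cpu1 recent */
	printf("%d\n", vmid_still_active(last, 900, 100)); /* 0: all stale  */
	return 0;
}
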
+
+/**
+ * lpfc_check_inactive_vmid - VMID inactivity checker
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the worker thread to determine if an entry in
+ * the VMID table can be released since there was no I/O activity seen from that
+ * particular VM for the specified time. When this happens, the entry in the
+ * table is released and also the resources on the switch cleared.
+ **/
+
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ lpfc_check_inactive_vmid_one(vport);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to remote node object.
+ *
+ * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of
+ * node during dev_loss_tmo processing, then this function restores the nlp_put
+ * kref decrement from lpfc_dev_loss_tmo_handler.
+ **/
+void
+lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
+ ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_get(ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
+ "8438 Devloss timeout reversed on DID x%x "
+ "refcnt %d ndlp %p flag x%x "
+ "port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
+ ndlp->nlp_flag, vport->port_state);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+}
+
+/**
* lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
* @ndlp: Pointer to remote node object.
*
@@ -205,70 +413,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct Scsi_Host *shost;
uint8_t *name;
- int put_node;
int warn_on = 0;
int fcf_inuse = 0;
+ bool recovering = false;
+ struct fc_vport *fc_vport = NULL;
unsigned long iflags;
- rport = ndlp->rport;
vport = ndlp->vport;
- shost = lpfc_shost_from_vport(vport);
+ name = (uint8_t *)&ndlp->nlp_portname;
+ phba = vport->phba;
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(shost->host_lock, iflags);
-
- if (!rport)
- return fcf_inuse;
-
- name = (uint8_t *) &ndlp->nlp_portname;
- phba = vport->phba;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
fcf_inuse = lpfc_fcf_inuse(phba);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport devlosstmo:did:x%x type:x%x id:x%x",
- ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
-
- /*
- * lpfc_nlp_remove if reached with dangling rport drops the
- * reference. To make sure that does not happen clear rport
- * pointer in ndlp before lpfc_nlp_put.
- */
- rdata = rport->dd_data;
-
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
- */
- if (vport->load_flag & FC_UNLOADING) {
- if (ndlp->nlp_sid != NLP_NO_SID) {
- /* flush the target */
- lpfc_sli_abort_iocb(vport,
- &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
- put_node = rdata->pnode != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
-
- return fcf_inuse;
- }
+ "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
+ /* If the driver is recovering the rport, ignore devloss. */
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0284 Devloss timeout Ignored on "
@@ -280,33 +454,87 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
- put_node = rdata->pnode != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
+ /* Fabric nodes are done. */
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* In massive vport configuration settings, it's possible
+ * dev_loss_tmo fired during node recovery. So, check if
+ * fabric nodes still have outstanding discovery states.
+ */
+ switch (ndlp->nlp_DID) {
+ case Fabric_DID:
+ fc_vport = vport->fc_vport;
+ if (fc_vport &&
+ fc_vport->vport_state == FC_VPORT_INITIALIZING)
+ recovering = true;
+ break;
+ case Fabric_Cntl_DID:
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ recovering = true;
+ break;
+ case FDMI_DID:
+ fallthrough;
+ case NameServer_DID:
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
+ recovering = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
- if (ndlp->nlp_type & NLP_FABRIC)
+ /* Mark the NLP_IN_RECOV_POST_DEV_LOSS flag so it is known whether
+ * the following lpfc_nlp_put needs to be reversed once the fabric
+ * node is recovered.
+ */
+ if (recovering) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
+ "8436 Devloss timeout marked on "
+ "DID x%x refcnt %d ndlp %p "
+ "flag x%x port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp, ndlp->nlp_flag,
+ vport->port_state);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ /* Fabric node fully recovered before this dev_loss_tmo
+ * queue work is processed. Thus, ignore the
+ * dev_loss_tmo event.
+ */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
+ "8437 Devloss timeout ignored on "
+ "DID x%x refcnt %d ndlp %p "
+ "flag x%x port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp, ndlp->nlp_flag,
+ vport->port_state);
+ return fcf_inuse;
+ }
+
+ lpfc_nlp_put(ndlp);
return fcf_inuse;
+ }
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
- lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
if (warn_on) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
"NPort x%06x Data: x%x x%x x%x\n",
@@ -316,16 +544,46 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
- (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+ /* If we hit devloss, but are in the process of rediscovering the
+ * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
+ */
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
+ return fcf_inuse;
+ }
+
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
return fcf_inuse;
}
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
+ if (!lpfc_issue_els_qfpa(vport))
+ vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
/**
* lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
* @phba: Pointer to hba context object.
@@ -522,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -553,6 +815,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
fcf_inuse,
nlp_did);
break;
+ case LPFC_EVT_RECOVER_PORT:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
+ /* decrement the node reference count held for
+ * this queued work
+ */
+ lpfc_nlp_put(ndlp);
+ break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
*(int *) (evtp->evt_arg1) = lpfc_online(phba);
@@ -617,26 +890,52 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
- if (ha_copy & HA_ERATT)
+ if (ha_copy & HA_ERATT) {
/* Handle the error attention event */
lpfc_handle_eratt(phba);
+ if (phba->fw_dump_cmpl) {
+ complete(phba->fw_dump_cmpl);
+ phba->fw_dump_cmpl = NULL;
+ }
+ }
+
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+ /* Handle VMID Events */
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_VMID_ISSUE_QFPA) {
+ lpfc_check_vmid_qfpa_issue(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_INACTIVE_VMID) {
+ lpfc_check_inactive_vmid(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_INACTIVE_VMID;
+ }
+ }
+
/* Process SLI4 events */
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
if (phba->hba_flag & HBA_RRQ_ACTIVE)
@@ -672,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
@@ -757,7 +1058,7 @@ lpfc_do_work(void *p)
|| kthread_should_stop()));
/* Signal wakeup shall terminate the worker thread */
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0433 Wakeup on signal: rc=x%x\n", rc);
break;
}
@@ -812,19 +1113,38 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* It's possible the FLOGI to the fabric node never
+ * successfully completed and never registered with the
+ * transport. In this case there is no way to clean up
+ * the node.
+ */
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (ndlp->nlp_prev_state ==
+ NLP_STE_UNUSED_NODE &&
+ !ndlp->fc4_xpt_flags)
+ lpfc_nlp_put(ndlp);
+ }
continue;
+ }
+
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
- ((vport->port_type == LPFC_NPIV_PORT) &&
- (ndlp->nlp_DID == NameServer_DID)))
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ ((ndlp->nlp_DID == NameServer_DID) ||
+ (ndlp->nlp_DID == FDMI_DID) ||
+ (ndlp->nlp_DID == Fabric_Cntl_DID))))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC))
continue;
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
lpfc_disc_state_machine(vport, ndlp, NULL,
remove
? NLP_EVT_DEVICE_RM
@@ -863,6 +1183,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
@@ -880,6 +1201,13 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
vport->fc_flag &= ~FC_DISC_DELAYED;
spin_unlock_irq(shost->host_lock);
del_timer_sync(&vport->delayed_disc_tmo);
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+ /* Assume success on link up */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
}
int
@@ -890,15 +1218,20 @@ lpfc_linkdown(struct lpfc_hba *phba)
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
@@ -909,6 +1242,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->trunk_link.phy_lnk_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
@@ -936,7 +1271,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any SLI3 firmware default rpi's */
- if (phba->sli_rev > LPFC_SLI_REV3)
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
goto skip_unreg_did;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -978,8 +1313,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
+
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -1021,15 +1355,19 @@ lpfc_linkup_port(struct lpfc_vport *vport)
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ if (phba->defer_flogi_acc_flag)
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ else
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
+ FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
+ lpfc_setup_fdmi_mask(vport);
- if (vport->fc_flag & FC_LBIT)
- lpfc_linkup_cleanup_nodes(vport);
-
+ lpfc_linkup_cleanup_nodes(vport);
}
static int
@@ -1059,9 +1397,8 @@ lpfc_linkup(struct lpfc_hba *phba)
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
- /* reinitialize initial FLOGI flag */
- phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
- phba->defer_flogi_acc_flag = false;
+ /* reinitialize initial HBA flag */
+ phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
return 0;
}
@@ -1088,7 +1425,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
mb->mbxStatus, vport->port_state);
@@ -1134,17 +1471,19 @@ out:
return;
}
-
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
-
- if (pmb->u.mb.mbxStatus)
- goto out;
+ LPFC_MBOXQ_t *sparam_mb;
+ u16 status = pmb->u.mb.mbxStatus;
+ int rc;
mempool_free(pmb, phba->mbox_mem_pool);
+ if (status)
+ goto out;
+
/* don't perform discovery for SLI4 loopback diagnostic test */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(phba->hba_flag & HBA_FCOE_MODE) &&
@@ -1163,25 +1502,52 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Start discovery by sending a FLOGI. port_state is identically
- * LPFC_FLOGI while waiting for FLOGI cmpl
+ * LPFC_FLOGI while waiting for FLOGI cmpl.
*/
- if (vport->port_state != LPFC_FLOGI)
- lpfc_initial_flogi(vport);
- else if (vport->fc_flag & FC_PT2PT)
- lpfc_disc_start(vport);
+ if (vport->port_state != LPFC_FLOGI) {
+ /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
+ * bb-credit recovery is in place.
+ */
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ sparam_mb = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!sparam_mb)
+ goto sparam_out;
+
+ rc = lpfc_read_sparam(phba, sparam_mb, 0);
+ if (rc) {
+ mempool_free(sparam_mb, phba->mbox_mem_pool);
+ goto sparam_out;
+ }
+ sparam_mb->vport = vport;
+ sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+ rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
+ MBOX_THD_UNLOCKED);
+ goto sparam_out;
+ }
+ phba->hba_flag |= HBA_DEFER_FLOGI;
+ } else {
+ lpfc_initial_flogi(vport);
+ }
+ } else {
+ if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
+ }
return;
out:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0306 CONFIG_LINK mbxStatus error x%x "
- "HBA state x%x\n",
- pmb->u.mb.mbxStatus, vport->port_state);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
+ status, vport->port_state);
+sparam_out:
lpfc_linkdown(phba);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0200 CONFIG_LINK bad hba state x%x\n",
vport->port_state);
@@ -1191,7 +1557,7 @@ out:
/**
* lpfc_sli4_clear_fcf_rr_bmask
- * @phba pointer to the struct lpfc_hba for this port.
+ * @phba: pointer to the struct lpfc_hba for this port.
* This function resets the round-robin bit mask and clears the
* fcf priority list. The list deletions are done while holding the
* hbalock. The ON_LIST and FLOGI_FAILED flags are cleared
@@ -1217,10 +1583,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "2017 REG_FCFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2017 REG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n", mboxq->u.mb.mbxStatus,
+ vport->port_state);
goto fail_out;
}
@@ -1290,7 +1656,7 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
/**
* lpfc_sw_name_match - Check if the fcf switch name match.
- * @fab_name: pointer to fabric name.
+ * @sw_name: pointer to switch name.
* @new_fcf_record: pointer to fcf record.
*
* This routine compare the fcf record's switch name with provided
@@ -1353,14 +1719,14 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
* __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: Index for the lpfc_fcf_record.
* @new_fcf_record: pointer to hba fcf record.
*
* This routine updates the driver FCF priority record from the new HBA FCF
- * record. This routine is called with the host lock held.
+ * record. The hbalock is asserted held in the code path calling this
+ * routine.
**/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
@@ -1369,8 +1735,6 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
{
struct lpfc_fcf_pri *fcf_pri;
- lockdep_assert_held(&phba->hbalock);
-
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
fcf_pri->fcf_rec.fcf_index = fcf_index;
/* FCF record priority */
@@ -1380,7 +1744,7 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
/**
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
- * @fcf: pointer to driver fcf record.
+ * @fcf_rec: pointer to driver fcf record.
* @new_fcf_record: pointer to fcf record.
*
* This routine copies the FCF information from the FCF
@@ -1438,7 +1802,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
+ * __lpfc_update_fcf_record - Update driver fcf record
* @phba: pointer to lpfc hba data structure.
* @fcf_rec: pointer to driver fcf record.
* @new_fcf_record: pointer to hba fcf record.
@@ -1448,7 +1812,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
*
* This routine updates the driver FCF record from the new HBA FCF record
* together with the address mode, vlan_id, and other informations. This
- * routine is called with the host lock held.
+ * routine is called with the hbalock held.
**/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
@@ -1792,8 +2156,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* This function makes a running random selection decision on the FCF record to
* use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manipulation of random numbers with
- * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * size uint32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random number generated.
*
* Returns true when the outcome is that the newly read FCF record should be
* chosen; otherwise, returns false when the outcome is to keep the previously
@@ -1805,7 +2169,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
uint32_t rand_num;
/* Get 16-bit uniform random number */
- rand_num = 0xFFFF & prandom_u32();
+ rand_num = get_random_u16();
/* Decision with probability 1/fcf_cnt */
if ((fcf_cnt * rand_num) < 0xFFFF)
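
This check is the whole random-selection step: with rand_num uniform over 16 bits, fcf_cnt * rand_num < 0xFFFF holds for roughly one draw in fcf_cnt, so each eligible record read replaces the current pick with probability about 1/fcf_cnt, yielding a uniform choice over the sequence. A quick userspace simulation of the inequality (illustrative only; assumes RAND_MAX >= 0xFFFF, as on glibc):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int fcf_cnt = 8;
	const unsigned int trials = 1000000;
	unsigned int hits = 0;

	srand(1);
	for (unsigned int i = 0; i < trials; i++) {
		unsigned int r = (unsigned int)rand() & 0xFFFF;

		if (fcf_cnt * r < 0xFFFF)   /* same test as the driver */
			hits++;
	}
	/* prints a frequency close to 1.0 / 8 = 0.125 */
	printf("%f\n", (double)hits / trials);
	return 0;
}
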
@@ -1843,7 +2207,7 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
if (unlikely(!mboxq->sge_array)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2524 Failed to get the non-embedded SGE "
"virtual address\n");
return NULL;
@@ -1859,11 +2223,12 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (shdr_status || shdr_add_status) {
if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
if_type == LPFC_SLI_INTF_IF_TYPE_2)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2726 READ_FCF_RECORD Indicates empty "
"FCF table.\n");
else
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2521 READ_FCF_RECORD mailbox failed "
"with status x%x add_status x%x, "
"mbx\n", shdr_status, shdr_add_status);
@@ -1947,7 +2312,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
}
/**
- lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
+ * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
* @phba: pointer to lpfc hba data structure.
* @fcf_rec: pointer to an existing FCF record.
* @new_fcf_record: pointer to a new FCF record.
@@ -2061,7 +2426,7 @@ stop_flogi_current_fcf:
/**
* lpfc_sli4_fcf_pri_list_del
* @phba: pointer to lpfc hba data structure.
- * @fcf_index the index of the fcf record to delete
+ * @fcf_index: the index of the fcf record to delete
* This routine checks the on list flag of the fcf_index to be deleted.
* If it is on the list then it is removed from the list, and the flag
* is cleared. This routine grabs the hbalock before removing the fcf
* record from the list; otherwise it returns.
/**
* lpfc_sli4_set_fcf_flogi_fail
* @phba: pointer to lpfc hba data structure.
- * @fcf_index the index of the fcf record to update
+ * @fcf_index: the index of the fcf record to update
* This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
* flag so the round-robin selection for the particular priority level
* will try a different fcf record that does not have this bit set.
@@ -2111,7 +2476,8 @@ lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
/**
* lpfc_sli4_fcf_pri_list_add
* @phba: pointer to lpfc hba data structure.
- * @fcf_index the index of the fcf record to add
+ * @fcf_index: the index of the fcf record to add
+ * @new_fcf_record: pointer to a new FCF record.
* This routine checks the priority of the fcf_index to be added.
* If it is a lower priority than the current head of the fcf_pri list
* then it is added to the list in the right order.
@@ -2241,7 +2607,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
&next_fcf_index);
if (!new_fcf_record) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2765 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
/* Let next new FCF event trigger fast failover */
@@ -2285,7 +2651,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
phba->fcf.current_rec.fcf_indx) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2862 FCF (x%x) matches property "
"of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
@@ -2355,7 +2722,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->pport->fc_flag);
goto out;
} else
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2863 New FCF (x%x) matches "
"property of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
@@ -2603,7 +2970,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, stop the roundrobin failover process */
@@ -2708,7 +3075,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, no need to proceed */
@@ -2769,10 +3136,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_0) &&
mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX,
- "2891 Init VFI mailbox failed 0x%x\n",
- mboxq->u.mb.mbxStatus);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2891 Init VFI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@@ -2800,7 +3166,7 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2892 Failed to allocate "
+ LOG_TRACE_EVENT, "2892 Failed to allocate "
"init_vfi mailbox\n");
return;
}
@@ -2808,8 +3174,8 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2893 Failed to issue init_vfi mailbox\n");
mempool_free(mboxq, vport->phba->mbox_mem_pool);
}
}
@@ -2829,10 +3195,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX,
- "2609 Init VPI mailbox failed 0x%x\n",
- mboxq->u.mb.mbxStatus);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2609 Init VPI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@@ -2846,7 +3211,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_DISCOVERY,
+ LOG_TRACE_EVENT,
"2731 Cannot find fabric "
"controller node\n");
else
@@ -2859,7 +3224,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_initial_fdisc(vport);
else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2606 No NPIV Fabric support\n");
}
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -2882,8 +3247,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
vpi = lpfc_alloc_vpi(vport->phba);
if (!vpi) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3303 Failed to obtain vport vpi\n");
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@@ -2894,7 +3258,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2607 Failed to allocate "
+ LOG_TRACE_EVENT, "2607 Failed to allocate "
"init_vpi mailbox\n");
return;
}
@@ -2903,8 +3267,8 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2608 Failed to issue init_vpi mailbox\n");
mempool_free(mboxq, vport->phba->mbox_mem_pool);
}
}
@@ -2948,7 +3312,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
lpfc_vport_set_state(vports[i],
FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vports[i], KERN_ERR,
- LOG_ELS,
+ LOG_TRACE_EVENT,
"0259 No NPIV "
"Fabric support\n");
}
@@ -2960,7 +3324,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
- struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -2972,10 +3335,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_0) &&
mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "2018 REG_VFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2018 REG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -3041,12 +3404,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
out_free_mem:
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
- return;
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}
static void
@@ -3062,7 +3420,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
mb->mbxStatus, vport->port_state);
@@ -3091,18 +3449,20 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
+
+ /* Check if sending the FLOGI is being deferred to after we get
+ * up to date CSPs from MBX_READ_SPARAM.
+ */
+ if (phba->hba_flag & HBA_DEFER_FLOGI) {
+ lpfc_initial_flogi(vport);
+ phba->hba_flag &= ~HBA_DEFER_FLOGI;
+ }
return;
out:
- pmb->ctx_buf = NULL;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_issue_clear_la(phba, vport);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
}
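
Taken together with lpfc_mbx_cmpl_local_config_link() above, the HBA_DEFER_FLOGI flag turns two asynchronous mailbox completions into a small handshake: CONFIG_LINK's completion posts READ_SPARAM and sets the flag instead of sending the FLOGI, and READ_SPARAM's completion sends the deferred FLOGI and clears it, so the FLOGI always goes out with current service parameters. A compact sketch of that flag-based deferral, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool defer_flogi;                   /* stands in for HBA_DEFER_FLOGI */

static void issue_flogi(void)
{
	puts("FLOGI sent");
}

/* first completion: fresh service params needed, so defer the FLOGI */
static void config_link_done(bool need_sparams)
{
	if (need_sparams) {
		defer_flogi = true;        /* READ_SPARAM posted here */
		return;
	}
	issue_flogi();
}

/* second completion: params are current, fire the deferred FLOGI */
static void read_sparam_done(void)
{
	if (defer_flogi) {
		issue_flogi();
		defer_flogi = false;
	}
}

int main(void)
{
	config_link_done(true);
	read_sparam_done();                /* prints "FLOGI sent" exactly once */
	return 0;
}
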
static void
@@ -3112,7 +3472,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
struct Scsi_Host *shost;
int i;
- struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
@@ -3132,6 +3491,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
case LPFC_LINK_SPEED_32GHZ:
case LPFC_LINK_SPEED_64GHZ:
case LPFC_LINK_SPEED_128GHZ:
+ case LPFC_LINK_SPEED_256GHZ:
break;
default:
phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -3149,7 +3509,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
- phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -3224,6 +3584,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
lpfc_linkup(phba);
+ sparam_mbox = NULL;
+
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
goto out;
@@ -3237,10 +3599,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
goto out;
}
@@ -3269,7 +3628,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
GFP_KERNEL);
if (unlikely(!fcf_record)) {
lpfc_printf_log(phba, KERN_ERR,
- LOG_MBOX | LOG_SLI,
+ LOG_TRACE_EVENT,
"2554 Could not allocate memory for "
"fcf record\n");
rc = -ENODEV;
@@ -3281,7 +3640,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR,
- LOG_MBOX | LOG_SLI,
+ LOG_TRACE_EVENT,
"2013 Could not manually add FCF "
"record 0, status %d\n", rc);
rc = -ENODEV;
@@ -3323,7 +3682,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
@@ -3409,18 +3768,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
phba->link_events++;
- if ((attn_type == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3434,17 +3783,22 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
+
+ if (phba->cmf_active_mode != LPFC_CFG_OFF)
+ lpfc_cmf_signal_init(phba);
+
+ if (phba->lmt & LMT_64Gb)
+ lpfc_read_lds_params(phba);
+
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
@@ -3458,64 +3812,28 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
- attn_type == LPFC_ATT_LINK_UP) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -3528,18 +3846,21 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* The driver calls the state machine with the pmb pointer
+ * but wants to make sure a stale ctx_buf isn't acted on.
+ * The ctx_buf is restored later and cleaned up.
+ */
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
+ "0002 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3553,9 +3874,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* there is another reg login in
* process.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* We cannot leave the RPI registered because
@@ -3568,10 +3889,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Call state machine */
lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ pmb->ctx_buf = mp;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
/* decrement the node reference count held for this callback
* function.
*/
@@ -3596,7 +3916,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
break;
/* If VPI is busy, reset the HBA */
case 0x9700:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
vport->vpi, mb->mbxStatus);
if (!(phba->pport->load_flag & FC_UNLOADING))
@@ -3607,7 +3927,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
@@ -3634,10 +3953,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
@@ -3721,7 +4039,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0542 lpfc_create_static_vport failed to"
" allocate mailbox memory\n");
return;
@@ -3731,7 +4049,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
if (!vport_info) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0543 lpfc_create_static_vport failed to"
" allocate vport_info\n");
mempool_free(pmb, phba->mbox_mem_pool);
@@ -3740,11 +4058,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
- /* free dma buffer from previous round */
+ /* Each while-loop iteration frees the DMA buffer from the
+ * previous pass because the mbox is reused and the dump
+ * routine is a single-use construct.
+ */
if (pmb->ctx_buf) {
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
+ pmb->ctx_buf = NULL;
}
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
@@ -3792,11 +4114,12 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
!= VPORT_INFO_REV)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0545 lpfc_create_static_vport bad"
- " information header 0x%x 0x%x\n",
- le32_to_cpu(vport_info->signature),
- le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0545 lpfc_create_static_vport bad"
+ " information header 0x%x 0x%x\n",
+ le32_to_cpu(vport_info->signature),
+ le32_to_cpu(vport_info->rev) &
+ VPORT_INFO_REV_MASK);
goto out;
}
@@ -3828,16 +4151,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
- if (mbx_wait_rc != MBX_TIMEOUT) {
- if (pmb->ctx_buf) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(pmb, phba->mbox_mem_pool);
- }
-
- return;
+ if (mbx_wait_rc != MBX_TIMEOUT)
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -3851,22 +4166,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct Scsi_Host *shost;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
if (mb->mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -3908,9 +4217,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_do_scr_ns_plogi(phba, vport);
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Drop the reference count from the mbox at the end after
* all the current reference to the ndlp have been done.
@@ -3933,7 +4240,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
/* Cannot issue NameServer FCP Query, so finish up
* discovery
*/
- lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0604 %s FC TYPE %x %s\n",
"Failed to issue GID_FT to ",
FC_TYPE_FCP,
@@ -3949,7 +4257,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
/* Cannot issue NameServer NVME Query, so finish up
* discovery
*/
- lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0605 %s FC_TYPE %x %s %d\n",
"Failed to issue GID_FT to ",
FC_TYPE_NVME,
@@ -3981,7 +4290,7 @@ lpfc_issue_gidpt(struct lpfc_vport *vport)
/* Cannot issue NameServer FCP Query, so finish up
* discovery
*/
- lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0606 %s Port TYPE %x %s\n",
"Failed to issue GID_PT to ",
GID_PT_N_PORT,
@@ -4002,16 +4311,15 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
+ int rc;
- pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
vport->gidft_inp = 0;
if (mb->mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0260 Register NameServer error: 0x%x\n",
mb->mbxStatus);
@@ -4020,12 +4328,18 @@ out:
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
- /* If no other thread is using the ndlp, free it */
- lpfc_nlp_not_used(ndlp);
+ /* If the node is not registered with the scsi or nvme
+ * transport, remove the fabric node. The failed reg_login
+ * is terminal.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_not_used(ndlp);
+ }
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/*
@@ -4048,10 +4362,10 @@ out:
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
- "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
+ "0003 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
@@ -4070,7 +4384,28 @@ out:
FC_TYPE_NVME);
/* Issue SCR just before NameServer GID_FT Query */
- lpfc_issue_els_scr(vport, SCR_DID, 0);
+ lpfc_issue_els_scr(vport, 0);
+
+ /* Link was bounced or a Fabric LOGO occurred. Start EDC
+ * with initial FW values provided the congestion mode is
+ * not off. Note that signals may or may not be supported
+ * by the adapter, but FPIN is registered by default when
+ * one or both signals are unsupported.
+ */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+ rc = lpfc_issue_els_edc(vport, 0);
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_ELS | LOG_DISCOVERY,
+ "4220 Issue EDC status x%x Data x%x\n",
+ rc, phba->cgn_init_reg_signal);
+ } else if (phba->lmt & LMT_64Gb) {
+ /* may send link fault capability descriptor */
+ lpfc_issue_els_edc(vport, 0);
+ } else {
+ lpfc_issue_els_rdf(vport, 0);
+ }
}
vport->fc_ns_retry = 0;
@@ -4085,13 +4420,54 @@ out:
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
+/*
+ * This routine handles processing a Fabric Controller REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+
+ pmb->ctx_ndlp = NULL;
+ if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0933 %s: Register FC login error: 0x%x\n",
+ __func__, mb->mbxStatus);
+ goto out;
+ }
+
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->nlp_state);
+
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+ out:
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
+
+ /* Drop the reference count from the mbox at the end after
+ * all the current reference to the ndlp have been done.
+ */
+ lpfc_nlp_put(ndlp);
+}
+
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4100,6 +4476,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_rport_data *rdata;
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
@@ -4110,47 +4487,46 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * We leave our node pointer in rport->dd_data when we unregister a
- * FCP target port. But fc_remote_port_add zeros the space to which
- * rport->dd_data points. So, if we're reusing a previously
- * registered port, drop the reference that we took the last time we
- * registered the port.
- */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- /* break the link before dropping the ref */
- ndlp->rport = NULL;
- if (rdata) {
- if (rdata->pnode == ndlp)
- lpfc_nlp_put(ndlp);
- rdata->pnode = NULL;
- }
- /* drop reference for earlier registeration */
- put_device(&rport->dev);
- }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport add: did:x%x flg:x%x type x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ "rport add: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
/* Don't add the remote port if unloading. */
if (vport->load_flag & FC_UNLOADING)
return;
+ /*
+ * Disassociate any older association between this ndlp and rport
+ */
+ if (ndlp->rport) {
+ rdata = ndlp->rport->dd_data;
+ rdata->pnode = NULL;
+ }
+
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
- if (!rport || !get_device(&rport->dev)) {
+ if (!rport) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
return;
}
- /* initialize static port data */
+ /* Successful port add. Complete initializing node data */
rport->maxframe_size = ndlp->nlp_maxframe;
rport->supported_classes = ndlp->nlp_class_sup;
rdata = rport->dd_data;
rdata->pnode = lpfc_nlp_get(ndlp);
+ if (!rdata->pnode) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning - node ref failed. Unreg rport\n");
+ fc_remote_port_delete(rport);
+ ndlp->rport = NULL;
+ return;
+ }
+
+ spin_lock_irqsave(&ndlp->lock, flags);
+ ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, flags);
if (ndlp->nlp_type & NLP_FCP_TARGET)
rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
@@ -4167,13 +4543,15 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport x%px role x%x\n",
- ndlp->nlp_DID, rport, rport_ids.roles);
+ "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
+ __func__, rport, rport->port_id, rport->roles,
+ kref_read(&ndlp->kref));
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
+
return;
}
@@ -4191,12 +4569,13 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport x%px\n",
- ndlp->nlp_DID, rport);
+ "3184 rport unregister x%06x, rport x%px "
+ "xptflg x%x refcnt %d\n",
+ ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
fc_remote_port_delete(rport);
-
- return;
+ lpfc_nlp_put(ndlp);
}
static void
@@ -4238,11 +4617,164 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
spin_unlock_irqrestore(shost->host_lock, iflags);
}
+/* Register a node with the backend if not already done */
+void
+lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ /* Already registered with backend, trigger rescan */
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
+ lpfc_nvme_rescan_port(vport, ndlp);
+ }
+ return;
+ }
+
+ ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (lpfc_valid_xpt_node(ndlp)) {
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ lpfc_register_remote_port(vport, ndlp);
+ }
+
+ /* We are done if we do not have any NVME remote node */
+ if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
+ return;
+
+ /* Notify the NVME transport of this new rport. */
+ if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+ ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->nvmet_support == 0) {
+ /* Register this rport with the transport.
+ * Only NVME Target Rports are registered with
+ * the transport.
+ */
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
+ } else {
+ /* Just take an NDLP ref count since the
+ * target does not register rports.
+ */
+ lpfc_nlp_get(ndlp);
+ }
+ }
+}
+
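
lpfc_nlp_reg_node() gates the SCSI transport registration on lpfc_valid_xpt_node(), which replaces the open-coded eligibility test removed further down in this diff (nlp_fc4_type set, or one of the well-known Fabric/NameServer/FDMI DIDs). A plausible sketch of that predicate, inferred from the removed check rather than quoted from the driver:

	static bool valid_xpt_node_sketch(struct lpfc_nodelist *ndlp)
	{
		/* A node is worth announcing to the transport once it
		 * has a resolved FC4 type or is a well-known fabric
		 * entity.
		 */
		if (ndlp->nlp_fc4_type ||
		    ndlp->nlp_DID == Fabric_DID ||
		    ndlp->nlp_DID == NameServer_DID ||
		    ndlp->nlp_DID == FDMI_DID)
			return true;
		return false;
	}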
+/* Unregister a node from the backend if not already done */
+void
+lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%x XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
+ return;
+ }
+
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->rport &&
+ ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+ " XPT x%x refcnt %d\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
+ }
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
+ /* NVMET has no upcall. */
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+}
+
+/*
+ * Adisc state change handling
+ */
+static void
+lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int new_state)
+{
+ switch (new_state) {
+ /*
+ * Any state to ADISC_ISSUE
+ * Do nothing, adisc cmpl handling will trigger state changes
+ */
+ case NLP_STE_ADISC_ISSUE:
+ break;
+
+ /*
+ * ADISC_ISSUE to mapped states
+ * Trigger a registration with backend, it will be nop if
+ * already registered
+ */
+ case NLP_STE_UNMAPPED_NODE:
+ ndlp->nlp_type |= NLP_FC_NODE;
+ fallthrough;
+ case NLP_STE_MAPPED_NODE:
+ ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ lpfc_nlp_reg_node(vport, ndlp);
+ break;
+
+ /*
+ * ADISC_ISSUE to non-mapped states
+ * We are moving from ADISC_ISSUE to a non-mapped state because
+ * ADISC failed, we would have skipped unregistering with
+ * backend, attempt it now
+ */
+ case NLP_STE_NPR_NODE:
+ ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+ fallthrough;
+ default:
+ lpfc_nlp_unreg_node(vport, ndlp);
+ break;
+ }
+
+}
+
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Trap ADISC changes here */
+ if (new_state == NLP_STE_ADISC_ISSUE ||
+ old_state == NLP_STE_ADISC_ISSUE) {
+ lpfc_handle_adisc_state(vport, ndlp, new_state);
+ return;
+ }
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -4253,79 +4785,21 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_NPR_NODE)
ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
- /* FCP and NVME Transport interface */
+ /* Reg/Unreg for FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- if (ndlp->rport) {
- vport->phba->nport_event_cnt++;
- lpfc_unregister_remote_port(ndlp);
- }
-
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0) {
- /* Start devloss if target. */
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- lpfc_nvme_unregister_port(vport, ndlp);
- } else {
- /* NVMET has no upcall. */
- lpfc_nlp_put(ndlp);
- }
- }
+ /* For nodes marked for ADISC, Handle unreg in ADISC cmpl
+ * if linkup. In linkdown do unreg_node
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
+ !lpfc_is_link_up(vport->phba))
+ lpfc_nlp_unreg_node(vport, ndlp);
}
- /* FCP and NVME Transport interfaces */
-
if (new_state == NLP_STE_MAPPED_NODE ||
- new_state == NLP_STE_UNMAPPED_NODE) {
- if (ndlp->nlp_fc4_type ||
- ndlp->nlp_DID == Fabric_DID ||
- ndlp->nlp_DID == NameServer_DID ||
- ndlp->nlp_DID == FDMI_DID) {
- vport->phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- * sure to unblock any attached scsi devices
- */
- lpfc_register_remote_port(vport, ndlp);
- }
- /* Notify the NVME transport of this new rport. */
- if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
- ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- if (vport->phba->nvmet_support == 0) {
- /* Register this rport with the transport.
- * Only NVME Target Rports are registered with
- * the transport.
- */
- if (ndlp->nlp_type & NLP_NVME_TARGET) {
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
- }
- } else {
- /* Just take an NDLP ref count since the
- * target does not register rports.
- */
- lpfc_nlp_get(ndlp);
- }
- }
- }
+ new_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nlp_reg_node(vport, ndlp);
- if ((new_state == NLP_STE_MAPPED_NODE) &&
- (vport->stat_data_enabled)) {
- /*
- * A new target is discovered, if there is no buffer for
- * statistical data collection allocate buffer.
- */
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_KERNEL);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "0286 lpfc_nlp_state_cleanup failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
/*
* If the node just added to Mapped list was an FCP target,
* but the remote port registration failed or assigned a target
@@ -4337,9 +4811,9 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
@@ -4372,6 +4846,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
+ int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
char name1[16], name2[16];
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -4384,6 +4859,12 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"node statechg did:x%x old:%d ste:%d",
ndlp->nlp_DID, old_state, state);
+ if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
+ state != NLP_STE_UNUSED_NODE) {
+ ndlp->nlp_flag &= ~NLP_DROPPED;
+ lpfc_nlp_get(ndlp);
+ }
+
if (old_state == NLP_STE_NPR_NODE &&
state != NLP_STE_NPR_NODE)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -4431,15 +4912,6 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
NLP_STE_UNUSED_NODE);
}
-static void
-lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
- NLP_STE_UNUSED_NODE);
-}
/**
* lpfc_initialize_node - Initialize all fields of node object
* @vport: Pointer to Virtual Port object.
@@ -4461,128 +4933,19 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
+ INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
+
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
- NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
-struct lpfc_nodelist *
-lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- int state)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t did, flag;
- unsigned long flags;
- unsigned long *active_rrqs_xri_bitmap = NULL;
- int rpi = LPFC_RPI_ALLOC_ERROR;
- uint32_t defer_did = 0;
-
- if (!ndlp)
- return NULL;
-
- if (phba->sli_rev == LPFC_SLI_REV4) {
- if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
- rpi = lpfc_sli4_alloc_rpi(vport->phba);
- else
- rpi = ndlp->nlp_rpi;
-
- if (rpi == LPFC_RPI_ALLOC_ERROR) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0359 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d FAILED RPI "
- " ALLOC\n",
- __func__,
- (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- return NULL;
- }
- }
-
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* The ndlp should not be in memory free mode */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- goto free_rpi;
- }
- /* The ndlp should not already be in active mode */
- if (NLP_CHK_NODE_ACT(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- goto free_rpi;
- }
-
- /* First preserve the orginal DID, xri_bitmap and some flags */
- did = ndlp->nlp_DID;
- flag = (ndlp->nlp_flag & NLP_UNREG_INP);
- if (flag & NLP_UNREG_INP)
- defer_did = ndlp->nlp_defer_did;
- if (phba->sli_rev == LPFC_SLI_REV4)
- active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
-
- /* Zero ndlp except of ndlp linked list pointer */
- memset((((char *)ndlp) + sizeof (struct list_head)), 0,
- sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
-
- /* Next reinitialize and restore saved objects */
- lpfc_initialize_node(vport, ndlp, did);
- ndlp->nlp_flag |= flag;
- if (flag & NLP_UNREG_INP)
- ndlp->nlp_defer_did = defer_did;
- if (phba->sli_rev == LPFC_SLI_REV4)
- ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
-
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0008 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- }
-
-
- if (state != NLP_STE_UNUSED_NODE)
- lpfc_nlp_set_state(vport, ndlp, state);
- else
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0013 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px STATE=UNUSED\n",
- ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
-
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
- "node enable: did:x%x",
- ndlp->nlp_DID, 0, 0);
- return ndlp;
-
-free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_sli4_free_rpi(vport->phba, rpi);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- }
- return NULL;
-}
-
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4596,6 +4959,7 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ ndlp->nlp_flag |= NLP_DROPPED;
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
@@ -4662,7 +5026,8 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (vport->fc_flag & FC_DISC_TMO ||
+ timer_pending(&vport->fc_disctmo)) {
spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag &= ~FC_DISC_TMO;
spin_unlock_irqrestore(shost->host_lock, iflags);
@@ -4691,26 +5056,31 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
- IOCB_t *icmd = &iocb->iocb;
- struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_vport *vport = ndlp->vport;
+ u8 ulp_command;
+ u16 ulp_context;
+ u32 remote_id;
if (iocb->vport != vport)
return 0;
+ ulp_command = get_job_cmnd(phba, iocb);
+ ulp_context = get_job_ulpcontext(phba, iocb);
+ remote_id = get_job_els_rsp64_did(phba, iocb);
+
if (pring->ringno == LPFC_ELS_RING) {
- switch (icmd->ulpCommand) {
+ switch (ulp_command) {
case CMD_GEN_REQUEST64_CR:
- if (iocb->context_un.ndlp == ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
- /* fall through */
+ fallthrough;
case CMD_ELS_REQUEST64_CR:
- if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ if (remote_id == ndlp->nlp_DID)
return 1;
- /* fall through */
+ fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
- if (iocb->context1 == (uint8_t *) ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
- /* fall through */
}
} else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
@@ -4718,9 +5088,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
- if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+ if (ulp_context == ndlp->nlp_rpi)
return 1;
- }
}
return 0;
}
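
The match logic above now reads the command, context tag, and remote DID through get_job_*() accessors instead of touching iocb->iocb fields directly, so the same code serves SLI-3 IOCBs and SLI-4 WQEs. A hedged sketch of the accessor shape, with the SLI-4 field location assumed rather than quoted from the driver:

	static u8 job_cmnd_sketch(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocbq)
	{
		/* SLI-4 carries the command in the generic WQE common
		 * words; SLI-3 keeps it in the legacy IOCB.
		 */
		if (phba->sli_rev == LPFC_SLI_REV4)
			return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
		return iocbq->iocb.ulpCommand;
	}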
@@ -4820,7 +5189,6 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
- mempool_free(pmb, phba->mbox_mem_pool);
/* Check to see if there are any deferred events to process */
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
@@ -4835,13 +5203,25 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
+ /* NLP_RELEASE_RPI is only set for SLI4 ports. */
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irq(&ndlp->lock);
}
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ spin_unlock_irq(&ndlp->lock);
}
+
+ /* The node has an outstanding reference for the unreg. Now
+ * that the LOGO action and cleanup are finished, release
+ * resources.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(pmb, phba->mbox_mem_pool);
}
/*
@@ -4855,8 +5235,14 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
{
unsigned long iflags;
+ /* Driver always gets a reference on the mailbox job
+ * in support of async jobs.
+ */
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ return;
+
if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -4864,20 +5250,15 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) &&
(kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
} else {
if (vport->load_flag & FC_UNLOADING) {
if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irqsave(&vport->phba->ndlp_lock,
- iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock,
- iflags);
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
}
- lpfc_nlp_get(ndlp);
}
- mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
@@ -4935,6 +5316,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
/*
* accept PLOGIs after unreg_rpi_cmpl
@@ -4955,9 +5341,34 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
}
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "1444 Failed to allocate mempool "
+ "unreg_rpi UNREG x%x, "
+ "DID x%x, flag x%x, "
+ "ndlp x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp);
+
+ /* Because mempool_alloc failed, we
+ * will issue a LOGO here and keep the rpi alive if
+ * not unloading.
+ */
+ if (!(vport->load_flag & FC_UNLOADING)) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
+
+ return 1;
}
lpfc_no_rpi(phba, ndlp);
out:
@@ -4990,8 +5401,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (!vports) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "2884 Vport array allocation failed \n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2884 Vport array allocation failed \n");
return;
}
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -5034,9 +5445,10 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
- "1836 Could not issue "
- "unreg_login(all_rpis) status %d\n", rc);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "1836 Could not issue "
+ "unreg_login(all_rpis) status %d\n",
+ rc);
}
}
@@ -5063,7 +5475,7 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1815 Could not issue "
"unreg_did (default rpis) status %d\n",
rc);
@@ -5077,11 +5489,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
- unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5089,22 +5498,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- lpfc_dequeue_node(vport, ndlp);
- } else {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- lpfc_disable_node(vport, ndlp);
- }
-
+ lpfc_dequeue_node(vport, ndlp);
/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
@@ -5134,16 +5528,11 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
list_del(&mb->list);
- mempool_free(mb, phba->mbox_mem_pool);
- /* We shall not invoke the lpfc_nlp_put to decrement
- * the ndlp reference count as we are in the process
- * of lpfc_nlp_release.
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
+
+ /* Don't invoke lpfc_nlp_put. The driver is in
+ * lpfc_nlp_release context.
*/
}
}
@@ -5151,106 +5540,22 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_els_abort(phba, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ list_del_init(&ndlp->recovery_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
+
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- if (!lpfc_unreg_rpi(vport, ndlp)) {
- /* Clean up unregistered and non freed rpis */
- if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
- !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
- lpfc_sli4_free_rpi(vport->phba,
- ndlp->nlp_rpi);
- spin_lock_irqsave(&vport->phba->ndlp_lock,
- iflags);
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock,
- iflags);
- }
- }
- return 0;
-}
-
-/*
- * Check to see if we can free the nlp back to the freelist.
- * If we are in the middle of using the nlp in the discovery state
- * machine, defer the free till we reach the end of the state machine.
- */
-static void
-lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
- LPFC_MBOXQ_t *mbox;
- int rc;
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
- !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
- phba->sli_rev != LPFC_SLI_REV4) {
- /* For this case we need to cleanup the default rpi
- * allocated by the firmware.
- */
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_NODE | LOG_DISCOVERY,
- "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
- "ref %d map:x%x ndlp x%px\n",
- ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
- != NULL) {
- rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
- (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- else {
- mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
- mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
- mbox->vport = vport;
- mbox->ctx_ndlp = ndlp;
- rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- }
- }
- }
- lpfc_cleanup_node(vport, ndlp);
-
- /*
- * ndlp->rport must be set to NULL before it reaches here
- * i.e. break rport/node link before doing lpfc_nlp_put for
- * registered rport and then drop the reference of rport.
- */
- if (ndlp->rport) {
- /*
- * extra lpfc_nlp_put dropped the reference of ndlp
- * for registered rport so need to cleanup rport
- */
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0940 removed node x%px DID x%x "
- "rpi %d rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
- ndlp->rport);
- rport = ndlp->rport;
- rdata = rport->dd_data;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- put_device(&rport->dev);
- }
+ return 0;
}
static int
@@ -5319,8 +5624,8 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
if (lpfc_matchdid(vport, ndlp, did)) {
data1 = (((uint32_t)ndlp->nlp_state << 24) |
((uint32_t)ndlp->nlp_xri << 16) |
- ((uint32_t)ndlp->nlp_type << 8) |
- ((uint32_t)ndlp->nlp_usg_map & 0xff));
+ ((uint32_t)ndlp->nlp_type << 8)
+ );
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
"Data: x%px x%x x%x x%x x%x x%px\n",
@@ -5388,7 +5693,6 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
ndlp = lpfc_findnode_did(vport, did);
@@ -5409,28 +5713,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
- return ndlp;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- if (vport->phba->nvmet_support)
- return NULL;
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
- "0014 Could not enable ndlp\n");
- return NULL;
- }
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6454 Setup Enabled Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, vport->fc_flag);
-
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp;
}
@@ -5472,9 +5757,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6456 Skip Setup RSCN Node x%x "
@@ -5508,9 +5793,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
return ndlp;
}
@@ -5716,9 +6001,9 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
+ u32 ulp_command;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
@@ -5729,12 +6014,13 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
list_move_tail(&iocb->list, &completions);
}
@@ -5742,17 +6028,21 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
- icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
spin_unlock_irq(&phba->hbalock);
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
@@ -5767,8 +6057,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
lpfc_free_tx(phba, ndlp);
@@ -5777,12 +6065,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and transport notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
@@ -5856,8 +6166,6 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -5884,7 +6192,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FLOGI:
/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0222 Initial %s timeout\n",
vport->vpi ? "FDISC" : "FLOGI");
@@ -5902,12 +6211,13 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FABRIC_CFG_LINK:
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0223 Timeout while waiting for "
"NameServer login\n");
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
lpfc_els_abort(phba, ndlp);
/* ReStart discovery */
@@ -5915,7 +6225,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0224 NameServer Query timeout "
"Data: x%x x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
@@ -5948,7 +6259,8 @@ restart_disc:
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0206 Device Discovery "
"completion error\n");
phba->link_state = LPFC_HBA_ERROR;
@@ -5970,7 +6282,8 @@ restart_disc:
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0227 Node Authentication timeout\n");
lpfc_disc_flush_list(vport);
@@ -5990,10 +6303,12 @@ restart_disc:
case LPFC_VPORT_READY:
if (vport->fc_flag & FC_RSCN_MODE) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
- "x%x\n",
- vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+ "x%x x%x x%x\n",
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
+ vport->port_state, vport->gidft_inp);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
@@ -6004,7 +6319,8 @@ restart_disc:
break;
default:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
@@ -6013,21 +6329,23 @@ restart_disc:
switch (phba->link_state) {
case LPFC_CLEAR_LA:
/* CLEAR LA timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0228 CLEAR LA timeout\n");
clrlaerr = 1;
break;
case LPFC_LINK_UP:
lpfc_issue_clear_la(phba, vport);
- /* fall through */
+ fallthrough;
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0230 Unexpected timeout, hba link "
"state x%x\n", phba->link_state);
clrlaerr = 1;
@@ -6060,11 +6378,9 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
- pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
if (phba->sli_rev < LPFC_SLI_REV4)
@@ -6073,30 +6389,29 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
- "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ "0004 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
* 0xfffffa (FDMI well known port).
* DHBA -> DPRT -> RHBA -> RPA (physical port)
* DPRT -> RPRT (vports)
*/
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
- else
+ } else {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+ }
/* decrement the node reference count held for this callback
* function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
@@ -6105,10 +6420,6 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
uint16_t *rpi = param;
- /* check for active node */
- if (!NLP_CHK_NODE_ACT(ndlp))
- return 0;
-
return ndlp->nlp_rpi == *rpi;
}
@@ -6212,15 +6523,15 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
* Translate the physical vpi to the logical vpi. The
* vport stores the logical vpi.
*/
- for (i = 0; i < phba->max_vpi; i++) {
+ for (i = 0; i <= phba->max_vpi; i++) {
if (vpi == phba->vpi_ids[i])
break;
}
- if (i >= phba->max_vpi) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2936 Could not find Vport mapped "
- "to vpi %d\n", vpi);
+ if (i > phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
return NULL;
}
}
@@ -6257,16 +6568,17 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+ spin_lock_init(&ndlp->lock);
+
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0007 Init New ndlp x%px, rpi:x%x DID:%x "
- "flg:x%x refcnt:%d map:x%x\n",
+ "flg:x%x refcnt:%d\n",
ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag, kref_read(&ndlp->kref),
- ndlp->nlp_usg_map);
+ ndlp->nlp_flag, kref_read(&ndlp->kref));
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6291,39 +6603,51 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
static void
lpfc_nlp_release(struct kref *kref)
{
- struct lpfc_hba *phba;
- unsigned long flags;
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
+ struct lpfc_vport *vport = ndlp->vport;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node release: did:x%x flg:x%x type:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 %s: ndlp:x%px did %x "
- "usgmap:x%x refcnt:%d rpi:%x\n",
- __func__,
- (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref), ndlp->nlp_rpi);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
+ __func__, ndlp, ndlp->nlp_DID,
+ kref_read(&ndlp->kref), ndlp->nlp_rpi);
/* remove ndlp from action. */
- lpfc_nlp_remove(ndlp->vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cleanup_node(vport, ndlp);
+
+ /* Not all ELS transactions have registered the RPI with the port.
+ * In these cases the rpi usage is temporary and the node is
+ * released when the WQE is completed. Catch this case to free the
+ * RPI to the pool. Because this node is in the release path, a lock
+ * is unnecessary. All references are gone and the node has been
+ * dequeued.
+ */
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
+ !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
+ }
- /* clear the ndlp active flag for all release cases */
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* The node is not freed back to memory, it is released to a pool so
+ * the node fields need to be cleaned up.
+ */
+ ndlp->vport = NULL;
+ ndlp->nlp_state = NLP_STE_FREED_NODE;
+ ndlp->nlp_flag = 0;
+ ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- kfree(ndlp->lat_data);
- if (phba->sli_rev == LPFC_SLI_REV4)
- mempool_free(ndlp->active_rrqs_xri_bitmap,
- ndlp->phba->active_rrq_pool);
- mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
- }
+ if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
+ mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
@@ -6333,7 +6657,6 @@ lpfc_nlp_release(struct kref *kref)
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
unsigned long flags;
if (ndlp) {
@@ -6341,94 +6664,43 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
+
/* The check of ndlp usage to prevent incrementing the
* ndlp reference count that is in the process of being
* released.
*/
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
+ if (!kref_get_unless_zero(&ndlp->kref)) {
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
+ "0276 %s: ndlp:x%px refcnt:%d\n",
+ __func__, (void *)ndlp, kref_read(&ndlp->kref));
return NULL;
- } else
- kref_get(&ndlp->kref);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ }
+ spin_unlock_irqrestore(&ndlp->lock, flags);
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
}
+
return ndlp;
}
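
For context, the hunk above drops the driver's private usgmap/active-flag bookkeeping in favor of the stock conditional-get primitive. A minimal sketch of that pattern on a hypothetical object (names assumed, not driver code):

	#include <linux/kref.h>
	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
		struct kref kref;
	};

	static struct foo *foo_get(struct foo *p)
	{
		unsigned long flags;

		/* The lock mirrors the per-node ndlp->lock above. */
		spin_lock_irqsave(&p->lock, flags);
		/* kref_get_unless_zero() returns 0 once the count has hit
		 * zero, so a lookup can never resurrect an object whose
		 * release is already in progress.
		 */
		if (!kref_get_unless_zero(&p->kref)) {
			spin_unlock_irqrestore(&p->lock, flags);
			return NULL;
		}
		spin_unlock_irqrestore(&p->lock, flags);
		return p;
	}
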
/* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be
- * freed. Returning 1 indicates the ndlp resource has been released; on the
- * other hand, returning 0 indicates the ndlp resource has not been released
- * yet.
+ * count goes to 0, this indicates the associated nodelist should be freed.
*/
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
- unsigned long flags;
-
- if (!ndlp)
- return 1;
-
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* Check the ndlp memory free acknowledge flag to avoid the
- * possible race condition that kref_put got invoked again
- * after previous one has done ndlp memory free.
- */
- if (NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- return 1;
- }
- /* Check the ndlp inactivate log flag to avoid the possible
- * race condition that kref_put got invoked again after ndlp
- * is already in inactivating state.
- */
- if (NLP_CHK_IACT_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
- return 1;
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
}
- /* For last put, mark the ndlp usage flags to make sure no
- * other kref_get and kref_put on the same ndlp shall get
- * in between the process when the final kref_put has been
- * invoked on this ndlp.
- */
- if (kref_read(&ndlp->kref) == 1) {
- /* Indicate ndlp is put to inactive state. */
- NLP_SET_IACT_REQ(ndlp);
- /* Acknowledge ndlp memory free has been seen. */
- if (NLP_CHK_FREE_REQ(ndlp))
- NLP_SET_FREE_ACK(ndlp);
- }
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- /* Note, the kref_put returns 1 when decrementing a reference
- * count that was 1, it invokes the release callback function,
- * but it still left the reference count as 1 (not actually
- * performs the last decrementation). Otherwise, it actually
- * decrements the reference count and returns 0.
- */
- return kref_put(&ndlp->kref, lpfc_nlp_release);
+
+ return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
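
And the matching put side, continuing the hypothetical foo object from the sketch above: kref_put() invokes the release callback and returns 1 only when the count actually reached zero, which is the value lpfc_nlp_put() now forwards to its callers.

	static mempool_t *foo_pool;	/* assumed, allocated elsewhere */

	static void foo_release(struct kref *kref)
	{
		struct foo *p = container_of(kref, struct foo, kref);

		/* Scrub the fields and return the object to its pool
		 * rather than kfree()ing it, as lpfc_nlp_release() does.
		 */
		memset(p, 0, sizeof(*p));
		mempool_free(p, foo_pool);
	}

	static int foo_put(struct foo *p)
	{
		return p ? kref_put(&p->kref, foo_release) : 0;
	}
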
/* This routine free's the specified nodelist if it is not in use
@@ -6443,6 +6715,7 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
+
if (kref_read(&ndlp->kref) == 1)
if (lpfc_nlp_put(ndlp))
return 1;
@@ -6488,7 +6761,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
}
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+ if (ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
ret = 1;
spin_unlock_irq(shost->host_lock);
@@ -6524,10 +6797,10 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2555 UNREG_VFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2555 UNREG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
@@ -6549,10 +6822,10 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2550 UNREG_FCFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2550 UNREG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
}
mempool_free(mboxq, phba->mbox_mem_pool);
return;
@@ -6641,7 +6914,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2551 UNREG_FCFI mbox allocation failed"
"HBA state x%x\n", phba->pport->port_state);
return -ENOMEM;
@@ -6652,7 +6925,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2552 Unregister FCFI command failed rc x%x "
"HBA state x%x\n",
rc, phba->pport->port_state);
@@ -6676,7 +6949,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
/* Preparation for unregistering fcf */
rc = lpfc_unregister_fcf_prep(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2748 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
return;
@@ -6712,7 +6985,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2553 lpfc_unregister_unused_fcf failed "
"to read FCF record HBA state x%x\n",
phba->pport->port_state);
@@ -6734,7 +7007,7 @@ lpfc_unregister_fcf(struct lpfc_hba *phba)
/* Preparation for unregistering fcf */
rc = lpfc_unregister_fcf_prep(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2749 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
return;
@@ -6821,9 +7094,9 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
GFP_KERNEL);
if (!conn_entry) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2566 Failed to allocate connection"
- " table entry\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2566 Failed to allocate connection"
+ " table entry\n");
return;
}
@@ -6967,7 +7240,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
/* Check the region signature first */
if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2567 Config region 23 has bad signature\n");
return;
}
@@ -6976,8 +7249,8 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
/* Check the data structure version */
if (buff[offset] != LPFC_REGION23_VERSION) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2568 Config region 23 has bad version\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2568 Config region 23 has bad version\n");
return;
}
offset += 4;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 436cdc8c5ef4..5c283936ff08 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -22,7 +22,7 @@
#define FDMI_DID 0xfffffaU
#define NameServer_DID 0xfffffcU
-#define SCR_DID 0xfffffdU
+#define Fabric_Cntl_DID 0xfffffdU
#define Fabric_DID 0xfffffeU
#define Bcast_DID 0xffffffU
#define Mask_DID 0xffffffU
@@ -97,6 +97,18 @@ union CtCommandResponse {
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_NVME_DISC 0x4
+enum rft_word0 {
+ RFT_FCP_REG = (0x1 << 8),
+};
+
+enum rft_word1 {
+ RFT_NVME_REG = (0x1 << 8),
+};
+
+enum rft_word3 {
+ RFT_APP_SERV_REG = (0x1 << 0),
+};
+
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
union CtRevisionId RevisionId;
@@ -131,25 +143,13 @@ struct lpfc_sli_ct_request {
uint8_t Fc4Type;
} gid_ff;
struct rft {
- uint32_t PortId; /* For RFT_ID requests */
+ __be32 port_id; /* For RFT_ID requests */
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd0:16;
- uint32_t rsvd1:7;
- uint32_t fcpReg:1; /* Type 8 */
- uint32_t rsvd2:2;
- uint32_t ipReg:1; /* Type 5 */
- uint32_t rsvd3:5;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t rsvd0:16;
- uint32_t fcpReg:1; /* Type 8 */
- uint32_t rsvd1:7;
- uint32_t rsvd3:5;
- uint32_t ipReg:1; /* Type 5 */
- uint32_t rsvd2:2;
-#endif
-
- uint32_t rsvd[7];
+ __be32 fcp_reg; /* rsvd 31:9, fcp_reg 8, rsvd 7:0 */
+ __be32 nvme_reg; /* rsvd 31:9, nvme_reg 8, rsvd 7:0 */
+ __be32 word2;
+ __be32 app_serv_reg; /* rsvd 31:1, app_serv_reg 0 */
+ __be32 word[4];
} rft;
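
With the endian-dependent bitfields gone, callers own the byte order explicitly. A hedged sketch of how an RFT_ID issuer would now fill these words ("ctreq" and "my_did" are assumed locals, not shown in this patch):

	ctreq->un.rft.port_id = cpu_to_be32(my_did);
	/* Bit 8 of each per-word registration mask selects the FC-4 type */
	ctreq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
	ctreq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
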
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
@@ -275,6 +275,7 @@ struct lpfc_sli_ct_request {
#define SLI_CT_ACCESS_DENIED 0x10
#define SLI_CT_INVALID_PORT_ID 0x11
#define SLI_CT_DATABASE_EMPTY 0x12
+#define SLI_CT_APP_ID_NOT_AVAILABLE 0x40
/*
* Name Server Command Codes
@@ -400,16 +401,16 @@ struct csp {
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t multicast:1; /* FC Word 1, bit 25 */
- uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */
- uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t priority_tagging:1; /* FC Word 1, bit 23 */
uint16_t simplex:1; /* FC Word 1, bit 22 */
uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
uint16_t dhd:1; /* FC Word 1, bit 18 */
uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
uint16_t payloadlength:1; /* FC Word 1, bit 16 */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */
uint16_t multicast:1; /* FC Word 1, bit 25 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
@@ -423,7 +424,7 @@ struct csp {
uint16_t dhd:1; /* FC Word 1, bit 18 */
uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
uint16_t simplex:1; /* FC Word 1, bit 22 */
- uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t priority_tagging:1; /* FC Word 1, bit 23 */
#endif
uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
@@ -510,8 +511,6 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
-#define FAPWWN_KEY_VENDOR 0x42524344 /*valid vendor version fawwpn key*/
-
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -588,6 +587,7 @@ struct fc_vft_header {
#define ELS_CMD_RRQ 0x12000000
#define ELS_CMD_REC 0x13000000
#define ELS_CMD_RDP 0x18000000
+#define ELS_CMD_RDF 0x19000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_NVMEPRLI 0x20140018
#define ELS_CMD_PRLO 0x21100014
@@ -597,7 +597,6 @@ struct fc_vft_header {
#define ELS_CMD_ADISC 0x52000000
#define ELS_CMD_FARP 0x54000000
#define ELS_CMD_FARPR 0x55000000
-#define ELS_CMD_RPS 0x56000000
#define ELS_CMD_RPL 0x57000000
#define ELS_CMD_FAN 0x60000000
#define ELS_CMD_RSCN 0x61040000
@@ -607,6 +606,9 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A000000
#define ELS_CMD_LCB 0x81000000
#define ELS_CMD_FPIN 0x16000000
+#define ELS_CMD_EDC 0x17000000
+#define ELS_CMD_QFPA 0xB0000000
+#define ELS_CMD_UVEM 0xB1000000
#else /* __LITTLE_ENDIAN_BITFIELD */
#define ELS_CMD_MASK 0xffff
#define ELS_RSP_MASK 0xff
@@ -630,6 +632,7 @@ struct fc_vft_header {
#define ELS_CMD_RRQ 0x12
#define ELS_CMD_REC 0x13
#define ELS_CMD_RDP 0x18
+#define ELS_CMD_RDF 0x19
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_NVMEPRLI 0x18001420
#define ELS_CMD_PRLO 0x14001021
@@ -639,7 +642,6 @@ struct fc_vft_header {
#define ELS_CMD_ADISC 0x52
#define ELS_CMD_FARP 0x54
#define ELS_CMD_FARPR 0x55
-#define ELS_CMD_RPS 0x56
#define ELS_CMD_RPL 0x57
#define ELS_CMD_FAN 0x60
#define ELS_CMD_RSCN 0x0461
@@ -649,6 +651,9 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A
#define ELS_CMD_LCB 0x81
#define ELS_CMD_FPIN ELS_FPIN
+#define ELS_CMD_EDC ELS_EDC
+#define ELS_CMD_QFPA 0xB0
+#define ELS_CMD_UVEM 0xB1
#endif
/*
@@ -657,6 +662,7 @@ struct fc_vft_header {
struct ls_rjt { /* Structure is in Big Endian format */
union {
+ __be32 ls_rjt_error_be;
uint32_t lsRjtError;
struct {
uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
@@ -697,6 +703,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
} un;
@@ -919,24 +926,6 @@ typedef struct _RNID { /* Structure is in Big Endian format */
} un;
} __packed RNID;
-typedef struct _RPS { /* Structure is in Big Endian format */
- union {
- uint32_t portNum;
- struct lpfc_name portName;
- } un;
-} RPS;
-
-typedef struct _RPS_RSP { /* Structure is in Big Endian format */
- uint16_t rsvd1;
- uint16_t portStatus;
- uint32_t linkFailureCnt;
- uint32_t lossSyncCnt;
- uint32_t lossSignalCnt;
- uint32_t primSeqErrCnt;
- uint32_t invalidXmitWord;
- uint32_t crcCnt;
-} RPS_RSP;
-
struct RLS { /* Structure is in Big Endian format */
uint32_t rls;
#define rls_rsvd_SHIFT 24
@@ -1335,48 +1324,174 @@ struct fc_rdp_res_frame {
};
+/* UVEM */
+
+#define LPFC_UVEM_SIZE 60
+#define LPFC_UVEM_VEM_ID_DESC_SIZE 16
+#define LPFC_UVEM_VE_MAP_DESC_SIZE 20
+
+#define VEM_ID_DESC_TAG 0x0001000A
+struct lpfc_vem_id_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vem_id[16];
+};
+
+#define LPFC_QFPA_SIZE 4
+
+#define INSTANTIATED_VE_DESC_TAG 0x0001000B
+struct instantiated_ve_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t global_vem_id[16];
+ uint32_t word6;
+#define lpfc_instantiated_local_id_SHIFT 0
+#define lpfc_instantiated_local_id_MASK 0x000000ff
+#define lpfc_instantiated_local_id_WORD word6
+#define lpfc_instantiated_nport_id_SHIFT 8
+#define lpfc_instantiated_nport_id_MASK 0x00ffffff
+#define lpfc_instantiated_nport_id_WORD word6
+};
+
+#define DEINSTANTIATED_VE_DESC_TAG 0x0001000C
+struct deinstantiated_ve_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t global_vem_id[16];
+ uint32_t word6;
+#define lpfc_deinstantiated_nport_id_SHIFT 0
+#define lpfc_deinstantiated_nport_id_MASK 0x000000ff
+#define lpfc_deinstantiated_nport_id_WORD word6
+#define lpfc_deinstantiated_local_id_SHIFT 24
+#define lpfc_deinstantiated_local_id_MASK 0x00ffffff
+#define lpfc_deinstantiated_local_id_WORD word6
+};
+
+/* Query Fabric Priority Allocation Response */
+#define LPFC_PRIORITY_RANGE_DESC_SIZE 12
+
+struct priority_range_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t lo_range;
+ uint8_t hi_range;
+ uint8_t qos_priority;
+ uint8_t local_ve_id;
+};
+
+struct fc_qfpa_res {
+ uint32_t reply_sequence; /* LS_ACC or LS_RJT */
+ uint32_t length; /* FC Word 1 */
+ struct priority_range_desc desc[1];
+};
+
+/* Application Server command code */
+/* VMID */
+
+#define SLI_CT_APP_SEV_Subtypes 0x20 /* Application Server subtype */
+
+#define SLI_CTAS_GAPPIA_ENT 0x0100 /* Get Application Identifier */
+#define SLI_CTAS_GALLAPPIA 0x0101 /* Get All Application Identifier */
+#define SLI_CTAS_GALLAPPIA_ID 0x0102 /* Get All Application Identifier */
+ /* for Nport */
+#define SLI_CTAS_GAPPIA_IDAPP 0x0103 /* Get Application Identifier */
+ /* for Nport */
+#define SLI_CTAS_RAPP_IDENT 0x0200 /* Register Application Identifier */
+#define SLI_CTAS_DAPP_IDENT 0x0300 /* Deregister Application */
+ /* Identifier */
+#define SLI_CTAS_DALLAPP_ID 0x0301 /* Deregister All Application */
+ /* Identifier */
+
+struct entity_id_object {
+ uint8_t entity_id_len;
+ uint8_t entity_id[255]; /* VM UUID */
+};
+
+struct app_id_object {
+ uint32_t port_id;
+ uint32_t app_id;
+ struct entity_id_object obj;
+};
+
+struct lpfc_vmid_rapp_ident_list {
+ uint32_t no_of_objects;
+ struct entity_id_object obj[1];
+};
+
+struct lpfc_vmid_dapp_ident_list {
+ uint32_t no_of_objects;
+ struct entity_id_object obj[1];
+};
+
+#define GALLAPPIA_ID_LAST 0x80
+struct lpfc_vmid_gallapp_ident_list {
+ uint8_t control;
+ uint8_t reserved[3];
+ struct app_id_object app_id;
+};
+
+#define RAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define DAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define GALLAPPIA_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define DALLAPP_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4)
+
/******** FDMI ********/
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
-/*
- * Registered Port List Format
- */
-struct lpfc_fdmi_reg_port_list {
- uint32_t EntryCnt;
- uint32_t pe; /* Variable-length array */
+/* Definitions for HBA / Port attribute entries */
+
+/* Attribute Entry Structures */
+
+struct lpfc_fdmi_attr_u32 {
+ __be16 type;
+ __be16 len;
+ __be32 value_u32;
};
+struct lpfc_fdmi_attr_wwn {
+ __be16 type;
+ __be16 len;
-/* Definitions for HBA / Port attribute entries */
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by the compiler
+ */
+ u8 name[8];
+};
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
+struct lpfc_fdmi_attr_fullwwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by the compiler
+ */
+ u8 nname[8];
+ u8 pname[8];
};
+struct lpfc_fdmi_attr_fc4types {
+ __be16 type;
+ __be16 len;
+ u8 value_types[32];
+};
-/* Attribute Entry */
-struct lpfc_fdmi_attr_entry {
- union {
- uint32_t AttrInt;
- uint8_t AttrTypes[32];
- uint8_t AttrString[256];
- struct lpfc_name AttrWWN;
- } un;
+struct lpfc_fdmi_attr_string {
+ __be16 type;
+ __be16 len;
+ char value_string[256];
};
-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
+/* Maximum FDMI attribute length is Type+Len (4 bytes) + 256 byte string */
+#define FDMI_MAX_ATTRLEN sizeof(struct lpfc_fdmi_attr_string)
/*
* HBA Attribute Block
*/
struct lpfc_fdmi_attr_block {
uint32_t EntryCnt; /* Number of HBA attribute entries */
- struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+ /* Variable Length Attribute Entry TLVs follow */
};
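
A hedged sketch of emitting one attribute in the new TLV layout; RPRT_PORT_ID is defined further down this header, while "buf" and "did" are assumed locals:

	struct lpfc_fdmi_attr_u32 *ad = buf;

	ad->type = cpu_to_be16(RPRT_PORT_ID);
	ad->len = cpu_to_be16(sizeof(*ad));	/* TLV length covers header + value */
	ad->value_u32 = cpu_to_be32(did);
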
/*
@@ -1394,14 +1509,24 @@ struct lpfc_fdmi_hba_ident {
};
/*
+ * Registered Port List Format
+ */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ struct lpfc_fdmi_port_entry pe;
+} __packed;
+
+/*
* Register HBA(RHBA)
*/
struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_hba_ident hi;
- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
-/* struct lpfc_fdmi_attr_block ab; */
+ struct lpfc_fdmi_reg_port_list rpl;
};
+/******** MI MIB ********/
+#define SLI_CT_MIB_Subtypes 0x11
+
/*
* Register HBA Attributes (RHAT)
*/
@@ -1487,7 +1612,7 @@ struct lpfc_fdmi_reg_portattr {
#define LPFC_FDMI2_HBA_ATTR 0x0002efff
/*
- * Port Attrubute Types
+ * Port Attribute Types
*/
#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
@@ -1505,6 +1630,7 @@ struct lpfc_fdmi_reg_portattr {
#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
+#define RPRT_VENDOR_MI 0xf047 /* vendor ascii string */
#define RPRT_SMART_SERVICE 0xf100 /* 4 to 256 byte ASCII string */
#define RPRT_SMART_GUID 0xf101 /* 8 byte WWNN + 8 byte WWPN */
#define RPRT_SMART_VERSION 0xf102 /* 4 to 256 byte ASCII string */
@@ -1537,6 +1663,7 @@ struct lpfc_fdmi_reg_portattr {
#define LPFC_FDMI_SMART_ATTR_port_info 0x00100000 /* Vendor specific */
#define LPFC_FDMI_SMART_ATTR_qos 0x00200000 /* Vendor specific */
#define LPFC_FDMI_SMART_ATTR_security 0x00400000 /* Vendor specific */
+#define LPFC_FDMI_VENDOR_ATTR_mi 0x00800000 /* Vendor specific */
/* Bit mask for FDMI-1 defined PORT attributes */
#define LPFC_FDMI1_PORT_ATTR 0x0000003f
@@ -1595,6 +1722,7 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300
#define PCI_DEVICE_ID_LANCER_G7_FC 0xf400
+#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -1627,7 +1755,6 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
@@ -1635,6 +1762,28 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_TOMCAT 0x0714
#define PCI_DEVICE_ID_SKYHAWK 0x0724
#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
+#define PCI_VENDOR_ID_ATTO 0x117c
+#define PCI_DEVICE_ID_CLRY_16XE 0x0064
+#define PCI_DEVICE_ID_CLRY_161E 0x0063
+#define PCI_DEVICE_ID_CLRY_162E 0x0064
+#define PCI_DEVICE_ID_CLRY_164E 0x0065
+#define PCI_DEVICE_ID_CLRY_16XP 0x0094
+#define PCI_DEVICE_ID_CLRY_161P 0x00a0
+#define PCI_DEVICE_ID_CLRY_162P 0x0094
+#define PCI_DEVICE_ID_CLRY_164P 0x00a1
+#define PCI_DEVICE_ID_CLRY_32XE 0x0094
+#define PCI_DEVICE_ID_CLRY_321E 0x00a2
+#define PCI_DEVICE_ID_CLRY_322E 0x00a3
+#define PCI_DEVICE_ID_CLRY_324E 0x00ac
+#define PCI_DEVICE_ID_CLRY_32XP 0x00bb
+#define PCI_DEVICE_ID_CLRY_321P 0x00bc
+#define PCI_DEVICE_ID_CLRY_322P 0x00bd
+#define PCI_DEVICE_ID_CLRY_324P 0x00be
+#define PCI_DEVICE_ID_TLFC_2 0x0064
+#define PCI_DEVICE_ID_TLFC_2XX2 0x4064
+#define PCI_DEVICE_ID_TLFC_3 0x0094
+#define PCI_DEVICE_ID_TLFC_3162 0x40a6
+#define PCI_DEVICE_ID_TLFC_3322 0x40a7
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1650,7 +1799,6 @@ struct lpfc_fdmi_reg_portattr {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
-#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -2547,19 +2695,26 @@ typedef struct {
} READ_SPARM_VAR;
/* Structure for MB Command READ_STATUS (14) */
+enum read_status_word1 {
+ RD_ST_CC = 0x01,
+ RD_ST_XKB = 0x80,
+};
+
+enum read_status_word17 {
+ RD_ST_XMIT_XKB_MASK = 0x3fffff,
+};
+
+enum read_status_word18 {
+ RD_ST_RCV_XKB_MASK = 0x3fffff,
+};
typedef struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1:31;
- uint32_t clrCounters:1;
- uint16_t activeXriCnt;
- uint16_t activeRpiCnt;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t clrCounters:1;
- uint32_t rsvd1:31;
- uint16_t activeRpiCnt;
- uint16_t activeXriCnt;
-#endif
+ u8 clear_counters; /* rsvd 7:1, cc 0 */
+ u8 rsvd5;
+ u8 rsvd6;
+ u8 xkb; /* xkb 7, rsvd 6:0 */
+
+ u32 rsvd8;
uint32_t xmitByteCnt;
uint32_t rcvByteCnt;
@@ -2571,6 +2726,14 @@ typedef struct {
uint32_t totalRespExchanges;
uint32_t rcvPbsyCnt;
uint32_t rcvFbsyCnt;
+
+ u32 drop_frame_no_rq;
+ u32 empty_rq;
+ u32 drop_frame_no_xri;
+ u32 empty_xri;
+
+ u32 xmit_xkb; /* rsvd 31:22, xmit_xkb 21:0 */
+ u32 rcv_xkb; /* rsvd 31:22, rcv_xkb 21:0 */
} READ_STATUS_VAR;
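
A hedged decode sketch for the extended counters, assuming "rd" points at a completed READ_STATUS_VAR:

	u32 tx_kb = 0, rx_kb = 0;

	if (rd->xkb & RD_ST_XKB) {
		/* With XKB set, words 17/18 report kilobytes in their
		 * low 22 bits; otherwise the byte counters apply.
		 */
		tx_kb = rd->xmit_xkb & RD_ST_XMIT_XKB_MASK;
		rx_kb = rd->rcv_xkb & RD_ST_RCV_XKB_MASK;
	}
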
/* Structure for MB Command READ_RPI (15) */
@@ -2936,7 +3099,6 @@ struct lpfc_mbx_read_top {
#define lpfc_mbx_read_top_topology_WORD word3
#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
/* store the LILP AL_PA position map into */
struct ulp_bde64 lilpBde64;
#define LPFC_ALPA_MAP_SIZE 128
@@ -3284,8 +3446,7 @@ typedef struct {
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1 : 19; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd1 : 20; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
@@ -3309,12 +3470,10 @@ typedef struct {
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
- uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd1 : 19; /* Reserved */
+ uint32_t rsvd1 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd3 : 19; /* Reserved */
- uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 : 20; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
@@ -3338,8 +3497,7 @@ typedef struct {
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
- uint32_t gdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd3 : 19; /* Reserved */
+ uint32_t rsvd3 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@@ -3361,15 +3519,11 @@ typedef struct {
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t fips_rev : 3; /* FIPS Spec Revision */
- uint32_t fips_level : 4; /* FIPS Level */
- uint32_t sec_err : 9; /* security crypto error */
+ uint32_t rsvd7 : 16;
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
- uint32_t sec_err : 9; /* security crypto error */
- uint32_t fips_level : 4; /* FIPS Level */
- uint32_t fips_rev : 3; /* FIPS Spec Revision */
+ uint32_t rsvd7 : 16;
#endif
} CONFIG_PORT_VAR;
@@ -3581,19 +3735,26 @@ union sli_var {
};
typedef struct {
+ struct_group_tagged(MAILBOX_word0, bits,
+ union {
+ struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t mbxStatus;
- uint8_t mbxCommand;
- uint8_t mbxReserved:6;
- uint8_t mbxHc:1;
- uint8_t mbxOwner:1; /* Low order bit first word */
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t mbxOwner:1; /* Low order bit first word */
- uint8_t mbxHc:1;
- uint8_t mbxReserved:6;
- uint8_t mbxCommand;
- uint16_t mbxStatus;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
#endif
+ };
+ u32 word0;
+ };
+ );
MAILVARIANTS un;
union sli_var us;
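
What struct_group_tagged() buys here, sketched under assumed names: the first mailbox word can be moved as one u32 while the bitfield accessors keep working on the same storage.

	struct MAILBOX_word0 w0;

	w0.word0 = readl(mbox_slim_addr);	/* one 32-bit access; address assumed */
	pr_debug("mbx cmd 0x%x status 0x%x owner %u\n",
		 w0.mbxCommand, w0.mbxStatus, w0.mbxOwner);
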
@@ -3652,7 +3813,7 @@ typedef struct {
#define IOERR_ILLEGAL_COMMAND 0x06
#define IOERR_XCHG_DROPPED 0x07
#define IOERR_ILLEGAL_FIELD 0x08
-#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_RPI_SUSPENDED 0x09
#define IOERR_TOO_MANY_BUFFERS 0x0A
#define IOERR_RCV_BUFFER_WAITING 0x0B
#define IOERR_NO_CONNECTION 0x0C
@@ -4275,23 +4436,15 @@ lpfc_is_LC_HBA(unsigned short device)
}
/*
- * Determine if an IOCB failed because of a link event or firmware reset.
+ * Determine if a request failed because of a link event or firmware reset.
*/
-
static inline int
-lpfc_error_lost_link(IOCB_t *iocbp)
+lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
{
- return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
- iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
- iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+ return (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 == IOERR_SLI_ABORTED ||
+ ulp_word4 == IOERR_LINK_DOWN ||
+ ulp_word4 == IOERR_SLI_DOWN));
}
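
Caller-side, the signature change means completion handlers pass the raw status words rather than an IOCB pointer; a hedged sketch with assumed variable names:

	if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
		/* Link bounce or firmware reset: drop the request, don't retry */
		return;
	}
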
-#define MENLO_TRANSPORT_TYPE 0xfe
-#define MENLO_CONTEXT 0
-#define MENLO_PU 3
-#define MENLO_TIMEOUT 30
-#define SETVAR_MLOMNT 0x103107
-#define SETVAR_MLORST 0x103007
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 9a064b96e570..5288fc69908a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,6 +20,9 @@
* included with this package. *
*******************************************************************/
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
/* Macros to deal with bit fields. Each bit field must have 3 #defines
* associated with it (_SHIFT, _MASK, and _WORD).
* EG. For a bit field that is in the 7th bit of the "field4" field of a
@@ -57,6 +60,14 @@
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+#define get_wqe_reqtag(x) (((x)->wqe.words[9] >> 0) & 0xFFFF)
+#define get_wqe_tmo(x) (((x)->wqe.words[7] >> 24) & 0x00FF)
+
+#define get_job_ulpword(x, y) ((x)->iocb.un.ulpWord[y])
+
+#define set_job_ulpstatus(x, y) bf_set(lpfc_wcqe_c_status, &(x)->wcqe_cmpl, y)
+#define set_job_ulpword4(x, y) ((&(x)->wcqe_cmpl)->parameter = y)
+
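
A hedged usage sketch for the accessor style above, with "rd_obj" assumed to be a struct lpfc_mbx_read_object * (the type is defined later in this file) and "cmdiocbq" an assumed command context:

	bf_set(lpfc_mbx_rd_object_rlen, &rd_obj->u.request, buf_len);
	if (bf_get(lpfc_mbx_rd_object_eof, &rd_obj->u.response))
		done = true;	/* firmware signalled end-of-object */

	set_job_ulpstatus(cmdiocbq, IOSTAT_LOCAL_REJECT);
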
struct dma_address {
uint32_t addr_lo;
uint32_t addr_hi;
@@ -92,6 +103,9 @@ struct lpfc_sli_intf {
#define LPFC_SLI_INTF_FAMILY_BE3 0x1
#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
+#define LPFC_SLI_INTF_FAMILY_G6 0xc
+#define LPFC_SLI_INTF_FAMILY_G7 0xd
+#define LPFC_SLI_INTF_FAMILY_G7P 0xe
#define lpfc_sli_intf_slirev_SHIFT 4
#define lpfc_sli_intf_slirev_MASK 0x0000000F
#define lpfc_sli_intf_slirev_WORD word0
@@ -122,6 +136,7 @@ struct lpfc_sli_intf {
/* Define SLI4 Alignment requirements. */
#define LPFC_ALIGN_16_BYTE 16
#define LPFC_ALIGN_64_BYTE 64
+#define SLI4_PAGE_SIZE 4096
/* Define SLI4 specific definitions. */
#define LPFC_MQ_CQE_BYTE_OFFSET 256
@@ -223,6 +238,34 @@ struct lpfc_sli_intf {
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
+enum ulp_bde64_word3 {
+ ULP_BDE64_SIZE_MASK = 0xffffff,
+
+ ULP_BDE64_TYPE_SHIFT = 24,
+ ULP_BDE64_TYPE_MASK = (0xff << ULP_BDE64_TYPE_SHIFT),
+
+ /* BDE (Host_resident) */
+ ULP_BDE64_TYPE_BDE_64 = (0x00 << ULP_BDE64_TYPE_SHIFT),
+ /* Immediate Data BDE */
+ ULP_BDE64_TYPE_BDE_IMMED = (0x01 << ULP_BDE64_TYPE_SHIFT),
+ /* BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64P = (0x02 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Host-resident) */
+ ULP_BDE64_TYPE_BDE_64I = (0x08 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64IP = (0x0A << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Host-resident) */
+ ULP_BDE64_TYPE_BLP_64 = (0x40 << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Port-resident) */
+ ULP_BDE64_TYPE_BLP_64P = (0x42 << ULP_BDE64_TYPE_SHIFT),
+};
+
+struct ulp_bde64_le {
+ __le32 type_size; /* type 31:24, size 23:0 */
+ __le32 addr_low;
+ __le32 addr_high;
+};
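
A hedged sketch of filling the explicit little-endian BDE variant, assuming a DMA address "pa" and segment length "len":

	struct ulp_bde64_le bde;

	bde.type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 |
				    (len & ULP_BDE64_SIZE_MASK));
	bde.addr_low  = cpu_to_le32(lower_32_bits(pa));
	bde.addr_high = cpu_to_le32(upper_32_bits(pa));
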
+
struct ulp_bde64 {
union ULP_BDE_TUS {
uint32_t w;
@@ -270,6 +313,9 @@ struct lpfc_sli4_flags {
#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
#define lpfc_vfi_rsrc_rdy_WORD word0
#define LPFC_VFI_RSRC_RDY 1
+#define lpfc_ftr_ashdr_SHIFT 4
+#define lpfc_ftr_ashdr_MASK 0x00000001
+#define lpfc_ftr_ashdr_WORD word0
};
struct sli4_bls_rsp {
@@ -387,6 +433,12 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF
#define lpfc_wcqe_c_ersp0_WORD word0
uint32_t total_data_placed;
+#define lpfc_wcqe_c_cmf_cg_SHIFT 31
+#define lpfc_wcqe_c_cmf_cg_MASK 0x00000001
+#define lpfc_wcqe_c_cmf_cg_WORD total_data_placed
+#define lpfc_wcqe_c_cmf_bw_SHIFT 0
+#define lpfc_wcqe_c_cmf_bw_MASK 0x0FFFFFFF
+#define lpfc_wcqe_c_cmf_bw_WORD total_data_placed
uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5
#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
@@ -648,12 +700,19 @@ struct lpfc_register {
#define lpfc_sliport_status_oti_SHIFT 29
#define lpfc_sliport_status_oti_MASK 0x1
#define lpfc_sliport_status_oti_WORD word0
+#define lpfc_sliport_status_dip_SHIFT 25
+#define lpfc_sliport_status_dip_MASK 0x1
+#define lpfc_sliport_status_dip_WORD word0
#define lpfc_sliport_status_rn_SHIFT 24
#define lpfc_sliport_status_rn_MASK 0x1
#define lpfc_sliport_status_rn_WORD word0
#define lpfc_sliport_status_rdy_SHIFT 23
#define lpfc_sliport_status_rdy_MASK 0x1
#define lpfc_sliport_status_rdy_WORD word0
+#define lpfc_sliport_status_pldv_SHIFT 0
+#define lpfc_sliport_status_pldv_MASK 0x1
+#define lpfc_sliport_status_pldv_WORD word0
+#define CFG_PLD 0x3C
#define MAX_IF_TYPE_2_RESETS 6
#define LPFC_CTL_PORT_CTL_OFFSET 0x408
@@ -678,6 +737,8 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_MASK 0xfff
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
+#define LPFC_SEC_TO_MSEC 1000
+#define LPFC_MSECS_TO_SECS(msecs) ((msecs) / 1000)
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -950,6 +1011,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_add_status_SHIFT 8
#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
#define lpfc_mbox_hdr_add_status_WORD word7
+#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2
+#define lpfc_mbox_hdr_add_status_2_SHIFT 16
+#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF
+#define lpfc_mbox_hdr_add_status_2_WORD word7
+#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01
+#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02
uint32_t response_length;
uint32_t actual_response_length;
} response;
@@ -1006,6 +1073,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
+#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF 0x8E
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
@@ -1114,6 +1182,12 @@ struct lpfc_mbx_sge {
uint32_t length;
};
+struct lpfc_mbx_host_buf {
+ uint32_t length;
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+};
+
struct lpfc_mbx_nembed_cmd {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
@@ -1124,6 +1198,31 @@ struct lpfc_mbx_nembed_sge_virt {
void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
};
+#define LPFC_MBX_OBJECT_NAME_LEN_DW 26
+struct lpfc_mbx_read_object { /* Version 0 */
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rd_object_rlen_SHIFT 0
+#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF
+#define lpfc_mbx_rd_object_rlen_WORD word0
+ uint32_t rd_object_offset;
+ __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */
+ uint32_t rd_object_cnt;
+ struct lpfc_mbx_host_buf rd_object_hbuf[4];
+ } request;
+ struct {
+ uint32_t rd_object_actual_rlen;
+ uint32_t word1;
+#define lpfc_mbx_rd_object_eof_SHIFT 31
+#define lpfc_mbx_rd_object_eof_MASK 0x1
+#define lpfc_mbx_rd_object_eof_WORD word1
+ } response;
+ } u;
+};
+
struct lpfc_mbx_eq_create {
struct mbox_header header;
union {
@@ -1546,7 +1645,7 @@ struct rq_context {
#define lpfc_rq_context_hdr_size_WORD word1
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
-#define lpfc_rq_context_cq_id_MASK 0x000003FF
+#define lpfc_rq_context_cq_id_MASK 0x0000FFFF
#define lpfc_rq_context_cq_id_WORD word2
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
@@ -2319,6 +2418,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_INVALID_OBJECT_NAME 0xA0
#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
@@ -2794,6 +2894,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
+#define lpfc_mbx_rd_conf_fawwpn_SHIFT 30
+#define lpfc_mbx_rd_conf_fawwpn_MASK 0x00000001
+#define lpfc_mbx_rd_conf_fawwpn_WORD word1
+#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */
+#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001
+#define lpfc_mbx_rd_conf_wcs_WORD word1
+#define lpfc_mbx_rd_conf_acs_SHIFT 27 /* alarm signaling */
+#define lpfc_mbx_rd_conf_acs_MASK 0x00000001
+#define lpfc_mbx_rd_conf_acs_WORD word1
uint32_t word2;
#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0
#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F
@@ -2938,6 +3047,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2
+#define lpfc_mbx_rq_ftr_rq_ashdr_SHIFT 17
+#define lpfc_mbx_rq_ftr_rq_ashdr_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_ashdr_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -2969,62 +3081,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
-};
-
-struct lpfc_mbx_supp_pages {
- uint32_t word1;
-#define qs_SHIFT 0
-#define qs_MASK 0x00000001
-#define qs_WORD word1
-#define wr_SHIFT 1
-#define wr_MASK 0x00000001
-#define wr_WORD word1
-#define pf_SHIFT 8
-#define pf_MASK 0x000000ff
-#define pf_WORD word1
-#define cpn_SHIFT 16
-#define cpn_MASK 0x000000ff
-#define cpn_WORD word1
- uint32_t word2;
-#define list_offset_SHIFT 0
-#define list_offset_MASK 0x000000ff
-#define list_offset_WORD word2
-#define next_offset_SHIFT 8
-#define next_offset_MASK 0x000000ff
-#define next_offset_WORD word2
-#define elem_cnt_SHIFT 16
-#define elem_cnt_MASK 0x000000ff
-#define elem_cnt_WORD word2
- uint32_t word3;
-#define pn_0_SHIFT 24
-#define pn_0_MASK 0x000000ff
-#define pn_0_WORD word3
-#define pn_1_SHIFT 16
-#define pn_1_MASK 0x000000ff
-#define pn_1_WORD word3
-#define pn_2_SHIFT 8
-#define pn_2_MASK 0x000000ff
-#define pn_2_WORD word3
-#define pn_3_SHIFT 0
-#define pn_3_MASK 0x000000ff
-#define pn_3_WORD word3
- uint32_t word4;
-#define pn_4_SHIFT 24
-#define pn_4_MASK 0x000000ff
-#define pn_4_WORD word4
-#define pn_5_SHIFT 16
-#define pn_5_MASK 0x000000ff
-#define pn_5_WORD word4
-#define pn_6_SHIFT 8
-#define pn_6_MASK 0x000000ff
-#define pn_6_WORD word4
-#define pn_7_SHIFT 0
-#define pn_7_MASK 0x000000ff
-#define pn_7_WORD word4
- uint32_t rsvd[27];
-#define LPFC_SUPP_PAGES 0
-#define LPFC_BLOCK_GUARD_PROFILES 1
-#define LPFC_SLI4_PARAMETERS 2
+#define lpfc_mbx_rq_ftr_rsp_ashdr_SHIFT 17
+#define lpfc_mbx_rq_ftr_rsp_ashdr_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ashdr_WORD word3
};
struct lpfc_mbx_memory_dump_type3 {
@@ -3243,121 +3302,6 @@ struct user_eeprom {
uint8_t reserved191[57];
};
-struct lpfc_mbx_pc_sli4_params {
- uint32_t word1;
-#define qs_SHIFT 0
-#define qs_MASK 0x00000001
-#define qs_WORD word1
-#define wr_SHIFT 1
-#define wr_MASK 0x00000001
-#define wr_WORD word1
-#define pf_SHIFT 8
-#define pf_MASK 0x000000ff
-#define pf_WORD word1
-#define cpn_SHIFT 16
-#define cpn_MASK 0x000000ff
-#define cpn_WORD word1
- uint32_t word2;
-#define if_type_SHIFT 0
-#define if_type_MASK 0x00000007
-#define if_type_WORD word2
-#define sli_rev_SHIFT 4
-#define sli_rev_MASK 0x0000000f
-#define sli_rev_WORD word2
-#define sli_family_SHIFT 8
-#define sli_family_MASK 0x000000ff
-#define sli_family_WORD word2
-#define featurelevel_1_SHIFT 16
-#define featurelevel_1_MASK 0x000000ff
-#define featurelevel_1_WORD word2
-#define featurelevel_2_SHIFT 24
-#define featurelevel_2_MASK 0x0000001f
-#define featurelevel_2_WORD word2
- uint32_t word3;
-#define fcoe_SHIFT 0
-#define fcoe_MASK 0x00000001
-#define fcoe_WORD word3
-#define fc_SHIFT 1
-#define fc_MASK 0x00000001
-#define fc_WORD word3
-#define nic_SHIFT 2
-#define nic_MASK 0x00000001
-#define nic_WORD word3
-#define iscsi_SHIFT 3
-#define iscsi_MASK 0x00000001
-#define iscsi_WORD word3
-#define rdma_SHIFT 4
-#define rdma_MASK 0x00000001
-#define rdma_WORD word3
- uint32_t sge_supp_len;
-#define SLI4_PAGE_SIZE 4096
- uint32_t word5;
-#define if_page_sz_SHIFT 0
-#define if_page_sz_MASK 0x0000ffff
-#define if_page_sz_WORD word5
-#define loopbk_scope_SHIFT 24
-#define loopbk_scope_MASK 0x0000000f
-#define loopbk_scope_WORD word5
-#define rq_db_window_SHIFT 28
-#define rq_db_window_MASK 0x0000000f
-#define rq_db_window_WORD word5
- uint32_t word6;
-#define eq_pages_SHIFT 0
-#define eq_pages_MASK 0x0000000f
-#define eq_pages_WORD word6
-#define eqe_size_SHIFT 8
-#define eqe_size_MASK 0x000000ff
-#define eqe_size_WORD word6
- uint32_t word7;
-#define cq_pages_SHIFT 0
-#define cq_pages_MASK 0x0000000f
-#define cq_pages_WORD word7
-#define cqe_size_SHIFT 8
-#define cqe_size_MASK 0x000000ff
-#define cqe_size_WORD word7
- uint32_t word8;
-#define mq_pages_SHIFT 0
-#define mq_pages_MASK 0x0000000f
-#define mq_pages_WORD word8
-#define mqe_size_SHIFT 8
-#define mqe_size_MASK 0x000000ff
-#define mqe_size_WORD word8
-#define mq_elem_cnt_SHIFT 16
-#define mq_elem_cnt_MASK 0x000000ff
-#define mq_elem_cnt_WORD word8
- uint32_t word9;
-#define wq_pages_SHIFT 0
-#define wq_pages_MASK 0x0000ffff
-#define wq_pages_WORD word9
-#define wqe_size_SHIFT 8
-#define wqe_size_MASK 0x000000ff
-#define wqe_size_WORD word9
- uint32_t word10;
-#define rq_pages_SHIFT 0
-#define rq_pages_MASK 0x0000ffff
-#define rq_pages_WORD word10
-#define rqe_size_SHIFT 8
-#define rqe_size_MASK 0x000000ff
-#define rqe_size_WORD word10
- uint32_t word11;
-#define hdr_pages_SHIFT 0
-#define hdr_pages_MASK 0x0000000f
-#define hdr_pages_WORD word11
-#define hdr_size_SHIFT 8
-#define hdr_size_MASK 0x0000000f
-#define hdr_size_WORD word11
-#define hdr_pp_align_SHIFT 16
-#define hdr_pp_align_MASK 0x0000ffff
-#define hdr_pp_align_WORD word11
- uint32_t word12;
-#define sgl_pages_SHIFT 0
-#define sgl_pages_MASK 0x0000000f
-#define sgl_pages_WORD word12
-#define sgl_pp_align_SHIFT 16
-#define sgl_pp_align_MASK 0x0000ffff
-#define sgl_pp_align_WORD word12
- uint32_t rsvd_13_63[51];
-};
#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
&(~((SLI4_PAGE_SIZE)-1)))
@@ -3484,25 +3428,35 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
-
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
-#define cfg_pvl_SHIFT 13
-#define cfg_pvl_MASK 0x00000001
-#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
#define cfg_nsler_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
+
+#define cfg_pbde_SHIFT 20
+#define cfg_pbde_MASK 0x00000001
+#define cfg_pbde_WORD word19
uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
#define cfg_max_tow_xri_MASK 0x0000ffff
#define cfg_max_tow_xri_WORD word20
- uint32_t word21; /* RESERVED */
- uint32_t word22; /* RESERVED */
+ uint32_t word21;
+#define cfg_mi_ver_SHIFT 0
+#define cfg_mi_ver_MASK 0x0000ffff
+#define cfg_mi_ver_WORD word21
+#define cfg_cmf_SHIFT 24
+#define cfg_cmf_MASK 0x000000ff
+#define cfg_cmf_WORD word21
+
+ uint32_t mib_size;
uint32_t word23; /* RESERVED */
uint32_t word24;
@@ -3529,8 +3483,12 @@ struct lpfc_sli4_parameters {
};
#define LPFC_SET_UE_RECOVERY 0x10
-#define LPFC_SET_MDS_DIAGS 0x11
+#define LPFC_SET_MDS_DIAGS 0x12
#define LPFC_SET_DUAL_DUMP 0x1e
+#define LPFC_SET_CGN_SIGNAL 0x1f
+#define LPFC_SET_ENABLE_MI 0x21
+#define LPFC_SET_LD_SIGNAL 0x23
+#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3539,12 +3497,15 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_UER_SHIFT 0
#define lpfc_mbx_set_feature_UER_MASK 0x00000001
#define lpfc_mbx_set_feature_UER_WORD word6
-#define lpfc_mbx_set_feature_mds_SHIFT 0
+#define lpfc_mbx_set_feature_mds_SHIFT 2
#define lpfc_mbx_set_feature_mds_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_WORD word6
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_warn_freq_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_CGN_warn_freq_WORD word6
#define lpfc_mbx_set_feature_dd_SHIFT 0
#define lpfc_mbx_set_feature_dd_MASK 0x00000001
#define lpfc_mbx_set_feature_dd_WORD word6
@@ -3554,23 +3515,73 @@ struct lpfc_mbx_set_feature {
#define LPFC_DISABLE_DUAL_DUMP 0
#define LPFC_ENABLE_DUAL_DUMP 1
#define LPFC_QUERY_OP_DUAL_DUMP 2
- uint32_t word7;
+#define lpfc_mbx_set_feature_cmf_SHIFT 0
+#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
+#define lpfc_mbx_set_feature_cmf_WORD word6
+#define lpfc_mbx_set_feature_lds_qry_SHIFT 0
+#define lpfc_mbx_set_feature_lds_qry_MASK 0x00000001
+#define lpfc_mbx_set_feature_lds_qry_WORD word6
+#define LPFC_QUERY_LDS_OP 1
+#define lpfc_mbx_set_feature_mi_SHIFT 0
+#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_mi_WORD word6
+#define lpfc_mbx_set_feature_milunq_SHIFT 16
+#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_milunq_WORD word6
+ u32 word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UERP_WORD word7
#define lpfc_mbx_set_feature_UESR_SHIFT 16
#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UESR_WORD word7
+#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD word7
+ u32 word8;
+#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
+#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
+ u32 word9;
+ u32 word10;
};
#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
+#define LPFC_SET_HOST_DATE_TIME 0x4
+
+struct lpfc_mbx_set_host_date_time {
+ uint32_t word6;
+#define lpfc_mbx_set_host_month_WORD word6
+#define lpfc_mbx_set_host_month_SHIFT 16
+#define lpfc_mbx_set_host_month_MASK 0xFF
+#define lpfc_mbx_set_host_day_WORD word6
+#define lpfc_mbx_set_host_day_SHIFT 8
+#define lpfc_mbx_set_host_day_MASK 0xFF
+#define lpfc_mbx_set_host_year_WORD word6
+#define lpfc_mbx_set_host_year_SHIFT 0
+#define lpfc_mbx_set_host_year_MASK 0xFF
+ uint32_t word7;
+#define lpfc_mbx_set_host_hour_WORD word7
+#define lpfc_mbx_set_host_hour_SHIFT 16
+#define lpfc_mbx_set_host_hour_MASK 0xFF
+#define lpfc_mbx_set_host_min_WORD word7
+#define lpfc_mbx_set_host_min_SHIFT 8
+#define lpfc_mbx_set_host_min_MASK 0xFF
+#define lpfc_mbx_set_host_sec_WORD word7
+#define lpfc_mbx_set_host_sec_SHIFT 0
+#define lpfc_mbx_set_host_sec_MASK 0xFF
+};
+
struct lpfc_mbx_set_host_data {
#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
struct mbox_header header;
uint32_t param_id;
uint32_t param_len;
- uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+ union {
+ uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+ struct lpfc_mbx_set_host_date_time tm;
+ } un;
};
struct lpfc_mbx_set_trunk_mode {
@@ -3588,6 +3599,21 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_mbx_reg_congestion_buf {
+ struct mbox_header header;
+ uint32_t word0;
+#define lpfc_mbx_reg_cgn_buf_type_WORD word0
+#define lpfc_mbx_reg_cgn_buf_type_SHIFT 0
+#define lpfc_mbx_reg_cgn_buf_type_MASK 0xFF
+#define lpfc_mbx_reg_cgn_buf_cnt_WORD word0
+#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT 16
+#define lpfc_mbx_reg_cgn_buf_cnt_MASK 0xFF
+ uint32_t word1;
+ uint32_t length;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+};
+
struct lpfc_rscr_desc_generic {
#define LPFC_RSRC_DESC_WSIZE 22
uint32_t desc[LPFC_RSRC_DESC_WSIZE];
@@ -3753,6 +3779,9 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
+#define lpfc_cntl_attr_flash_id_SHIFT 16
+#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
+#define lpfc_cntl_attr_flash_id_WORD word17
uint32_t mbx_da_struct_ver;
uint32_t ep_fw_da_struct_ver;
uint32_t ncsi_ver_str[3];
@@ -3894,6 +3923,7 @@ struct lpfc_mbx_get_port_name {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+
#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
struct lpfc_mbx_wr_object {
struct mbox_header header;
@@ -3910,7 +3940,7 @@ struct lpfc_mbx_wr_object {
#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
#define lpfc_wr_object_write_length_WORD word4
uint32_t write_offset;
- uint32_t object_name[26];
+ uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
uint32_t bde_count;
struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
} request;
@@ -3959,6 +3989,7 @@ struct lpfc_mqe {
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
struct lpfc_mbx_mq_create_ext mq_create_ext;
+ struct lpfc_mbx_read_object read_object;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_modify_eq_delay eq_delay;
struct lpfc_mbx_cq_create cq_create;
@@ -3983,9 +4014,8 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_set_beacon_config beacon_config;
- struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_reg_congestion_buf reg_congestion_buf;
struct lpfc_mbx_set_link_diag_state link_diag_state;
struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
struct lpfc_mbx_run_link_diag_test link_diag_test;
@@ -4040,6 +4070,7 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_GRP5 0x5
#define LPFC_TRAILER_CODE_FC 0x10
#define LPFC_TRAILER_CODE_SLI 0x11
+#define LPFC_TRAILER_CODE_CMSTAT 0x13
};
struct lpfc_acqe_link {
@@ -4274,10 +4305,23 @@ struct lpfc_acqe_misconfigured_event {
#define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05
};
+struct lpfc_acqe_cgn_signal {
+ u32 word0;
+#define lpfc_warn_acqe_SHIFT 0
+#define lpfc_warn_acqe_MASK 0x7FFFFFFF
+#define lpfc_warn_acqe_WORD word0
+#define lpfc_imm_acqe_SHIFT 31
+#define lpfc_imm_acqe_MASK 0x1
+#define lpfc_imm_acqe_WORD word0
+ u32 alarm_cnt;
+ u32 word2;
+ u32 trailer;
+};
+
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
- uint32_t reserved;
+ uint32_t event_data3;
uint32_t trailer;
#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
@@ -4286,8 +4330,11 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG 0xE
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
+#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
+#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
};
/*
@@ -4375,9 +4422,14 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
-#define wqe_nvme_SHIFT 4
-#define wqe_nvme_MASK 0x00000001
-#define wqe_nvme_WORD word10
+#define wqe_xchg_SHIFT 4
+#define wqe_xchg_MASK 0x00000001
+#define wqe_xchg_WORD word10
+#define LPFC_SCSI_XCHG 0x0
+#define LPFC_NVME_XCHG 0x1
+#define wqe_appid_SHIFT 5
+#define wqe_appid_MASK 0x00000001
+#define wqe_appid_WORD word10
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
@@ -4433,12 +4485,8 @@ struct wqe_common {
#define wqe_cmd_type_MASK 0x0000000f
#define wqe_cmd_type_WORD word11
#define wqe_els_id_SHIFT 4
-#define wqe_els_id_MASK 0x00000003
+#define wqe_els_id_MASK 0x00000007
#define wqe_els_id_WORD word11
-#define LPFC_ELS_ID_FLOGI 3
-#define LPFC_ELS_ID_FDISC 2
-#define LPFC_ELS_ID_LOGO 1
-#define LPFC_ELS_ID_DEFAULT 0
#define wqe_irsp_SHIFT 4
#define wqe_irsp_MASK 0x00000001
#define wqe_irsp_WORD word11
@@ -4448,6 +4496,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
@@ -4485,6 +4536,14 @@ struct lpfc_wqe_generic{
uint32_t payload[4];
};
+enum els_request64_wqe_word11 {
+ LPFC_ELS_ID_DEFAULT,
+ LPFC_ELS_ID_LOGO,
+ LPFC_ELS_ID_FDISC,
+ LPFC_ELS_ID_FLOGI,
+ LPFC_ELS_ID_PLOGI,
+};
+
struct els_request64_wqe {
struct ulp_bde64 bde;
uint32_t payload_len;
@@ -4686,10 +4745,75 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
-#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
+struct cmf_sync_wqe {
+ uint32_t rsrvd[3];
+ uint32_t word3;
+#define cmf_sync_interval_SHIFT 0
+#define cmf_sync_interval_MASK 0x00000ffff
+#define cmf_sync_interval_WORD word3
+#define cmf_sync_afpin_SHIFT 16
+#define cmf_sync_afpin_MASK 0x000000001
+#define cmf_sync_afpin_WORD word3
+#define cmf_sync_asig_SHIFT 17
+#define cmf_sync_asig_MASK 0x000000001
+#define cmf_sync_asig_WORD word3
+#define cmf_sync_op_SHIFT 20
+#define cmf_sync_op_MASK 0x00000000f
+#define cmf_sync_op_WORD word3
+#define cmf_sync_ver_SHIFT 24
+#define cmf_sync_ver_MASK 0x0000000ff
+#define cmf_sync_ver_WORD word3
+#define LPFC_CMF_SYNC_VER 1
+ uint32_t event_tag;
+ uint32_t word5;
+#define cmf_sync_wsigmax_SHIFT 0
+#define cmf_sync_wsigmax_MASK 0x00000ffff
+#define cmf_sync_wsigmax_WORD word5
+#define cmf_sync_wsigcnt_SHIFT 16
+#define cmf_sync_wsigcnt_MASK 0x00000ffff
+#define cmf_sync_wsigcnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+#define cmf_sync_cmnd_SHIFT 8
+#define cmf_sync_cmnd_MASK 0x0000000ff
+#define cmf_sync_cmnd_WORD word7
+ uint32_t word8;
+ uint32_t word9;
+#define cmf_sync_reqtag_SHIFT 0
+#define cmf_sync_reqtag_MASK 0x00000ffff
+#define cmf_sync_reqtag_WORD word9
+#define cmf_sync_wfpinmax_SHIFT 16
+#define cmf_sync_wfpinmax_MASK 0x0000000ff
+#define cmf_sync_wfpinmax_WORD word9
+#define cmf_sync_wfpincnt_SHIFT 24
+#define cmf_sync_wfpincnt_MASK 0x0000000ff
+#define cmf_sync_wfpincnt_WORD word9
+ uint32_t word10;
+#define cmf_sync_qosd_SHIFT 9
+#define cmf_sync_qosd_MASK 0x00000001
+#define cmf_sync_qosd_WORD word10
+ uint32_t word11;
+#define cmf_sync_cmd_type_SHIFT 0
+#define cmf_sync_cmd_type_MASK 0x0000000f
+#define cmf_sync_cmd_type_WORD word11
+#define cmf_sync_wqec_SHIFT 7
+#define cmf_sync_wqec_MASK 0x00000001
+#define cmf_sync_wqec_WORD word11
+#define cmf_sync_cqid_SHIFT 16
+#define cmf_sync_cqid_MASK 0x0000ffff
+#define cmf_sync_cqid_WORD word11
+ uint32_t read_bytes;
+ uint32_t word13;
+#define cmf_sync_period_SHIFT 16
+#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_WORD word13
+ uint32_t word14;
+ uint32_t word15;
+};
+
struct abort_cmd_wqe {
uint32_t rsrvd[3];
uint32_t word3;
@@ -4795,6 +4919,23 @@ struct send_frame_wqe {
uint32_t fc_hdr_wd5; /* word 15 */
};
+#define ELS_RDF_REG_TAG_CNT 4
+struct lpfc_els_rdf_reg_desc {
+ struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
+ __be32 desc_tags[ELS_RDF_REG_TAG_CNT];
+ /* tags in reg_desc */
+};
+
+struct lpfc_els_rdf_req {
+ struct fc_els_rdf rdf; /* hdr up to descriptors */
+ struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
+};
+
+struct lpfc_els_rdf_rsp {
+ struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */
+ struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
+};
+
union lpfc_wqe {
uint32_t words[16];
struct lpfc_wqe_generic generic;
@@ -4802,6 +4943,7 @@ union lpfc_wqe {
struct fcp_iread64_wqe fcp_iread;
struct fcp_iwrite64_wqe fcp_iwrite;
struct abort_cmd_wqe abort_cmd;
+ struct cmf_sync_wqe cmf_sync;
struct create_xri_wqe create_xri;
struct xmit_bcast64_wqe xmit_bcast64;
struct xmit_seq64_wqe xmit_sequence;
@@ -4822,6 +4964,7 @@ union lpfc_wqe128 {
struct fcp_iread64_wqe fcp_iread;
struct fcp_iwrite64_wqe fcp_iwrite;
struct abort_cmd_wqe abort_cmd;
+ struct cmf_sync_wqe cmf_sync;
struct create_xri_wqe create_xri;
struct xmit_bcast64_wqe xmit_bcast64;
struct xmit_seq64_wqe xmit_sequence;
@@ -4837,6 +4980,7 @@ union lpfc_wqe128 {
#define MAGIC_NUMBER_G6 0xFEAA0003
#define MAGIC_NUMBER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G7P 0xFEAA0020
struct lpfc_grp_hdr {
uint32_t size;
@@ -4858,10 +5002,13 @@ struct lpfc_grp_hdr {
#define NVME_READ_CMD 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define NVME_WRITE_CMD 0x1
+#define COMMAND_DATA_IN 0x0
+#define COMMAND_DATA_OUT 0x1
#define FCP_COMMAND_TRECEIVE 0x2
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
#define OTHER_COMMAND 0x8
+#define CMF_SYNC_COMMAND 0xA
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
@@ -4883,6 +5030,7 @@ struct lpfc_grp_hdr {
#define CMD_FCP_TRECEIVE64_WQE 0xA1
#define CMD_FCP_TRSP64_WQE 0xA3
#define CMD_GEN_REQUEST64_WQE 0xC2
+#define CMD_CMF_SYNC_WQE 0xE8
#define CMD_WQE_MASK 0xff
@@ -4890,3 +5038,27 @@ struct lpfc_grp_hdr {
#define LPFC_FW_DUMP 1
#define LPFC_FW_RESET 2
#define LPFC_DV_RESET 3
+
+/* On some kernels, enum fc_ls_tlv_dtag does not have
+ * these 2 enums defined, on other kernels it does.
+ * To get around this we need to add these 2 defines here.
+ */
+#ifndef ELS_DTAG_LNK_FAULT_CAP
+#define ELS_DTAG_LNK_FAULT_CAP 0x0001000D
+#endif
+#ifndef ELS_DTAG_CG_SIGNAL_CAP
+#define ELS_DTAG_CG_SIGNAL_CAP 0x0001000F
+#endif
+
+/*
+ * Initializer useful for decoding FPIN string table.
+ */
+#define FC_FPIN_CONGN_SEVERITY_INIT { \
+ { FPIN_CONGN_SEVERITY_WARNING, "Warning" }, \
+ { FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
+}
+
+/* Used for logging FPIN messages */
+#define LPFC_FPIN_WWPN_LINE_SZ 128
+#define LPFC_FPIN_WWPN_LINE_CNT 6
+#define LPFC_FPIN_WWPN_NUM_LINE 6
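
For context on FC_FPIN_CONGN_SEVERITY_INIT above: it pairs each FPIN congestion severity code with a display string, so a decoder can resolve severities roughly as in this sketch (the struct and function names here are illustrative, not the driver's actual decode code):

struct fpin_sev_entry {
	u32 sev;
	const char *name;
};

static const struct fpin_sev_entry fpin_sev_tbl[] =
	FC_FPIN_CONGN_SEVERITY_INIT;

static const char *fpin_sev_str(u32 sev)	/* hypothetical helper */
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(fpin_sev_tbl); i++)
		if (fpin_sev_tbl[i].sev == sev)
			return fpin_sev_tbl[i].name;
	return "Unknown";
}
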
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index d48414e295a0..0b1616e93cf4 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,8 +60,6 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -118,9 +116,41 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5a605773dd0a..b535f1fd3010 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -50,8 +50,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -61,7 +59,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -71,6 +68,7 @@
static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static bool lpfc_pldv_detect;
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
@@ -96,11 +94,14 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
+static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
+static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
/**
* lpfc_config_port_prep - Perform lpfc initialization prior to config port
@@ -156,7 +157,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0324 Config Port initialization "
"error, mbxCmd x%x READ_NVPARM, "
"mbxStatus x%x\n",
@@ -180,7 +181,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
lpfc_read_rev(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0439 Adapter failed to init, mbxCmd x%x "
"READ_REV, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
@@ -195,7 +196,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
*/
if (mb->un.varRdRev.rr == 0) {
vp->rev.rBit = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0440 Adapter failed to init, READ_REV has "
"missing revision information.\n");
mempool_free(pmb, phba->mbox_mem_pool);
@@ -256,6 +257,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
*/
if (mb->un.varDmp.word_cnt == 0)
break;
+
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -263,6 +265,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
kfree(lpfc_vpd_data);
@@ -322,8 +325,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
prog_id_word = pmboxq->u.mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
- if (prg->dist < 4)
- dist = dist_char[prg->dist];
+ dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
@@ -338,7 +340,6 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/**
* lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
- * cfg_soft_wwnn, cfg_soft_wwpn
* @vport: pointer to lpfc vport data structure.
*
*
@@ -348,22 +349,13 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
-
- /* If the soft name exists then update it using the service params */
- if (vport->phba->cfg_soft_wwnn)
- u64_to_wwn(vport->phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (vport->phba->cfg_soft_wwpn)
- u64_to_wwn(vport->phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
+ struct lpfc_hba *phba = vport->phba;
/*
* If the name is empty or there exists a soft name
* then copy the service params name, otherwise use the fc name
*/
- if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ if (vport->fc_nodename.u.wwn[0] == 0)
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
else
@@ -376,22 +368,35 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
*/
if (vport->fc_portname.u.wwn[0] != 0 &&
memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name)))
+ sizeof(struct lpfc_name))) {
vport->vport_flag |= FAWWPN_PARAM_CHG;
- if (vport->fc_portname.u.wwn[0] == 0 ||
- vport->phba->cfg_soft_wwpn ||
- (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
- vport->vport_flag & FAWWPN_SET) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+ if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
+ phba->sli4_hba.fawwpn_flag &=
+ ~LPFC_FAWWPN_FABRIC;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_DISCOVERY | LOG_ELS,
+ "2701 FA-PWWN change WWPN from %llx to "
+ "%llx: vflag x%x fawwpn_flag x%x\n",
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64
+ (vport->fc_sparam.portName.u.wwn),
+ vport->vport_flag,
+ phba->sli4_hba.fawwpn_flag);
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ }
+ }
+
+ if (vport->fc_portname.u.wwn[0] == 0)
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- vport->vport_flag &= ~FAWWPN_SET;
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
- vport->vport_flag |= FAWWPN_SET;
- }
+ sizeof(struct lpfc_name));
else
memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
- sizeof(struct lpfc_name));
+ sizeof(struct lpfc_name));
}
/**
@@ -445,20 +450,21 @@ lpfc_config_port_post(struct lpfc_hba *phba)
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0448 Adapter failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return -EIO;
}
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
+ * longer needed. Prevent unintended ctx_buf access as the mbox is
+ * reused.
+ */
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -499,7 +505,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_read_config(phba, pmb);
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0453 Adapter failed to init, mbxCmd x%x "
"READ_CONFIG, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
@@ -512,21 +518,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- i = (mb->un.varRdConfig.max_xri + 1);
- if (phba->cfg_hba_queue_depth > i) {
+ if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"3359 HBA queue depth changed from %d to %d\n",
- phba->cfg_hba_queue_depth, i);
- phba->cfg_hba_queue_depth = i;
- }
-
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- i = (mb->un.varRdConfig.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > i) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3360 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, i);
- phba->pport->cfg_lun_queue_depth = i;
+ phba->cfg_hba_queue_depth,
+ mb->un.varRdConfig.max_xri);
+ phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
}
phba->lmt = mb->un.varRdConfig.lmt;
@@ -557,7 +554,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
}
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0352 Config MSI mailbox command "
"failed, mbxCmd x%x, mbxStatus x%x\n",
pmb->u.mb.mbxCommand,
@@ -601,24 +598,22 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
if (phba->hba_flag & LINK_DISABLED) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2598 Adapter Link is disabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2598 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2599 Adapter failed to issue DOWN_LINK"
- " mbox command rc 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2599 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
@@ -642,9 +637,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0456 Adapter failed to issue "
"ASYNCEVT_ENABLE mbox status x%x\n",
rc);
@@ -664,7 +657,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0435 Adapter failed "
"to get Option ROM version status x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
@@ -673,6 +667,56 @@ lpfc_config_port_post(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_refresh_params - update driver copy of params.
+ * @phba: Pointer to HBA context object.
+ *
+ * This is called to refresh driver copy of dynamic fields from the
+ * common_get_sli4_parameters descriptor.
+ **/
+int
+lpfc_sli4_refresh_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mqe *mqe;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ int length, rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ mqe = &mboxq->u.mqe;
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+ }
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+
+ /* Are we forcing MI off via module parameter? */
+ if (phba->cfg_enable_mi)
+ phba->sli4_hba.pc_sli4_params.mi_ver =
+ bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ else
+ phba->sli4_hba.pc_sli4_params.mi_ver = 0;
+
+ phba->sli4_hba.pc_sli4_params.cmf =
+ bf_get(cfg_cmf, mbx_sli4_parameters);
+ phba->sli4_hba.pc_sli4_params.pls =
+ bf_get(cfg_pvl, mbx_sli4_parameters);
+
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
* lpfc_hba_init_link - Initialize the FC link
* @phba: pointer to lpfc hba data structure.
* @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
@@ -742,10 +786,10 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
!(phba->lmt & LMT_64Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1302 Invalid speed for this board:%d "
- "Reset link speed to auto.\n",
- phba->cfg_link_speed);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
}
lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
@@ -754,10 +798,10 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0498 Adapter failed to init, mbxCmd x%x "
- "INIT_LINK, mbxStatus x%x\n",
- mb->mbxCommand, mb->mbxStatus);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0498 Adapter failed to init, mbxCmd x%x "
+ "INIT_LINK, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
if (phba->sli_rev <= LPFC_SLI_REV3) {
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
@@ -803,17 +847,15 @@ lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
return -ENOMEM;
}
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "0491 Adapter Link is disabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0491 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2522 Adapter failed to issue DOWN_LINK"
- " mbox command rc 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2522 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
@@ -995,7 +1037,7 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&pring->ring_lock);
@@ -1008,7 +1050,6 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
/**
* lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
- int i;
* @phba: pointer to lpfc HBA data structure.
*
* This routine will do uninitialization after the HBA is reset when bring
@@ -1041,7 +1082,7 @@ static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_io_buf *psb, *psb_next;
- struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+ struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
struct lpfc_sli4_hdw_queue *qp;
LIST_HEAD(aborts);
LIST_HEAD(nvme_aborts);
@@ -1059,12 +1100,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
* driver is unloading or reposted if the driver is restarting
* the port.
*/
- spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
- /* scsl_buf_list */
+
/* sgl_list_lock required because worker thread uses this
* list.
*/
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry(sglq_entry,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
sglq_entry->state = SGL_FREED;
@@ -1073,11 +1113,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
&phba->sli4_hba.lpfc_els_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
/* abts_xxxx_buf_list_lock required because worker thread uses this
* list.
*/
+ spin_lock_irq(&phba->hbalock);
cnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
@@ -1108,7 +1149,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
&nvmet_aborts);
spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
- ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
+ ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
}
@@ -1136,7 +1177,7 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
/**
* lpfc_hb_timeout - The HBA-timer timeout handler
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
*
* This is the HBA-timer timeout handler registered to the lpfc driver. When
* this timer fires, a HBA timeout event shall be posted to the lpfc driver
@@ -1170,7 +1211,7 @@ lpfc_hb_timeout(struct timer_list *t)
/**
* lpfc_rrq_timeout - The RRQ-timer timeout handler
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
*
* This is the RRQ-timer timeout handler registered to the lpfc driver. When
* this timer fires, a RRQ timeout event shall be posted to the lpfc driver
@@ -1220,10 +1261,10 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
unsigned long drvr_flag;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- /* Check and reset heart-beat timer is necessary */
+ /* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
!(phba->link_state == LPFC_HBA_ERROR) &&
@@ -1234,6 +1275,76 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
+/*
+ * lpfc_idle_stat_delay_work - idle_stat tracking
+ *
+ * This routine tracks per-cq idle_stat and determines polling decisions.
+ *
+ * Return codes:
+ * None
+ **/
+static void
+lpfc_idle_stat_delay_work(struct work_struct *work)
+{
+ struct lpfc_hba *phba = container_of(to_delayed_work(work),
+ struct lpfc_hba,
+ idle_stat_delay_work);
+ struct lpfc_queue *cq;
+ struct lpfc_sli4_hdw_queue *hdwq;
+ struct lpfc_idle_stat *idle_stat;
+ u32 i, idle_percent;
+ u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
+
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ phba->pport->fc_flag & FC_OFFLINE_MODE ||
+ phba->cmf_active_mode != LPFC_CFG_OFF)
+ goto requeue;
+
+ for_each_present_cpu(i) {
+ hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
+ cq = hdwq->io_cq;
+
+ /* Skip if we've already handled this cq's primary CPU */
+ if (cq->chann != i)
+ continue;
+
+ idle_stat = &phba->sli4_hba.idle_stat[i];
+
+ /* get_cpu_idle_time returns values as running counters. Thus,
+ * to know the amount for this period, the prior counter values
+ * need to be subtracted from the current counter values.
+ * From there, the idle time can be calculated as 100 minus
+ * the percentage of time spent busy.
+ */
+ wall_idle = get_cpu_idle_time(i, &wall, 1);
+ diff_idle = wall_idle - idle_stat->prev_idle;
+ diff_wall = wall - idle_stat->prev_wall;
+
+ if (diff_wall <= diff_idle)
+ busy_time = 0;
+ else
+ busy_time = diff_wall - diff_idle;
+
+ idle_percent = div64_u64(100 * busy_time, diff_wall);
+ idle_percent = 100 - idle_percent;
+
+ if (idle_percent < 15)
+ cq->poll_mode = LPFC_QUEUE_WORK;
+ else
+ cq->poll_mode = LPFC_IRQ_POLL;
+
+ idle_stat->prev_idle = wall_idle;
+ idle_stat->prev_wall = wall;
+ }
+
+requeue:
+ schedule_delayed_work(&phba->idle_stat_delay_work,
+ msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
+}
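
To make the idle math concrete (illustrative numbers only): if a period shows diff_wall = 1000 and diff_idle = 900, then busy_time = 100, div64_u64(100 * busy_time, diff_wall) yields a 10% busy share, and idle_percent becomes 90, so the cq stays in LPFC_IRQ_POLL; only when idle_percent drops below 15 — i.e. the CPU is more than 85% busy — does the cq fall back to LPFC_QUEUE_WORK.
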
+
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
@@ -1328,6 +1439,60 @@ static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
}
/**
+ * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * If a HB mbox is not already in progress, this routine will allocate
+ * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
+ * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
+ **/
+int
+lpfc_issue_hb_mbox(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ int retval;
+
+ /* Is a Heartbeat mbox already in progress */
+ if (phba->hba_flag & HBA_HBEAT_INP)
+ return 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+ if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -ENXIO;
+ }
+ phba->hba_flag |= HBA_HBEAT_INP;
+
+ return 0;
+}
+
+/**
+ * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
+ * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
+ * of the value of lpfc_enable_hba_heartbeat.
+ * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
+ * try to issue a MBX_HEARTBEAT mbox command.
+ **/
+void
+lpfc_issue_hb_tmo(struct lpfc_hba *phba)
+{
+ if (phba->cfg_enable_hba_heartbeat)
+ return;
+ phba->hba_flag |= HBA_HBEAT_TMO;
+}
+
+/**
* lpfc_hb_timeout_handler - The HBA-timer timeout handler
* @phba: pointer to lpfc hba data structure.
*
@@ -1347,9 +1512,9 @@ void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
- LPFC_MBOXQ_t *pmboxq;
struct lpfc_dmabuf *buf_ptr;
- int retval, i;
+ int retval = 0;
+ int i, tmo;
struct lpfc_sli *psli = &phba->sli;
LIST_HEAD(completions);
@@ -1371,24 +1536,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
- spin_lock_irq(&phba->pport->work_port_lock);
-
- if (time_after(phba->last_completion_time +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
- jiffies)) {
- spin_unlock_irq(&phba->pport->work_port_lock);
- if (!phba->hb_outstanding)
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- else
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
- return;
- }
- spin_unlock_irq(&phba->pport->work_port_lock);
-
if (phba->elsbuf_cnt &&
(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
spin_lock_irq(&phba->hbalock);
@@ -1408,37 +1555,43 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If there is no heart beat outstanding, issue a heartbeat command */
if (phba->cfg_enable_hba_heartbeat) {
- if (!phba->hb_outstanding) {
+ /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
+ spin_lock_irq(&phba->pport->work_port_lock);
+ if (time_after(phba->last_completion_time +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+ jiffies)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ if (phba->hba_flag & HBA_HBEAT_INP)
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ else
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
+ goto out;
+ }
+ spin_unlock_irq(&phba->pport->work_port_lock);
+
+ /* Check if a MBX_HEARTBEAT is already in progress */
+ if (phba->hba_flag & HBA_HBEAT_INP) {
+ /*
+ * If the heartbeat timeout fires with HBA_HBEAT_INP set,
+ * we need to give the hb mailbox cmd a chance to
+ * complete or time out.
+ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0459 Adapter heartbeat still outstanding: "
+ "last compl time was %d ms.\n",
+ jiffies_to_msecs(jiffies
+ - phba->last_completion_time));
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ } else {
if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
(list_empty(&psli->mboxq))) {
- pmboxq = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!pmboxq) {
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 *
- LPFC_HB_MBOX_INTERVAL));
- return;
- }
- lpfc_heart_beat(phba, pmboxq);
- pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
- pmboxq->vport = phba->pport;
- retval = lpfc_sli_issue_mbox(phba, pmboxq,
- MBX_NOWAIT);
-
- if (retval != MBX_BUSY &&
- retval != MBX_SUCCESS) {
- mempool_free(pmboxq,
- phba->mbox_mem_pool);
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 *
- LPFC_HB_MBOX_INTERVAL));
- return;
+ retval = lpfc_issue_hb_mbox(phba);
+ if (retval) {
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
+ goto out;
}
phba->skipped_hb = 0;
- phba->hb_outstanding = 1;
} else if (time_before_eq(phba->last_completion_time,
phba->skipped_hb)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -1449,30 +1602,23 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
} else
phba->skipped_hb = jiffies;
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
- return;
- } else {
- /*
- * If heart beat timeout called with hb_outstanding set
- * we need to give the hb mailbox cmd a chance to
- * complete or TMO.
- */
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0459 Adapter heartbeat still out"
- "standing:last compl time was %d ms.\n",
- jiffies_to_msecs(jiffies
- - phba->last_completion_time));
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ goto out;
}
} else {
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ /* Check to see if we want to force a MBX_HEARTBEAT */
+ if (phba->hba_flag & HBA_HBEAT_TMO) {
+ retval = lpfc_issue_hb_mbox(phba);
+ if (retval)
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
+ else
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ goto out;
+ }
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
}
+out:
+ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
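
Spelled out, the handler's re-arm policy is: tmo is 1000 * LPFC_HB_MBOX_TIMEOUT ms whenever a MBX_HEARTBEAT is outstanding (HBA_HBEAT_INP) or has just been issued, and 1000 * LPFC_HB_MBOX_INTERVAL ms otherwise; every path funnels to the single mod_timer() call at out:, so hb_tmofunc is always re-armed exactly once per invocation.
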
/**
@@ -1515,6 +1661,11 @@ void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
+ if (phba->link_state == LPFC_HBA_ERROR &&
+ test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irq(&phba->hbalock);
@@ -1550,11 +1701,11 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
return;
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0479 Deferred Adapter Hardware Error "
- "Data: x%x x%x x%x\n",
- phba->work_hs,
- phba->work_status[0], phba->work_status[1]);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0479 Deferred Adapter Hardware Error "
+ "Data: x%x x%x x%x\n",
+ phba->work_hs, phba->work_status[0],
+ phba->work_status[1]);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
@@ -1705,7 +1856,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
temp_event_data.event_code = LPFC_CRIT_TEMP;
temp_event_data.data = (uint32_t)temperature;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0406 Adapter maximum temperature exceeded "
"(%ld), taking this port offline "
"Data: x%x x%x x%x\n",
@@ -1729,7 +1880,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* failure is a value other than FFER6. Do not call the offline
* twice. This is the adapter hardware error path.
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0457 Adapter Hardware Error "
"Data: x%x x%x x%x\n",
phba->work_hs,
@@ -1750,7 +1901,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
* @phba: pointer to lpfc hba data structure.
* @mbx_action: flag for mailbox shutdown action.
- *
+ * @en_rn_msg: send reset/port recovery message.
* This routine is invoked to perform an SLI4 port PCI function reset in
* response to port status register polling attention. It waits for port
* status register (ERR, RDY, RN) bits before proceeding with function reset.
@@ -1763,6 +1914,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
{
int rc;
uint32_t intr_mode;
+ LPFC_MBOXQ_t *mboxq;
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
@@ -1777,9 +1929,27 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
/* need reset: attempt for port recovery */
if (en_rn_msg)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2887 Reset Needed: Attempting Port "
"Recovery...\n");
+
+ /* If called in no-wait mode, the HBA has been reset and is
+ * not functional; clear the
+ * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
+ */
+ if (mbx_action == LPFC_MBX_NO_WAIT) {
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+ if (phba->sli.mbox_active) {
+ mboxq = phba->sli.mbox_active;
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ }
+
lpfc_offline_prep(phba, mbx_action);
lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
@@ -1787,14 +1957,14 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
lpfc_sli4_disable_intr(phba);
rc = lpfc_sli_brdrestart(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6309 Failed to restart board\n");
return rc;
}
/* request and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3175 Failed to enable interrupt\n");
return -EIO;
}
@@ -1833,9 +2003,9 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
* we cannot communicate with the pci card anyway.
*/
if (pci_channel_offline(phba->pcidev)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
- lpfc_sli4_offline_eratt(phba);
+ lpfc_sli_flush_io_rings(phba);
return;
}
@@ -1856,7 +2026,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_sli4_offline_eratt(phba);
return;
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"7623 Checking UE recoverable");
for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
@@ -1873,7 +2043,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
msleep(1000);
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4827 smphr_port_status x%x : Waited %dSec",
smphr_port_status, i);
@@ -1891,14 +2061,14 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
LPFC_MBX_NO_WAIT, en_rn_msg);
if (rc == 0)
return;
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"4215 Failed to recover UE");
break;
}
}
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"7624 Firmware not ready: Failing UE recovery,"
" waited %dSec", i);
phba->link_state = LPFC_HBA_ERROR;
@@ -1911,7 +2081,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
&portstat_reg.word0);
/* consider PCI bus read error as pci_channel_offline */
if (pci_rd_rc1 == -EIO) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3151 PCI bus read access failure: x%x\n",
readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
lpfc_sli4_offline_eratt(phba);
@@ -1920,10 +2090,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2889 Port Overtemperature event, "
- "taking port offline Data: x%x x%x\n",
- reg_err1, reg_err2);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2889 Port Overtemperature event, "
+ "taking port offline Data: x%x x%x\n",
+ reg_err1, reg_err2);
phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
@@ -1945,17 +2115,17 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3144 Port Down: Debug Dump\n");
else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3145 Port Down: Provisioning\n");
/* If resets are disabled then leave the HBA alone and return */
@@ -1974,7 +2144,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
break;
}
/* fall through for not able to recover */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3152 Unrecoverable error\n");
phba->link_state = LPFC_HBA_ERROR;
break;
@@ -2025,7 +2195,6 @@ lpfc_handle_latt(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
- struct lpfc_dmabuf *mp;
int rc = 0;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -2034,23 +2203,17 @@ lpfc_handle_latt(struct lpfc_hba *phba)
goto lpfc_handle_latt_err_exit;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
rc = 2;
- goto lpfc_handle_latt_free_pmb;
- }
-
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- rc = 3;
- goto lpfc_handle_latt_free_mp;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ goto lpfc_handle_latt_err_exit;
}
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
-
psli->slistat.link_event++;
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
@@ -2071,11 +2234,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
lpfc_handle_latt_free_mbuf:
phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-lpfc_handle_latt_free_mp:
- kfree(mp);
-lpfc_handle_latt_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
/* Enable Link attention interrupts */
spin_lock_irq(&phba->hbalock);
@@ -2092,12 +2251,107 @@ lpfc_handle_latt_err_exit:
lpfc_linkdown(phba);
phba->link_state = LPFC_HBA_ERROR;
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
return;
}
+static void
+lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
+{
+ int i, j;
+
+ while (length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->SerialNumber[j++] = vpd[(*pindex)++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelDesc[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelName[j++] = vpd[(*pindex)++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ProgramType[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3 + i);
+ while (i--) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ (*pindex)++;
+ } else
+ phba->Port[j++] = vpd[(*pindex)++];
+ if (j == 19)
+ break;
+ }
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
+ continue;
+ } else {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ *pindex += i;
+ length -= (3 + i);
+ }
+ }
+}
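
For orientation, lpfc_fill_vpd() consumes records laid out as tag[2] len[1] data[len], i.e. 3 + len bytes per record, which is exactly what the length -= (3 + i) bookkeeping accounts for. Illustrative fragment only: the bytes 'S' 'N' 0x04 'A' 'B' 'C' 'D' would leave phba->SerialNumber as "ABCD" and advance *pindex by 7.
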
+
/**
* lpfc_parse_vpd - Parse VPD (Vital Product Data)
* @phba: pointer to lpfc hba data structure.
@@ -2117,7 +2371,7 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
- int i, j;
+ int i;
int finished = 0;
int index = 0;
@@ -2150,101 +2404,10 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
Length = ((((unsigned short)lenhi) << 8) + lenlo);
if (Length > len - index)
Length = len - index;
- while (Length > 0) {
- /* Look for Serial Number */
- if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->SerialNumber[j++] = vpd[index++];
- if (j == 31)
- break;
- }
- phba->SerialNumber[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
- phba->vpd_flag |= VPD_MODEL_DESC;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelDesc[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ModelDesc[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
- phba->vpd_flag |= VPD_MODEL_NAME;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelName[j++] = vpd[index++];
- if (j == 79)
- break;
- }
- phba->ModelName[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
- phba->vpd_flag |= VPD_PROGRAM_TYPE;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ProgramType[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ProgramType[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
- phba->vpd_flag |= VPD_PORT;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_GET)) {
- j++;
- index++;
- } else
- phba->Port[j++] = vpd[index++];
- if (j == 19)
- break;
- }
- if ((phba->sli_rev != LPFC_SLI_REV4) ||
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_NON))
- phba->Port[j] = 0;
- continue;
- }
- else {
- index += 2;
- i = vpd[index];
- index += 1;
- index += i;
- Length -= (3 + i);
- }
- }
- finished = 0;
- break;
+
+ lpfc_fill_vpd(phba, vpd, Length, &index);
+ finished = 0;
+ break;
case 0x78:
finished = 1;
break;
@@ -2258,6 +2421,90 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
}
/**
+ * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+ uint16_t sub_dev_id = phba->pcidev->subsystem_device;
+ char *model = "<Unknown>";
+ int tbolt = 0;
+
+ switch (sub_dev_id) {
+ case PCI_DEVICE_ID_CLRY_161E:
+ model = "161E";
+ break;
+ case PCI_DEVICE_ID_CLRY_162E:
+ model = "162E";
+ break;
+ case PCI_DEVICE_ID_CLRY_164E:
+ model = "164E";
+ break;
+ case PCI_DEVICE_ID_CLRY_161P:
+ model = "161P";
+ break;
+ case PCI_DEVICE_ID_CLRY_162P:
+ model = "162P";
+ break;
+ case PCI_DEVICE_ID_CLRY_164P:
+ model = "164P";
+ break;
+ case PCI_DEVICE_ID_CLRY_321E:
+ model = "321E";
+ break;
+ case PCI_DEVICE_ID_CLRY_322E:
+ model = "322E";
+ break;
+ case PCI_DEVICE_ID_CLRY_324E:
+ model = "324E";
+ break;
+ case PCI_DEVICE_ID_CLRY_321P:
+ model = "321P";
+ break;
+ case PCI_DEVICE_ID_CLRY_322P:
+ model = "322P";
+ break;
+ case PCI_DEVICE_ID_CLRY_324P:
+ model = "324P";
+ break;
+ case PCI_DEVICE_ID_TLFC_2XX2:
+ model = "2XX2";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3162:
+ model = "3162";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3322:
+ model = "3322";
+ tbolt = 1;
+ break;
+ default:
+ model = "Unknown";
+ break;
+ }
+
+ if (mdp && mdp[0] == '\0')
+ snprintf(mdp, 79, "%s", model);
+
+ if (descp && descp[0] == '\0')
+ snprintf(descp, 255,
+ "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
+ (tbolt) ? "ThunderLink FC " : "Celerity FC-",
+ model,
+ phba->Port);
+}
+
+/**
* lpfc_get_hba_model_desc - Retrieve HBA device model name and description
* @phba: pointer to lpfc hba data structure.
* @mdp: pointer to the data structure to hold the derived model name.
@@ -2287,6 +2534,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
+ if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+ lpfc_get_atto_model_desc(phba, mdp, descp);
+ return;
+ }
+
if (phba->lmt & LMT_64Gb)
max_speed = 64;
else if (phba->lmt & LMT_32Gb)
@@ -2436,11 +2688,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
- case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe",
- "Obsolete, Unsupported FCoE Adapter"};
- GE = 1;
- break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
"Obsolete, Unsupported Fibre Channel Adapter"};
@@ -2491,6 +2738,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_LANCER_G7_FC:
m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
break;
+ case PCI_DEVICE_ID_LANCER_G7P_FC:
+ m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
+ break;
case PCI_DEVICE_ID_SKYHAWK:
case PCI_DEVICE_ID_SKYHAWK_VF:
oneConnect = 1;
@@ -2526,7 +2776,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
}
/**
- * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
+ * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a IOCB ring.
* @cnt: the number of IOCBs to be posted to the IOCB ring.
@@ -2538,7 +2788,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
* The number of IOCBs NOT able to be posted to the IOCB ring.
**/
int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
IOCB_t *icmd;
struct lpfc_iocbq *iocb;
@@ -2644,7 +2894,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
@@ -2790,29 +3040,11 @@ lpfc_cleanup(struct lpfc_vport *vport)
if (phba->link_state > LPFC_LINK_DOWN)
lpfc_port_link_failure(vport);
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- continue;
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- /* Trigger the release of the ndlp memory */
- lpfc_nlp_put(ndlp);
- continue;
- }
- spin_lock_irq(&phba->ndlp_lock);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- /* The ndlp should not be in memory free mode already */
- spin_unlock_irq(&phba->ndlp_lock);
- continue;
- } else
- /* Indicate request for freeing ndlp memory */
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
+ /* Clean up VMID resources */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_vmid_vport_cleanup(vport);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
/* Just free up ndlp with Fabric_DID for vports */
@@ -2820,21 +3052,40 @@ lpfc_cleanup(struct lpfc_vport *vport)
continue;
}
- /* take care of nodes in unused state before the state
- * machine taking action.
- */
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ if (ndlp->nlp_DID == Fabric_Cntl_DID &&
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
continue;
}
- if (ndlp->nlp_type & NLP_FABRIC)
+ /* Fabric Ports not in UNMAPPED state are cleaned up in the
+ * DEVICE_RM event.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RM);
- }
+ if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+
+ /* This is a special case flush to return all
+ * IOs before entering this loop. There are
+ * two points in the code where a flush is
+ * avoided if the FC_UNLOADING flag is set:
+ * one is in the multipool destroy
+ * (this prevents a crash) and the other is
+ * in the nvme abort handler (also prevents
+ * a crash). Both of these exceptions are
+ * cases where the slot is still accessible.
+ * The flush here is only when the pci slot
+ * is offline.
+ */
+ if (vport->load_flag & FC_UNLOADING &&
+ pci_channel_offline(phba->pcidev))
+ lpfc_sli_flush_io_rings(vport->phba);
/* At this point, ALL ndlp's should be gone
* because of the previous NLP_EVT_DEVICE_RM.
@@ -2842,17 +3093,19 @@ lpfc_cleanup(struct lpfc_vport *vport)
*/
while (!list_empty(&vport->fc_nodes)) {
if (i++ > 3000) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0233 Nodelist not empty\n");
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_NODE,
- "0282 did:x%x ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- ndlp->nlp_DID, (void *)ndlp,
- ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
+ LOG_DISCOVERY,
+ "0282 did:x%x ndlp:x%px "
+ "refcnt:%d xflags x%x nflag x%x\n",
+ ndlp->nlp_DID, (void *)ndlp,
+ kref_read(&ndlp->kref),
+ ndlp->fc4_xpt_flags,
+ ndlp->nlp_flag);
}
break;
}
@@ -2922,6 +3175,123 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
}
/**
+ * lpfc_cmf_stop - Stop CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link goes down or if CMF mode is turned OFF.
+ * It is also called when going offline or unloaded just before the
+ * congestion info buffer is unregistered.
+ **/
+void
+lpfc_cmf_stop(struct lpfc_hba *phba)
+{
+ int cpu;
+ struct lpfc_cgn_stat *cgs;
+
+ /* We only do something if CMF is enabled */
+ if (!phba->sli4_hba.pc_sli4_params.cmf)
+ return;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6221 Stop CMF / Cancel Timer\n");
+
+ /* Cancel the CMF timer */
+ hrtimer_cancel(&phba->cmf_timer);
+
+ /* Zero CMF counters */
+ atomic_set(&phba->cmf_busy, 0);
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ atomic64_set(&cgs->total_bytes, 0);
+ atomic64_set(&cgs->rcv_bytes, 0);
+ atomic_set(&cgs->rx_io_cnt, 0);
+ atomic64_set(&cgs->rx_latency, 0);
+ }
+ atomic_set(&phba->cmf_bw_wait, 0);
+
+ /* Resume any blocked IO - Queue unblock on workqueue */
+ queue_work(phba->wq, &phba->unblock_request_work);
+}
+
+static inline uint64_t
+lpfc_get_max_line_rate(struct lpfc_hba *phba)
+{
+ uint64_t rate = lpfc_sli_port_speed_get(phba);
+
+ return ((((unsigned long)rate) * 1024 * 1024) / 10);
+}
+
+void
+lpfc_cmf_signal_init(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6223 Signal CMF init\n");
+
+ /* Use the new fc_linkspeed to recalculate */
+ phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+ phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
+ phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
+ phba->cmf_interval_rate, 1000);
+ phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
+
+ /* This is a signal to firmware to sync up CMF BW with link speed */
+ lpfc_issue_cmf_sync_wqe(phba, 0, 0);
+}
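
Worked numbers for the conversion above (assuming lpfc_sli_port_speed_get() reports link speed in megabits per second): a 32G link gives rate = 32000, so cmf_max_line_rate = 32000 * 1024 * 1024 / 10 ≈ 3.36e9 bytes/s — the divide-by-ten approximating the ~10 encoded wire bits per data byte — and cmf_link_byte_count then scales that to bytes per LPFC_CMF_INTERVAL milliseconds via the div_u64() by 1000.
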
+
+/**
+ * lpfc_cmf_start - Start CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link comes up or if CMF mode is turned OFF
+ * to Monitor or Managed.
+ **/
+void
+lpfc_cmf_start(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_stat *cgs;
+ int cpu;
+
+ /* We only do something if CMF is enabled */
+ if (!phba->sli4_hba.pc_sli4_params.cmf ||
+ phba->cmf_active_mode == LPFC_CFG_OFF)
+ return;
+
+ /* Reinitialize congestion buffer info */
+ lpfc_init_congestion_buf(phba);
+
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_warn_cnt, 0);
+
+ atomic_set(&phba->cmf_busy, 0);
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ atomic64_set(&cgs->total_bytes, 0);
+ atomic64_set(&cgs->rcv_bytes, 0);
+ atomic_set(&cgs->rx_io_cnt, 0);
+ atomic64_set(&cgs->rx_latency, 0);
+ }
+ phba->cmf_latency.tv_sec = 0;
+ phba->cmf_latency.tv_nsec = 0;
+
+ lpfc_cmf_signal_init(phba);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6222 Start CMF / Timer\n");
+
+ phba->cmf_timer_cnt = 0;
+ hrtimer_start(&phba->cmf_timer,
+ ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
+ HRTIMER_MODE_REL);
+ /* Setup for latency check in IO cmpl routines */
+ ktime_get_real_ts64(&phba->cmf_latency);
+
+ atomic_set(&phba->cmf_bw_wait, 0);
+ atomic_set(&phba->cmf_stop_io, 0);
+}
+
+/**
* lpfc_stop_hba_timers - Stop all the timers associated with an HBA
* @phba: pointer to lpfc hba data structure.
*
@@ -2934,6 +3304,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
if (phba->pport)
lpfc_stop_vport_timers(phba->pport);
cancel_delayed_work_sync(&phba->eq_delay_work);
+ cancel_delayed_work_sync(&phba->idle_stat_delay_work);
del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
@@ -2942,7 +3313,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->rrq_tmr);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
}
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
@@ -2954,7 +3325,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0297 Invalid device group (x%x)\n",
phba->pci_dev_grp);
break;
@@ -2965,6 +3336,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
/**
* lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
* @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox no wait action.
*
* This routine marks a HBA's management interface as blocked. Once the HBA's
* management interface is marked as blocked, all the user space access to
@@ -3001,10 +3373,10 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2813 Mgmt IO is Blocked %x "
- "- mbox cmd %x still active\n",
- phba->sli.sli_flag, actcmd);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2813 Mgmt IO is Blocked %x "
+ "- mbox cmd %x still active\n",
+ phba->sli.sli_flag, actcmd);
break;
}
}
@@ -3024,7 +3396,6 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
int i, rpi;
- unsigned long flags;
if (phba->sli_rev != LPFC_SLI_REV4)
return;
@@ -3040,22 +3411,18 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
rpi = lpfc_sli4_alloc_rpi(phba);
if (rpi == LPFC_RPI_ALLOC_ERROR) {
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* TODO print log? */
continue;
}
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0009 Assign RPI x%x to ndlp x%px "
- "DID:x%06x flg:x%x map:x%x\n",
+ "DID:x%06x flg:x%x\n",
ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, ndlp->nlp_usg_map);
+ ndlp->nlp_flag);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3349,7 +3716,7 @@ lpfc_online(struct lpfc_hba *phba)
!phba->nvmet_support) {
error = lpfc_nvme_create_localport(phba->pport);
if (error)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6132 NVME restore reg failed "
"on nvmei error x%x\n", error);
}
@@ -3415,6 +3782,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
/**
* lpfc_offline_prep - Prepare a HBA to be brought offline
* @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox shutdown action.
*
* This routine is invoked to prepare a HBA to be brought offline. It performs
* unregistration login to all the nodes on all vports and flushes the mailbox
@@ -3428,6 +3796,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
+ int offline;
+ bool hba_pci_err;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3436,6 +3806,9 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
+ offline = pci_channel_offline(phba->pcidev);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
+
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -3453,42 +3826,58 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if ((!NLP_CHK_NODE_ACT(ndlp)) ||
- ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* Driver must assume RPI is invalid for
- * any unused or inactive node.
- */
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- continue;
- }
- if (ndlp->nlp_type & NLP_FABRIC) {
- lpfc_disc_state_machine(vports[i], ndlp,
- NULL, NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vports[i], ndlp,
- NULL, NLP_EVT_DEVICE_RM);
- }
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+
+ if (offline || hba_pci_err) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_UNREG_INP |
+ NLP_RPI_REGISTERED);
+ spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli_rpi_release(vports[i],
+ ndlp);
+ } else {
+ lpfc_unreg_rpi(vports[i], ndlp);
+ }
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ lpfc_printf_vlog(vports[i], KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0011 Free RPI x%x on "
- "ndlp:x%px did x%x "
- "usgmap:x%x\n",
+ "ndlp: x%px did x%x\n",
ndlp->nlp_rpi, ndlp,
- ndlp->nlp_DID,
- ndlp->nlp_usg_map);
+ ndlp->nlp_DID);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
- lpfc_unreg_rpi(vports[i], ndlp);
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RECOVERY);
+
+ /* Don't remove the node unless the node
+ * has been unregistered with the
+ * transport, and we're not in recovery
+ * before dev_loss_tmo triggered.
+ * Otherwise, let dev_loss take care of
+ * the node.
+ */
+ if (!(ndlp->save_flags &
+ NLP_IN_RECOV_POST_DEV_LOSS) &&
+ !(ndlp->fc4_xpt_flags &
+ (NVME_XPT_REGD | SCSI_XPT_REGD)))
+ lpfc_disc_state_machine
+ (vports[i], ndlp,
+ NULL,
+ NLP_EVT_DEVICE_RM);
+ }
}
}
}
@@ -3550,7 +3939,11 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
- __lpfc_cpuhp_remove(phba);
+ /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
+ * in hba_unset
+ */
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -3689,7 +4082,8 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
GFP_KERNEL);
if (sglq_entry == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2562 Failure to allocate an "
"ELS sgl entry:%d\n", i);
rc = -ENOMEM;
@@ -3700,7 +4094,8 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
&sglq_entry->phys);
if (sglq_entry->virt == NULL) {
kfree(sglq_entry);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2563 Failure to allocate an "
"ELS mbuf:%d\n", i);
rc = -ENOMEM;
@@ -3711,12 +4106,10 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
sglq_entry->state = SGL_FREED;
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_splice_init(&els_sgl_list,
&phba->sli4_hba.lpfc_els_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrinked */
xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3724,8 +4117,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
"3158 ELS xri-sgl count decreased from "
"%d to %d\n", phba->sli4_hba.els_xri_cnt,
els_xri_cnt);
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
&els_sgl_list);
/* release extra els sgls from list */
@@ -3740,8 +4132,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
}
list_splice_init(&els_sgl_list,
&phba->sli4_hba.lpfc_els_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
} else
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3163 ELS xri-sgl count unchanged: %d\n",
@@ -3755,7 +4146,8 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
&phba->sli4_hba.lpfc_els_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2400 Failed to allocate xri for "
"ELS sgl\n");
rc = -ENOMEM;
@@ -3810,7 +4202,8 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
GFP_KERNEL);
if (sglq_entry == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6303 Failure to allocate an "
"NVMET sgl entry:%d\n", i);
rc = -ENOMEM;
@@ -3821,7 +4214,8 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
&sglq_entry->phys);
if (sglq_entry->virt == NULL) {
kfree(sglq_entry);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6304 Failure to allocate an "
"NVMET buf:%d\n", i);
rc = -ENOMEM;
@@ -3877,7 +4271,8 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
&phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6307 Failed to allocate xri for "
"NVMET sgl\n");
rc = -ENOMEM;
@@ -3974,8 +4369,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
qp = &phba->sli4_hba.hdwq[idx];
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
- lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
@@ -4019,9 +4413,10 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"6074 Current allocated XRI sgl count:%d, "
- "maximum XRI count:%d\n",
+ "maximum XRI count:%d els_xri_cnt:%d\n\n",
phba->sli4_hba.io_xri_cnt,
- phba->sli4_hba.io_xri_max);
+ phba->sli4_hba.io_xri_max,
+ els_xri_cnt);
cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
@@ -4051,7 +4446,8 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
&io_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6075 Failed to allocate xri for "
"nvme buffer\n");
rc = -ENOMEM;
@@ -4070,8 +4466,8 @@ out_free_mem:
/**
* lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
+ * @phba: Pointer to lpfc hba data structure.
+ * @num_to_alloc: The requested number of buffers to allocate.
*
* This routine allocates nvme buffers for a device with the SLI-4 interface
* spec; each nvme buffer contains all the necessary information needed to initiate
@@ -4121,7 +4517,8 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
(((unsigned long)(lpfc_ncmd->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"3369 Memory alignment err: "
"addr=%lx\n",
(unsigned long)lpfc_ncmd->data);
@@ -4150,7 +4547,7 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6121 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
@@ -4158,12 +4555,11 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
pwqeq->sli4_lxritag = lxri;
pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- pwqeq->context1 = lpfc_ncmd;
/* Initialize local short-hand pointers. */
lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
- lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+ lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
spin_lock_init(&lpfc_ncmd->buf_lock);
/* add the nvme buffer to a post list */
@@ -4172,7 +4568,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6114 Allocate %d out of %d requested new NVME "
- "buffers\n", bcnt, num_to_alloc);
+ "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
+ sizeof(*lpfc_ncmd));
+
/* post the list of nvme buffer sgls to port if available */
if (!list_empty(&post_nblist))
@@ -4201,7 +4599,7 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
lpfc_read_nv(phba, mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6019 Mailbox failed , mbxCmd x%x "
"READ_NV, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
@@ -4219,6 +4617,66 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
return rol64(wwn, 32);
}
+static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ return LPFC_MAX_SG_TABLESIZE;
+ else
+ return phba->cfg_scsi_seg_cnt;
+ } else {
+ return phba->cfg_sg_seg_cnt;
+ }
+}
+
+/**
+ * lpfc_vmid_res_alloc - Allocates resources for VMID
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to vport data structure
+ *
+ * This routine allocates the resources needed for the VMID.
+ *
+ * Return codes
+ * 0 on Success
+ * Non-0 on Failure
+ */
+static int
+lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ /* VMID feature is supported only on SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ phba->cfg_vmid_app_header = 0;
+ phba->cfg_vmid_priority_tagging = 0;
+ }
+
+ if (lpfc_is_vmid_enabled(phba)) {
+ vport->vmid =
+ kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
+ GFP_KERNEL);
+ if (!vport->vmid)
+ return -ENOMEM;
+
+ rwlock_init(&vport->vmid_lock);
+
+ /* Set the VMID parameters for the vport */
+ vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
+ vport->vmid_inactivity_timeout =
+ phba->cfg_vmid_inactivity_timeout;
+ vport->max_vmid = phba->cfg_max_vmid;
+ vport->cur_vmid_cnt = 0;
+
+ vport->vmid_priority_range = bitmap_zalloc
+ (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
+
+ if (!vport->vmid_priority_range) {
+ kfree(vport->vmid);
+ return -ENOMEM;
+ }
+
+ hash_init(vport->hash_table);
+ }
+ return 0;
+}
+
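Note how the routine pairs kcalloc() with bitmap_zalloc() and unwinds the first allocation when the second fails; callers must free with kfree() and bitmap_free() in kind (as the out_free_vmid label further down does). A minimal sketch of that pairing, with hypothetical names:

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    struct demo_res {                   /* hypothetical container */
            void *table;
            unsigned long *prio_bits;
    };

    static int demo_res_alloc(struct demo_res *r, size_t n, size_t esize,
                              unsigned int nbits)
    {
            r->table = kcalloc(n, esize, GFP_KERNEL);
            if (!r->table)
                    return -ENOMEM;

            r->prio_bits = bitmap_zalloc(nbits, GFP_KERNEL);
            if (!r->prio_bits) {
                    kfree(r->table);    /* unwind the first allocation */
                    return -ENOMEM;
            }
            return 0;
    }

    static void demo_res_free(struct demo_res *r)
    {
            bitmap_free(r->prio_bits);  /* both are NULL-safe */
            kfree(r->table);
    }
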
/**
* lpfc_create_port - Create an FC port
* @phba: pointer to lpfc hba data structure.
@@ -4240,6 +4698,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
+ struct scsi_host_template *template;
int error = 0;
int i;
uint64_t wwn;
@@ -4260,7 +4719,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
if (wwn == lpfc_no_hba_reset[i]) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6020 Setting use_no_reset port=%llx\n",
wwn);
use_no_reset_hba = true;
@@ -4268,22 +4728,31 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
}
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- if (dev != &phba->pcidev->dev) {
- shost = scsi_host_alloc(&lpfc_vport_template,
- sizeof(struct lpfc_vport));
+ /* Seed template for SCSI host registration */
+ if (dev == &phba->pcidev->dev) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Seed physical port template */
+ template = &lpfc_template;
+
+ if (use_no_reset_hba)
+ /* template is for a no reset SCSI Host */
+ template->eh_host_reset_handler = NULL;
+
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
} else {
- if (!use_no_reset_hba)
- shost = scsi_host_alloc(&lpfc_template,
- sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template_no_hr,
- sizeof(struct lpfc_vport));
+ /* NVMET is for physical port only */
+ template = &lpfc_template_nvme;
}
- } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- shost = scsi_host_alloc(&lpfc_template_nvme,
- sizeof(struct lpfc_vport));
+ } else {
+ /* Seed vport template */
+ template = &lpfc_vport_template;
+
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
}
+
+ shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
if (!shost)
goto out;
@@ -4313,11 +4782,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-
- if (phba->cfg_xpsgl && !phba->nvmet_support)
- shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
- else
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -4338,6 +4802,18 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->port_type = LPFC_PHYSICAL_PORT;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9081 CreatePort TMPLATE type %x TBLsize %d "
+ "SEGcnt %d/%d\n",
+ vport->port_type, shost->sg_tablesize,
+ phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
+
+ /* Allocate the resources for VMID */
+ rc = lpfc_vmid_res_alloc(phba, vport);
+ if (rc)
+ goto out_put_shost;
+
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@@ -4354,13 +4830,16 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
- goto out_put_shost;
+ goto out_free_vmid;
spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
spin_unlock_irq(&phba->port_list_lock);
return vport;
+out_free_vmid:
+ kfree(vport->vmid);
+ bitmap_free(vport->vmid_priority_range);
out_put_shost:
scsi_host_put(shost);
out:
@@ -4476,6 +4955,15 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
struct lpfc_hba *phba = vport->phba;
fc_host_supported_speeds(shost) = 0;
+ /*
+ * Avoid reporting supported link speed for FCoE as it can't be
+ * controlled via FCoE.
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE)
+ return;
+
+ if (phba->lmt & LMT_256Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
if (phba->lmt & LMT_128Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
if (phba->lmt & LMT_64Gb)
@@ -4623,7 +5111,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
/**
* lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
*
* This routine is invoked when waiting for FCF table rediscover has been
* timed out. If new FCF record(s) has (have) been discovered during the
@@ -4654,6 +5142,42 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
}
/**
+ * lpfc_vmid_poll - VMID timeout detection
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
+ *
+ * This routine is invoked when there has been no I/O by a VM for the specified
+ * amount of time. When this situation is detected, the VMID has to be
+ * deregistered from the switch and all the local resources freed. The VMID
+ * will be reassigned to the VM once the I/O begins.
+ **/
+static void
+lpfc_vmid_poll(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
+ u32 wake_up = 0;
+
+ /* check if there is a need to issue QFPA */
+ if (phba->pport->vmid_priority_tagging) {
+ wake_up = 1;
+ phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+
+ /* Is the vmid inactivity timer enabled */
+ if (phba->pport->vmid_inactivity_timeout ||
+ phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
+ wake_up = 1;
+ phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
+ }
+
+ if (wake_up)
+ lpfc_worker_wake_up(phba);
+
+ /* restart the timer for the next iteration */
+ mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
+ LPFC_VMID_TIMER));
+}
+
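lpfc_vmid_poll() is the standard self-rearming timer_list shape: recover the owning structure with from_timer(), flag work for a worker thread instead of doing it in timer context, and mod_timer() for the next tick. A reduced sketch of that shape (structure and names hypothetical):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_hba {                   /* hypothetical */
            struct timer_list poll;
            unsigned long work_events;
    };

    static void demo_poll(struct timer_list *t)
    {
            struct demo_hba *h = from_timer(h, t, poll);

            h->work_events |= 1;        /* defer real work to a worker */
            /* re-arm for the next 1-second tick */
            mod_timer(&h->poll, jiffies + msecs_to_jiffies(1000));
    }

    static void demo_poll_start(struct demo_hba *h)
    {
            timer_setup(&h->poll, demo_poll, 0);
            mod_timer(&h->poll, jiffies + msecs_to_jiffies(1000));
    }
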
+/**
* lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async link completion queue entry.
@@ -4671,7 +5195,7 @@ lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
case LPFC_ASYNC_LINK_FAULT_LR_LRR:
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0398 Unknown link fault code: x%x\n",
bf_get(lpfc_acqe_link_fault, acqe_link));
break;
@@ -4707,7 +5231,7 @@ lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
att_type = LPFC_ATT_LINK_UP;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0399 Invalid link attention type: x%x\n",
bf_get(lpfc_acqe_link_status, acqe_link));
att_type = LPFC_ATT_RESERVED;
@@ -4809,6 +5333,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_ASYNC_LINK_SPEED_40GBPS:
port_speed = 40000;
break;
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
+ port_speed = 100000;
+ break;
default:
port_speed = 0;
}
@@ -4845,6 +5372,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_FC_LA_SPEED_128G:
port_speed = 128000;
break;
+ case LPFC_FC_LA_SPEED_256G:
+ port_speed = 256000;
+ break;
default:
port_speed = 0;
}
@@ -4866,7 +5396,6 @@ static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
- struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
@@ -4879,22 +5408,17 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->fcoe_eventtag = acqe_link->event_tag;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0395 The mboxq allocation failed\n");
return;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0396 The lpfc_dmabuf allocation failed\n");
+
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0396 mailbox allocation failed\n");
goto out_free_pmb;
}
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0397 The mbuf allocation failed\n");
- goto out_free_dmabuf;
- }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -4906,7 +5430,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -4945,7 +5469,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
- goto out_free_dmabuf;
+ goto out_free_pmb;
return;
}
/*
@@ -4980,17 +5504,14 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
return;
-out_free_dmabuf:
- kfree(mp);
out_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
* lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
* topology.
* @phba: pointer to lpfc hba data structure.
- * @evt_code: asynchronous event code.
* @speed_code: asynchronous event link speed code.
*
* This routine is to parse the given SLI4 async event link speed code into
@@ -5039,6 +5560,620 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
return port_speed;
}
+void
+lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
+{
+ if (!phba->rx_monitor) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4411 Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
+ LPFC_MAX_RXMONITOR_DUMP);
+ }
+}
+
+/**
+ * lpfc_cgn_update_stat - Save data into congestion stats buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @dtag: FPIN descriptor received
+ *
+ * Increment the FPIN received counter and record the time of receipt.
+ */
+void
+lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
+{
+ struct lpfc_cgn_info *cp;
+ struct tm broken;
+ struct timespec64 cur_time;
+ u32 cnt;
+ u32 value;
+
+ /* Make sure we have a congestion info buffer */
+ if (!phba->cgn_i)
+ return;
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+ ktime_get_real_ts64(&cur_time);
+ time64_to_tm(cur_time.tv_sec, 0, &broken);
+
+ /* Update congestion statistics */
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ cnt = le32_to_cpu(cp->link_integ_notification);
+ cnt++;
+ cp->link_integ_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_lnk_month = broken.tm_mon + 1;
+ cp->cgn_stat_lnk_day = broken.tm_mday;
+ cp->cgn_stat_lnk_year = broken.tm_year - 100;
+ cp->cgn_stat_lnk_hour = broken.tm_hour;
+ cp->cgn_stat_lnk_min = broken.tm_min;
+ cp->cgn_stat_lnk_sec = broken.tm_sec;
+ break;
+ case ELS_DTAG_DELIVERY:
+ cnt = le32_to_cpu(cp->delivery_notification);
+ cnt++;
+ cp->delivery_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_del_month = broken.tm_mon + 1;
+ cp->cgn_stat_del_day = broken.tm_mday;
+ cp->cgn_stat_del_year = broken.tm_year - 100;
+ cp->cgn_stat_del_hour = broken.tm_hour;
+ cp->cgn_stat_del_min = broken.tm_min;
+ cp->cgn_stat_del_sec = broken.tm_sec;
+ break;
+ case ELS_DTAG_PEER_CONGEST:
+ cnt = le32_to_cpu(cp->cgn_peer_notification);
+ cnt++;
+ cp->cgn_peer_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_peer_month = broken.tm_mon + 1;
+ cp->cgn_stat_peer_day = broken.tm_mday;
+ cp->cgn_stat_peer_year = broken.tm_year - 100;
+ cp->cgn_stat_peer_hour = broken.tm_hour;
+ cp->cgn_stat_peer_min = broken.tm_min;
+ cp->cgn_stat_peer_sec = broken.tm_sec;
+ break;
+ case ELS_DTAG_CONGESTION:
+ cnt = le32_to_cpu(cp->cgn_notification);
+ cnt++;
+ cp->cgn_notification = cpu_to_le32(cnt);
+
+ cp->cgn_stat_cgn_month = broken.tm_mon + 1;
+ cp->cgn_stat_cgn_day = broken.tm_mday;
+ cp->cgn_stat_cgn_year = broken.tm_year - 100;
+ cp->cgn_stat_cgn_hour = broken.tm_hour;
+ cp->cgn_stat_cgn_min = broken.tm_min;
+ cp->cgn_stat_cgn_sec = broken.tm_sec;
+ }
+ if (phba->cgn_fpin_frequency &&
+ phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
+ value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
+ cp->cgn_stat_npm = value;
+ }
+ value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(value);
+}
+
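Every counter in the shared buffer is kept little-endian and resealed with a CRC after each update, since firmware reads the block directly. A self-contained userspace sketch of endian-safe counter handling (illustrative only; the driver uses le32_to_cpu()/cpu_to_le32() and its own CRC helper):

    #include <stdint.h>

    /* Load/store a little-endian u32 regardless of host endianness */
    static uint32_t le32_load(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static void le32_store(uint8_t *p, uint32_t v)
    {
            p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
    }

    static void bump_le32_counter(uint8_t *field)
    {
            le32_store(field, le32_load(field) + 1);
            /* caller then recomputes the CRC over the whole block */
    }
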
+/**
+ * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Save the congestion event data every minute.
+ * On the hour collapse all the minute data into hour data. Every day
+ * collapse all the hour data into daily data. Driver and fabric
+ * congestion event counters are kept separately and saved out
+ * to the registered congestion buffer every minute.
+ */
+static void
+lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_info *cp;
+ struct tm broken;
+ struct timespec64 cur_time;
+ uint32_t i, index;
+ uint16_t value, mvalue;
+ uint64_t bps;
+ uint32_t mbps;
+ uint32_t dvalue, wvalue, lvalue, avalue;
+ uint64_t latsum;
+ __le16 *ptr;
+ __le32 *lptr;
+ __le16 *mptr;
+
+ /* Make sure we have a congestion info buffer */
+ if (!phba->cgn_i)
+ return;
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ if (time_before(jiffies, phba->cgn_evt_timestamp))
+ return;
+ phba->cgn_evt_timestamp = jiffies +
+ msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
+ phba->cgn_evt_minute++;
+
+ /* We should get to this point in the routine on 1 minute intervals */
+
+ ktime_get_real_ts64(&cur_time);
+ time64_to_tm(cur_time.tv_sec, 0, &broken);
+
+ if (phba->cgn_fpin_frequency &&
+ phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
+ value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
+ cp->cgn_stat_npm = value;
+ }
+
+ /* Read and clear the latency counters for this minute */
+ lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
+ latsum = atomic64_read(&phba->cgn_latency_evt);
+ atomic_set(&phba->cgn_latency_evt_cnt, 0);
+ atomic64_set(&phba->cgn_latency_evt, 0);
+
+ /* We need to store MB/sec bandwidth in the congestion information.
+ * block_cnt is count of 512 byte blocks for the entire minute,
+ * bps will get bytes per sec before finally converting to MB/sec.
+ */
+ bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
+ phba->rx_block_cnt = 0;
+ mvalue = bps / (1024 * 1024); /* convert to MB/sec */
+
+ /* Every minute */
+ /* cgn parameters */
+ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+ cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+ cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+ cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+
+ /* Fill in default LUN qdepth */
+ value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
+ cp->cgn_lunq = cpu_to_le16(value);
+
+ /* Record congestion buffer info - every minute
+ * cgn_driver_evt_cnt (Driver events)
+ * cgn_fabric_warn_cnt (Congestion Warnings)
+ * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
+ * cgn_fabric_alarm_cnt (Congestion Alarms)
+ */
+ index = ++cp->cgn_index_minute;
+ if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
+ cp->cgn_index_minute = 0;
+ index = 0;
+ }
+
+ /* Get the number of driver events in this sample and reset counter */
+ dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
+ atomic_set(&phba->cgn_driver_evt_cnt, 0);
+
+ /* Get the number of warning events - FPIN and Signal for this minute */
+ wvalue = 0;
+ if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+ wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+
+ /* Get the number of alarm events - FPIN and Signal for this minute */
+ avalue = 0;
+ if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+ avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+
+ /* Collect the driver, warning, alarm and latency counts for this
+ * minute into the driver congestion buffer.
+ */
+ ptr = &cp->cgn_drvr_min[index];
+ value = (uint16_t)dvalue;
+ *ptr = cpu_to_le16(value);
+
+ ptr = &cp->cgn_warn_min[index];
+ value = (uint16_t)wvalue;
+ *ptr = cpu_to_le16(value);
+
+ ptr = &cp->cgn_alarm_min[index];
+ value = (uint16_t)avalue;
+ *ptr = cpu_to_le16(value);
+
+ lptr = &cp->cgn_latency_min[index];
+ if (lvalue) {
+ lvalue = (uint32_t)div_u64(latsum, lvalue);
+ *lptr = cpu_to_le32(lvalue);
+ } else {
+ *lptr = 0;
+ }
+
+ /* Collect the bandwidth value into the driver's congestion buffer. */
+ mptr = &cp->cgn_bw_min[index];
+ *mptr = cpu_to_le16(mvalue);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
+ index, dvalue, wvalue, *lptr, mvalue, avalue);
+
+ /* Every hour */
+ if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
+ /* Record congestion buffer info - every hour
+ * Collapse all minutes into an hour
+ */
+ index = ++cp->cgn_index_hour;
+ if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
+ cp->cgn_index_hour = 0;
+ index = 0;
+ }
+
+ dvalue = 0;
+ wvalue = 0;
+ lvalue = 0;
+ avalue = 0;
+ mvalue = 0;
+ mbps = 0;
+ for (i = 0; i < LPFC_MIN_HOUR; i++) {
+ dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
+ wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
+ lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
+ mbps += le16_to_cpu(cp->cgn_bw_min[i]);
+ avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
+ }
+ if (lvalue) /* Avg of latency averages */
+ lvalue /= LPFC_MIN_HOUR;
+ if (mbps) /* Avg of Bandwidth averages */
+ mvalue = mbps / LPFC_MIN_HOUR;
+
+ lptr = &cp->cgn_drvr_hr[index];
+ *lptr = cpu_to_le32(dvalue);
+ lptr = &cp->cgn_warn_hr[index];
+ *lptr = cpu_to_le32(wvalue);
+ lptr = &cp->cgn_latency_hr[index];
+ *lptr = cpu_to_le32(lvalue);
+ mptr = &cp->cgn_bw_hr[index];
+ *mptr = cpu_to_le16(mvalue);
+ lptr = &cp->cgn_alarm_hr[index];
+ *lptr = cpu_to_le32(avalue);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2419 Congestion Info - hour "
+ "(%d): %d %d %d %d %d\n",
+ index, dvalue, wvalue, lvalue, mvalue, avalue);
+ }
+
+ /* Every day */
+ if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
+ /* Record congestion buffer info - every day.
+ * Collapse all hours into a day. Rotate days
+ * after LPFC_MAX_CGN_DAYS.
+ */
+ index = ++cp->cgn_index_day;
+ if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
+ cp->cgn_index_day = 0;
+ index = 0;
+ }
+
+ /* Anytime we overwrite daily index 0, after we wrap,
+ * we will be overwriting the oldest day, so we must
+ * update the congestion data start time for that day.
+ * That start time should have previously been saved after
+ * we wrote the last days worth of data.
+ */
+ if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
+ time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
+
+ cp->cgn_info_month = broken.tm_mon + 1;
+ cp->cgn_info_day = broken.tm_mday;
+ cp->cgn_info_year = broken.tm_year - 100;
+ cp->cgn_info_hour = broken.tm_hour;
+ cp->cgn_info_minute = broken.tm_min;
+ cp->cgn_info_second = broken.tm_sec;
+
+ lpfc_printf_log
+ (phba, KERN_INFO, LOG_CGN_MGMT,
+ "2646 CGNInfo idx0 Start Time: "
+ "%d/%d/%d %d:%d:%d\n",
+ cp->cgn_info_day, cp->cgn_info_month,
+ cp->cgn_info_year, cp->cgn_info_hour,
+ cp->cgn_info_minute, cp->cgn_info_second);
+ }
+
+ dvalue = 0;
+ wvalue = 0;
+ lvalue = 0;
+ mvalue = 0;
+ mbps = 0;
+ avalue = 0;
+ for (i = 0; i < LPFC_HOUR_DAY; i++) {
+ dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
+ wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
+ lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
+ mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
+ avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
+ }
+ if (lvalue) /* Avg of latency averages */
+ lvalue /= LPFC_HOUR_DAY;
+ if (mbps) /* Avg of Bandwidth averages */
+ mvalue = mbps / LPFC_HOUR_DAY;
+
+ lptr = &cp->cgn_drvr_day[index];
+ *lptr = cpu_to_le32(dvalue);
+ lptr = &cp->cgn_warn_day[index];
+ *lptr = cpu_to_le32(wvalue);
+ lptr = &cp->cgn_latency_day[index];
+ *lptr = cpu_to_le32(lvalue);
+ mptr = &cp->cgn_bw_day[index];
+ *mptr = cpu_to_le16(mvalue);
+ lptr = &cp->cgn_alarm_day[index];
+ *lptr = cpu_to_le32(avalue);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2420 Congestion Info - daily (%d): "
+ "%d %d %d %d %d\n",
+ index, dvalue, wvalue, lvalue, mvalue, avalue);
+
+ /* We just wrote LPFC_MAX_CGN_DAYS of data,
+ * so we are wrapped on any data after this.
+ * Save this as the start time for the next day.
+ */
+ if (index == (LPFC_MAX_CGN_DAYS - 1)) {
+ phba->hba_flag |= HBA_CGN_DAY_WRAP;
+ ktime_get_real_ts64(&phba->cgn_daily_ts);
+ }
+ }
+
+ /* Use the frequency found in the last rcv'ed FPIN */
+ value = phba->cgn_fpin_frequency;
+ cp->cgn_warn_freq = cpu_to_le16(value);
+ cp->cgn_alarm_freq = cpu_to_le16(value);
+
+ lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(lvalue);
+}
+
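The minute/hour/day bookkeeping above is a set of nested ring buffers: advance an index with wraparound, and on each boundary collapse the finer ring into one slot of the coarser one. A compact userspace sketch of one level of that roll-up, with made-up sizes:

    #include <stdint.h>

    #define MIN_PER_HR 60

    static uint32_t warn_min[MIN_PER_HR];   /* per-minute samples */
    static uint32_t min_idx;

    /* Store one minute sample, wrapping like cgn_index_minute above */
    static void record_minute(uint32_t warn_cnt)
    {
            warn_min[min_idx] = warn_cnt;
            min_idx = (min_idx + 1) % MIN_PER_HR;
    }

    /* On the hour, collapse the minute ring into one hourly total */
    static uint32_t collapse_hour(void)
    {
            uint32_t i, sum = 0;

            for (i = 0; i < MIN_PER_HR; i++)
                    sum += warn_min[i];
            return sum;
    }
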
+/**
+ * lpfc_calc_cmf_latency - latency from start of the CMF timer interval
+ * @phba: The Hba for which this call is being executed.
+ *
+ * The routine calculates the latency from the beginning of the CMF timer
+ * interval to the current point in time. It is called from IO completion
+ * when we exceed our Bandwidth limitation for the time interval.
+ */
+uint32_t
+lpfc_calc_cmf_latency(struct lpfc_hba *phba)
+{
+ struct timespec64 cmpl_time;
+ uint32_t msec = 0;
+
+ ktime_get_real_ts64(&cmpl_time);
+
+ /* This routine works on a ms granularity so sec and nsec are
+ * converted accordingly.
+ */
+ if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
+ msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
+ NSEC_PER_MSEC;
+ } else {
+ if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
+ msec = (cmpl_time.tv_sec -
+ phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
+ msec += ((cmpl_time.tv_nsec -
+ phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
+ } else {
+ msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
+ 1) * MSEC_PER_SEC;
+ msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
+ cmpl_time.tv_nsec) / NSEC_PER_MSEC);
+ }
+ }
+ return msec;
+}
+
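The hand-rolled second/nanosecond split above is equivalent to subtracting two timespec64 values and dividing by NSEC_PER_MSEC. A sketch of the same computation via the kernel's time64 helpers, assuming the completion time never precedes the interval start:

    #include <linux/time64.h>
    #include <linux/timekeeping.h>

    /* Sketch: same result as lpfc_calc_cmf_latency(), via timespec64_sub() */
    static u32 demo_elapsed_ms(struct timespec64 start)
    {
            struct timespec64 now, delta;

            ktime_get_real_ts64(&now);
            delta = timespec64_sub(now, start);
            return (u32)(timespec64_to_ns(&delta) / NSEC_PER_MSEC);
    }
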
+/**
+ * lpfc_cmf_timer - This is the timer function for one congestion
+ * rate interval.
+ * @timer: Pointer to the high resolution timer that expired
+ */
+static enum hrtimer_restart
+lpfc_cmf_timer(struct hrtimer *timer)
+{
+ struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
+ cmf_timer);
+ struct rx_info_entry entry;
+ uint32_t io_cnt;
+ uint32_t busy, max_read;
+ uint64_t total, rcv, lat, mbpi, extra, cnt;
+ int timer_interval = LPFC_CMF_INTERVAL;
+ uint32_t ms;
+ struct lpfc_cgn_stat *cgs;
+ int cpu;
+
+ /* Only restart the timer if congestion mgmt is on */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF ||
+ !phba->cmf_latency.tv_sec) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6224 CMF timer exit: %d %lld\n",
+ phba->cmf_active_mode,
+ (uint64_t)phba->cmf_latency.tv_sec);
+ return HRTIMER_NORESTART;
+ }
+
+ /* If pport is not ready yet, just exit and wait for
+ * the next timer cycle to hit.
+ */
+ if (!phba->pport)
+ goto skip;
+
+ /* Do not block SCSI IO while in the timer routine since
+ * total_bytes will be cleared
+ */
+ atomic_set(&phba->cmf_stop_io, 1);
+
+ /* First we need to calculate the actual ms between
+ * the last timer interrupt and this one. We ask for
+ * LPFC_CMF_INTERVAL, however the actual time may
+ * vary depending on system overhead.
+ */
+ ms = lpfc_calc_cmf_latency(phba);
+
+ /* Immediately after we calculate the time since the last
+ * timer interrupt, set the start time for the next
+ * interrupt
+ */
+ ktime_get_real_ts64(&phba->cmf_latency);
+
+ phba->cmf_link_byte_count =
+ div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
+
+ /* Collect all the stats from the prior timer interval */
+ total = 0;
+ io_cnt = 0;
+ lat = 0;
+ rcv = 0;
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ total += atomic64_xchg(&cgs->total_bytes, 0);
+ io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
+ lat += atomic64_xchg(&cgs->rx_latency, 0);
+ rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
+ }
+
+ /* Before we issue another CMF_SYNC_WQE, retrieve the BW
+ * returned from the last CMF_SYNC_WQE issued, from
+ * cmf_last_sync_bw. This will be the target BW for
+ * this next timer interval.
+ */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
+ phba->link_state != LPFC_LINK_DOWN &&
+ phba->hba_flag & HBA_SETUP) {
+ mbpi = phba->cmf_last_sync_bw;
+ phba->cmf_last_sync_bw = 0;
+ extra = 0;
+
+ /* Calculate any extra bytes needed to account for the
+ * timer accuracy. If we are less than LPFC_CMF_INTERVAL
+ * calculate the adjustment needed for total to reflect
+ * a full LPFC_CMF_INTERVAL.
+ */
+ if (ms && ms < LPFC_CMF_INTERVAL) {
+ cnt = div_u64(total, ms); /* bytes per ms */
+ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
+
+ /* If the timeout is scheduled to be shorter,
+ * this value may skew the data, so cap it at mbpi.
+ */
+ if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
+ cnt = mbpi;
+
+ extra = cnt - total;
+ }
+ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
+ } else {
+ /* For Monitor mode or link down we want mbpi
+ * to be the full link speed
+ */
+ mbpi = phba->cmf_link_byte_count;
+ extra = 0;
+ }
+ phba->cmf_timer_cnt++;
+
+ if (io_cnt) {
+ /* Update congestion info buffer latency in us */
+ atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
+ atomic64_add(lat, &phba->cgn_latency_evt);
+ }
+ busy = atomic_xchg(&phba->cmf_busy, 0);
+ max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
+
+ /* Calculate MBPI for the next timer interval */
+ if (mbpi) {
+ if (mbpi > phba->cmf_link_byte_count ||
+ phba->cmf_active_mode == LPFC_CFG_MONITOR)
+ mbpi = phba->cmf_link_byte_count;
+
+ /* Change max_bytes_per_interval to what the prior
+ * CMF_SYNC_WQE cmpl indicated.
+ */
+ if (mbpi != phba->cmf_max_bytes_per_interval)
+ phba->cmf_max_bytes_per_interval = mbpi;
+ }
+
+ /* Save rxmonitor information for debug */
+ if (phba->rx_monitor) {
+ entry.total_bytes = total;
+ entry.cmf_bytes = total + extra;
+ entry.rcv_bytes = rcv;
+ entry.cmf_busy = busy;
+ entry.cmf_info = phba->cmf_active_info;
+ if (io_cnt) {
+ entry.avg_io_latency = div_u64(lat, io_cnt);
+ entry.avg_io_size = div_u64(rcv, io_cnt);
+ } else {
+ entry.avg_io_latency = 0;
+ entry.avg_io_size = 0;
+ }
+ entry.max_read_cnt = max_read;
+ entry.io_cnt = io_cnt;
+ entry.max_bytes_per_interval = mbpi;
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+ entry.timer_utilization = phba->cmf_last_ts;
+ else
+ entry.timer_utilization = ms;
+ entry.timer_interval = ms;
+ phba->cmf_last_ts = 0;
+
+ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
+ }
+
+ if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
+ /* If Monitor mode, check if we are oversubscribed
+ * against the full line rate.
+ */
+ if (mbpi && total > mbpi)
+ atomic_inc(&phba->cgn_driver_evt_cnt);
+ }
+ phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
+
+ /* Each minute save Fabric and Driver congestion information */
+ lpfc_cgn_save_evt_cnt(phba);
+
+ phba->hba_flag &= ~HBA_SHORT_CMF;
+
+ /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
+ * minute, adjust our next timer interval, if needed, to ensure a
+ * 1 minute granularity when we get the next timer interrupt.
+ */
+ if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
+ phba->cgn_evt_timestamp)) {
+ timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
+ jiffies);
+ if (timer_interval <= 0)
+ timer_interval = LPFC_CMF_INTERVAL;
+ else
+ phba->hba_flag |= HBA_SHORT_CMF;
+
+ /* If we adjust timer_interval, max_bytes_per_interval
+ * needs to be adjusted as well.
+ */
+ phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
+ timer_interval, 1000);
+ if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
+ phba->cmf_max_bytes_per_interval =
+ phba->cmf_link_byte_count;
+ }
+
+ /* Since total_bytes has already been zeroed, it's okay to unblock
+ * after max_bytes_per_interval is setup.
+ */
+ if (atomic_xchg(&phba->cmf_bw_wait, 0))
+ queue_work(phba->wq, &phba->unblock_request_work);
+
+ /* SCSI IO is now unblocked */
+ atomic_set(&phba->cmf_stop_io, 0);
+
+skip:
+ hrtimer_forward_now(timer,
+ ktime_set(0, timer_interval * NSEC_PER_MSEC));
+ return HRTIMER_RESTART;
+}
+
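The per-CPU harvest in the timer body relies on atomic64_xchg() to read and zero each counter in a single step, so producers on other CPUs never observe a torn interval. A reduced sketch of that drain pattern (structure and names hypothetical):

    #include <linux/atomic.h>
    #include <linux/percpu.h>

    struct demo_stat {                  /* hypothetical per-CPU bucket */
            atomic64_t bytes;
    };

    static struct demo_stat __percpu *demo_stats;

    /* Drain every CPU's byte count for one interval, zeroing as we go */
    static u64 demo_drain_bytes(void)
    {
            u64 total = 0;
            int cpu;

            for_each_present_cpu(cpu)
                    total += atomic64_xchg(&per_cpu_ptr(demo_stats,
                                                        cpu)->bytes, 0);
            return total;
    }
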
#define trunk_link_status(__idx)\
bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
@@ -5054,6 +6189,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
{
uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+ u8 cnt = 0;
phba->sli4_hba.link_state.speed =
lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -5072,27 +6208,37 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
phba->trunk_link.link1.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
phba->trunk_link.link2.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
phba->trunk_link.link3.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+ cnt++;
}
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ if (cnt)
+ phba->trunk_link.phy_lnk_speed =
+ phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
+ else
+ phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2910 Async FC Trunking Event - Speed:%d\n"
"\tLogical speed:%d "
"port0: %s port1: %s port2: %s port3: %s\n",
@@ -5101,8 +6247,11 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
trunk_link_status(0), trunk_link_status(1),
trunk_link_status(2), trunk_link_status(3));
+ if (phba->cmf_active_mode != LPFC_CFG_OFF)
+ lpfc_cmf_signal_init(phba);
+
if (port_fault)
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3202 trunk error:0x%x (%s) seen on port0:%s "
/*
* SLI-4: We have only 0xA error codes
@@ -5128,7 +6277,6 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
- struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
@@ -5136,7 +6284,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
if (bf_get(lpfc_trailer_type, acqe_fc) !=
LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2895 Non FC link Event detected.(%d)\n",
bf_get(lpfc_trailer_type, acqe_fc));
return;
@@ -5167,7 +6315,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
LPFC_FC_LA_TYPE_LINK_DOWN)
phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
+ else if (!phba->sli4_hba.conf_trunk)
phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
@@ -5184,22 +6332,16 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli4_hba.link_state.fault);
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2897 The mboxq allocation failed\n");
return;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2898 The lpfc_dmabuf allocation failed\n");
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2898 The mboxq prep failed\n");
goto out_free_pmb;
}
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2899 The mbuf allocation failed\n");
- goto out_free_dmabuf;
- }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -5211,7 +6353,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -5256,19 +6398,17 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
- goto out_free_dmabuf;
+ goto out_free_pmb;
return;
-out_free_dmabuf:
- kfree(mp);
out_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
* lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
* @phba: pointer to lpfc hba data structure.
- * @acqe_fc: pointer to the async SLI completion queue entry.
+ * @acqe_sli: pointer to the async SLI completion queue entry.
*
* This routine is to handle the SLI4 asynchronous SLI events.
**/
@@ -5282,9 +6422,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
uint8_t operational = 0;
struct temp_event temp_event_data;
struct lpfc_acqe_misconfigured_event *misconfigured;
+ struct lpfc_acqe_cgn_signal *cgn_signal;
struct Scsi_Host *shost;
struct lpfc_vport **vports;
- int rc, i;
+ int rc, i, cnt;
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
@@ -5292,7 +6433,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"2901 Async SLI event - Type:%d, Event Data: x%08x "
"x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- acqe_sli->reserved, acqe_sli->trailer);
+ acqe_sli->event_data3, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5321,7 +6462,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
temp_event_data.event_code = LPFC_NORMAL_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
"3191 Normal Temperature:%d Celsius - Port Name %c\n",
acqe_sli->event_data1, port_name);
@@ -5363,7 +6504,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
&misconfigured->theEvent);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3296 "
"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
"event: Invalid link %d",
@@ -5415,10 +6556,17 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
rc = lpfc_sli4_read_config(phba);
if (rc) {
phba->lmt = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"3194 Unable to retrieve supported "
"speeds, rc = 0x%x\n", rc);
}
+ rc = lpfc_sli4_refresh_params(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3174 Unable to update pls support, "
+ "rc x%x\n", rc);
+ }
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
@@ -5439,15 +6587,22 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
+ /* Call FW to obtain active parms */
+ lpfc_sli4_cgn_parm_chg_evt(phba);
+ break;
case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
/* Misconfigured WWN. Reports that the SLI Port is configured
* to use FA-WWN, but the attached device doesn't support it.
- * No driver action is required.
* Event Data1 - N.A, Event Data2 - N.A
+ * This event only happens on the physical port.
*/
- lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
- "2699 Misconfigured FA-WWN - Attached device does "
- "not support FA-WWN\n");
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
+ "2699 Misconfigured FA-PWWN - Attached device "
+ "does not support FA-PWWN\n");
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
+ memset(phba->pport->fc_portname.u.wwn, 0,
+ sizeof(struct lpfc_name));
break;
case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
/* EEPROM failure. No driver action is required */
@@ -5456,6 +6611,44 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1: x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ break;
+ cgn_signal = (struct lpfc_acqe_cgn_signal *)
+ &acqe_sli->event_data1;
+ phba->cgn_acqe_cnt++;
+
+ cnt = bf_get(lpfc_warn_acqe, cgn_signal);
+ atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
+ atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
+
+ /* no threshold for CMF, even 1 signal will trigger an event */
+
+ /* Alarm overrides warning, so check that first */
+ if (cgn_signal->alarm_cnt) {
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* Keep track of alarm cnt for CMF_SYNC_WQE */
+ atomic_add(cgn_signal->alarm_cnt,
+ &phba->cgn_sync_alarm_cnt);
+ }
+ } else if (cnt) {
+ /* signal action needs to be taken */
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* Keep track of warning cnt for CMF_SYNC_WQE */
+ atomic_add(cnt, &phba->cgn_sync_warn_cnt);
+ }
+ }
+ break;
+ case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
+ /* May be accompanied by a temperature event */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
+ "2902 Remote Degrade Signaling: x%08x x%08x "
+ "x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ acqe_sli->event_data3);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -5491,16 +6684,11 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
- return 0;
+ return NULL;
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if ((phba->pport->port_state < LPFC_FLOGI) &&
(phba->pport->port_state != LPFC_VPORT_FAILED))
@@ -5523,7 +6711,7 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
/**
* lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
- * @vport: pointer to lpfc hba data structure.
+ * @phba: pointer to lpfc hba data structure.
*
* This routine is to perform Clear Virtual Link (CVL) on all vports in
* response to a FCF dead event.
@@ -5544,7 +6732,7 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
/**
* lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
* @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async fcoe completion queue entry.
+ * @acqe_fip: pointer to the async fcoe completion queue entry.
*
* This routine is to handle the SLI4 asynchronous fcoe event.
**/
@@ -5556,7 +6744,6 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
int rc;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
int active_vlink_present;
struct lpfc_vport **vports;
int i;
@@ -5567,8 +6754,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
case LPFC_FIP_EVENT_TYPE_NEW_FCF:
case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2546 New FCF event, evt_tag:x%x, "
"index:x%x\n",
acqe_fip->event_tag,
@@ -5621,23 +6807,24 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2547 Issue FCF scan read FCF mailbox "
"command failed (x%x)\n", rc);
break;
case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2548 FCF Table full count 0x%x tag 0x%x\n",
- bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
- acqe_fip->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2548 FCF Table full count 0x%x tag 0x%x\n",
+ bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
+ acqe_fip->event_tag);
break;
case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2549 FCF (x%x) disconnected from network, "
- "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2549 FCF (x%x) disconnected from network, "
+ "tag:x%x\n", acqe_fip->index,
+ acqe_fip->event_tag);
/*
* If we are in the middle of FCF failover process, clear
* the corresponding FCF bit in the roundrobin bitmap.
@@ -5674,7 +6861,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
+ LOG_TRACE_EVENT,
"2772 Issue FCF rediscover mailbox "
"command failed, fail through to FCF "
"dead event\n");
@@ -5698,7 +6885,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
case LPFC_FIP_EVENT_TYPE_CVL:
phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -5736,10 +6924,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
@@ -5765,7 +6952,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
+ LOG_TRACE_EVENT,
"2774 Issue FCF rediscover "
"mailbox command failed, "
"through to CVL event\n");
@@ -5786,9 +6973,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
}
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0288 Unknown FCoE event type 0x%x event tag "
- "0x%x\n", event_type, acqe_fip->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0288 Unknown FCoE event type 0x%x event tag "
+ "0x%x\n", event_type, acqe_fip->event_tag);
break;
}
}
@@ -5796,7 +6983,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
/**
* lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
* @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async dcbx completion queue entry.
+ * @acqe_dcbx: pointer to the async dcbx completion queue entry.
*
* This routine is to handle the SLI4 asynchronous dcbx event.
**/
@@ -5805,7 +6992,7 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
struct lpfc_acqe_dcbx *acqe_dcbx)
{
phba->fc_eventTag = acqe_dcbx->event_tag;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0290 The SLI4 DCBX asynchronous event is not "
"handled yet\n");
}
@@ -5813,7 +7000,7 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
/**
* lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
* @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async grp5 completion queue entry.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
*
* This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
* is an asynchronous notification of a logical link speed change. The Port
@@ -5837,6 +7024,292 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
+ * is an asynchronous notification of a request to reset CM stats.
+ **/
+static void
+lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
+{
+ if (!phba->cgn_i)
+ return;
+ lpfc_init_congestion_stat(phba);
+}
+
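Aside - lpfc_init_congestion_stat() is outside this hunk; a minimal sketch of
what resetting the CM statistics presumably involves (the field names below
are assumptions, not taken from this patch):

static void lpfc_init_congestion_stat_sketch(struct lpfc_hba *phba)
{
	/* Zero the driver-side congestion event counters ... */
	atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
	atomic64_set(&phba->cgn_acqe_stat.warn, 0);
	/* ... and restamp the start of the collection window. */
	ktime_get_real_ts64(&phba->cgn_daily_ts);
}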
+/**
+ * lpfc_cgn_params_val - Validate FW congestion parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @p_cfg_param: pointer to FW provided congestion parameters.
+ *
+ * This routine validates the congestion parameters passed
+ * by the FW to the driver via an ACQE event.
+ **/
+static void
+lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
+{
+ spin_lock_irq(&phba->hbalock);
+
+ if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
+ LPFC_CFG_MONITOR)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+ "6225 CMF mode param out of range: %d\n",
+ p_cfg_param->cgn_param_mode);
+ p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+}
+
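Aside - lpfc_rangecheck() is defined elsewhere in the driver; the validation
above only needs an inclusive bounds test. A minimal sketch under that
assumption:

static inline bool lpfc_rangecheck(uint val, uint min, uint max)
{
	return val >= min && val <= max;
}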
+static const char * const lpfc_cmf_mode_to_str[] = {
+ "OFF",
+ "MANAGED",
+ "MONITOR",
+};
+
+/**
+ * lpfc_cgn_params_parse - Process a FW cong parm change event
+ * @phba: pointer to lpfc hba data structure.
+ * @p_cgn_param: pointer to a data buffer with the FW cong params.
+ * @len: the size of @p_cgn_param in bytes.
+ *
+ * This routine checks the congestion management buffer signature
+ * from the FW, validates the contents, and corrects any out-of-range
+ * values to safe defaults. If the signature magic is correct, the
+ * validated contents are copied into the driver's @phba structure
+ * and any mode transition is acted on. If the magic is incorrect,
+ * an error message is logged.
+ **/
+static void
+lpfc_cgn_params_parse(struct lpfc_hba *phba,
+ struct lpfc_cgn_param *p_cgn_param, uint32_t len)
+{
+ struct lpfc_cgn_info *cp;
+ uint32_t crc, oldmode;
+ char acr_string[4] = {0};
+
+ /* Make sure the FW has encoded the correct magic number to
+ * validate the congestion parameter in FW memory.
+ */
+ if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "4668 FW cgn parm buffer data: "
+ "magic 0x%x version %d mode %d "
+ "level0 %d level1 %d "
+ "level2 %d byte13 %d "
+ "byte14 %d byte15 %d "
+ "byte11 %d byte12 %d activeMode %d\n",
+ p_cgn_param->cgn_param_magic,
+ p_cgn_param->cgn_param_version,
+ p_cgn_param->cgn_param_mode,
+ p_cgn_param->cgn_param_level0,
+ p_cgn_param->cgn_param_level1,
+ p_cgn_param->cgn_param_level2,
+ p_cgn_param->byte13,
+ p_cgn_param->byte14,
+ p_cgn_param->byte15,
+ p_cgn_param->byte11,
+ p_cgn_param->byte12,
+ phba->cmf_active_mode);
+
+ oldmode = phba->cmf_active_mode;
+
+ /* Any parameters out of range are corrected to defaults
+ * by this routine. No need to fail.
+ */
+ lpfc_cgn_params_val(phba, p_cgn_param);
+
+ /* Parameters are verified, move them into driver storage */
+ spin_lock_irq(&phba->hbalock);
+ memcpy(&phba->cgn_p, p_cgn_param,
+ sizeof(struct lpfc_cgn_param));
+
+ /* Update parameters in congestion info buffer now */
+ if (phba->cgn_i) {
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+ cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+ cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+ cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+ LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
+
+ switch (oldmode) {
+ case LPFC_CFG_OFF:
+ if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
+ /* Turning CMF on */
+ lpfc_cmf_start(phba);
+
+ if (phba->link_state >= LPFC_LINK_UP) {
+ phba->cgn_reg_fpin =
+ phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal =
+ phba->cgn_init_reg_signal;
+ lpfc_issue_els_edc(phba->pport, 0);
+ }
+ }
+ break;
+ case LPFC_CFG_MANAGED:
+ switch (phba->cgn_p.cgn_param_mode) {
+ case LPFC_CFG_OFF:
+ /* Turning CMF off */
+ lpfc_cmf_stop(phba);
+ if (phba->link_state >= LPFC_LINK_UP)
+ lpfc_issue_els_edc(phba->pport, 0);
+ break;
+ case LPFC_CFG_MONITOR:
+ phba->cmf_max_bytes_per_interval =
+ phba->cmf_link_byte_count;
+
+ /* Resume blocked IO - unblock on workqueue */
+ queue_work(phba->wq,
+ &phba->unblock_request_work);
+ break;
+ }
+ break;
+ case LPFC_CFG_MONITOR:
+ switch (phba->cgn_p.cgn_param_mode) {
+ case LPFC_CFG_OFF:
+ /* Turning CMF off */
+ lpfc_cmf_stop(phba);
+ if (phba->link_state >= LPFC_LINK_UP)
+ lpfc_issue_els_edc(phba->pport, 0);
+ break;
+ case LPFC_CFG_MANAGED:
+ lpfc_cmf_signal_init(phba);
+ break;
+ }
+ break;
+ }
+ if (oldmode != LPFC_CFG_OFF ||
+ oldmode != phba->cgn_p.cgn_param_mode) {
+ if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
+ scnprintf(acr_string, sizeof(acr_string), "%u",
+ phba->cgn_p.cgn_param_level0);
+ else
+ scnprintf(acr_string, sizeof(acr_string), "NA");
+
+ dev_info(&phba->pcidev->dev, "%d: "
+ "4663 CMF: Mode %s acr %s\n",
+ phba->brd_no,
+ lpfc_cmf_mode_to_str
+ [phba->cgn_p.cgn_param_mode],
+ acr_string);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4669 FW cgn parm buf wrong magic 0x%x "
+ "version %d\n", p_cgn_param->cgn_param_magic,
+ p_cgn_param->cgn_param_version);
+ }
+}
+
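Aside - the nested switch in lpfc_cgn_params_parse() encodes a small state
machine over cmf_active_mode; summarized from the code above:

	old mode -> new mode    action
	OFF      -> MANAGED or  lpfc_cmf_start(); if link up, restore the
	            MONITOR     initial FPIN/signal registrations and send EDC
	MANAGED  -> OFF         lpfc_cmf_stop(); if link up, send EDC
	MANAGED  -> MONITOR     raise cmf_max_bytes_per_interval to the full
	                        link byte count and unblock IO via workqueue
	MONITOR  -> OFF         lpfc_cmf_stop(); if link up, send EDC
	MONITOR  -> MANAGED     lpfc_cmf_signal_init()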
+/**
+ * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a read_object mailbox command to
+ * get the congestion management parameters from the FW,
+ * parses them, and updates the driver-maintained values.
+ *
+ * Returns
+ * 0 if the object was empty
+ * a negative errno if an error was encountered
+ * the byte count if data was read from the object
+ **/
+int
+lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
+{
+ int ret = 0;
+ struct lpfc_cgn_param *p_cgn_param = NULL;
+ u32 *pdata = NULL;
+ u32 len = 0;
+
+ /* Find out if the FW has a new set of congestion parameters. */
+ len = sizeof(struct lpfc_cgn_param);
+ pdata = kzalloc(len, GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
+ pdata, len);
+
+ /* 0 means no data. A negative means error. A positive means
+ * bytes were copied.
+ */
+ if (!ret) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4670 CGN RD OBJ returns no data\n");
+ goto rd_obj_err;
+ } else if (ret < 0) {
+ /* Some error. Just exit and return it to the caller. */
+ goto rd_obj_err;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "6234 READ CGN PARAMS Successful %d\n", len);
+
+ /* Parse data pointer over len and update the phba congestion
+ * parameters with values passed back. The receive rate values
+ * may have been altered in FW, but take no action here.
+ */
+ p_cgn_param = (struct lpfc_cgn_param *)pdata;
+ lpfc_cgn_params_parse(phba, p_cgn_param, len);
+
+ rd_obj_err:
+ kfree(pdata);
+ return ret;
+}
+
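Aside - a usage sketch of the tri-state return convention documented above
(the caller code here is illustrative only):

	int rc = lpfc_sli4_cgn_params_read(phba);

	if (rc < 0) {
		/* mailbox/read failure - treat as a real error */
	} else if (rc == 0) {
		/* object present but empty - benign, e.g. cleared by a write */
	} else {
		/* rc bytes were read and parsed into phba->cgn_p */
	}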
+/**
+ * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The FW generated Async ACQE SLI event calls this routine when
+ * the event type is an SLI Internal Port Event and the Event Code
+ * indicates a change to the FW maintained congestion parameters.
+ *
+ * This routine executes a Read_Object mailbox call to obtain the
+ * current congestion parameters maintained in FW and corrects
+ * the driver's active congestion parameters.
+ *
+ * The acqe event is not passed because there is no further data
+ * required.
+ *
+ * Returns a nonzero error code if event processing failed,
+ * zero on success.
+ **/
+static int
+lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
+{
+ int ret = 0;
+
+ if (!phba->sli4_hba.pc_sli4_params.cmf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4664 Cgn Evt when E2E off. Drop event\n");
+ return -EACCES;
+ }
+
+ /* If the event is claiming an empty object, it's ok. A write
+ * could have cleared it. Only error is a negative return
+ * status.
+ */
+ ret = lpfc_sli4_cgn_params_read(phba);
+ if (ret < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4667 Error reading Cgn Params (%d)\n",
+ ret);
+ } else if (!ret) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "4673 CGN Event empty object.\n");
+ }
+ return ret;
+}
+
+/**
* lpfc_sli4_async_event_proc - Process all the pending asynchronous event
* @phba: pointer to lpfc hba data structure.
*
@@ -5846,18 +7319,21 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the async event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ASYNC_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
+ iflags);
+
/* Process the asynchronous event */
switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
case LPFC_TRAILER_CODE_LINK:
@@ -5881,16 +7357,23 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
case LPFC_TRAILER_CODE_SLI:
lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
break;
+ case LPFC_TRAILER_CODE_CMSTAT:
+ lpfc_sli4_async_cmstat_evt(phba);
+ break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"1804 Invalid asynchronous event code: "
"x%x\n", bf_get(lpfc_trailer_code,
&cq_event->cqe.mcqe_cmpl));
break;
}
+
/* Free the completion event processed to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
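Aside - the loop above uses the standard pop-one-entry-then-drop-the-lock
shape so the per-event handlers never run under asynce_list_lock; reduced to
its essentials (a sketch, not driver code):

	spin_lock_irqsave(&lock, iflags);
	while (!list_empty(&queue)) {
		list_remove_head(&queue, ev, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&lock, iflags);

		handle(ev);	/* may take other locks; list lock not held */

		spin_lock_irqsave(&lock, iflags);
	}
	spin_unlock_irqrestore(&lock, iflags);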
/**
@@ -5918,7 +7401,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
"2777 Start post-quiescent FCF table scan\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2747 Issue FCF scan read FCF mailbox "
"command failed 0x%x\n", rc);
}
@@ -5989,7 +7472,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
"0480 Enabled MSI-X interrupt mode.\n");
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0482 Illegal interrupt mode.\n");
break;
}
@@ -5997,29 +7480,6 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
- * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node
- * @phba: Pointer to HBA context object.
- *
- **/
-static void
-lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
-{
- unsigned int cpu, numa_node;
- struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
-
- cpumask_clear(numa_mask);
-
- /* Check if we're a NUMA architecture */
- numa_node = dev_to_node(&phba->pcidev->dev);
- if (numa_node == NUMA_NO_NODE)
- return;
-
- for_each_possible_cpu(cpu)
- if (cpu_to_node(cpu) == numa_node)
- cpumask_set_cpu(cpu, numa_mask);
-}
-
-/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6106,10 +7566,14 @@ lpfc_reset_hba(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
return;
}
- if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+
+ /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- else
+ } else {
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_sli_flush_io_rings(phba);
+ }
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
@@ -6161,7 +7625,7 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
if (nr_vfn > max_nr_vfn) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3057 Requested vfs (%d) greater than "
"supported vfs (%d)", nr_vfn, max_nr_vfn);
return -EINVAL;
@@ -6180,6 +7644,15 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
return rc;
}
+static void
+lpfc_unblock_requests_work(struct work_struct *work)
+{
+ struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
+ unblock_request_work);
+
+ lpfc_unblock_requests(phba);
+}
+
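Aside - lpfc_unblock_requests() is deferred to a work item here, presumably
because the CMF paths that resume IO run in atomic context; the pieces seen
in this patch pair up roughly as:

	/* setup (phase1 init): */
	INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);

	/* producer, safe from atomic context: */
	queue_work(phba->wq, &phba->unblock_request_work);

	/* consumer recovers phba from the embedded work_struct via
	 * container_of(), as lpfc_unblock_requests_work() does above.
	 */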
/**
* lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
* @phba: pointer to lpfc hba data structure.
@@ -6200,17 +7673,16 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
* Driver resources common to all SLI revisions
*/
atomic_set(&phba->fast_event_count, 0);
+ atomic_set(&phba->dbg_log_idx, 0);
+ atomic_set(&phba->dbg_log_cnt, 0);
+ atomic_set(&phba->dbg_log_dmping, 0);
spin_lock_init(&phba->hbalock);
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
-
/* Initialize port_list spinlock */
spin_lock_init(&phba->port_list_lock);
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
/* Initialize the wait queue head for the kernel thread */
init_waitqueue_head(&phba->work_waitq);
@@ -6253,6 +7725,9 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
+ INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
+ lpfc_idle_stat_delay_work);
+ INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
return 0;
}
@@ -6291,13 +7766,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- }
-
if (!phba->sli.sli3_ring)
phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
sizeof(struct lpfc_sli_ring),
@@ -6310,11 +7778,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
* used to create the sg_dma_buf_pool must be dynamically calculated.
*/
- /* Initialize the host templates the configured values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
if (phba->sli_rev == LPFC_SLI_REV4)
entry_sz = sizeof(struct sli4_sge);
else
@@ -6355,7 +7818,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+ "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
@@ -6451,8 +7914,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mboxq;
MAILBOX_t *mb;
int rc, i, max_buf_size;
- uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
- struct lpfc_mqe *mqe;
int longs;
int extra;
uint64_t wwn;
@@ -6462,7 +7923,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
- lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6481,6 +7941,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -6491,6 +7953,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* FCF rediscover timer */
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
+ /* CMF congestion timer */
+ hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ phba->cmf_timer.function = lpfc_cmf_timer;
+
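Aside - lpfc_cmf_timer() is outside this hunk; an HRTIMER_MODE_REL callback
of this kind typically re-arms itself. A minimal sketch under that assumption
(the 100 ms interval is illustrative, not taken from this patch):

	static enum hrtimer_restart lpfc_cmf_timer_sketch(struct hrtimer *t)
	{
		struct lpfc_hba *phba = container_of(t, struct lpfc_hba,
						     cmf_timer);

		/* sample counters, recompute cmf_max_bytes_per_interval ... */

		/* re-arm for the next monitoring interval */
		hrtimer_forward_now(t, ms_to_ktime(100));
		return HRTIMER_RESTART;
	}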
/*
* Control structure for handling external multi-buffer mailbox
* command pass-through.
@@ -6521,6 +7987,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+ /* for VMID idle timeout if VMID is enabled */
+ if (lpfc_is_vmid_enabled(phba))
+ timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
+
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
@@ -6540,6 +8010,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
+ spin_lock_init(&phba->sli4_hba.asynce_list_lock);
+ spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
/*
* Initialize driver internal slow-path work queues
@@ -6551,8 +8023,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
/* Asynchronous event CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
/* Slow-path XRI aborted CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Receive queue CQ Event work queue list */
@@ -6576,7 +8046,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -6603,6 +8073,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_read_config(phba);
if (unlikely(rc))
goto out_free_bsmbx;
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+ /* Right now the link is down; if FA-PWWN is configured the
+ * firmware will try FLOGI before the driver gets a link up.
+ * If that FLOGI fails, the driver should get a MISCONFIGURED
+ * async event which will clear this flag. Failure is the only
+ * notification the driver gets; on success no notification is
+ * given. Assume success.
+ */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
+
rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
if (unlikely(rc))
goto out_free_bsmbx;
@@ -6630,7 +8112,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_read_nv(phba, mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6016 Mailbox failed , mbxCmd x%x "
"READ_NV, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
@@ -6659,17 +8142,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->nvmet_support = 1; /* a match */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6017 NVME Target %016llx\n",
wwn);
#else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6021 Can't enable NVME Target."
" NVME_TARGET_FC infrastructure"
" is not in kernel\n");
#endif
/* Not supported for NVMET */
phba->cfg_xri_rebalancing = 0;
+ if (phba->irq_chann_mode == NHT_MODE) {
+ phba->cfg_irq_chann =
+ phba->sli4_hba.num_present_cpu;
+ phba->cfg_hdw_queue =
+ phba->sli4_hba.num_present_cpu;
+ phba->irq_chann_mode = NORMAL_MODE;
+ }
break;
}
}
@@ -6677,32 +8169,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_nvme_mod_param_dep(phba);
- /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
- lpfc_supported_pages(mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (!rc) {
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
- }
- }
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
- if (rc) {
- mempool_free(mboxq, phba->mbox_mem_pool);
- rc = -EIO;
- goto out_free_bsmbx;
- }
- }
-
/*
* Get sli4 parameters that override parameters from Port capabilities.
* If this call fails, it isn't critical unless the SLI4 parameters come
@@ -6716,9 +8182,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
&phba->sli4_hba.sli_intf);
if (phba->sli4_hba.extents_in_use &&
phba->sli4_hba.rpi_hdrs_in_use) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2999 Unsupported SLI4 Parameters "
- "Extents and RPI headers enabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -6825,11 +8291,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_seg_cnt:%d dmabuf_size:%d "
"total:%d scsi:%d nvme:%d\n",
@@ -6847,8 +8308,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
&phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
+ if (!phba->lpfc_sg_dma_buf_pool) {
+ rc = -ENOMEM;
goto out_free_bsmbx;
+ }
phba->lpfc_cmd_rsp_buf_pool =
dma_pool_create("lpfc_cmd_rsp_buf_pool",
@@ -6856,8 +8319,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp),
i, 0);
- if (!phba->lpfc_cmd_rsp_buf_pool)
+ if (!phba->lpfc_cmd_rsp_buf_pool) {
+ rc = -ENOMEM;
goto out_free_sg_dma_buf;
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -6883,13 +8348,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate and initialize active sgl array */
rc = lpfc_init_active_sgl_array(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1430 Failed to initialize sgl list.\n");
goto out_destroy_cq_event_pool;
}
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1432 Failed to initialize rpi headers.\n");
goto out_free_active_sgl;
}
@@ -6899,7 +8364,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
GFP_KERNEL);
if (!phba->fcf.fcf_rr_bmask) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
rc = -ENOMEM;
@@ -6910,7 +8375,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct lpfc_hba_eq_hdl),
GFP_KERNEL);
if (!phba->sli4_hba.hba_eq_hdl) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2572 Failed allocate memory for "
"fast-path per-EQ handle array\n");
rc = -ENOMEM;
@@ -6921,7 +8386,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct lpfc_vector_map_info),
GFP_KERNEL);
if (!phba->sli4_hba.cpu_map) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3327 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
rc = -ENOMEM;
@@ -6930,11 +8395,40 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
if (!phba->sli4_hba.eq_info) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3321 Failed allocation for per_cpu stats\n");
rc = -ENOMEM;
goto out_free_hba_cpu_map;
}
+
+ phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
+ sizeof(*phba->sli4_hba.idle_stat),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.idle_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3390 Failed allocation for idle_stat\n");
+ rc = -ENOMEM;
+ goto out_free_hba_eq_info;
+ }
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
+ if (!phba->sli4_hba.c_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3332 Failed allocating per cpu hdwq stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_idle_stat;
+ }
+#endif
+
+ phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
+ if (!phba->cmf_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3331 Failed allocating per cpu cgn stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_hdwq_info;
+ }
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6954,6 +8448,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
+out_free_hba_hdwq_info:
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
+out_free_hba_idle_stat:
+#endif
+ kfree(phba->sli4_hba.idle_stat);
+out_free_hba_eq_info:
+ free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
@@ -6976,6 +8478,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
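Aside - the new out_destroy_workqueue label lands at the bottom of the unwind
ladder because phba->wq is the first resource this path acquires; the error
labels mirror the allocation order in reverse:

	/*
	 * Allocation order:       Unwind order:
	 *  1. alloc_workqueue()    out_free_bsmbx:        bootstrap mbox
	 *  2. lpfc_mem_alloc()     out_free_mem:          lpfc_mem_free()
	 *  3. bootstrap mailbox    out_destroy_workqueue: destroy_workqueue()
	 *
	 * A goto into the ladder frees everything allocated before the
	 * failure point and nothing after it.
	 */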
@@ -6992,13 +8497,18 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
free_percpu(phba->sli4_hba.eq_info);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
+#endif
+ free_percpu(phba->cmf_stat);
+ kfree(phba->sli4_hba.idle_stat);
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
- cpumask_clear(&phba->sli4_hba.numa_mask);
+ cpumask_clear(&phba->sli4_hba.irq_aff_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -7070,7 +8580,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
"1431 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
return 0;
}
@@ -7114,7 +8623,6 @@ static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
if (phba->wq) {
- flush_workqueue(phba->wq);
destroy_workqueue(phba->wq);
phba->wq = NULL;
}
@@ -7150,6 +8658,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
/**
* lpfc_init_iocb_list - Allocate and initialize iocb list.
* @phba: pointer to lpfc hba data structure.
+ * @iocb_count: number of requested iocbs
*
* This routine is invoked to allocate and initialize the driver's IOCB
* list and set up the IOCB tag array accordingly.
@@ -7231,11 +8740,9 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba)
LIST_HEAD(sglq_list);
/* Retrieve all els sgls from driver list */
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
/* Now free the sgl list */
lpfc_free_sgl_list(phba, &sglq_list);
@@ -7361,7 +8868,7 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0391 Error during rpi post operation\n");
lpfc_sli4_remove_rpis(phba);
rc = -ENODEV;
@@ -7559,6 +9066,36 @@ lpfc_hba_free(struct lpfc_hba *phba)
}
/**
+ * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine will set up the initial FDMI attribute masks for
+ * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
+ * to get these attributes first before falling back; the attribute
+ * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
+ **/
+void
+lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+
+ vport->load_flag |= FC_ALLOW_FDMI;
+ if (phba->cfg_enable_SmartSAN ||
+ phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
+ /* Setup appropriate attribute masks */
+ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
+ if (phba->cfg_enable_SmartSAN)
+ vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
+ else
+ vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "6077 Setup FDMI mask: hba x%x port x%x\n",
+ vport->fdmi_hba_mask, vport->fdmi_port_mask);
+}
+
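Aside - the fallback itself happens later in discovery, outside this patch; a
hypothetical sketch of stepping the port mask down on a rejected registration
(the LPFC_FDMI1_PORT_ATTR constant is assumed here):

	if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR)
		vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;	/* -> FDMI2 */
	else if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR)
		vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;	/* -> FDMI1 */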
+/**
* lpfc_create_shost - Create hba physical port with associated scsi host.
* @phba: pointer to lpfc hba data structure.
*
@@ -7601,21 +9138,12 @@ lpfc_create_shost(struct lpfc_hba *phba)
/* Put reference to SCSI host to driver's device private data */
pci_set_drvdata(phba->pcidev, shost);
+ lpfc_setup_fdmi_mask(vport);
+
/*
* At this point we are fully registered with PSA. In addition,
* any initial discovery should be completed.
*/
- vport->load_flag |= FC_ALLOW_FDMI;
- if (phba->cfg_enable_SmartSAN ||
- (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
-
- /* Setup appropriate attribute masks */
- vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
- if (phba->cfg_enable_SmartSAN)
- vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
- else
- vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
- }
return 0;
}
@@ -7673,7 +9201,7 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
if ((old_mask != phba->cfg_prot_mask) ||
(old_guard != phba->cfg_prot_guard))
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1475 Registering BlockGuard with the "
"SCSI layer: mask %d guard %d\n",
phba->cfg_prot_mask,
@@ -7682,7 +9210,7 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
scsi_host_set_prot(shost, phba->cfg_prot_mask);
scsi_host_set_guard(shost, phba->cfg_prot_guard);
} else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1479 Not Registering BlockGuard with the SCSI "
"layer, Bad protection parameters: %d %d\n",
old_mask, old_guard);
@@ -7913,7 +9441,7 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
* other register reads as the data may not be valid. Just exit.
*/
if (port_error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1408 Port Failed POST - portsmphr=0x%x, "
"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
@@ -7962,7 +9490,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
(~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"1422 Unrecoverable Error "
"Detected during POST "
"uerr_lo_reg=0x%x, "
@@ -7989,7 +9518,7 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.
ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2888 Unrecoverable port error "
"following POST: port status reg "
"0x%x, port_smphr reg 0x%x, "
@@ -7999,7 +9528,15 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
phba->work_status[0],
phba->work_status[1]);
port_error = -ENODEV;
+ break;
}
+
+ if (lpfc_pldv_detect &&
+ bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)
+ pci_write_config_byte(phba->pcidev,
+ LPFC_SLI_INTF, CFG_PLD);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -8102,6 +9639,7 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
/**
* lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
* @phba: pointer to lpfc hba data structure.
+ * @if_type: sli if type to operate on.
*
* This routine is invoked to set up SLI4 BAR1 register memory map.
**/
@@ -8283,20 +9821,19 @@ static const char * const lpfc_topo_to_str[] = {
"P2P then Loop",
};
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
/**
* lpfc_map_topology - Map the topology read from READ_CONFIG
* @phba: pointer to lpfc hba data structure.
- * @rdconf: pointer to read config data
+ * @rd_config: pointer to read config data
*
* This routine is invoked to map the topology values as read
* from the read config mailbox command. If the persistent
* topology feature is supported, the firmware will provide the
* saved topology information to be used in INIT_LINK
- *
**/
-#define LINK_FLAGS_DEF 0x0
-#define LINK_FLAGS_P2P 0x1
-#define LINK_FLAGS_LOOP 0x2
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
@@ -8318,9 +9855,12 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
}
/* FW supports persistent topology - override module parameter value */
phba->hba_flag |= HBA_PERSISTENT_TOPO;
- switch (phba->pcidev->device) {
- case PCI_DEVICE_ID_LANCER_G7_FC:
- case PCI_DEVICE_ID_LANCER_G6_FC:
+
+ /* if ASIC_GEN_NUM >= 0xC */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) ||
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)) {
if (!tf) {
phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
? FLAGS_TOPOLOGY_MODE_LOOP
@@ -8328,8 +9868,7 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
} else {
phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
}
- break;
- default: /* G5 */
+ } else { /* G5 */
if (tf) {
/* If topology failover set - pt is '0' or '1' */
phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
@@ -8339,7 +9878,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
? FLAGS_TOPOLOGY_MODE_PT_PT
: FLAGS_TOPOLOGY_MODE_LOOP);
}
- break;
}
if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8378,12 +9916,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
uint16_t forced_link_speed;
- uint32_t if_type, qmin;
+ uint32_t if_type, qmin, fawwpn;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2011 Unable to allocate memory for issuing "
"SLI_CONFIG_SPECIAL mailbox command\n");
return -ENOMEM;
@@ -8393,11 +9931,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2012 Mailbox failed , mbxCmd x%x "
- "READ_CONFIG, mbxStatus x%x\n",
- bf_get(lpfc_mqe_command, &pmb->u.mqe),
- bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2012 Mailbox failed , mbxCmd x%x "
+ "READ_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
@@ -8420,10 +9958,24 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
}
+ fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
+
+ if (fawwpn) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_DISCOVERY,
+ "2702 READ_CONFIG: FA-PWWN is "
+ "configured on\n");
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
+ } else {
+ /* Clear FW configured flag, preserve driver flag */
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
+ }
+
phba->sli4_hba.conf_trunk =
bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
/* Reduce resource usage in kdump environment */
@@ -8464,6 +10016,52 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+
+ /* Next decide on FPIN or Signal E2E CGN support
+ * For congestion alarms and warnings the valid combinations are:
+ * 1. FPIN alarms / FPIN warnings
+ * 2. Signal alarms / Signal warnings
+ * 3. FPIN alarms / Signal warnings
+ * 4. Signal alarms / FPIN warnings
+ *
+ * Initialize the adapter frequency to 100 mSecs
+ */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+ if (lpfc_use_cgn_signal) {
+ if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
+ phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+ phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+ }
+ if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
+ /* MUST support both alarm and warning
+ * because EDC does not support alarm alone.
+ */
+ if (phba->cgn_reg_signal !=
+ EDC_CG_SIG_WARN_ONLY) {
+ /* Must support both or none */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
+ phba->cgn_reg_signal =
+ EDC_CG_SIG_NOTSUPPORTED;
+ } else {
+ phba->cgn_reg_signal =
+ EDC_CG_SIG_WARN_ALARM;
+ phba->cgn_reg_fpin =
+ LPFC_CGN_FPIN_NONE;
+ }
+ }
+ }
+
+ /* Set the congestion initial signal and fpin values. */
+ phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
+ phba->cgn_init_reg_signal = phba->cgn_reg_signal;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
+ phba->cgn_reg_signal, phba->cgn_reg_fpin);
+
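Aside - the registration outcomes coded above, tabulated (wcs/acs are the
warning/alarm capability bits from READ_CONFIG; all of this applies only when
the lpfc_use_cgn_signal module parameter is set):

	wcs acs  cgn_reg_signal           cgn_reg_fpin
	 0   0   EDC_CG_SIG_NOTSUPPORTED  LPFC_CGN_FPIN_BOTH (default)
	 1   0   EDC_CG_SIG_WARN_ONLY     alarm FPIN only
	 1   1   EDC_CG_SIG_WARN_ALARM    LPFC_CGN_FPIN_NONE
	 0   1   EDC_CG_SIG_NOTSUPPORTED  LPFC_CGN_FPIN_BOTH (EDC cannot
	                                  register alarm without warning)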
lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
@@ -8471,7 +10069,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
+ "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
@@ -8485,7 +10083,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_eq,
phba->sli4_hba.max_cfg_param.max_cq,
phba->sli4_hba.max_cfg_param.max_wq,
- phba->sli4_hba.max_cfg_param.max_rq);
+ phba->sli4_hba.max_cfg_param.max_rq,
+ phba->lmt);
/*
* Calculate queue resources based on how
@@ -8507,8 +10106,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
/* Check to see if there is enough for NVME */
if ((phba->cfg_irq_chann > qmin) ||
(phba->cfg_hdw_queue > qmin)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2005 Reducing Queues: "
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2005 Reducing Queues - "
+ "FW resource limitation: "
"WQ %d CQ %d EQ %d: min %d: "
"IRQ %d HDWQ %d\n",
phba->sli4_hba.max_cfg_param.max_wq,
@@ -8573,7 +10173,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
LPFC_USER_LINK_SPEED_AUTO;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"0047 Unrecognized link "
"speed : %d\n",
forced_link_speed);
@@ -8610,7 +10211,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc2 || shdr_status || shdr_add_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3026 Mailbox failed , mbxCmd x%x "
"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &pmb->u.mqe),
@@ -8647,7 +10248,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
phba->sli4_hba.iov.vf_number);
else
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3028 GET_FUNCTION_CONFIG: failed to find "
"Resource Descriptor:x%x\n",
LPFC_RSRC_DESC_TYPE_FCFCOE);
@@ -8684,7 +10285,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0492 Unable to allocate memory for "
"issuing SLI_CONFIG_SPECIAL mailbox "
"command\n");
@@ -8699,7 +10300,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0493 SLI_CONFIG_SPECIAL mailbox "
"failed with status x%x\n",
rc);
@@ -8779,8 +10380,9 @@ lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0499 Failed allocate fast-path IO CQ (%d)\n",
+ idx);
return 1;
}
qdesc->qe_valid = 1;
@@ -8802,7 +10404,7 @@ lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0503 Failed allocate fast-path IO WQ (%d)\n",
idx);
return 1;
@@ -8858,7 +10460,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
GFP_KERNEL);
if (!phba->sli4_hba.hdwq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6427 Failed allocate memory for "
"fast-path Hardware Queue array\n");
goto out_error;
@@ -8890,7 +10492,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
sizeof(struct lpfc_queue *),
GFP_KERNEL);
if (!phba->sli4_hba.nvmet_cqset) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3121 Fail allocate memory for "
"fast-path CQ set array\n");
goto out_error;
@@ -8900,7 +10502,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
sizeof(struct lpfc_queue *),
GFP_KERNEL);
if (!phba->sli4_hba.nvmet_mrq_hdr) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3122 Fail allocate memory for "
"fast-path RQ set hdr array\n");
goto out_error;
@@ -8910,7 +10512,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
sizeof(struct lpfc_queue *),
GFP_KERNEL);
if (!phba->sli4_hba.nvmet_mrq_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3124 Fail allocate memory for "
"fast-path RQ set data array\n");
goto out_error;
@@ -8938,7 +10540,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0497 Failed allocate EQ (%d)\n",
cpup->hdwq);
goto out_error;
@@ -8992,7 +10594,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.cq_ecount,
cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3142 Failed allocate NVME "
"CQ Set (%d)\n", idx);
goto out_error;
@@ -9014,7 +10616,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0500 Failed allocate slow-path mailbox CQ\n");
goto out_error;
}
@@ -9026,7 +10628,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0501 Failed allocate slow-path ELS CQ\n");
goto out_error;
}
@@ -9045,7 +10647,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0505 Failed allocate slow-path MQ\n");
goto out_error;
}
@@ -9061,7 +10663,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0504 Failed allocate slow-path ELS WQ\n");
goto out_error;
}
@@ -9075,7 +10677,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
@@ -9088,7 +10690,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6080 Failed allocate NVME LS WQ\n");
goto out_error;
}
@@ -9106,7 +10708,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0506 Failed allocate receive HRQ\n");
goto out_error;
}
@@ -9117,7 +10719,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0507 Failed allocate receive DRQ\n");
goto out_error;
}
@@ -9135,7 +10737,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
LPFC_NVMET_RQE_DEF_COUNT,
cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3146 Failed allocate "
"receive HRQ\n");
goto out_error;
@@ -9148,7 +10750,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
GFP_KERNEL,
cpu_to_node(cpu));
if (qdesc->rqbp == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6131 Failed allocate "
"Header RQBP\n");
goto out_error;
@@ -9164,7 +10766,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
LPFC_NVMET_RQE_DEF_COUNT,
cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3156 Failed allocate "
"receive DRQ\n");
goto out_error;
@@ -9235,6 +10837,7 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
/* Free the CQ/WQ corresponding to the Hardware Queue */
lpfc_sli4_queue_free(hdwq[idx].io_cq);
lpfc_sli4_queue_free(hdwq[idx].io_wq);
+ hdwq[idx].hba_eq = NULL;
hdwq[idx].io_cq = NULL;
hdwq[idx].io_wq = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support)
@@ -9354,7 +10957,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
int rc;
if (!eq || !cq || !wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6085 Fast-path %s (%d) not allocated\n",
((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
return -ENOMEM;
@@ -9364,9 +10967,9 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
rc = lpfc_cq_create(phba, cq, eq,
(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6086 Failed setup of CQ (%d), rc = 0x%x\n",
- qidx, (uint32_t)rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6086 Failed setup of CQ (%d), rc = 0x%x\n",
+ qidx, (uint32_t)rc);
return rc;
}
@@ -9382,7 +10985,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* create the wq */
rc = lpfc_wq_create(phba, wq, cq, qtype);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
/* no need to tear down cq - caller will do so */
@@ -9400,9 +11003,9 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
} else {
rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0539 Failed setup of slow-path MQ: "
- "rc = 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0539 Failed setup of slow-path MQ: "
+ "rc = 0x%x\n", rc);
/* no need to tear down cq - caller will do so */
return rc;
}
@@ -9475,7 +11078,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Check for dual-ULP support */
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3249 Unable to allocate memory for "
"QUERY_FW_CFG mailbox command\n");
return -ENOMEM;
@@ -9493,12 +11096,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3250 QUERY_FW_CFG mailbox failed with status "
"x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
rc = -ENXIO;
goto out_error;
}
@@ -9514,8 +11116,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
/*
* Set up HBA Event Queues (EQs)
@@ -9524,7 +11125,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Set up HBA event queue */
if (!qp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3147 Fast-path EQs not allocated\n");
rc = -ENOMEM;
goto out_error;
@@ -9548,7 +11149,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
phba->cfg_fcp_imax);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0523 Failed setup of fast-path"
" EQ (%d), rc = 0x%x\n",
cpup->eq, (uint32_t)rc);
@@ -9580,7 +11181,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
qidx,
LPFC_IO);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0535 Failed to setup fastpath "
"IO WQ/CQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
@@ -9595,7 +11196,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Set up slow-path MBOX CQ/MQ */
if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0528 %s not allocated\n",
phba->sli4_hba.mbx_cq ?
"Mailbox WQ" : "Mailbox CQ");
@@ -9608,14 +11209,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.mbx_wq,
NULL, 0, LPFC_MBOX);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
(uint32_t)rc);
goto out_destroy;
}
if (phba->nvmet_support) {
if (!phba->sli4_hba.nvmet_cqset) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3165 Fast-path NVME CQ Set "
"array not allocated\n");
rc = -ENOMEM;
@@ -9627,7 +11228,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
qp,
LPFC_WCQ, LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3164 Failed setup of NVME CQ "
"Set, rc = 0x%x\n",
(uint32_t)rc);
@@ -9639,7 +11240,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
qp[0].hba_eq,
LPFC_WCQ, LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6089 Failed setup NVMET CQ: "
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
@@ -9656,7 +11257,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Set up slow-path ELS WQ/CQ */
if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0530 ELS %s not allocated\n",
phba->sli4_hba.els_cq ? "WQ" : "CQ");
rc = -ENOMEM;
@@ -9667,7 +11268,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_wq,
NULL, 0, LPFC_ELS);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
(uint32_t)rc);
goto out_destroy;
@@ -9680,7 +11281,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Set up NVME LS Complete Queue */
if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6091 LS %s not allocated\n",
phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
rc = -ENOMEM;
@@ -9691,7 +11292,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_wq,
NULL, 0, LPFC_NVME_LS);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0526 Failed setup of NVVME LS WQ/CQ: "
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
@@ -9711,7 +11312,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if ((!phba->sli4_hba.nvmet_cqset) ||
(!phba->sli4_hba.nvmet_mrq_hdr) ||
(!phba->sli4_hba.nvmet_mrq_data)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6130 MRQ CQ Queues not "
"allocated\n");
rc = -ENOMEM;
@@ -9724,7 +11325,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.nvmet_cqset,
LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6098 Failed setup of NVMET "
"MRQ: rc = 0x%x\n",
(uint32_t)rc);
@@ -9738,7 +11339,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.nvmet_cqset[0],
LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6057 Failed setup of NVMET "
"Receive Queue: rc = 0x%x\n",
(uint32_t)rc);
@@ -9757,7 +11358,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0540 Receive Queue not allocated\n");
rc = -ENOMEM;
goto out_destroy;
@@ -9766,7 +11367,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
phba->sli4_hba.els_cq, LPFC_USOL);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0541 Failed setup of Receive Queue: "
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
@@ -9794,7 +11395,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
sizeof(struct lpfc_queue *), GFP_KERNEL);
if (!phba->sli4_hba.cq_lookup) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0549 Failed setup of CQ Lookup table: "
"size 0x%x\n", phba->sli4_hba.cq_max);
rc = -ENOMEM;
@@ -10041,26 +11642,28 @@ lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
- LIST_HEAD(cqelist);
- struct lpfc_cq_event *cqe;
+ LIST_HEAD(cq_event_list);
+ struct lpfc_cq_event *cq_event;
unsigned long iflags;
/* Retrieve all the pending WCQEs from pending WCQE lists */
- spin_lock_irqsave(&phba->hbalock, iflags);
- /* Pending FCP XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
- &cqelist);
+
/* Pending ELS XRI abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
- &cqelist);
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
+
+ /* Pending async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
- &cqelist);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
- while (!list_empty(&cqelist)) {
- list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
- lpfc_sli4_cq_event_release(phba, cqe);
+ while (!list_empty(&cq_event_list)) {
+ list_remove_head(&cq_event_list, cq_event,
+ struct lpfc_cq_event, list);
+ lpfc_sli4_cq_event_release(phba, cq_event);
}
}
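
The reworked drain above splices each pending list onto a private list head
under that list's dedicated lock, then releases the entries with no lock held.
A minimal sketch of the same pattern (hypothetical helper, not part of this
patch; assumes the usual lpfc headers):

	static void lpfc_drain_cq_events(struct lpfc_hba *phba,
					 struct list_head *pending,
					 spinlock_t *lock)
	{
		LIST_HEAD(private);
		struct lpfc_cq_event *ev, *tmp;
		unsigned long iflags;

		/* Take ownership of the whole list in O(1) under the lock. */
		spin_lock_irqsave(lock, iflags);
		list_splice_init(pending, &private);
		spin_unlock_irqrestore(lock, iflags);

		/* Entries are now private; release them lock-free. */
		list_for_each_entry_safe(ev, tmp, &private, list) {
			list_del(&ev->list);
			lpfc_sli4_cq_event_release(phba, ev);
		}
	}
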
@@ -10094,7 +11697,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0494 Unable to allocate memory for "
"issuing SLI_FUNCTION_RESET mailbox "
"command\n");
@@ -10111,10 +11714,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&shdr->response);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0495 SLI_FUNCTION_RESET mailbox "
"failed with status x%x add_status x%x,"
" mbx status x%x\n",
@@ -10146,7 +11748,7 @@ wait:
phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] = readl(
phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2890 Port not ready, port status reg "
"0x%x error 1=0x%x, error 2=0x%x\n",
reg_data.word0,
@@ -10156,6 +11758,9 @@ wait:
goto out;
}
+ if (bf_get(lpfc_sliport_status_pldv, &reg_data))
+ lpfc_pldv_detect = true;
+
if (!port_reset) {
/*
* Reset the port now
@@ -10188,7 +11793,7 @@ wait:
out:
/* Catch the not-ready port failure after a port reset. */
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3317 HBA not functional: IP Reset Failed "
"try: echo fw_reset > board_mode\n");
rc = -ENODEV;
@@ -10513,7 +12118,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
if (!pmb) {
rc = -ENOMEM;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0474 Unable to allocate memory for issuing "
"MBOX_CONFIG_MSI command\n");
goto mem_fail_out;
@@ -10576,7 +12181,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
rc = pci_enable_msi(phba->pcidev);
if (!rc)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0462 PCI enable MSI mode success.\n");
+ "0012 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0471 PCI enable MSI mode failed (%d)\n", rc);
@@ -10596,6 +12201,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
/**
* lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
* @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
*
* This routine is invoked to enable device interrupt and associate driver's
* interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
@@ -10615,17 +12221,19 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
uint32_t intr_mode = LPFC_INTR_ERROR;
int retval;
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ if (retval)
+ return intr_mode;
+ phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
+
if (cfg_mode == 2) {
- /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
- retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ /* Now, try to enable MSI-X interrupt mode */
+ retval = lpfc_sli_enable_msix(phba);
if (!retval) {
- /* Now, try to enable MSI-X interrupt mode */
- retval = lpfc_sli_enable_msix(phba);
- if (!retval) {
- /* Indicate initialization to MSI-X mode */
- phba->intr_type = MSIX;
- intr_mode = 2;
- }
+ /* Indicate initialization to MSI-X mode */
+ phba->intr_type = MSIX;
+ intr_mode = 2;
}
}
@@ -10805,7 +12413,7 @@ lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_irq_chann; i++) {
eqhdl = lpfc_get_eq_hdl(i);
- eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->irq = LPFC_IRQ_EMPTY;
eqhdl->phba = phba;
}
}
@@ -10831,6 +12439,9 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat *c_stat;
+#endif
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
@@ -10990,7 +12601,7 @@ found_any:
/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
cpup->hdwq = idx;
idx++;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3333 Set Affinity: CPU %d (phys %d core %d): "
"hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
@@ -11068,7 +12679,7 @@ found_any:
start_cpu = first_cpu;
cpup->hdwq = new_cpup->hdwq;
logit:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
"hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
@@ -11082,10 +12693,17 @@ found_any:
idx = 0;
for_each_possible_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
continue;
cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3340 Set Affinity: not present "
"CPU %d hdwq %d\n",
@@ -11103,17 +12721,21 @@ found_any:
*
* @phba: pointer to lpfc hba data structure.
* @cpu: cpu going offline
- * @eqlist:
+ * @eqlist: eq list to append to
*/
-static void
+static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
struct list_head *eqlist)
{
const struct cpumask *maskp;
struct lpfc_queue *eq;
- cpumask_t tmp;
+ struct cpumask *tmp;
u16 idx;
+ tmp = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
maskp = pci_irq_get_affinity(phba->pcidev, idx);
if (!maskp)
@@ -11123,7 +12745,7 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
* then we don't need to poll the eq attached
* to it.
*/
- if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
continue;
/* get the cpus that are online and are affini-
* tized to this irq vector. If the count is
@@ -11131,8 +12753,8 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
* down this vector. Since this cpu has not
* gone offline yet, we need >1.
*/
- cpumask_and(&tmp, maskp, cpu_online_mask);
- if (cpumask_weight(&tmp) > 1)
+ cpumask_and(tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(tmp) > 1)
continue;
/* Now that we have an irq to shutdown, get the eq
@@ -11143,6 +12765,8 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
list_add(&eq->_poll_list, eqlist);
}
+ kfree(tmp);
+ return 0;
}
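
The switch from an on-stack cpumask_t to a kzalloc()'d mask above avoids a
large stack frame when NR_CPUS is big. For reference, a sketch of the kernel's
cpumask_var_t idiom, which heap-allocates only on CONFIG_CPUMASK_OFFSTACK=y
kernels (an illustrative alternative, not what this patch uses):

	cpumask_var_t tmp;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	/* ... use cpumask_and(tmp, ...) as in the loop above ... */
	free_cpumask_var(tmp);
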
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
@@ -11162,7 +12786,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
__lpfc_cpuhp_remove(phba);
@@ -11175,11 +12799,9 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
rcu_read_lock();
- if (!list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
rcu_read_unlock();
@@ -11215,7 +12837,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
cpumask_clear(&eqhdl->aff_mask);
cpumask_set_cpu(cpu, &eqhdl->aff_mask);
irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+ irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}
/**
@@ -11228,7 +12850,6 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
cpumask_clear(&eqhdl->aff_mask);
irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}
/**
@@ -11238,11 +12859,12 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
* @offline: true, cpu is going offline. false, cpu is coming online.
*
* If cpu is going offline, we'll try our best effort to find the next
- * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
*
- * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu.
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
*
- * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
* PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
*
**/
@@ -11252,14 +12874,14 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
struct lpfc_vector_map_info *cpup;
struct cpumask *aff_mask;
unsigned int cpu_select, cpu_next, idx;
- const struct cpumask *numa_mask;
+ const struct cpumask *orig_mask;
- if (!phba->cfg_irq_numa)
+ if (phba->irq_chann_mode == NORMAL_MODE)
return;
- numa_mask = &phba->sli4_hba.numa_mask;
+ orig_mask = &phba->sli4_hba.irq_aff_mask;
- if (!cpumask_test_cpu(cpu, numa_mask))
+ if (!cpumask_test_cpu(cpu, orig_mask))
return;
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -11268,9 +12890,9 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
return;
if (offline) {
- /* Find next online CPU on NUMA node */
- cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+ /* Find next online CPU on original mask */
+ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
@@ -11313,7 +12935,9 @@ static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
lpfc_irq_rebalance(phba, cpu, true);
- lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ if (retval)
+ return retval;
/* start polling on these eq's */
list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
@@ -11383,26 +13007,28 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
- const struct cpumask *numa_mask = NULL;
+ const struct cpumask *aff_mask = NULL;
unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_vector_map_info *cpup;
struct lpfc_hba_eq_hdl *eqhdl;
const struct cpumask *maskp;
- bool first;
unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
- cpu_cnt = cpumask_weight(numa_mask);
+ if (phba->irq_chann_mode != NORMAL_MODE)
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ if (aff_mask) {
+ cpu_cnt = cpumask_weight(aff_mask);
vectors = min(phba->cfg_irq_chann, cpu_cnt);
- /* cpu: iterates over numa_mask including offline or online
- * cpu_select: iterates over online numa_mask to set affinity
+ /* cpu: iterates over aff_mask including offline or online
+ * cpu_select: iterates over online aff_mask to set affinity
*/
- cpu = cpumask_first(numa_mask);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ cpu = cpumask_first(aff_mask);
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else {
flags |= PCI_IRQ_AFFINITY;
}
@@ -11424,9 +13050,17 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_DRIVER_HANDLER_NAME"%d", index);
eqhdl->idx = index;
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = pci_irq_vector(phba->pcidev, index);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0489 MSI-X fast-path (%d) "
+ "pci_irq_vec failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ eqhdl->irq = rc;
+
+ rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -11434,9 +13068,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
goto cfg_fail_out;
}
- eqhdl->irq = pci_irq_vector(phba->pcidev, index);
-
- if (phba->cfg_irq_numa) {
+ if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
lpfc_irq_set_aff(eqhdl, cpu_select);
@@ -11446,11 +13078,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_CPU_FIRST_IRQ,
cpu);
- /* Iterate to next offline or online cpu in numa_mask */
- cpu = cpumask_next(cpu, numa_mask);
+ /* Iterate to next offline or online cpu in aff_mask */
+ cpu = cpumask_next(cpu, aff_mask);
- /* Find next online cpu in numa_mask to set affinity */
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ /* Find next online cpu in aff_mask to set affinity */
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else if (vectors == 1) {
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
@@ -11458,24 +13090,34 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
} else {
maskp = pci_irq_get_affinity(phba->pcidev, index);
- first = true;
/* Loop through all CPUs associated with vector index */
for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
/* If this is the first CPU that's assigned to
* this vector, set LPFC_CPU_FIRST_IRQ.
+ *
+ * On certain platforms it's possible that irq
+ * vectors are affinitized to all the cpus.
+ * This can cause each cpu_map.eq to be set
+ * to the last vector, overwriting all the
+ * previous cpu_map.eq entries. Ensure that
+ * each vector receives a place in cpu_map.
+ * A later call to lpfc_cpu_affinity_check will
+ * ensure we are nicely balanced out.
*/
+ if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
lpfc_assign_eq_map_info(phba, index,
- first ?
- LPFC_CPU_FIRST_IRQ : 0,
+ LPFC_CPU_FIRST_IRQ,
cpu);
- if (first)
- first = false;
+ break;
}
}
}
if (vectors != phba->cfg_irq_chann) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3238 Reducing IO channels to match number of "
"MSI-X vectors, requested %d got %d\n",
phba->cfg_irq_chann, vectors);
@@ -11490,7 +13132,6 @@ cfg_fail_out:
for (--index; index >= 0; index--) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
@@ -11543,7 +13184,14 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
}
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ rc = pci_irq_vector(phba->pcidev, 0);
+ if (rc < 0) {
+ pci_free_irq_vectors(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0496 MSI pci_irq_vec failed (%d)\n", rc);
+ return rc;
+ }
+ eqhdl->irq = rc;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
@@ -11559,6 +13207,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
/**
* lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
* @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
*
* This routine is invoked to enable device interrupt and associate driver's
* interrupt handler(s) to interrupt vector(s) to device with SLI-4
@@ -11569,8 +13218,8 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
* MSI-X -> MSI -> IRQ.
*
* Return codes
- * 0 - successful
- * other values - error
+ * Interrupt mode (2, 1, 0) - successful
+ * LPFC_INTR_ERROR - error
**/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
@@ -11615,7 +13264,14 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
intr_mode = 0;
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ retval = pci_irq_vector(phba->pcidev, 0);
+ if (retval < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0502 INTR pci_irq_vec failed (%d)\n",
+ retval);
+ return LPFC_INTR_ERROR;
+ }
+ eqhdl->irq = retval;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
@@ -11650,7 +13306,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
for (index = 0; index < phba->cfg_irq_chann; index++) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
} else {
@@ -11748,17 +13403,17 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6424 NVMET XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!io_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6100 IO XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!els_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2878 ELS XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
@@ -11808,6 +13463,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
+ hrtimer_cancel(&phba->cmf_timer);
+
if (phba->pport)
phba->sli4_hba.intr_enable = 0;
@@ -11840,11 +13497,13 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Abort all iocbs associated with the hba */
lpfc_sli_hba_iocb_abort(phba);
- /* Wait for completion of device XRI exchange busy */
- lpfc_sli4_xri_exchange_busy_wait(phba);
+ if (!pci_channel_offline(phba->pcidev))
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
- lpfc_cpuhp_remove(phba);
+ if (phba->pport)
+ lpfc_cpuhp_remove(phba);
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -11859,15 +13518,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
- /* Unset the queues shared with the hardware then release all
- * allocated resources.
- */
- lpfc_sli4_queue_unset(phba);
- lpfc_sli4_queue_destroy(phba);
-
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* release all queue allocated resources. */
+ lpfc_sli4_queue_destroy(phba);
+
/* Free RAS DMA memory */
if (phba->ras_fwlog.ras_enabled)
lpfc_sli4_ras_dma_free(phba);
@@ -11877,76 +13533,236 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
phba->pport->work_port_events = 0;
}
- /**
- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
- * @phba: Pointer to HBA context object.
- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
- *
- * This function is called in the SLI4 code path to read the port's
- * sli4 capabilities.
- *
- * This function may be be called from any context that can block-wait
- * for the completion. The expectation is that this routine is called
- * typically from probe_one or from the online routine.
- **/
-int
-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+static uint32_t
+lpfc_cgn_crc32(uint32_t crc, u8 byte)
{
- int rc;
- struct lpfc_mqe *mqe;
- struct lpfc_pc_sli4_params *sli4_params;
- uint32_t mbox_tmo;
+ uint32_t msb = 0;
+ uint32_t bit;
- rc = 0;
- mqe = &mboxq->u.mqe;
+ for (bit = 0; bit < 8; bit++) {
+ msb = (crc >> 31) & 1;
+ crc <<= 1;
- /* Read the port's SLI4 Parameters port capabilities */
- lpfc_pc_sli4_params(mboxq);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ if (msb ^ (byte & 1)) {
+ crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
+ crc |= 1;
+ }
+ byte >>= 1;
}
+ return crc;
+}
- if (unlikely(rc))
- return 1;
+static uint32_t
+lpfc_cgn_reverse_bits(uint32_t wd)
+{
+ uint32_t result = 0;
+ uint32_t i;
- sli4_params = &phba->sli4_hba.pc_sli4_params;
- sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
- sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
- sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
- sli4_params->featurelevel_1 = bf_get(featurelevel_1,
- &mqe->un.sli4_params);
- sli4_params->featurelevel_2 = bf_get(featurelevel_2,
- &mqe->un.sli4_params);
- sli4_params->proto_types = mqe->un.sli4_params.word3;
- sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
- sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
- sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
- sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
- sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
- sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
- sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
- sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
- sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
- sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
- sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
- sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
- sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
- sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
- sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
- sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
- sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
- sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
- sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
- sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+ for (i = 0; i < 32; i++) {
+ result <<= 1;
+ result |= (1 & (wd >> i));
+ }
+ return result;
+}
- /* Make sure that sge_supp_len can be handled by the driver */
- if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
- sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+/*
+ * The routine corresponds with the algorithm the HBA firmware
+ * uses to validate the data integrity.
+ */
+uint32_t
+lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
+{
+ uint32_t i;
+ uint32_t result;
+ uint8_t *data = (uint8_t *)ptr;
- return rc;
+ for (i = 0; i < byteLen; ++i)
+ crc = lpfc_cgn_crc32(crc, data[i]);
+
+ result = ~lpfc_cgn_reverse_bits(crc);
+ return result;
+}
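
The firmware-compatible CRC is therefore: run the bitwise CRC over every byte
with the given seed, then bit-reverse the 32-bit result and take its one's
complement. A hedged sketch of how a consumer might re-check a congestion
buffer (hypothetical helper; assumes the stored CRC field is excluded or
zeroed before recomputing, mirroring how the init paths below compute the CRC
before writing cgn_info_crc):

	static bool lpfc_cgn_buf_crc_ok(struct lpfc_cgn_info *cp)
	{
		uint32_t crc;

		crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
					  LPFC_CGN_CRC32_SEED);
		return cp->cgn_info_crc == cpu_to_le32(crc);
	}
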
+
+void
+lpfc_init_congestion_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_info *cp;
+ struct timespec64 cmpl_time;
+ struct tm broken;
+ uint16_t size;
+ uint32_t crc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
+
+ if (!phba->cgn_i)
+ return;
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_warn_cnt, 0);
+
+ atomic_set(&phba->cgn_driver_evt_cnt, 0);
+ atomic_set(&phba->cgn_latency_evt_cnt, 0);
+ atomic64_set(&phba->cgn_latency_evt, 0);
+ phba->cgn_evt_minute = 0;
+ phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
+
+ memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
+ cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
+ cp->cgn_info_version = LPFC_CGN_INFO_V3;
+
+ /* cgn parameters */
+ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+ cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+ cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+ cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+
+ ktime_get_real_ts64(&cmpl_time);
+ time64_to_tm(cmpl_time.tv_sec, 0, &broken);
+
+ cp->cgn_info_month = broken.tm_mon + 1;
+ cp->cgn_info_day = broken.tm_mday;
+ cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
+ cp->cgn_info_hour = broken.tm_hour;
+ cp->cgn_info_minute = broken.tm_min;
+ cp->cgn_info_second = broken.tm_sec;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "2643 CGNInfo Init: Start Time "
+ "%d/%d/%d %d:%d:%d\n",
+ cp->cgn_info_day, cp->cgn_info_month,
+ cp->cgn_info_year, cp->cgn_info_hour,
+ cp->cgn_info_minute, cp->cgn_info_second);
+
+ /* Fill in default LUN qdepth */
+ if (phba->pport) {
+ size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
+ cp->cgn_lunq = cpu_to_le16(size);
+ }
+
+ /* last used Index initialized to 0xff already */
+
+ cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+ cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+
+ phba->cgn_evt_timestamp = jiffies +
+ msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
+}
+
+void
+lpfc_init_congestion_stat(struct lpfc_hba *phba)
+{
+ struct lpfc_cgn_info *cp;
+ struct timespec64 cmpl_time;
+ struct tm broken;
+ uint32_t crc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6236 INIT Congestion Stat %p\n", phba->cgn_i);
+
+ if (!phba->cgn_i)
+ return;
+
+ cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+ memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
+
+ ktime_get_real_ts64(&cmpl_time);
+ time64_to_tm(cmpl_time.tv_sec, 0, &broken);
+
+ cp->cgn_stat_month = broken.tm_mon + 1;
+ cp->cgn_stat_day = broken.tm_mday;
+ cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
+ cp->cgn_stat_hour = broken.tm_hour;
+ cp->cgn_stat_minute = broken.tm_min;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+ "2647 CGNstat Init: Start Time "
+ "%d/%d/%d %d:%d\n",
+ cp->cgn_stat_day, cp->cgn_stat_month,
+ cp->cgn_stat_year, cp->cgn_stat_hour,
+ cp->cgn_stat_minute);
+
+ crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+ cp->cgn_info_crc = cpu_to_le32(crc);
+}
+
+/**
+ * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
+ * @phba: Pointer to hba context object.
+ * @reg: flag to determine register or unregister.
+ */
+static int
+__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
+{
+ struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ int length, rc;
+
+ if (!phba->cgn_i)
+ return -ENXIO;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2641 REG_CONGESTION_BUF mbox allocation fail: "
+ "HBA state x%x reg %d\n",
+ phba->pport->port_state, reg);
+ return -ENOMEM;
+ }
+
+ length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
+ LPFC_SLI4_MBX_EMBED);
+ reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
+ bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
+ if (reg > 0)
+ bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
+ else
+ bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
+ reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
+ reg_congestion_buf->addr_lo =
+ putPaddrLow(phba->cgn_i->phys);
+ reg_congestion_buf->addr_hi =
+ putPaddrHigh(phba->cgn_i->phys);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2642 REG_CONGESTION_BUF mailbox "
+ "failed with status x%x add_status x%x,"
+ " mbx status x%x reg %d\n",
+ shdr_status, shdr_add_status, rc, reg);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+int
+lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
+{
+ lpfc_cmf_stop(phba);
+ return __lpfc_reg_congestion_buf(phba, 0);
+}
+
+int
+lpfc_reg_congestion_buf(struct lpfc_hba *phba)
+{
+ return __lpfc_reg_congestion_buf(phba, 1);
}
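
A hypothetical caller pairs the two wrappers around the congestion buffer's
lifetime; note the unregister path also stops CMF first via lpfc_cmf_stop()
(sketch only, label name is illustrative):

	/* After phba->cgn_i has been allocated and initialized: */
	rc = lpfc_reg_congestion_buf(phba);
	if (rc)
		goto out_free_cgn;	/* hypothetical error label */

	/* ... congestion monitoring runs ... */

	/* On teardown (also invoked from lpfc_pci_remove_one_s4 below): */
	lpfc_unreg_congestion_buf(phba);
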
/**
@@ -12007,7 +13823,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
else
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
- sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
+ mbx_sli4_parameters);
sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
@@ -12056,7 +13873,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
bf_get(cfg_xib, mbx_sli4_parameters),
phba->cfg_enable_fc4_type);
fcponly:
- phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
phba->cfg_nvme_seg_cnt = 0;
@@ -12074,9 +13890,10 @@ fcponly:
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
- if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+ /* Enable embedded Payload BDE if support is indicated */
+ if (bf_get(cfg_pbde, mbx_sli4_parameters))
+ phba->cfg_enable_pbde = 1;
+ else
phba->cfg_enable_pbde = 0;
/*
@@ -12114,7 +13931,7 @@ fcponly:
"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
phba->cfg_enable_pbde,
- phba->fcp_embed_io, phba->nvme_support,
+ phba->fcp_embed_io, sli4_params->nvme,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -12252,14 +14069,14 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0431 Failed to enable interrupt.\n");
error = -ENODEV;
goto out_free_sysfs_attr;
}
/* SLI-3 HBA setup */
if (lpfc_sli_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1477 Failed to set up hba\n");
error = -ENODEV;
goto out_remove_device;
@@ -12351,10 +14168,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Remove FC host and then SCSI host with the physical port */
+ /* Remove FC host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+ /* Clean up all nodes, mailboxes and IOs. */
lpfc_cleanup(vport);
/*
@@ -12417,8 +14235,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev_d: pointer to device
*
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) to device with SLI-3 interface spec. When
@@ -12436,10 +14253,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one_s3(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12453,16 +14270,12 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
/* Disable interrupt from device */
lpfc_sli_disable_intr(phba);
- /* Save device state to PCI config space */
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
/**
* lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
- * @pdev: pointer to PCI device
+ * @dev_d: pointer to device
*
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) to device with SLI-3 interface spec. When PM
@@ -12479,10 +14292,10 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
* 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one_s3(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
@@ -12490,19 +14303,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 PCI device Power Management resume.\n");
- /* Restore device state from PCI config space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
- if (pdev->is_busmaster)
- pci_set_master(pdev);
-
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
@@ -12514,10 +14314,14 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
return error;
}
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0430 PM resume Failed to enable interrupt\n");
return -EIO;
} else
@@ -12543,7 +14347,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2723 PCI channel I/O abort preparing for recovery\n");
/*
@@ -12564,7 +14368,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2710 PCI channel disable preparing for reset\n");
/* Block any management I/Os to the device */
@@ -12595,10 +14399,11 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
+ lpfc_sli4_prep_dev_for_reset(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12646,7 +14451,7 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_DISCONNECT;
default:
/* Unknown state, prepare and request slot reset */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0472 Unknown PCI error state: x%x\n", state);
lpfc_sli_prep_dev_for_reset(phba);
return PCI_ERS_RESULT_NEED_RESET;
@@ -12704,7 +14509,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0427 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
@@ -12795,7 +14600,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
const struct firmware *fw)
{
int rc;
+ u8 sli_family;
+ sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
/* Three cases: (1) FW was not supported on the detected adapter.
* (2) FW update has been locked out administratively.
* (3) Some other error during FW update.
@@ -12803,11 +14610,13 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
* for admin diagnosis.
*/
if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
magic_number != MAGIC_NUMBER_G6) ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMBER_G7)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
+ magic_number != MAGIC_NUMBER_G7) ||
+ (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
+ magic_number != MAGIC_NUMBER_G7P)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3030 This firmware version is not supported on"
" this HBA model. Device:%x Magic:%x Type:%x "
"ID:%x Size %d %zd\n",
@@ -12815,7 +14624,7 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
fsize, fw->size);
rc = -EINVAL;
} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3021 Firmware downloads have been prohibited "
"by a system configuration setting on "
"Device:%x Magic:%x Type:%x ID:%x Size %d "
@@ -12824,7 +14633,7 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
fsize, fw->size);
rc = -EACCES;
} else {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3022 FW Download failed. Add Status x%x "
"Device:%x Magic:%x Type:%x ID:%x Size %d "
"%zd\n",
@@ -12839,7 +14648,6 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
* @context: pointer to lpfc hba data structure (passed to request_firmware_nowait).
- * @ret: return value this routine provides to the caller.
*
**/
static void
@@ -12869,7 +14677,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
INIT_LIST_HEAD(&dma_buffer_list);
lpfc_decode_firmware_rev(phba, fwrev, 1);
if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3023 Updating Firmware, Current Version:%s "
"New Version:%s\n",
fwrev, image->revision);
@@ -12919,7 +14727,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
}
rc = offset;
} else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3029 Skipped Firmware update, Current "
"Version:%s New Version:%s\n",
fwrev, image->revision);
@@ -12934,16 +14742,17 @@ release_out:
release_firmware(fw);
out:
if (rc < 0)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3062 Firmware update error, status %d.\n", rc);
else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3024 Firmware update success: size %d.\n", rc);
}
/**
* lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
* @phba: pointer to lpfc hba data structure.
+ * @fw_upgrade: which firmware to update.
*
* This routine is called to perform Linux generic firmware upgrade on device
* that supports such feature.
@@ -12963,7 +14772,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
if (fw_upgrade == INT_FW_UPGRADE) {
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
file_name, &phba->pcidev->dev,
GFP_KERNEL, (void *)phba,
lpfc_write_firmware);
@@ -13010,6 +14819,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
if (!phba)
return -ENOMEM;
+ INIT_LIST_HEAD(&phba->poll_list);
+
/* Perform generic PCI device enabling operation */
error = lpfc_enable_pci_dev(phba);
if (error)
@@ -13066,7 +14877,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0426 Failed to enable interrupt.\n");
error = -ENODEV;
goto out_unset_driver_resource;
@@ -13101,7 +14912,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1421 Failed to set up hba\n");
error = -ENODEV;
goto out_free_sysfs_attr;
@@ -13126,7 +14937,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
*/
error = lpfc_nvme_create_localport(vport);
if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6004 NVME registration "
"failed, error x%x\n",
error);
@@ -13141,10 +14952,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
- /* Enable RAS FW log support */
- lpfc_sli4_ras_setup(phba);
-
- INIT_LIST_HEAD(&phba->poll_list);
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
return 0;
@@ -13192,8 +15000,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
+ if (phba->cgn_i)
+ lpfc_unreg_congestion_buf(phba);
- /* Free the HBA sysfs attributes */
lpfc_free_sysfs_attr(vport);
/* Release all the vports against this physical port */
@@ -13206,7 +15015,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Remove FC host and then SCSI host with the physical port */
+ /* Remove FC host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
@@ -13258,8 +15067,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev_d: pointer to device
*
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) to device with SLI-4 interface spec. When PM invokes
@@ -13277,10 +15085,10 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one_s4(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -13295,16 +15103,12 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
lpfc_sli4_disable_intr(phba);
lpfc_sli4_queue_destroy(phba);
- /* Save device state to PCI config space */
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
/**
* lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
- * @pdev: pointer to PCI device
+ * @dev_d: pointer to device
*
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) to device with SLI-4 interface spec. When PM invokes
@@ -13321,10 +15125,10 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
* 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one_s4(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
@@ -13332,19 +15136,6 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0292 PCI device Power Management resume.\n");
- /* Restore device state from PCI config space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
- if (pdev->is_busmaster)
- pci_set_master(pdev);
-
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
@@ -13359,7 +15150,7 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0294 PM resume Failed to enable interrupt\n");
return -EIO;
} else
@@ -13385,7 +15176,7 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2828 PCI channel I/O abort preparing for recovery\n");
/*
* There may be errored I/Os through HBA, abort all I/Os on txcmplq
@@ -13405,24 +15196,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
+ int offline = pci_channel_offline(phba->pcidev);
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2826 PCI channel disable preparing for reset\n");
+ "2826 PCI channel disable preparing for reset offline"
+ " %d\n", offline);
/* Block any management I/Os to the device */
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
+ /* HBA_PCI_ERR was set in io_error_detect */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
/* Flush all driver's outstanding I/Os as we are to reset */
lpfc_sli_flush_io_rings(phba);
+ lpfc_offline(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
- lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
}
@@ -13437,7 +15232,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2827 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
@@ -13471,6 +15266,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ bool hba_pci_err;
switch (state) {
case pci_channel_io_normal:
@@ -13478,16 +15274,26 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Fatal error, prepare for slot reset */
- lpfc_sli4_prep_dev_for_reset(phba);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2832 Already handling PCI error "
+ "state: x%x\n", state);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
+ set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
/* Unknown state, prepare and request slot reset */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
lpfc_sli4_prep_dev_for_reset(phba);
return PCI_ERS_RESULT_NEED_RESET;
@@ -13519,16 +15325,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
uint32_t intr_mode;
+ bool hba_pci_err;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
printk(KERN_ERR "lpfc: Cannot re-enable "
- "PCI device after reset.\n");
+ "PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
+ hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ dev_info(&pdev->dev,
+ "hba_pci_err was not set, recovering slot reset.\n");
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -13542,15 +15353,18 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2824 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
} else
phba->intr_mode = intr_mode;
+ lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
/* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);
@@ -13582,8 +15396,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
lpfc_online(phba);
@@ -13650,7 +15462,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_pci_remove_one_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1424 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13660,8 +15472,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev: pointer to device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it dispatches
@@ -13672,22 +15483,22 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int rc = -ENODEV;
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
- rc = lpfc_pci_suspend_one_s3(pdev, msg);
+ rc = lpfc_pci_suspend_one_s3(dev);
break;
case LPFC_PCI_DEV_OC:
- rc = lpfc_pci_suspend_one_s4(pdev, msg);
+ rc = lpfc_pci_suspend_one_s4(dev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1425 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13697,7 +15508,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
/**
* lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
- * @pdev: pointer to PCI device
+ * @dev: pointer to device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it dispatches
@@ -13708,22 +15519,22 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
* 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int rc = -ENODEV;
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
- rc = lpfc_pci_resume_one_s3(pdev);
+ rc = lpfc_pci_resume_one_s3(dev);
break;
case LPFC_PCI_DEV_OC:
- rc = lpfc_pci_resume_one_s4(pdev);
+ rc = lpfc_pci_resume_one_s4(dev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1426 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13753,6 +15564,10 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ if (phba->link_state == LPFC_HBA_ERROR &&
+ phba->hba_flag & HBA_IOQ_FLUSH)
+ return PCI_ERS_RESULT_NEED_RESET;
+
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
rc = lpfc_io_error_detected_s3(pdev, state);
@@ -13761,7 +15576,7 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
rc = lpfc_io_error_detected_s4(pdev, state);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1427 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13798,7 +15613,7 @@ lpfc_io_slot_reset(struct pci_dev *pdev)
rc = lpfc_io_slot_reset_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1428 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13830,7 +15645,7 @@ lpfc_io_resume(struct pci_dev *pdev)
lpfc_io_resume_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1429 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13876,17 +15691,18 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
- switch (phba->pcidev->device) {
- case PCI_DEVICE_ID_LANCER_G6_FC:
- case PCI_DEVICE_ID_LANCER_G7_FC:
+ /* if ASIC_GEN_NUM >= 0xC) */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) ||
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_G6)) {
phba->ras_fwlog.ras_hwsupport = true;
if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
phba->cfg_ras_fwlog_buffsize)
phba->ras_fwlog.ras_enabled = true;
else
phba->ras_fwlog.ras_enabled = false;
- break;
- default:
+ } else {
phba->ras_fwlog.ras_hwsupport = false;
}
}
@@ -13900,14 +15716,17 @@ static const struct pci_error_handlers lpfc_err_handler = {
.resume = lpfc_io_resume,
};
+static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
+ lpfc_pci_suspend_one,
+ lpfc_pci_resume_one);
+
static struct pci_driver lpfc_driver = {
.name = LPFC_DRIVER_NAME,
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
.shutdown = lpfc_pci_remove_one,
- .suspend = lpfc_pci_suspend_one,
- .resume = lpfc_pci_resume_one,
+ .driver.pm = &lpfc_pci_pm_ops_one,
.err_handler = &lpfc_err_handler,
};
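
SIMPLE_DEV_PM_OPS wires the same pair of callbacks into all system-sleep
transitions; the table above expands to roughly the following (simplified
sketch of the macro's effect, not literal kernel source):

	static const struct dev_pm_ops lpfc_pci_pm_ops_one = {
		.suspend  = lpfc_pci_suspend_one,
		.resume   = lpfc_pci_resume_one,
		.freeze   = lpfc_pci_suspend_one,
		.thaw     = lpfc_pci_resume_one,
		.poweroff = lpfc_pci_suspend_one,
		.restore  = lpfc_pci_resume_one,
	};
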
@@ -13938,32 +15757,35 @@ lpfc_init(void)
{
int error = 0;
- printk(LPFC_MODULE_DESC "\n");
- printk(LPFC_COPYRIGHT "\n");
+ pr_info(LPFC_MODULE_DESC "\n");
+ pr_info(LPFC_COPYRIGHT "\n");
error = misc_register(&lpfc_mgmt_dev);
if (error)
printk(KERN_ERR "Could not register lpfcmgmt device, "
"misc_register returned with status %d", error);
+ error = -ENOMEM;
lpfc_transport_functions.vport_create = lpfc_vport_create;
lpfc_transport_functions.vport_delete = lpfc_vport_delete;
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
if (lpfc_transport_template == NULL)
- return -ENOMEM;
+ goto unregister;
lpfc_vport_transport_template =
fc_attach_transport(&lpfc_vport_transport_functions);
if (lpfc_vport_transport_template == NULL) {
fc_release_transport(lpfc_transport_template);
- return -ENOMEM;
+ goto unregister;
}
- lpfc_nvme_cmd_template();
+ lpfc_wqe_cmd_template();
lpfc_nvmet_cmd_template();
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ lpfc_pldv_detect = false;
+
error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"lpfc/sli4:online",
lpfc_cpu_online, lpfc_cpu_offline);
@@ -13982,10 +15804,91 @@ unwind:
cpuhp_failure:
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
+unregister:
+ misc_deregister(&lpfc_mgmt_dev);
return error;
}
+void lpfc_dmp_dbg(struct lpfc_hba *phba)
+{
+ unsigned int start_idx;
+ unsigned int dbg_cnt;
+ unsigned int temp_idx;
+ int i;
+ int j = 0;
+ unsigned long rem_nsec;
+
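+ /* Allow only one dumper at a time; concurrent callers simply return. */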
+ if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
+ return;
+
+ start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
+ dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
+ if (!dbg_cnt)
+ goto out;
+ temp_idx = start_idx;
+ if (dbg_cnt >= DBG_LOG_SZ) {
+ dbg_cnt = DBG_LOG_SZ;
+ temp_idx -= 1;
+ } else {
+ if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
+ temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
+ } else {
+ if (start_idx < dbg_cnt)
+ start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
+ else
+ start_idx -= dbg_cnt;
+ }
+ }
+ dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
+ start_idx, temp_idx, dbg_cnt);
+
+ for (i = 0; i < dbg_cnt; i++) {
+ if ((start_idx + i) < DBG_LOG_SZ)
+ temp_idx = (start_idx + i) % DBG_LOG_SZ;
+ else
+ temp_idx = j++;
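+ /* do_div() leaves the quotient (seconds) in t_ns, returns ns remainder */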
+ rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
+ dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
+ temp_idx,
+ (unsigned long)phba->dbg_log[temp_idx].t_ns,
+ rem_nsec / 1000,
+ phba->dbg_log[temp_idx].log);
+ }
+out:
+ atomic_set(&phba->dbg_log_cnt, 0);
+ atomic_set(&phba->dbg_log_dmping, 0);
+}
+
+__printf(2, 3)
+void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
+{
+ unsigned int idx;
+ va_list args;
+ int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
+ struct va_format vaf;
+
+ va_start(args, fmt);
+ if (unlikely(dbg_dmping)) {
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ dev_info(&phba->pcidev->dev, "%pV", &vaf);
+ va_end(args);
+ return;
+ }
+ idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
+ DBG_LOG_SZ;
+
+ atomic_inc(&phba->dbg_log_cnt);
+
+ vscnprintf(phba->dbg_log[idx].log,
+ sizeof(phba->dbg_log[idx].log), fmt, args);
+ va_end(args);
+
+ phba->dbg_log[idx].t_ns = local_clock();
+}
+
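For readers new to the pattern: lpfc_dbg_print claims a slot with an atomic fetch-and-add so concurrent writers never block each other, and lpfc_dmp_dbg later replays the ring. A standalone sketch of the same idea using C11 atomics (the buffer size and entry layout here are assumptions, not the driver's definitions):

#include <stdatomic.h>
#include <stdio.h>

#define DBG_LOG_SZ 256		/* assumed; the driver defines its own */

static struct { char msg[128]; } dbg_log[DBG_LOG_SZ];
static atomic_uint dbg_log_idx;
static atomic_uint dbg_log_cnt;

static void dbg_print(const char *text)
{
	/* Claim a slot first; writers never wait on one another. */
	unsigned int idx = atomic_fetch_add(&dbg_log_idx, 1) % DBG_LOG_SZ;

	atomic_fetch_add(&dbg_log_cnt, 1);
	snprintf(dbg_log[idx].msg, sizeof(dbg_log[idx].msg), "%s", text);
}

Once more than DBG_LOG_SZ messages accumulate, older slots are overwritten; that is why lpfc_dmp_dbg clamps dbg_cnt to DBG_LOG_SZ and recomputes the oldest index before printing.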
/**
* lpfc_exit - lpfc module removal routine
*
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 148d02a27b58..b39cefcd8703 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,7 +35,7 @@
#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
#define LOG_LIBDFC 0x00002000 /* Libdfc events */
#define LOG_VPORT 0x00004000 /* NPIV events */
-#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_LDS_EVENT 0x00008000 /* Link Degrade Signaling events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
@@ -44,7 +44,14 @@
#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */
#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
-#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+#define LOG_RSVD1 0x01000000 /* Reserved */
+#define LOG_RSVD2 0x02000000 /* Reserved */
+#define LOG_CGN_MGMT 0x04000000 /* Congestion Mgmt events */
+#define LOG_TRACE_EVENT 0x80000000 /* Dump the DBG log on this err */
+#define LOG_ALL_MSG 0x7fffffff /* LOG all messages */
+
+void lpfc_dmp_dbg(struct lpfc_hba *phba);
+void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...);
/* generate message by verbose log setting or severity */
#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
@@ -65,9 +72,15 @@ do { \
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
- { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
+ { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
+ if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \
+ lpfc_dmp_dbg((vport)->phba); \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
- fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
+ } else if (!(vport)->cfg_log_verbose) \
+ lpfc_dbg_print((vport)->phba, "%d:(%d):" fmt, \
+ (vport)->phba->brd_no, (vport)->vpi, ##arg); \
+ } \
} while (0)
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
@@ -75,8 +88,12 @@ do { \
{ uint32_t log_verbose = (phba)->pport ? \
(phba)->pport->cfg_log_verbose : \
(phba)->cfg_log_verbose; \
- if (((mask) & log_verbose) || (level[1] <= '3')) \
+ if (((mask) & log_verbose) || (level[1] <= '3')) { \
+ if ((mask) & LOG_TRACE_EVENT && !log_verbose) \
+ lpfc_dmp_dbg(phba); \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
- fmt, phba->brd_no, ##arg); \
+ fmt, phba->brd_no, ##arg); \
+ } else if (!log_verbose) \
+ lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
} \
} while (0)
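
Both macros above share one gate, restated here as a standalone sketch (the *_stub functions stand in for lpfc_dmp_dbg, lpfc_dbg_print and dev_printk; level is a kernel-style severity prefix whose second character is the digit, as in KERN_ERR):

#include <stdio.h>

#define LOG_TRACE_EVENT 0x80000000u

static void dmp_dbg_stub(void)             { puts("<dump ring buffer>"); }
static void dbg_buffer_stub(const char *m) { (void)m; /* ring-buffer it */ }

static void log_msg(unsigned int mask, unsigned int log_verbose,
		    const char *level, const char *msg)
{
	if ((mask & log_verbose) || level[1] <= '3') {
		/* Trace event while verbosity is off: flush the ring
		 * buffer first so the message prints with its history. */
		if ((mask & LOG_TRACE_EVENT) && !log_verbose)
			dmp_dbg_stub();
		printf("%s\n", msg);	/* dev_printk() in the driver */
	} else if (!log_verbose) {
		/* Quiet path: buffer the message for a later dump. */
		dbg_buffer_stub(msg);
	}
}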
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index d1773c01d2b3..9858b1743769 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,80 @@
#include "lpfc_compat.h"
/**
+ * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * one or more DMA buffers for the data transfer. This routine provides
+ * a standard framework for allocating the dma buffer and assigning to the
+ * @mbox. Callers should clean up the mbox with a call to
+ * lpfc_mbox_rsrc_cleanup.
+ *
+ * The lpfc_mbuf_alloc routine acquires the hbalock, so the caller is
+ * responsible for ensuring the hbalock is released. Also note that the
+ * driver design is a single dmabuf/mbuf per mbox in the ctx_buf.
+ *
+ **/
+int
+lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_dmabuf *mp;
+
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ kfree(mp);
+ return -ENOMEM;
+ }
+
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+
+ /* Initialization only. Driver does not use a list of dmabufs. */
+ INIT_LIST_HEAD(&mp->list);
+ mbox->ctx_buf = mp;
+ return 0;
+}
+
+/**
+ * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ * @locked: MBOX_THD_LOCKED if the hbalock is held, MBOX_THD_UNLOCKED if not.
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * possibly a DMA buffer for the data transfer. This routine provides
+ * a standard framework for releasing any DMA buffers, freeing all
+ * memory resources, and returning the @mbox to the @phba pool.
+ * Callers should use this routine to clean up all mailboxes prepped with
+ * lpfc_mbox_rsrc_prep.
+ *
+ **/
+void
+lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ enum lpfc_mbox_ctx locked)
+{
+ struct lpfc_dmabuf *mp;
+
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mbox->ctx_buf = NULL;
+
+ /* Release the generic BPL buffer memory. */
+ if (mp) {
+ if (locked == MBOX_THD_LOCKED)
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ else
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
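A hedged usage sketch for the new prep/cleanup pair (the command setup in the middle is elided and the error handling kept minimal):

static int send_some_mbox_cmd(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Attaches a zeroed DMA buffer to mbox->ctx_buf. */
	rc = lpfc_mbox_rsrc_prep(phba, mbox);
	if (rc) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return rc;
	}

	/* ... fill in mbox->u.mb / mbox->u.mqe and issue the command ... */

	/* One call frees the DMA buffer and returns mbox to the pool. */
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
	return 0;
}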
+/**
* lpfc_dump_static_vport - Dump HBA's static vport information.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
@@ -61,6 +135,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
{
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
+ int rc;
mb = &pmb->u.mb;
@@ -79,22 +154,15 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
return 0;
}
- /* For SLI4 HBAs driver need to allocate memory */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2605 lpfc_dump_static_vport: memory"
- " allocation failed\n");
+ "2605 %s: memory allocation failed\n",
+ __func__);
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
- /* save address for completion */
- pmb->ctx_buf = (uint8_t *)mp;
+
+ mp = pmb->ctx_buf;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -429,7 +497,7 @@ lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
/*
- * SLI-3, Message Signaled Interrupt Fearure.
+ * SLI-3, Message Signaled Interrupt Feature.
*/
/* Multi-message attention configuration */
@@ -513,8 +581,9 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ /* Topology handling for ASIC_GEN_NUM 0xC and later */
+ if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+ phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
!(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
@@ -522,7 +591,8 @@ lpfc_init_link(struct lpfc_hba * phba,
}
/* Enable asynchronous ABTS responses from firmware */
- mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+ if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp)
+ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
/* NEW_FEATURE
* Setting up the link speed
@@ -604,26 +674,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
+ int rc;
- mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->mbxOwner = OWN_HOST;
-
/* Get a buffer to hold the HBAs Service Parameters */
-
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
- mb->mbxCommand = MBX_READ_SPARM64;
- /* READ_SPARAM: no buffers */
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0301 READ_SPARAM: no buffers\n");
- return (1);
+ return 1;
}
- INIT_LIST_HEAD(&mp->list);
+
+ mp = pmb->ctx_buf;
+ mb = &pmb->u.mb;
+ mb->mbxOwner = OWN_HOST;
mb->mbxCommand = MBX_READ_SPARM64;
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -631,9 +696,6 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
if (phba->sli_rev >= LPFC_SLI_REV3)
mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
- /* save address for completion */
- pmb->ctx_buf = mp;
-
return (0);
}
@@ -754,6 +816,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
MAILBOX_t *mb = &pmb->u.mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
+ int rc;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -764,12 +827,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
+
/* Get a buffer to hold NPorts Service Parameters */
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
@@ -777,15 +838,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
"rpi x%x\n", vpi, did, rpi);
return 1;
}
- INIT_LIST_HEAD(&mp->list);
- sparam = mp->virt;
/* Copy param's into a new buffer */
+ mp = pmb->ctx_buf;
+ sparam = mp->virt;
memcpy(sparam, param, sizeof (struct serv_parm));
- /* save address for completion */
- pmb->ctx_buf = (uint8_t *)mp;
-
+ /* Finish initializing the mailbox. */
mb->mbxCommand = MBX_REG_LOGIN64;
mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -868,9 +927,7 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
/**
* lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
- * @phba: pointer to lpfc hba data structure.
- * @vpi: virtual N_Port identifier.
- * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
+ * @vport: pointer to a vport object.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The registration vport identifier mailbox command is used to activate a
@@ -1199,7 +1256,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
/**
* lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
* @phba: pointer to lpfc hba data structure.
- * @ring:
+ * @ring: ring number/index
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure ring mailbox command is used to configure an IOCB ring. This
@@ -1299,8 +1356,6 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- if (phba->cfg_enable_dss)
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
@@ -1380,7 +1435,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
- phba->host_gp = &phba->mbox->us.s2.host[0];
+ phba->host_gp = (struct lpfc_hgp __iomem *)
+ &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
offset = (uint8_t *)&phba->mbox->us.s2.host -
(uint8_t *)phba->slim2p.virt;
@@ -1614,7 +1670,7 @@ lpfc_mbox_dev_check(struct lpfc_hba *phba)
/**
* lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
* @phba: pointer to lpfc hba data structure.
- * @cmd: mailbox command code.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
*
* This routine retrieves the proper timeout value according to the mailbox
* command code.
@@ -1701,6 +1757,7 @@ lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
* lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
* @mbox: pointer to lpfc mbox command.
* @sgentry: sge entry index.
+ * @sge: pointer to lpfc mailbox sge to load into.
*
* This routine gets an entry from the non-embedded mailbox command at the sge
* index location.
@@ -1723,7 +1780,9 @@ lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
- * This routine frees SLI4 specific mailbox command for sending IOCTL command.
+ * This routine cleans up and releases an SLI4 mailbox command that was
+ * configured using lpfc_sli4_config. It accounts for the embedded and
+ * non-embedded config types.
**/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
@@ -1768,6 +1827,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
* @length: Length of the sli4 config mailbox command (including sub-header).
+ * @emb: True if embedded mbox command should be setup.
*
* This routine sets up the header fields of SLI4 specific mailbox command
* for sending IOCTL command.
@@ -2013,6 +2073,7 @@ lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
/**
* lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
* @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
* @fcf_index: index to fcf table.
*
* This routine allocates and constructs non-embedded mailbox command
@@ -2069,6 +2130,7 @@ lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
/**
* lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
+ * @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to lpfc mbox command.
*
* This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
@@ -2099,6 +2161,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
}
+
+ /* Enable Application Services Header for appheader VMID */
+ if (phba->cfg_vmid_app_header) {
+ bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1);
+ }
return;
}
@@ -2268,33 +2336,24 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
+ int rc;
memset(mbox, 0, sizeof(*mbox));
mb = &mbox->u.mb;
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
- if (!mp || !mp->virt) {
- kfree(mp);
- /* dump config region 23 failed to allocate memory */
+ rc = lpfc_mbox_rsrc_prep(phba, mbox);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "2569 lpfc dump config region 23: memory"
- " allocation failed\n");
+ "2569 %s: memory allocation failed\n",
+ __func__);
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
-
- /* save address for completion */
- mbox->ctx_buf = (uint8_t *)mp;
-
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.region_id = DMP_REGION_23;
mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+ mp = mbox->ctx_buf;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
@@ -2317,7 +2376,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
rc = SUCCESS;
mbx_failed:
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, rc);
}
@@ -2329,30 +2388,25 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
(struct lpfc_rdp_context *)(mbox->ctx_ndlp);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
- goto error_mbuf_free;
+ goto error_mbox_free;
lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
DMP_SFF_PAGE_A2_SIZE);
- /* We don't need dma buffer for link stat. */
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-
- memset(mbox, 0, sizeof(*mbox));
lpfc_read_lnk_stat(phba, mbox);
mbox->vport = rdp_context->ndlp->vport;
+
+ /* Save the dma buffer for cleanup in the final completion. */
+ mbox->ctx_buf = mp;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
- goto error_cmd_free;
+ goto error_mbox_free;
return;
-error_mbuf_free:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-error_cmd_free:
- lpfc_sli4_mbox_cmd_free(phba, mbox);
+error_mbox_free:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
@@ -2400,15 +2454,13 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
return;
error:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- lpfc_sli4_mbox_cmd_free(phba, mbox);
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
/*
- * lpfc_sli4_dump_sfp_pagea0 - Dump sli4 read SFP Diagnostic.
+ * lpfc_sli4_dump_page_a0 - Dump sli4 read SFP Diagnostic.
* @phba: pointer to the hba structure.
* @mbox: pointer to lpfc mbox command to initialize.
*
@@ -2418,27 +2470,19 @@ error:
int
lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
+ int rc;
struct lpfc_dmabuf *mp = NULL;
memset(mbox, 0, sizeof(*mbox));
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, mbox);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"3569 dump type 3 page 0xA0 allocation failed\n");
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
-
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
- /* save address for completion */
- mbox->ctx_buf = mp;
-
bf_set(lpfc_mbx_memory_dump_type3_type,
&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
bf_set(lpfc_mbx_memory_dump_type3_link,
@@ -2447,6 +2491,8 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
+
+ mp = mbox->ctx_buf;
mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
@@ -2623,39 +2669,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
-/**
- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
- * mailbox command.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES supported pages mailbox command is issued to
- * retrieve the particular feature pages supported by the port.
- **/
-void
-lpfc_supported_pages(struct lpfcMboxq *mbox)
-{
- struct lpfc_mbx_supp_pages *supp_pages;
-
- memset(mbox, 0, sizeof(*mbox));
- supp_pages = &mbox->u.mqe.un.supp_pages;
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
- bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
-}
-
-/**
- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
- * retrieve the particular SLI4 features supported by the port.
- **/
-void
-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
-{
- struct lpfc_mbx_pc_sli4_params *sli4_params;
-
- memset(mbox, 0, sizeof(*mbox));
- sli4_params = &mbox->u.mqe.un.sli4_params;
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
- bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
-}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 7082279e4c01..89cbeba06aea 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -31,8 +31,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -41,14 +39,14 @@
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
-#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
+#define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
+#define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
@@ -71,6 +69,7 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
/**
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
+ * @align: alignment requirement for blocks; must be a power of two
*
* Description: Creates and allocates PCI pools lpfc_mbuf_pool,
* lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
@@ -113,8 +112,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
pool->current_count++;
}
- phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
- sizeof(LPFC_MBOXQ_t));
+ phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
+ sizeof(LPFC_MBOXQ_t));
if (!phba->mbox_mem_pool)
goto fail_free_mbuf_pool;
@@ -125,7 +124,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
if (phba->sli_rev == LPFC_SLI_REV4) {
phba->rrq_pool =
- mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
sizeof(struct lpfc_node_rrq));
if (!phba->rrq_pool)
goto fail_free_nlp_mem_pool;
@@ -336,6 +335,22 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
phba->lpfc_cmd_rsp_buf_pool = NULL;
+ /* Free Congestion Data buffer */
+ if (phba->cgn_i) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(struct lpfc_cgn_info),
+ phba->cgn_i->virt, phba->cgn_i->phys);
+ kfree(phba->cgn_i);
+ phba->cgn_i = NULL;
+ }
+
+ /* Free RX Monitor */
+ if (phba->rx_monitor) {
+ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ }
+
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
@@ -590,8 +605,6 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
* Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
* pool along with a non-DMA-mapped container for it.
*
- * Notes: Not interrupt-safe. Must be called with no locks held.
- *
* Returns:
* pointer to HBQ on success
* NULL on failure
@@ -601,7 +614,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
struct rqb_dmabuf *dma_buf;
- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return NULL;
@@ -724,7 +737,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to HRQ %d: %x %x %x "
"DRQ %x %x\n",
@@ -734,6 +746,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
rqb_entry->hrq->entry_count,
rqb_entry->drq->host_index,
rqb_entry->drq->hba_index);
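+ /* Free only after the log above is done referencing rqb_entry. */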
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index a024e5a3918f..b86ff9fcdf0c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -32,8 +32,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -154,7 +152,7 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return 1;
bad_service_param:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0207 Device %x "
"(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
"invalid service parameters. Ignoring device.\n",
@@ -173,12 +171,11 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
void *ptr = NULL;
- IOCB_t *irsp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
- /* For lpfc_els_abort, context2 could be zero'ed to delay
+ /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
* freeing associated memory till after ABTS completes.
*/
if (pcmd) {
@@ -189,10 +186,16 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
}
} else {
- /* Force ulpStatus error since we are returning NULL ptr */
- if (!(irsp->ulpStatus)) {
- irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
- irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ /* Force ulp_status error since we are returning NULL ptr */
+ if (!(ulp_status)) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
+ IOSTAT_LOCAL_REJECT);
+ rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ } else {
+ rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
}
ptr = NULL;
}
@@ -249,9 +252,11 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
INIT_LIST_HEAD(&abort_list);
@@ -279,118 +284,52 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
-/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
* @phba: pointer to lpfc hba data structure.
- * @link_mbox: pointer to CONFIG_LINK mailbox object
+ * @login_mbox: pointer to REG_RPI mailbox object
*
- * This routine is only called if we are SLI3, direct connect pt2pt
- * mode and the remote NPort issues the PLOGI after link up.
+ * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
*/
static void
-lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
{
- LPFC_MBOXQ_t *login_mbox;
- MAILBOX_t *mb = &link_mbox->u.mb;
struct lpfc_iocbq *save_iocb;
struct lpfc_nodelist *ndlp;
+ MAILBOX_t *mb = &login_mbox->u.mb;
+
int rc;
- ndlp = link_mbox->ctx_ndlp;
- login_mbox = link_mbox->context3;
+ ndlp = login_mbox->ctx_ndlp;
save_iocb = login_mbox->context3;
- link_mbox->context3 = NULL;
- login_mbox->context3 = NULL;
- /* Check for CONFIG_LINK error */
- if (mb->mbxStatus) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
- mb->mbxStatus);
- mempool_free(login_mbox, phba->mbox_mem_pool);
- mempool_free(link_mbox, phba->mbox_mem_pool);
- kfree(save_iocb);
- return;
- }
-
- /* Now that CONFIG_LINK completed, and our SID is configured,
- * we can now proceed with sending the PLOGI ACC.
- */
- rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
- save_iocb, ndlp, login_mbox);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "4576 PLOGI ACC fails pt2pt discovery: %x\n",
- rc);
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ if (mb->mbxStatus == MBX_SUCCESS) {
+ /* Now that REG_RPI completed successfully,
+ * we can now proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, NULL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "4576 PLOGI ACC fails pt2pt discovery: "
+ "DID %x Data: %x\n", ndlp->nlp_DID, rc);
+ }
}
- mempool_free(link_mbox, phba->mbox_mem_pool);
+ /* Now process the REG_RPI cmpl */
+ lpfc_mbx_cmpl_reg_login(phba, login_mbox);
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
kfree(save_iocb);
}
-/**
- * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
- * @phba: Pointer to HBA context object.
- * @pmb: Pointer to mailbox object.
- *
- * This function provides the unreg rpi mailbox completion handler for a tgt.
- * The routine frees the memory resources associated with the completed
- * mailbox command and transmits the ELS ACC.
- *
- * This routine is only called if we are SLI4, acting in target
- * mode and the remote NPort issues the PLOGI after link up.
- **/
-static void
-lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
-{
- struct lpfc_vport *vport = pmb->vport;
- struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
- LPFC_MBOXQ_t *mbox = pmb->context3;
- struct lpfc_iocbq *piocb = NULL;
- int rc;
-
- if (mbox) {
- pmb->context3 = NULL;
- piocb = mbox->context3;
- mbox->context3 = NULL;
- }
-
- /*
- * Complete the unreg rpi mbx request, and update flags.
- * This will also restart any deferred events.
- */
- lpfc_nlp_get(ndlp);
- lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
-
- if (!piocb) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
- "4578 PLOGI ACC fail\n");
- if (mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
- }
-
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
- if (rc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
- "4579 PLOGI ACC fail %x\n", rc);
- if (mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- kfree(piocb);
-out:
- lpfc_nlp_put(ndlp);
-}
-
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
+ union lpfc_wqe128 *wqe;
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
@@ -399,16 +338,16 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- u16 rpi;
- int rc, defer_acc;
+ int rc;
+ u32 remote_did;
memset(&stat, 0, sizeof (struct ls_rjt));
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0140 PLOGI Reject: invalid nname\n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0140 PLOGI Reject: invalid pname\n");
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
@@ -416,8 +355,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0141 PLOGI Reject: invalid pname\n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0141 PLOGI Reject: invalid nname\n");
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
@@ -434,7 +373,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
- icmd = &cmdiocb->iocb;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ wqe = &cmdiocb->wqe;
+ else
+ icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -449,7 +392,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
- defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -466,7 +408,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
break;
- /* fall through */
+ fallthrough;
case NLP_STE_REG_LOGIN_ISSUE:
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
@@ -477,13 +419,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) {
+ /* Clear ndlp info, since a follow-up PRLI may have
+ * updated the ndlp information
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
return 1;
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0143 PLOGI recv'd from DID: x%x "
"WWPN changed: old %llx new %llx\n",
ndlp->nlp_DID,
@@ -491,6 +442,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(unsigned long long)
wwn_to_u64(sp->portName.u.wwn));
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
ndlp->nlp_prev_state = ndlp->nlp_state;
/* rport needs to be unregistered first */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -500,6 +456,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
login_mbox = NULL;
@@ -510,7 +467,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
- vport->fc_myDID = icmd->un.rcvels.parmRo;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ vport->fc_myDID = bf_get(els_rsp64_sid,
+ &cmdiocb->wqe.xmit_els_rsp);
+ } else {
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
+ }
+
+ /* If there is an outstanding FLOGI, abort it now.
+ * The remote NPort is not going to ACC our FLOGI
+ * if it's already issuing a PLOGI for pt2pt mode.
+ * This indicates our FLOGI was dropped; however, we
+ * must have ACCed the remote NPort's FLOGI to us
+ * to make it here.
+ */
+ if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
+ lpfc_els_abort_flogi(phba);
ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) {
@@ -528,27 +500,30 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
- /* Issue config_link / reg_vfi to account for updated TOV's */
-
+ /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
+ * to account for updated TOV's / parameters
+ */
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- defer_acc = 1;
link_mbox = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!link_mbox)
goto out;
lpfc_config_link(phba, link_mbox);
- link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
link_mbox->vport = vport;
+
+ /* The default completion handling for CONFIG_LINK
+ * does not require the ndlp so no reference is needed.
+ */
link_mbox->ctx_ndlp = ndlp;
- save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
- if (!save_iocb)
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(link_mbox, phba->mbox_mem_pool);
goto out;
- /* Save info from cmd IOCB used in rsp */
- memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
- sizeof(struct lpfc_iocbq));
+ }
}
lpfc_can_disctmo(vport);
@@ -567,56 +542,32 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!login_mbox)
goto out;
- /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
- if (phba->nvmet_support && !defer_acc) {
- link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!link_mbox)
- goto out;
-
- /* As unique identifiers such as iotag would be overwritten
- * with those from the cmdiocb, allocate separate temporary
- * storage for the copy.
- */
- save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
- if (!save_iocb)
- goto out;
-
- /* Unreg RPI is required for SLI4. */
- rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
- link_mbox->vport = vport;
- link_mbox->ctx_ndlp = ndlp;
- link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
-
- if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
- ndlp->nlp_flag |= NLP_UNREG_INP;
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+ if (!save_iocb)
+ goto out;
- /* Save info from cmd IOCB used in rsp */
- memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+ /* Save info from cmd IOCB to be used in rsp after all mboxes complete */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
- /* Delay sending ACC till unreg RPI completes. */
- defer_acc = 1;
- } else if (phba->sli_rev == LPFC_SLI_REV4)
+ /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_unreg_rpi(vport, ndlp);
- rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
+ * always be deferring the ACC.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
+ else
+ remote_did = icmd->un.rcvels.remoteID;
+ rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
if (rc)
goto out;
- /* ACC PLOGI rsp command needs to execute first,
- * queue this login_mbox command to be processed later.
- */
login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
- /*
- * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
- * command issued in lpfc_cmpl_els_acc().
- */
login_mbox->vport = vport;
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
- spin_unlock_irq(shost->host_lock);
/*
* If there is an outstanding PLOGI issued, abort it before
@@ -640,58 +591,72 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* no deferred ACC */
kfree(save_iocb);
- /* In order to preserve RPIs, we want to cleanup
- * the default RPI the firmware created to rcv
- * this ELS request. The only way to do this is
- * to register, then unregister the RPI.
+ /* This is an NPIV SLI4 instance that does not need to register
+ * a default RPI.
*/
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
- spin_unlock_irq(shost->host_lock);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+ MBOX_THD_UNLOCKED);
+ login_mbox = NULL;
+ } else {
+ /* In order to preserve RPIs, we want to cleanup
+ * the default RPI the firmware created to rcv
+ * this ELS request. The only way to do this is
+ * to register, then unregister the RPI.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+ NLP_RCV_PLOGI);
+ spin_unlock_irq(&ndlp->lock);
+ }
+
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, login_mbox);
- if (rc)
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ ndlp, login_mbox);
+ if (rc && login_mbox)
+ lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+ MBOX_THD_UNLOCKED);
return 1;
}
- if (defer_acc) {
- /* So the order here should be:
- * SLI3 pt2pt
- * Issue CONFIG_LINK mbox
- * CONFIG_LINK cmpl
- * SLI4 tgt
- * Issue UNREG RPI mbx
- * UNREG RPI cmpl
- * Issue PLOGI ACC
- * PLOGI ACC cmpl
- * Issue REG_LOGIN mbox
- */
- /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
- link_mbox->context3 = login_mbox;
- login_mbox->context3 = save_iocb;
+ /* So the order here should be:
+ * SLI3 pt2pt
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * SLI4 pt2pt
+ * Issue REG_VFI mbox
+ * REG_VFI cmpl
+ * SLI4
+ * Issue UNREG RPI mbx
+ * UNREG RPI cmpl
+ * Issue REG_RPI mbox
+ * REG RPI cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ */
+ login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
+ login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!login_mbox->ctx_ndlp)
+ goto out;
- /* Start the ball rolling by issuing CONFIG_LINK here */
- rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
- goto out;
- return 1;
+ login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ spin_unlock_irq(&ndlp->lock);
+
+ /* Start the ball rolling by issuing REG_LOGIN here */
+ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_nlp_put(ndlp);
+ goto out;
}
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
- if (rc)
- mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
- if (defer_acc)
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "4577 discovery failure: %p %p %p\n",
- save_iocb, link_mbox, login_mbox);
kfree(save_iocb);
- if (link_mbox)
- mempool_free(link_mbox, phba->mbox_mem_pool);
if (login_mbox)
mempool_free(login_mbox, phba->mbox_mem_pool);
@@ -728,6 +693,10 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
ndlp, NULL);
}
+
+ /* This nlp_put pairs with lpfc_sli4_resume_rpi */
+ lpfc_nlp_put(ndlp);
+
kfree(elsiocb);
mempool_free(mboxq, phba->mbox_mem_pool);
}
@@ -736,18 +705,17 @@ static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
struct lpfc_name *pnn, *ppn;
struct ls_rjt stat;
ADISC *ap;
- IOCB_t *icmd;
uint32_t *lp;
uint32_t cmd;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
cmd = *lp++;
@@ -761,8 +729,8 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ppn = (struct lpfc_name *) & sp->portName;
}
- icmd = &cmdiocb->iocb;
- if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+ if (get_job_ulpstatus(phba, cmdiocb) == 0 &&
+ lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
/*
* As soon as we send ACC, the remote NPort can
@@ -773,7 +741,6 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
GFP_KERNEL);
if (elsiocb) {
-
/* Save info from cmd IOCB used in rsp */
memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
sizeof(struct lpfc_iocbq));
@@ -794,11 +761,21 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, NULL);
}
out:
- /* If we are authenticated, move to the proper state */
- if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ /* If we are authenticated, move to the proper state.
+ * It is possible an ADISC arrived and the remote nport
+ * is already in MAPPED or UNMAPPED state. Catch this
+ * condition and don't set the nlp_state again because
+ * it causes an unnecessary transport unregister/register.
+ *
+ * Nodes marked for ADISC will move to MAPPED or UNMAPPED state
+ * after issuing ADISC.
+ */
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
+ if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
+ !(ndlp->nlp_flag & NLP_NPR_ADISC))
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_MAPPED_NODE);
+ }
return 1;
}
@@ -812,9 +789,9 @@ out:
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -834,15 +811,31 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
* PLOGIs during LOGO storms from a device.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (els_cmd == ELS_CMD_PRLO)
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ /* This clause allows the initiator to ACC the LOGO back to the
+ * Fabric Domain Controller. It deliberately skips all other
+ * steps because some fabrics send RDP requests after logging out
+ * from the initiator.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
+ return 0;
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
if (ndlp->nlp_DID == Fabric_DID) {
- if (vport->port_state <= LPFC_FDISC)
+ if (vport->port_state <= LPFC_FDISC ||
+ vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
@@ -875,9 +868,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
@@ -888,24 +881,32 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
- !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+ (ndlp->nlp_type & NLP_NVME_TARGET) ||
+ (vport->fc_flag & FC_PT2PT))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
- /* Only try to re-login if this is NOT a Fabric Node */
+ /* Only try to re-login if this is NOT a Fabric Node
+ * AND the remote NPORT is a FCP/NVME Target or we
+ * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
+ * case for LOGO as a response to ADISC behavior.
+ */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
out:
+ /* Unregister from backend, could have been skipped due to ADISC */
+ lpfc_nlp_unreg_node(vport, ndlp);
+
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* The driver has to wait until the ACC completes before it continues
* processing the LOGO. The action will resume in
* lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -923,7 +924,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
uint32_t *payload;
uint32_t cmd;
- payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ payload = cmdiocb->cmd_dmabuf->virt;
cmd = *payload;
if (vport->phba->nvmet_support) {
/* Must be a NVME PRLI */
@@ -960,9 +961,9 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct fc_rport *rport = ndlp->rport;
u32 roles;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+ pcmd = cmdiocb->cmd_dmabuf;
+ lp = (uint32_t *)pcmd->virt;
+ npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
@@ -998,7 +999,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
- if (npr->prliType == PRLI_FCP_TYPE)
+
+ /* Fabric Controllers send FCP PRLI as an initiator but should
+ * not get recognized as FCP type and registered with transport.
+ */
+ if (npr->prliType == PRLI_FCP_TYPE &&
+ !(ndlp->nlp_type & NLP_FABRIC))
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
@@ -1021,12 +1027,10 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 0;
}
@@ -1035,16 +1039,16 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
(ndlp->nlp_type & NLP_FCP_TARGET)))) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 1;
}
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
@@ -1053,6 +1057,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
* @vport: Pointer to lpfc_vport structure.
+ * @ndlp: Pointer to lpfc_nodelist structure.
* @rpi : rpi to be released.
*
* This function will send a unreg_login mailbox command to the firmware
@@ -1082,13 +1087,17 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!pmb)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "2796 mailbox memory allocation failed \n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2796 mailbox memory allocation failed \n");
else {
lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
pmb->vport = vport;
- pmb->ctx_ndlp = ndlp;
+ pmb->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!pmb->ctx_ndlp) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
@@ -1100,8 +1109,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_nlp_put(ndlp);
mempool_free(pmb, phba->mbox_mem_pool);
+ }
}
}
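
The hunk above takes a node reference before issuing the mailbox and drops it when the issue fails; on success the completion handler drops it instead. A standalone sketch of that get-unless-zero / put-on-failure rule, with hypothetical names:

#include <stdatomic.h>
#include <stdlib.h>

struct node { atomic_int refs; };

static struct node *node_get(struct node *n)	/* ~ lpfc_nlp_get */
{
	int r = atomic_load(&n->refs);

	/* get-unless-zero: never resurrect a node already going away */
	do {
		if (r == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&n->refs, &r, r + 1));
	return n;
}

static void node_put(struct node *n)		/* ~ lpfc_nlp_put */
{
	if (atomic_fetch_sub(&n->refs, 1) == 1)
		free(n);
}

static int issue_async(struct node *n)
{
	(void)n;	/* stub: real code queues work whose completion
			 * handler calls node_put() */
	return 0;
}

static int start_cmd(struct node *n)
{
	if (!node_get(n))
		return -1;		/* node already being torn down */
	if (issue_async(n) != 0) {
		node_put(n);		/* failure: completion never runs */
		return -1;
	}
	return 0;
}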
@@ -1121,7 +1132,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, ndlp, rpi);
}
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0271 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
@@ -1139,11 +1150,11 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* to stop it.
*/
if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0272 Illegal State Transition: node x%x "
- "event x%x, state x%x Data: x%x x%x\n",
- ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
- ndlp->nlp_flag);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0272 Illegal State Transition: node x%x "
+ "event x%x, state x%x Data: x%x x%x\n",
+ ndlp->nlp_DID, evt, ndlp->nlp_state,
+ ndlp->nlp_rpi, ndlp->nlp_flag);
}
return ndlp->nlp_state;
}
@@ -1176,12 +1187,11 @@ static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
@@ -1216,7 +1226,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
struct serv_parm *sp = (struct serv_parm *) (lp + 1);
struct ls_rjt stat;
@@ -1242,9 +1252,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(vport->num_disc_nodes)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
@@ -1294,7 +1304,6 @@ static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1309,9 +1318,9 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Put ndlp in npr state, set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -1326,31 +1335,31 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *mp;
+ struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
uint32_t vid, flag;
- IOCB_t *irsp;
struct serv_parm *sp;
uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
int rc;
+ u32 ulp_status;
+ u32 did;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
/* Recovery from PLOGI collision logic */
return ndlp->nlp_state;
}
- irsp = &rspiocb->iocb;
-
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -1363,7 +1372,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
if ((ndlp->nlp_DID != FDMI_DID) &&
(wwn_to_u64(sp->portName.u.wwn) == 0 ||
wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0142 PLOGI RSP: Invalid WWN.\n");
goto out;
}
@@ -1425,7 +1434,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
} else {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0133 PLOGI: no memory "
"for config_link "
"Data: x%x x%x x%x x%x\n",
@@ -1450,7 +1460,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0018 PLOGI: no memory for reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
@@ -1458,11 +1468,15 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
goto out;
}
- if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (lpfc_reg_rpi(phba, vport->vpi, did,
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
+ /* Fabric Controller Node needs these parameters. */
+ memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm));
break;
case FDMI_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
@@ -1471,7 +1485,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
+
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto out;
+
mbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED) {
@@ -1485,12 +1503,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
* command
*/
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(mbox, phba->mbox_mem_pool);
-
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
@@ -1498,7 +1512,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
} else {
mempool_free(mbox, phba->mbox_mem_pool);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0135 PLOGI: cannot format reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
@@ -1509,7 +1523,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
out:
if (ndlp->nlp_DID == NameServer_DID) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0261 Cannot Register NameServer login\n");
}
@@ -1520,9 +1534,6 @@ out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DEFER_RM;
- spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
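
Note the shape the completion handlers take from here on: rather than
reaching into the SLI-3 IOCB_t directly (irsp->ulpStatus,
irsp->un.elsreq64.remoteID), status and DID come from accessors that
work for both SLI-3 IOCBs and SLI-4 WQEs. A condensed sketch of the new
skeleton; the function name is illustrative, the two accessor calls are
the ones introduced above:

	static u32 sketch_els_cmpl(struct lpfc_hba *phba,
				   struct lpfc_iocbq *cmdiocb)
	{
		struct lpfc_iocbq *rspiocb = cmdiocb->rsp_iocb;
		u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

		if (ulp_status)		/* non-zero status is a failure */
			return ulp_status;

		/* the DID likewise comes from an accessor now */
		return get_job_els_rsp64_did(phba, cmdiocb);
	}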
@@ -1556,12 +1567,10 @@ static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding PLOGI */
@@ -1578,7 +1587,6 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -1592,9 +1600,9 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
@@ -1603,7 +1611,6 @@ static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
@@ -1614,9 +1621,9 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (vport->num_disc_nodes)
lpfc_more_adisc(vport);
}
@@ -1687,32 +1694,29 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
ADISC *ap;
int rc;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- irsp = &rspiocb->iocb;
- if ((irsp->ulpStatus) ||
+ if ((ulp_status) ||
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
- memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
-
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
lpfc_unreg_rpi(vport, ndlp);
@@ -1728,7 +1732,13 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
}
}
- if (ndlp->nlp_type & NLP_FCP_TARGET) {
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
} else {
@@ -1743,12 +1753,10 @@ static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding ADISC */
@@ -1765,7 +1773,6 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -1779,9 +1786,9 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -1847,7 +1854,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1867,16 +1873,11 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
phba->sli.mboxq_cnt--;
- mempool_free(mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -1884,7 +1885,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* software abort if any GID_FT is outstanding */
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+ if (ns_ndlp)
lpfc_els_abort(phba, ns_ndlp);
}
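
Two hunks in this section replace the open-coded mailbox teardown (free
the ctx_buf mbuf, then mempool_free() the mailbox) with
lpfc_mbox_rsrc_cleanup(). The helper takes a locking hint so a caller
already inside a hbalock critical section does not retake the lock;
both flavors appear above:

	/* caller does not hold phba->hbalock */
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);

	/* caller holds phba->hbalock, as in the mboxq walk above */
	lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);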
@@ -1923,7 +1924,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->u.mb;
@@ -1931,8 +1931,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
"x%x\n",
did, mb->mbxStatus, vport->port_state,
mb->un.varRegLogin.vpi,
@@ -1950,14 +1950,12 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state, set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_issue_els_logo(vport, ndlp, 0);
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -1984,8 +1982,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
@@ -2022,6 +2021,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* must complete PRLI.
*/
if (ndlp->nlp_type & NLP_FABRIC) {
+ ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
@@ -2035,12 +2035,10 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
lpfc_drop_node(vport, ndlp);
@@ -2054,8 +2052,6 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
@@ -2064,7 +2060,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
/* If we are a target we won't immediately transition into PRLI,
* so if REG_LOGIN already completed we don't need to ignore it.
@@ -2074,7 +2070,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2099,6 +2095,7 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
return ndlp->nlp_state;
+ lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -2145,16 +2142,17 @@ static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *irsp;
PRLI *npr;
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
* format is different so NULL the two PRLI types so that the
@@ -2163,13 +2161,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
+ if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr;
- else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
+ else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if ((vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
goto out;
@@ -2267,9 +2264,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TARGET_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
@@ -2321,12 +2318,10 @@ static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding PLOGI */
@@ -2360,7 +2355,6 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -2374,9 +2368,9 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2413,12 +2407,11 @@ static uint32_t
lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
@@ -2455,13 +2448,11 @@ static uint32_t
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2550,19 +2541,27 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
static uint32_t
+lpfc_device_rm_unmap_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
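
The new lpfc_device_rm_unmap_node() exists so that a DEVICE_RM event
delivered in the UNMAPPED state actually drops the node instead of
landing in lpfc_disc_illegal(); the state-table hunk near the end of
this file wires it in. The changed dispatch row, before and after:

	/* NLP_STE_UNMAPPED_NODE row of lpfc_disc_action[] */
	lpfc_disc_illegal,		/* DEVICE_RM, before: log and ignore */
	lpfc_device_rm_unmap_node,	/* DEVICE_RM, after: drop the node */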
@@ -2615,12 +2614,10 @@ static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
- lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
@@ -2633,15 +2630,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ lpfc_disc_set_adisc(vport, ndlp);
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
- lpfc_disc_set_adisc(vport, ndlp);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
@@ -2649,7 +2645,6 @@ static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Ignore PLOGI if we have an outstanding LOGO */
@@ -2657,9 +2652,9 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
/* send PLOGI immediately, move to PLOGI issue state */
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
@@ -2675,7 +2670,6 @@ static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
@@ -2685,14 +2679,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- spin_unlock_irq(shost->host_lock);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- } else {
+ /*
+ * ADISC nodes will be handled in the regular discovery path after
+ * receiving a response from the NS.
+ *
+ * For other nodes, send PLOGI to trigger an implicit LOGO.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
@@ -2725,12 +2718,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
- if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(vport, ndlp, 0);
- } else {
+ /*
+ * ADISC nodes will be handled in the regular discovery path after
+ * receiving a response from the NS.
+ *
+ * For other nodes, send PLOGI to trigger an implicit LOGO.
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
@@ -2743,27 +2737,26 @@ static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
} else {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
return ndlp->nlp_state;
}
@@ -2772,20 +2765,18 @@ static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DEFER_RM;
- spin_unlock_irq(shost->host_lock);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status)
return NLP_STE_FREED_NODE;
- }
+
return ndlp->nlp_state;
}
@@ -2793,14 +2784,16 @@ static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -2827,14 +2820,16 @@ static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -2870,12 +2865,10 @@ static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
lpfc_drop_node(vport, ndlp);
@@ -2886,8 +2879,6 @@ static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
@@ -2895,10 +2886,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
@@ -3057,7 +3048,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* CMPL_LOGO */
lpfc_disc_illegal, /* CMPL_ADISC */
lpfc_disc_illegal, /* CMPL_REG_LOGIN */
- lpfc_disc_illegal, /* DEVICE_RM */
+ lpfc_device_rm_unmap_node, /* DEVICE_RM */
lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index f6c8963c915d..152245f7cacc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -36,9 +36,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme.h>
-#include <linux/nvme-fc-driver.h>
-#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -65,180 +62,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
static struct nvme_fc_port_template lpfc_nvme_template;
-static union lpfc_wqe128 lpfc_iread_cmd_template;
-static union lpfc_wqe128 lpfc_iwrite_cmd_template;
-static union lpfc_wqe128 lpfc_icmnd_cmd_template;
-
-/* Setup WQE templates for NVME IOs */
-void
-lpfc_nvme_cmd_template(void)
-{
- union lpfc_wqe128 *wqe;
-
- /* IREAD template */
- wqe = &lpfc_iread_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - cmd_buff_len, payload_offset_len is zero */
-
- /* Word 4 - total_xfer_len is variable */
-
- /* Word 5 - is zero */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
- bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
-
- /* Word 11 - pbde is variable */
- bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
- bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
-
- /* Word 12 - is zero */
-
- /* Word 13, 14, 15 - PBDE is variable */
-
- /* IWRITE template */
- wqe = &lpfc_iwrite_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - cmd_buff_len, payload_offset_len is zero */
-
- /* Word 4 - total_xfer_len is variable */
-
- /* Word 5 - initial_xfer_len is variable */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
- bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
-
- /* Word 11 - pbde is variable */
- bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
- bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
-
- /* Word 12 - is zero */
-
- /* Word 13, 14, 15 - PBDE is variable */
-
- /* ICMND template */
- wqe = &lpfc_icmnd_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - payload_offset_len is variable */
-
- /* Word 4, 5 - is zero */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
-
- /* Word 11 */
- bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
- bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 12, 13, 14, 15 - is zero */
-}
-
-/**
- * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-void
-lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the IO associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
/**
* lpfc_nvme_create_queue -
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
+ * @pnvme_lport: Transport localport that LS is to be issued from
* @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
+ * @qsize: Size of the queue in bytes
* @handle: An opaque driver handle used in follow-up calls.
*
* Driver registers this routine to preallocate and initialize any
@@ -265,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+
+ if (!vport || vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
if (qhandle == NULL)
return -ENOMEM;
@@ -295,7 +128,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
/**
* lpfc_nvme_delete_queue -
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
+ * @pnvme_lport: Transport localport that LS is to be issued from
* @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
* @handle: An opaque driver handle from lpfc_nvme_create_queue
*
@@ -358,108 +191,205 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
struct lpfc_nvme_rport *rport = remoteport->private;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
+ u32 fc4_xpt_flags;
ndlp = rport->ndlp;
- if (!ndlp)
+ if (!ndlp) {
+ pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
+ __func__, rport, remoteport);
goto rport_err;
+ }
vport = ndlp->vport;
- if (!vport)
+ if (!vport) {
+ pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
+ __func__, ndlp, ndlp->nlp_state, rport);
goto rport_err;
+ }
+
+ fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;
/* Remove this rport from the lport's list - memory is owned by the
* transport. Remove the ndlp reference for the NVME transport before
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport x%px\n",
- remoteport);
- spin_lock_irq(&vport->phba->hbalock);
+ "6146 remoteport delete of remoteport x%px, ndlp x%px "
+ "DID x%x xflags x%x\n",
+ remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
+ spin_lock_irq(&ndlp->lock);
/* The register rebind might have occurred before the delete
* downcall. Guard against this race.
*/
- if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
- ndlp->nrport = NULL;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- }
- spin_unlock_irq(&vport->phba->hbalock);
+ if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
+ ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);
+
+ spin_unlock_irq(&ndlp->lock);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
+ /* On a devloss timeout event, one more put is executed provided the
+ * NVME and SCSI rport unregister requests are complete. If the vport
+ * is unloading, this extra put is executed by lpfc_drop_node.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
rport_err:
return;
}
-static void
-lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+/**
+ * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
+ * @phba: pointer to lpfc hba data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
+ *
+ * This routine is used for processing an asynchronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvme-fc transport via nvme_fc_rcv_ls_req().
+ *
+ * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
+ * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
+ * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg)
{
- struct lpfc_vport *vport = cmdwqe->vport;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_vport *vport;
+ struct lpfc_nvme_rport *lpfc_rport;
+ struct nvme_fc_remote_port *remoteport;
struct lpfc_nvme_lport *lport;
- uint32_t status;
+ uint32_t *payload = axchg->payload;
+ int rc;
+
+ vport = axchg->ndlp->vport;
+ lpfc_rport = axchg->ndlp->nrport;
+ if (!lpfc_rport)
+ return -EINVAL;
+
+ remoteport = lpfc_rport->remoteport;
+ if (!vport->localport ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -EINVAL;
+
+ lport = vport->localport->private;
+ if (!lport)
+ return -EINVAL;
+
+ rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
+ axchg->size);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
+ "%08x %08x %08x\n",
+ axchg->size, rc,
+ *payload, *(payload+1), *(payload+2),
+ *(payload+3), *(payload+4), *(payload+5));
+
+ if (!rc)
+ return 0;
+#endif
+ return 1;
+}
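
The 0/1 return contract above decides exchange ownership: on 0 the
nvme-fc transport owns the LS and the exchange is freed through its
done() path; on 1 the caller still owns the exchange and must dispose of
it. A sketch of a receive-path caller under that reading (the wrapper
name and the empty branches are hypothetical, not part of this patch):

	static void sketch_unsol_ls_rcv(struct lpfc_hba *phba,
					struct lpfc_async_xchg_ctx *axchg)
	{
		if (lpfc_nvme_handle_lsreq(phba, axchg)) {
			/* not delivered: abort and free the exchange
			 * context here, it is still ours */
		}
		/* delivered: the transport's done() callback frees
		 * axchg via the xmt_ls_rsp completion */
	}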
+
+/**
+ * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
+ * LS request.
+ * @phba: Pointer to HBA context object
+ * @vport: The local port that issued the LS
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * This function is the generic completion handler for NVME LS requests.
+ * The function updates any states and statistics, calls the transport
+ * ls_req done() routine, then tears down the command and buffers used
+ * for the LS request.
+ **/
+void
+__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp;
+ uint32_t status;
- pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
- status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
+ ndlp = cmdwqe->ndlp;
+ buf_ptr = cmdwqe->bpl_dmabuf;
- if (vport->localport) {
- lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (lport) {
- atomic_inc(&lport->fc4NvmeLsCmpls);
- if (status) {
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- atomic_inc(&lport->cmpl_ls_xb);
- atomic_inc(&lport->cmpl_ls_err);
- }
- }
- }
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
- ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6047 nvme cmpl Enter "
- "Data %px DID %x Xri: %x status %x reason x%x "
- "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
+ "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
+ "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
+ "ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
- cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+ cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
+ ndlp);
- lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
+ lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
cmdwqe->sli4_xritag, status, wcqe->parameter);
- if (cmdwqe->context3) {
- buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+ if (buf_ptr) {
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- cmdwqe->context3 = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
}
if (pnvme_lsreq->done)
pnvme_lsreq->done(pnvme_lsreq, status);
else
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6046 nvme cmpl without done call back? "
- "Data %px DID %x Xri: %x status %x\n",
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6046 NVMEx cmpl without done call back? "
+ "Data x%px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
if (ndlp) {
lpfc_nlp_put(ndlp);
- cmdwqe->context1 = NULL;
+ cmdwqe->ndlp = NULL;
}
lpfc_sli_release_iocbq(phba, cmdwqe);
}
+static void
+lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe)
+{
+ struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
+ uint32_t status;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+
+ if (vport->localport) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (lport) {
+ atomic_inc(&lport->fc4NvmeLsCmpls);
+ if (status) {
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+ }
+ }
+
+ __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
+}
+
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *),
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
@@ -479,12 +409,19 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
- genwqe->context3 = (uint8_t *)bmp;
- genwqe->iocb_flag |= LPFC_IO_NVME_LS;
+ genwqe->bpl_dmabuf = bmp;
+ genwqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Save for completion so we can release these resources */
- genwqe->context1 = lpfc_nlp_get(ndlp);
- genwqe->context2 = (uint8_t *)pnvme_lsreq;
+ genwqe->ndlp = lpfc_nlp_get(ndlp);
+ if (!genwqe->ndlp) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning: Failed node ref, not sending LS_REQ\n");
+ lpfc_sli_release_iocbq(phba, genwqe);
+ return 1;
+ }
+
+ genwqe->context_un.nvme_lsreq = pnvme_lsreq;
/* Fill in payload, bp points to frame payload */
if (!tmo)
@@ -504,7 +441,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
first_len = xmit_len;
}
- genwqe->rsvd2 = num_entry;
+ genwqe->num_bdes = num_entry;
genwqe->hba_wqidx = 0;
/* Words 0 - 2 */
@@ -531,7 +468,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
/* Word 7 */
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
@@ -555,15 +492,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "6050 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
- "xmit:%d 1st:%d\n",
- ndlp->nlp_DID, genwqe->iotag,
- vport->port_state,
- genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
- genwqe->wqe_cmpl = cmpl;
- genwqe->iocb_cmpl = NULL;
+ genwqe->cmd_cmpl = cmpl;
genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
genwqe->vport = vport;
genwqe->retry = retry;
@@ -573,105 +502,110 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
if (rc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6045 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x\n",
+ "Data: x%x x%x rc x%x\n",
ndlp->nlp_DID, genwqe->iotag,
- vport->port_state);
+ vport->port_state, rc);
+ lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
+ "6050 Issue GEN REQ WQE to NPORT x%x "
+ "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
+ "bmp:x%px xmit:%d 1st:%d\n",
+ ndlp->nlp_DID, genwqe->sli4_xritag,
+ vport->port_state,
+ genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
return 0;
}
+
/**
- * lpfc_nvme_ls_req - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
+ * @vport: The local port issuing the LS
+ * @ndlp: The remote port to send the LS to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
+ * @gen_req_cmp: Completion call-back
*
- * Driver registers this routine to handle any link service request
- * from the nvme_fc transport to a remote nvme-aware port.
+ * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
+ * WQE to perform the LS operation.
*
* Return value :
* 0 - Success
- * TODO: What are the failure codes.
+ * non-zero: various error codes, in form of -Exxx
**/
-static int
-lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
- struct nvme_fc_remote_port *pnvme_rport,
- struct nvmefc_ls_req *pnvme_lsreq)
+int
+__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct nvmefc_ls_req *pnvme_lsreq,
+ void (*gen_req_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe))
{
- int ret = 0;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport;
- struct lpfc_vport *vport;
- struct lpfc_nodelist *ndlp;
- struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp;
+ struct ulp_bde64 *bpl;
+ int ret;
uint16_t ntype, nstate;
- /* there are two dma buf in the request, actually there is one and
- * the second one is just the start address + cmd size.
- * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
- * in a lpfc_dmabuf struct. When freeing we just free the wrapper
- * because the nvem layer owns the data bufs.
- * We do not have to break these packets open, we don't care what is in
- * them. And we do not have to look at the resonse data, we only care
- * that we got a response. All of the caring is going to happen in the
- * nvme-fc layer.
- */
-
- lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
- if (unlikely(!lport) || unlikely(!rport))
- return -EINVAL;
-
- vport = lport->vport;
-
- if (vport->load_flag & FC_UNLOADING)
- return -ENODEV;
-
- /* Need the ndlp. It is stored in the driver's rport. */
- ndlp = rport->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6051 Remoteport x%px, rport has invalid ndlp. "
- "Failing LS Req\n", pnvme_rport);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
+ "LS Req\n",
+ ndlp);
return -ENODEV;
}
- /* The remote node has to be a mapped nvme target or an
- * unmapped nvme initiator or it's an error.
- */
ntype = ndlp->nlp_type;
nstate = ndlp->nlp_state;
if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
(ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6088 DID x%06x not ready for "
- "IO. State x%x, Type x%x\n",
- pnvme_rport->port_id,
- ndlp->nlp_state, ndlp->nlp_type);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6088 NVMEx LS REQ: Fail DID x%06x not "
+ "ready for IO. Type x%x, State x%x\n",
+ ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
+ if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
+ if (!vport->phba->sli4_hba.nvmels_wq)
+ return -ENOMEM;
+
+ /*
+ * There are two dma bufs in the request; actually there is one, and
+ * the second one is just the start address + cmd size.
+ * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
+ * in a lpfc_dmabuf struct. When freeing we just free the wrapper
+ * because the nvme layer owns the data bufs.
+ * We do not have to break these packets open, we don't care what is
+ * in them. And we do not have to look at the response data, we only
+ * care that we got a response. All of the caring is going to happen
+ * in the nvme-fc layer.
+ */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6044 Could not find node for DID %x\n",
- pnvme_rport->port_id);
- return 2;
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6044 NVMEx LS REQ: Could not alloc LS buf "
+ "for DID %x\n",
+ ndlp->nlp_DID);
+ return -ENOMEM;
}
- INIT_LIST_HEAD(&bmp->list);
+
bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
if (!bmp->virt) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6042 Could not find node for DID %x\n",
- pnvme_rport->port_id);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6042 NVMEx LS REQ: Could not alloc mbuf "
+ "for DID %x\n",
+ ndlp->nlp_DID);
kfree(bmp);
- return 3;
+ return -ENOMEM;
}
+
+ INIT_LIST_HEAD(&bmp->list);
+
bpl = (struct ulp_bde64 *)bmp->virt;
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
@@ -686,118 +620,202 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
- /* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6149 Issue LS Req to DID 0x%06x lport x%px, "
- "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
- "%pad %pad\n",
- ndlp->nlp_DID, pnvme_lport, pnvme_rport,
- pnvme_lsreq, pnvme_lsreq->rqstlen,
- pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
- &pnvme_lsreq->rspdma);
+ "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
+ "rqstlen:%d rsplen:%d %pad %pad\n",
+ ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
+ pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+ &pnvme_lsreq->rspdma);
- atomic_inc(&lport->fc4NvmeLsRequests);
-
- /* Hardcode the wait to 30 seconds. Connections are failing otherwise.
- * This code allows it all to work.
- */
ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
- pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
- ndlp, 2, 30, 0);
+ pnvme_lsreq, gen_req_cmp, ndlp, 2,
+ pnvme_lsreq->timeout, 0);
if (ret != WQE_SUCCESS) {
- atomic_inc(&lport->xmt_ls_err);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6052 EXIT. issue ls wqe failed lport x%px, "
- "rport x%px lsreq x%px Status %x DID %x\n",
- pnvme_lport, pnvme_rport, pnvme_lsreq,
- ret, ndlp->nlp_DID);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6052 NVMEx REQ: EXIT. issue ls wqe failed "
+ "lsreq x%px Status %x DID %x\n",
+ pnvme_lsreq, ret, ndlp->nlp_DID);
lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
kfree(bmp);
- return ret;
+ return -EIO;
}
- /* Stub in routine and return 0 for now. */
- return ret;
+ return 0;
}
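
The "two dma bufs" comment above describes a single BPL holding two
BDEs: entry 0 maps the LS request payload, entry 1 the buffer the
response will land in. The hunk boundary elides the middle of that
setup; reconstructed from the visible lines, the full list plausibly
reads:

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;		/* request entry */
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = 1;		/* response entry */
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);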
/**
- * lpfc_nvme_ls_abort - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * lpfc_nvme_ls_req - Issue an NVME Link Service request
+ * @pnvme_lport: Transport localport that LS is to be issued from.
+ * @pnvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
*
* Driver registers this routine to handle any link service request
* from the nvme_fc transport to a remote nvme-aware port.
*
* Return value :
* 0 - Success
- * TODO: What are the failure codes.
+ * non-zero: various error codes, in form of -Exxx
**/
-static void
-lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
- struct nvme_fc_remote_port *pnvme_rport,
- struct nvmefc_ls_req *pnvme_lsreq)
+static int
+lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ struct nvmefc_ls_req *pnvme_lsreq)
{
struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
struct lpfc_vport *vport;
- struct lpfc_hba *phba;
- struct lpfc_nodelist *ndlp;
- LIST_HEAD(abort_list);
- struct lpfc_sli_ring *pring;
- struct lpfc_iocbq *wqe, *next_wqe;
+ int ret;
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- if (unlikely(!lport))
- return;
+ rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport) || unlikely(!rport))
+ return -EINVAL;
+
vport = lport->vport;
- phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
- if (vport->load_flag & FC_UNLOADING)
- return;
+ atomic_inc(&lport->fc4NvmeLsRequests);
+
+ ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
+ lpfc_nvme_ls_req_cmp);
+ if (ret)
+ atomic_inc(&lport->xmt_ls_err);
+
+ return ret;
+}
+
+/**
+ * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
+ * NVME LS request
+ * @vport: The local port that issued the LS
+ * @ndlp: The remote port the LS was sent to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
+ *
+ * The driver validates the ndlp, looks for the LS, and aborts the
+ * LS if found.
+ *
+ * Returns:
+ * 0 : if LS found and aborted
+ * non-zero: various error conditions in form -Exxx
+ **/
+int
+__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *wqe, *next_wqe;
+ bool foundit = false;
- ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6049 Could not find node for DID %x\n",
- pnvme_rport->port_id);
- return;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
+ "x%06x, Failing LS Req\n",
+ ndlp, ndlp ? ndlp->nlp_DID : 0);
+ return -EINVAL;
}
- /* Expand print to include key fields. */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
- "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
- "rsplen:%d %pad %pad\n",
- pnvme_lport, pnvme_rport,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
+ "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
+ "x%px rqstlen:%d rsplen:%d %pad %pad\n",
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
/*
- * Lock the ELS ring txcmplq and build a local list of all ELS IOs
- * that need an ABTS. The IOs need to stay on the txcmplq so that
- * the abort operation completes them successfully.
+ * Lock the ELS ring txcmplq and look for the wqe that matches
+ * this ELS. If found, issue an abort on the wqe.
*/
pring = phba->sli4_hba.nvmels_wq->pring;
spin_lock_irq(&phba->hbalock);
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
- /* Add to abort_list on on NDLP match. */
- if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
- wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- list_add_tail(&wqe->dlist, &abort_list);
+ if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
+ wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
+ foundit = true;
+ break;
}
}
spin_unlock(&pring->ring_lock);
+
+ if (foundit)
+ lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
spin_unlock_irq(&phba->hbalock);
- /* Abort the targeted IOs and remove them from the abort list. */
- list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
- atomic_inc(&lport->xmt_ls_abort);
- spin_lock_irq(&phba->hbalock);
- list_del_init(&wqe->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, wqe);
- spin_unlock_irq(&phba->hbalock);
+ if (foundit)
+ return 0;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
+ "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
+ pnvme_lsreq);
+ return -EINVAL;
+}
+
+static int
+lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *ls_rsp)
+{
+ struct lpfc_async_xchg_ctx *axchg =
+ container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
+ struct lpfc_nvme_lport *lport;
+ int rc;
+
+ if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);
+
+ if (rc) {
+ /*
+ * If the response could not be sent, an abort is generated
+ * for the exchange, unless the failure was because the
+ * response had already been sent (-EALREADY).
+ */
+ if (rc != -EALREADY)
+ atomic_inc(&lport->xmt_ls_abort);
+ return rc;
}
+
+ return 0;
+}
+
+/**
+ * lpfc_nvme_ls_abort - Abort a prior NVME LS request
+ * @pnvme_lport: Transport localport that LS is to be issued from.
+ * @pnvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to abort an NVME LS request that is
+ * in progress (from the transport's perspective).
+ **/
+static void
+lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ int ret;
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport))
+ return;
+ vport = lport->vport;
+
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
+ ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+
+ ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
+ if (!ret)
+ atomic_inc(&lport->xmt_ls_abort);
}
/* Fix up the existing sgls for NVME IO. */
@@ -897,94 +915,9 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-static void
-lpfc_nvme_ktime(struct lpfc_hba *phba,
- struct lpfc_io_buf *lpfc_ncmd)
-{
- uint64_t seg1, seg2, seg3, seg4;
- uint64_t segsum;
-
- if (!lpfc_ncmd->ts_last_cmd ||
- !lpfc_ncmd->ts_cmd_start ||
- !lpfc_ncmd->ts_cmd_wqput ||
- !lpfc_ncmd->ts_isr_cmpl ||
- !lpfc_ncmd->ts_data_nvme)
- return;
-
- if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
- return;
- if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
- return;
- if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
- return;
- if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
- return;
- if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
- return;
- /*
- * Segment 1 - Time from Last FCP command cmpl is handed
- * off to NVME Layer to start of next command.
- * Segment 2 - Time from Driver receives a IO cmd start
- * from NVME Layer to WQ put is done on IO cmd.
- * Segment 3 - Time from Driver WQ put is done on IO cmd
- * to MSI-X ISR for IO cmpl.
- * Segment 4 - Time from MSI-X ISR for IO cmpl to when
- * cmpl is handled off to the NVME Layer.
- */
- seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
- if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
- seg1 = 0;
-
- /* Calculate times relative to start of IO */
- seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
- segsum = seg2;
- seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
- if (segsum > seg3)
- return;
- seg3 -= segsum;
- segsum += seg3;
-
- seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
- if (segsum > seg4)
- return;
- seg4 -= segsum;
-
- phba->ktime_data_samples++;
- phba->ktime_seg1_total += seg1;
- if (seg1 < phba->ktime_seg1_min)
- phba->ktime_seg1_min = seg1;
- else if (seg1 > phba->ktime_seg1_max)
- phba->ktime_seg1_max = seg1;
- phba->ktime_seg2_total += seg2;
- if (seg2 < phba->ktime_seg2_min)
- phba->ktime_seg2_min = seg2;
- else if (seg2 > phba->ktime_seg2_max)
- phba->ktime_seg2_max = seg2;
- phba->ktime_seg3_total += seg3;
- if (seg3 < phba->ktime_seg3_min)
- phba->ktime_seg3_min = seg3;
- else if (seg3 > phba->ktime_seg3_max)
- phba->ktime_seg3_max = seg3;
- phba->ktime_seg4_total += seg4;
- if (seg4 < phba->ktime_seg4_min)
- phba->ktime_seg4_min = seg4;
- else if (seg4 > phba->ktime_seg4_max)
- phba->ktime_seg4_max = seg4;
-
- lpfc_ncmd->ts_last_cmd = 0;
- lpfc_ncmd->ts_cmd_start = 0;
- lpfc_ncmd->ts_cmd_wqput = 0;
- lpfc_ncmd->ts_isr_cmpl = 0;
- lpfc_ncmd->ts_data_nvme = 0;
-}
-#endif
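
The four-segment breakdown described in the removed comment survives in the common lpfc_io_ktime() helper. For reference, a minimal stand-alone sketch of the same arithmetic (plain C; the struct and the timestamp values are hypothetical, only the segment definitions and the 5 ms sequential-IO cutoff mirror the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-IO timestamps in ns, mirroring ts_last_cmd,
 * ts_cmd_start, ts_cmd_wqput, ts_isr_cmpl and the (renamed)
 * ts_data_io fields. */
struct io_ts {
	uint64_t last_cmd, cmd_start, cmd_wqput, isr_cmpl, data_io;
};

/* Fills seg[0..3] and returns 0, or returns -1 when a timestamp is
 * missing or out of order; the driver bails out the same way. */
static int io_segments(const struct io_ts *t, uint64_t seg[4])
{
	if (!t->last_cmd || !t->cmd_start || !t->cmd_wqput ||
	    !t->isr_cmpl || !t->data_io)
		return -1;
	if (t->cmd_start < t->last_cmd || t->cmd_wqput < t->cmd_start ||
	    t->isr_cmpl < t->cmd_wqput || t->data_io < t->isr_cmpl)
		return -1;

	seg[0] = t->cmd_start - t->last_cmd;	/* last cmpl -> next cmd */
	if (seg[0] > 5000000)			/* >5 ms: not sequential */
		seg[0] = 0;
	seg[1] = t->cmd_wqput - t->cmd_start;	/* driver entry -> WQ put */
	seg[2] = t->isr_cmpl - t->cmd_wqput;	/* WQ put -> cmpl ISR */
	seg[3] = t->data_io - t->isr_cmpl;	/* ISR -> handed to NVME */
	return 0;
}

int main(void)
{
	struct io_ts t = { 100, 1100, 1600, 51600, 52600 };
	uint64_t seg[4];

	if (!io_segments(&t, seg))
		printf("seg1..seg4 = %llu %llu %llu %llu ns\n",
		       (unsigned long long)seg[0], (unsigned long long)seg[1],
		       (unsigned long long)seg[2], (unsigned long long)seg[3]);
	return 0;
}
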
-/**
- * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+/*
+ * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -995,11 +928,11 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
* TODO: What are the failure codes.
**/
static void
-lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_iocbq *pwqeOut)
{
- struct lpfc_io_buf *lpfc_ncmd =
- (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
@@ -1010,11 +943,16 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
+ uint32_t lat;
+ bool call_done = false;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int cpu;
+#endif
+ int offline = 0;
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_NODE | LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6071 Null lpfc_ncmd pointer. No "
"release, skip completion\n");
return;
@@ -1025,7 +963,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
"nvmeCmd x%px\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd);
@@ -1057,8 +995,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* transport is still transitioning.
*/
ndlp = lpfc_ncmd->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6062 Ignoring NVME cmpl. No ndlp\n");
goto out_err;
}
@@ -1126,16 +1064,38 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->transferred_length = wcqe->total_data_placed;
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
- /* Sanity check */
- if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
+
+ /* Get the NVME cmd details for this unique error. */
+ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+ ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
+ /* Check if this is really an ERSP */
+ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ lpfc_ncmd->result = 0;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6084 NVME FCP_ERR ERSP: "
+ "xri %x placed x%x opcode x%x cmd_id "
+ "x%x cqe_status x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
- "placed x%x\n",
+ "placed x%x opcode x%x cmd_id x%x, "
+ "cqe_status x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
- wcqe->total_data_placed);
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
case IOSTAT_LOCAL_REJECT:
/* Let fall through to set command final state. */
@@ -1148,7 +1108,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
bf_get(lpfc_wcqe_c_xb, wcqe));
- /* fall through */
+ fallthrough;
default:
out_err:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1162,11 +1122,12 @@ out_err:
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
nCmd->status = NVME_SC_INTERNAL;
+ offline = pci_channel_offline(vport->phba->pcidev);
}
}
/* pick up SLI4 exchange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
@@ -1178,23 +1139,19 @@ out_err:
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
- lpfc_ncmd->ts_data_nvme = ktime_get_ns();
- phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
- lpfc_nvme_ktime(phba, lpfc_ncmd);
+ lpfc_ncmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_ncmd);
}
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
- uint32_t cpu;
- idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- if (lpfc_ncmd->cpu != cpu)
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_NVME_IOERR,
- "6701 CPU Check cmpl: "
- "cpu %d expect %d\n",
- cpu, lpfc_ncmd->cpu);
- phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ if (lpfc_ncmd->cpu != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6701 CPU Check cmpl: "
+ "cpu %d expect %d\n",
+ cpu, lpfc_ncmd->cpu);
}
#endif
@@ -1206,10 +1163,21 @@ out_err:
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
lpfc_ncmd->nvmeCmd = NULL;
- spin_unlock(&lpfc_ncmd->buf_lock);
+ call_done = true;
+ }
+ spin_unlock(&lpfc_ncmd->buf_lock);
+
+ /* Check if IO qualified for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ nCmd->io_dir == NVMEFC_FCP_READ &&
+ nCmd->payload_length) {
+ /* Used when calculating average latency */
+ lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
+ lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
+ }
+
+ if (call_done)
nCmd->done(nCmd);
- } else
- spin_unlock(&lpfc_ncmd->buf_lock);
/* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
@@ -1218,11 +1186,10 @@ out_err:
/**
* lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
- * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @vport: pointer to a host virtual N_Port data structure
+ * @lpfc_ncmd: Pointer to lpfc scsi command
+ * @pnode: pointer to a node-list data structure
+ * @cstat: pointer to the control status structure
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -1240,13 +1207,11 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
- struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ struct nvme_common_command *sqe;
+ struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
- if (!NLP_CHK_NODE_ACT(pnode))
- return -EINVAL;
-
/*
* There are three possibilities here - use scatter-gather segment, use
* the single mapping, or neither.
@@ -1287,6 +1252,10 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 5 */
wqe->fcp_iread.rsrvd5 = 0;
+ /* For a CMF Managed port, iod must be zero'ed */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_IOD_NONE);
cstat->input_requests++;
}
} else {
@@ -1296,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++;
}
- if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+ sqe = &((struct nvme_fc_cmd_iu *)
+ nCmd->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_async_event)
+ bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+ }
+
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1318,8 +1293,24 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 9 */
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+ /* Word 10 */
+ bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);
+
/* Words 13 14 15 are for PBDE support */
+ /* add the VMID tags as per switch response */
+ if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
+ }
+ }
+
pwqeq->vport = vport;
return 0;
}
@@ -1327,11 +1318,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/**
* lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
- * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @vport: pointer to a host virtual N_Port data structure
+ * @lpfc_ncmd: Pointer to lpfc scsi command
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -1354,7 +1342,6 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
dma_addr_t physaddr = 0;
- uint32_t num_bde = 0;
uint32_t dma_len = 0;
uint32_t dma_offset = 0;
int nseg, i, j;
@@ -1377,7 +1364,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
@@ -1400,7 +1387,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
j = 2;
for (i = 0; i < nseg; i++) {
if (data_sg == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6059 dptr err %d, nseg %d\n",
i, nseg);
lpfc_ncmd->seg_cnt = 0;
@@ -1408,7 +1395,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
}
sgl->word2 = 0;
- if ((num_bde + 1) == nseg) {
+ if (nseg == 1) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
@@ -1446,8 +1433,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
if ((nseg - 1) == i)
bf_set(lpfc_sli4_sge_last, sgl, 1);
- physaddr = data_sg->dma_address;
- dma_len = data_sg->length;
+ physaddr = sg_dma_address(data_sg);
+ dma_len = sg_dma_len(data_sg);
sgl->addr_lo = cpu_to_le32(
putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(
@@ -1477,8 +1464,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
j++;
}
- if (phba->cfg_enable_pbde) {
- /* Use PBDE support for first SGL only, offset == 0 */
+
+ /* PBDE support for first data SGE only */
+ if (nseg == 1 && phba->cfg_enable_pbde) {
/* Words 13-15 */
bde = (struct ulp_bde64 *)
&wqe->words[13];
@@ -1488,10 +1476,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
le32_to_cpu(first_data_sgl->sge_len);
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
- /* wqe_pbde is 1 in template */
+
+ /* Word 11 - set PBDE bit */
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
+ /* Word 11 - PBDE bit disabled by default template */
}
} else {
@@ -1501,7 +1491,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
* and sg_cnt must be zero.
*/
if (nCmd->payload_length != 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6063 NVME DMA Prep Err: sg_cnt %d "
"payload_length x%x\n",
nCmd->sg_cnt, nCmd->payload_length);
@@ -1513,16 +1503,14 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
/**
* lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
* @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver.
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
- * data structure to the rport
- indicated in @lpfc_nvme_rport.
+ * data structure to the rport indicated in @lpfc_nvme_rport.
*
* Return value :
* 0 - Success
@@ -1547,8 +1535,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u8 *uuid = NULL;
+ int err;
+ enum dma_data_direction iodir;
#endif
/* Validate pointers. LLDD fault handling with transport does
@@ -1572,12 +1563,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (vport->load_flag & FC_UNLOADING) {
- ret = -ENODEV;
- goto out_fail;
- }
-
- if (unlikely(vport->load_flag & FC_UNLOADING)) {
+ if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1606,7 +1593,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* transport is still transitioning.
*/
ndlp = rport->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
"6053 Busy IO, ndlp not ready: rport x%px "
"ndlp x%px, DID x%06x\n",
@@ -1624,7 +1611,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x Flg x%x\n",
pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type,
- ndlp->upcall_flags);
+ ndlp->fc4_xpt_flags);
atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -EBUSY;
goto out_fail;
@@ -1642,6 +1629,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
expedite = 1;
}
+ /* Check if IO qualifies for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
+ pnvme_fcreq->payload_length) {
+ ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
+ if (ret) {
+ ret = -EBUSY;
+ goto out_fail;
+ }
+ /* Get start time for IO latency */
+ start = ktime_get_ns();
+ }
+
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
@@ -1656,7 +1656,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ndlp->cmd_qdepth);
atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
- goto out_fail;
+ goto out_fail1;
}
}
@@ -1676,7 +1676,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"idx %d DID %x\n",
lpfc_queue_info->index, ndlp->nlp_DID);
ret = -EBUSY;
- goto out_fail;
+ goto out_fail1;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
@@ -1686,6 +1686,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ts_cmd_start = 0;
}
#endif
+ lpfc_ncmd->rx_cmd_start = start;
/*
* Store the data needed by the driver to issue, abort, and complete
@@ -1698,6 +1699,33 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->qidx = lpfc_queue_info->qidx;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* If the I/O was generated by a VM, get its virtual entity id */
+ uuid = nvme_fc_io_getuuid(pnvme_fcreq);
+
+ if (uuid) {
+ if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
+ iodir = DMA_TO_DEVICE;
+ else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
+ iodir = DMA_FROM_DEVICE;
+ else
+ iodir = DMA_NONE;
+
+ err = lpfc_vmid_get_appid(vport, uuid, iodir,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_ncmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
+ }
+ }
+#endif
+
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identifier was created in our hardware queue create callback
@@ -1743,19 +1771,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
- if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- lpfc_ncmd->cpu = cpu;
- if (idx != cpu)
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_NVME_IOERR,
- "6702 CPU Check cmd: "
- "cpu %d wq %d\n",
- lpfc_ncmd->cpu,
- lpfc_queue_info->index);
- phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ lpfc_ncmd->cpu = cpu;
+ if (idx != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6702 CPU Check cmd: "
+ "cpu %d wq %d\n",
+ lpfc_ncmd->cpu,
+ lpfc_queue_info->index);
}
#endif
return 0;
@@ -1769,6 +1795,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
} else
cstat->control_requests--;
lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ out_fail1:
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
+ pnvme_fcreq->payload_length, NULL);
out_fail:
return ret;
}
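
Note the invariant the new out_fail1 label preserves: a READ charged against the congestion budget by lpfc_update_cmf_cmd() is credited back by lpfc_update_cmf_cmpl(), either at completion (with the measured latency) or on the failure path (with LPFC_CGN_NOT_SENT). A toy stand-alone sketch of that charge/credit pairing; the two helpers are hypothetical stand-ins, not driver API:

#include <stdio.h>

static long cmf_outstanding_bytes;	/* toy congestion budget */

/* Submit-side charge; a real implementation could refuse with -EBUSY. */
static int toy_cmf_cmd(long len)
{
	cmf_outstanding_bytes += len;
	return 0;
}

/* Credit back on completion or on a failed submit. */
static void toy_cmf_cmpl(long len)
{
	cmf_outstanding_bytes -= len;
}

int main(void)
{
	long len = 4096;

	if (!toy_cmf_cmd(len)) {
		/* ... IO issued; at completion (or submit failure): */
		toy_cmf_cmpl(len);
	}
	printf("outstanding bytes: %ld\n", cmf_outstanding_bytes);
	return 0;
}
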
@@ -1786,15 +1815,16 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
**/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
+ bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
+ get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
bf_get(lpfc_wcqe_c_status, abts_cmpl),
bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
@@ -1803,11 +1833,10 @@ lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
* @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver.
*
* Driver registers this routine as its nvme request io abort handler. This
* routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
@@ -1828,11 +1857,11 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_io_buf *lpfc_nbuf;
- struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
+ struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1871,7 +1900,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6139 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x\n",
phba->hba_flag);
@@ -1881,13 +1910,13 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
@@ -1905,7 +1934,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
* has not seen it yet.
*/
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6143 NVME req mismatch: "
"lpfc_nbuf x%px nvmeCmd x%px, "
"pnvme_fcreq x%px. Skipping Abort xri x%x\n",
@@ -1915,8 +1944,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
}
/* Don't abort IOs no longer on the pending queue. */
- if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
@@ -1929,52 +1958,43 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
- if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
- "lpfc_ncmd %px xri x%x\n",
+ "lpfc_ncmd x%px xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
goto out_unlock;
}
- abts_buf = __lpfc_sli_get_iocbq(phba);
- if (!abts_buf) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq x%px xri x%x\n",
- pnvme_fcreq, nvmereq_wqe->sli4_xritag);
- goto out_unlock;
- }
-
- /* Ready - mark outstanding as aborted by driver. */
- nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
+ lpfc_nvme_abort_fcreq_cmpl);
- lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abts_buf->iocb_flag |= LPFC_IO_NVME;
- abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
- abts_buf->vport = vport;
- abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
- ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
- if (ret_val) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
+ if (ret_val != WQE_SUCCESS) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
- lpfc_sli_release_iocbq(phba, abts_buf);
return;
}
+ /*
+ * Get Command Id from cmd to plug into response. This
+ * code is not needed in the next NVME Transport drop.
+ */
+ cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x on reqtag x%x\n",
- nvmereq_wqe->sli4_xritag,
- abts_buf->iotag);
+ "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
+ nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
+ cp->sqe.common.command_id);
return;
out_unlock:
@@ -1985,8 +2005,6 @@ out_unlock:
/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
- .module = THIS_MODULE,
-
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
@@ -1996,6 +2014,7 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.fcp_io = lpfc_nvme_fcp_io_submit,
.ls_abort = lpfc_nvme_ls_abort,
.fcp_abort = lpfc_nvme_fcp_abort,
+ .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
@@ -2011,9 +2030,8 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
-/**
+/*
* lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA
- * @phba: The HBA for which this call is being executed.
*
* This routine removes an nvme buffer from the head of @hdwq io_buf_list
* and returns it to the caller.
@@ -2041,8 +2059,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- pwqeq->iocb_flag = LPFC_IO_NVME;
- pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+ pwqeq->cmd_flag = LPFC_IO_NVME;
+ pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
@@ -2113,7 +2131,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
/**
* lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
- * @pvport - the lpfc_vport instance requesting a localport.
+ * @vport: the lpfc_vport instance requesting a localport.
*
* This routine is invoked to create an nvme localport instance to bind
* to the nvme_fc_transport. It is called once during driver load
@@ -2191,6 +2209,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->cmpl_fcp_err, 0);
atomic_set(&lport->cmpl_ls_xb, 0);
atomic_set(&lport->cmpl_ls_err, 0);
+
atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0);
}
@@ -2218,6 +2237,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
int ret, i, pending = 0;
struct lpfc_sli_ring *pring;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli4_hdw_queue *qp;
+ int abts_scsi, abts_nvme;
/* Host transport has to clean up and confirm, requiring an indefinite
* wait. Print a message if a 10 second wait expires and renew the
@@ -2228,17 +2249,31 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
pending = 0;
+ abts_scsi = 0;
+ abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].io_wq->pring;
+ qp = &phba->sli4_hba.hdwq[i];
+ if (!vport->localport || !qp || !qp->io_wq)
+ return;
+
+ pring = qp->io_wq->pring;
if (!pring)
continue;
- if (pring->txcmplq_cnt)
- pending += pring->txcmplq_cnt;
+ pending += pring->txcmplq_cnt;
+ abts_scsi += qp->abts_scsi_io_bufs;
+ abts_nvme += qp->abts_nvme_io_bufs;
}
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ if (!vport->localport ||
+ test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ vport->load_flag & FC_UNLOADING)
+ return;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6176 Lport x%px Localport x%px wait "
- "timed out. Pending %d. Renewing.\n",
- lport, vport->localport, pending);
+ "timed out. Pending %d [%d:%d]. "
+ "Renewing.\n",
+ lport, vport->localport, pending,
+ abts_scsi, abts_nvme);
continue;
}
break;
@@ -2251,7 +2286,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
- * @pnvme: pointer to lpfc nvme data structure.
+ * @vport: pointer to a host virtual N_Port data structure
*
* This routine is invoked to destroy all lports bound to the phba.
* The lport memory was allocated by the nvme fc transport and is
@@ -2272,6 +2307,8 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
return;
localport = vport->localport;
+ if (!localport)
+ return;
lport = (struct lpfc_nvme_lport *)localport->private;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
@@ -2357,6 +2394,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
struct lpfc_nodelist *prev_ndlp = NULL;
+ struct fc_rport *srport = ndlp->rport;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2386,15 +2424,28 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ if (srport)
+ rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
+ else
+ rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
+
+ spin_lock_irq(&ndlp->lock);
- spin_lock_irq(&vport->phba->hbalock);
+ /* If an oldrport exists, so does the ndlp reference. If not,
+ * a new reference is needed because either the node has never
+ * been registered or it has been unregistered and is being deleted.
+ */
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (oldrport) {
prev_ndlp = oldrport->ndlp;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
} else {
- spin_unlock_irq(&vport->phba->hbalock);
- lpfc_nlp_get(ndlp);
+ spin_unlock_irq(&ndlp->lock);
+ if (!lpfc_nlp_get(ndlp)) {
+ dev_warn(&vport->phba->pcidev->dev,
+ "Warning - No node ref - exit register\n");
+ return 0;
+ }
}
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@@ -2406,52 +2457,21 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Guard against an unregister/reregister
* race that leaves the WAIT flag set.
*/
- spin_lock_irq(&vport->phba->hbalock);
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
+ ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
+ spin_unlock_irq(&ndlp->lock);
rport = remote_port->private;
if (oldrport) {
- /* New remoteport record does not guarantee valid
- * host private memory area.
- */
- if (oldrport == remote_port->private) {
- /* Same remoteport - ndlp should match.
- * Just reuse.
- */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebind lport to current "
- "remoteport x%px wwpn 0x%llx, "
- "Data: x%x x%x x%px x%px x%x "
- " x%06x\n",
- remote_port,
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- oldrport->ndlp,
- ndlp,
- ndlp->nlp_type,
- ndlp->nlp_DID);
-
- /* It's a complete rebind only if the driver
- * is registering with the same ndlp. Otherwise
- * the driver likely executed a node swap
- * prior to this registration and the ndlp to
- * remoteport binding needs to be redone.
- */
- if (prev_ndlp == ndlp)
- return 0;
-
- }
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
* register.
*/
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nrport = NULL;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
+ spin_unlock_irq(&ndlp->lock);
rport->ndlp = NULL;
rport->remoteport = NULL;
@@ -2460,8 +2480,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* reference would cause a premature cleanup.
*/
if (prev_ndlp && prev_ndlp != ndlp) {
- if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
- (!prev_ndlp->nrport))
+ if (!prev_ndlp->nrport)
lpfc_nlp_put(prev_ndlp);
}
}
@@ -2470,9 +2489,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport->remoteport = remote_port;
rport->lport = lport;
rport->ndlp = ndlp;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nrport = rport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Bind lport x%px to remoteport x%px "
@@ -2485,7 +2504,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp, prev_ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_NVME_DISC | LOG_NODE,
+ LOG_TRACE_EVENT,
"6031 RemotePort Registration failed "
"err: %d, DID x%06x\n",
ret, ndlp->nlp_DID);
@@ -2497,7 +2516,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
#endif
}
-/**
+/*
* lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
*
* If the ndlp represents an NVME Target that we are logged into,
@@ -2511,11 +2530,11 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *nrport;
struct nvme_fc_remote_port *remoteport = NULL;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
nrport = lpfc_ndlp_get_nrport(ndlp);
if (nrport)
remoteport = nrport->remoteport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
@@ -2526,12 +2545,12 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!nrport || !remoteport)
goto rescan_exit;
- /* Only rescan if we are an NVME target in the MAPPED state */
+ /* Rescan an NVME target in MAPPED state with DISCOVERY role set */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
nvme_fc_rescan_remoteport(remoteport);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6172 NVME rescanned DID x%06x "
"port_state x%x\n",
ndlp->nlp_DID, remoteport->port_state);
@@ -2578,20 +2597,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
remoteport = rport->remoteport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
if (!remoteport)
goto input_err;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport x%px, portname x%llx, "
- "port_id x%06x, portstate x%x port type x%x\n",
+ "port_id x%06x, portstate x%x port type x%x "
+ "refcnt %d\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
- ndlp->nlp_type);
+ ndlp->nlp_type, kref_read(&ndlp->kref));
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
@@ -2601,7 +2621,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
- ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
+ spin_lock_irq(&vport->phba->hbalock);
+ ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
+ spin_unlock_irq(&vport->phba->hbalock);
/* Don't let the host nvme transport keep sending keep-alives
* on this remoteport. Vport is unloading, no recovery. The
@@ -2612,9 +2634,16 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
+
+ /* The driver no longer knows if the nrport memory is valid
+ * because the controller teardown process has begun and
+ * is asynchronous. Break the binding in the ndlp. Also
+ * remove the register ndlp reference to set up node release.
+ */
+ ndlp->nrport = NULL;
+ lpfc_nlp_put(ndlp);
if (ret != 0) {
- lpfc_nlp_put(ndlp);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
@@ -2624,12 +2653,48 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6168 State error: lport x%px, rport x%px FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
/**
+ * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
+ **/
+void
+lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_ncmd)
+{
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6533 %s nvme_cmd %p tag x%x abort complete and "
+ "xri released\n", __func__,
+ lpfc_ncmd->nvmeCmd,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands must not complete before the abort
+ * exchange command fully completes. Once it does, the buffer
+ * is returned via the put list.
+ */
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->transferred_length = 0;
+ nvme_cmd->rcv_rsplen = 0;
+ nvme_cmd->status = NVME_SC_INTERNAL;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+}
+
+/**
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
@@ -2709,23 +2774,30 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
* dump a message. Something is wrong.
*/
if ((wait_cnt % 1000) == 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6178 NVME IO not empty, "
"cnt %d\n", wait_cnt);
}
}
}
+
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
}
void
-lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_io_buf *lpfc_ncmd;
struct nvmefc_fcp_req *nCmd;
- struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_wcqe_complete wcqe;
+ struct lpfc_wcqe_complete *wcqep = &wcqe;
- if (!pwqeIn->context1) {
+ lpfc_ncmd = pwqeIn->io_buf;
+ if (!lpfc_ncmd) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
}
@@ -2735,31 +2807,31 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
}
- lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
spin_lock(&lpfc_ncmd->buf_lock);
- if (!lpfc_ncmd->nvmeCmd) {
+ nCmd = lpfc_ncmd->nvmeCmd;
+ if (!nCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
+ spin_unlock(&lpfc_ncmd->buf_lock);
- nCmd = lpfc_ncmd->nvmeCmd;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6194 NVME Cancel xri %x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag);
- nCmd->transferred_length = 0;
- nCmd->rcv_rsplen = 0;
- nCmd->status = NVME_SC_INTERNAL;
- freqpriv = nCmd->private;
- freqpriv->nvme_buf = NULL;
- lpfc_ncmd->nvmeCmd = NULL;
-
- spin_unlock(&lpfc_ncmd->buf_lock);
- nCmd->done(nCmd);
+ wcqep->word0 = 0;
+ bf_set(lpfc_wcqe_c_status, wcqep, stat);
+ wcqep->parameter = param;
+ wcqep->total_data_placed = 0;
+ wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ bf_set(lpfc_wcqe_c_xb, wcqep, 1);
+
+ memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}
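
The reworked cancel path above completes the IO by synthesizing a work-queue completion rather than calling nCmd->done() directly, so cancelled and hardware-completed IOs flow through the one cmd_cmpl handler. A condensed sketch of the pattern, reusing the field and bit names from the hunk (illustrative only, not a drop-in function):

/* Build a fake WCQE carrying the desired status, set XB when the SLI
 * layer is active so the buffer is routed to the abort list, then run
 * the normal completion handler exactly as the hardware path would. */
static void sketch_cancel_complete(struct lpfc_hba *phba,
				   struct lpfc_iocbq *pwqeIn,
				   uint32_t stat, uint32_t param)
{
	struct lpfc_wcqe_complete wcqe = { 0 };

	bf_set(lpfc_wcqe_c_status, &wcqe, stat);
	wcqe.parameter = param;
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, &wcqe, 1);

	memcpy(&pwqeIn->wcqe_cmpl, &wcqe, sizeof(wcqe));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
}
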
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 593c48ff634e..733c277948c0 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -21,6 +21,10 @@
* included with this package. *
********************************************************************/
+#include <linux/nvme.h>
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVME_ERSP_LEN 0x20
@@ -30,11 +34,8 @@
#define LPFC_NVME_FB_SHIFT 9
#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
-#define LPFC_MAX_NVME_INFO_TMP_LEN 100
-#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
-
-#define lpfc_ndlp_get_nrport(ndlp) \
- ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
+#define lpfc_ndlp_get_nrport(ndlp) \
+ ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\
? NULL : ndlp->nrport)
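
A short sketch of how this patch consumes the macro: snapshot the binding under ndlp->lock and drop the lock before dereferencing the remoteport. The wrapper function is hypothetical; the locking pattern matches the rescan and unregister hunks earlier in the patch:

static struct nvme_fc_remote_port *
sketch_get_remoteport(struct lpfc_nodelist *ndlp)
{
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);	/* NULL while unreg waits */
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	return remoteport;
}
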
struct lpfc_nvme_qhandle {
@@ -74,3 +75,179 @@ struct lpfc_nvme_rport {
struct lpfc_nvme_fcpreq_priv {
struct lpfc_io_buf *nvme_buf;
};
+
+/*
+ * Set NVME LS request timeouts to 30s. This is larger than the
+ * 2*R_A_TOV window required by the spec (20s with the default 10s
+ * R_A_TOV); the smaller value appears to cause issues with some
+ * devices.
+ */
+#define LPFC_NVME_LS_TIMEOUT 30
+
+
+#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
+#define LPFC_NVMET_RQE_MIN_POST 128
+#define LPFC_NVMET_RQE_DEF_POST 512
+#define LPFC_NVMET_RQE_DEF_COUNT 2048
+#define LPFC_NVMET_SUCCESS_LEN 12
+
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
+#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
+
+/* Used for NVME Target */
+#define LPFC_NVMET_INV_HOST_ACTIVE 1
+
+struct lpfc_nvmet_tgtport {
+ struct lpfc_hba *phba;
+ struct completion *tport_unreg_cmp;
+ atomic_t state; /* tracks nvmet hosthandle invalidation */
+
+ /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
+ atomic_t rcv_ls_req_in;
+ atomic_t rcv_ls_req_out;
+ atomic_t rcv_ls_req_drop;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_abort_cmpl;
+
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
+ atomic_t xmt_ls_rsp;
+ atomic_t xmt_ls_drop;
+
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
+ atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
+ atomic_t xmt_ls_rsp_cmpl;
+
+ /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
+ atomic_t rcv_fcp_cmd_in;
+ atomic_t rcv_fcp_cmd_out;
+ atomic_t rcv_fcp_cmd_drop;
+ atomic_t rcv_fcp_cmd_defer;
+ atomic_t xmt_fcp_release;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op */
+ atomic_t xmt_fcp_drop;
+ atomic_t xmt_fcp_read_rsp;
+ atomic_t xmt_fcp_read;
+ atomic_t xmt_fcp_write;
+ atomic_t xmt_fcp_rsp;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
+ atomic_t xmt_fcp_rsp_cmpl;
+ atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
+ atomic_t xmt_fcp_rsp_drop;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_fcp_abort_cmpl;
+ atomic_t xmt_abort_sol;
+ atomic_t xmt_abort_unsol;
+ atomic_t xmt_abort_rsp;
+ atomic_t xmt_abort_rsp_error;
+
+ /* Stats counters - defer IO */
+ atomic_t defer_ctx;
+ atomic_t defer_fod;
+ atomic_t defer_wqfull;
+};
+
+struct lpfc_nvmet_ctx_info {
+ struct list_head nvmet_ctx_list;
+ spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+ uint16_t nvmet_ctx_list_cnt;
+ char pad[16]; /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq) \
+ (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
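
The macro flattens the per-CPU, per-MRQ context lists into one row-major array. A quick stand-alone illustration of the index arithmetic (the counts are hypothetical):

#include <stdio.h>

int main(void)
{
	int cfg_nvmet_mrq = 4;	/* hypothetical MRQ count */
	int cpu = 3, mrq = 2;

	/* Same arithmetic as lpfc_get_ctx_list(): one slot per
	 * (cpu, mrq) pair, laid out row-major by cpu. */
	int idx = cpu * cfg_nvmet_mrq + mrq;

	printf("ctx_info index for cpu %d, mrq %d: %d\n", cpu, mrq, idx);
	return 0;
}
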
+
+/* Values for state field of struct lpfc_async_xchg_ctx */
+#define LPFC_NVME_STE_LS_RCV 1
+#define LPFC_NVME_STE_LS_ABORT 2
+#define LPFC_NVME_STE_LS_RSP 3
+#define LPFC_NVME_STE_RCV 4
+#define LPFC_NVME_STE_DATA 5
+#define LPFC_NVME_STE_ABORT 6
+#define LPFC_NVME_STE_DONE 7
+#define LPFC_NVME_STE_FREE 0xff
+
+/* Values for flag field of struct lpfc_async_xchg_ctx */
+#define LPFC_NVME_IO_INP 0x1 /* IO is in progress on exchange */
+#define LPFC_NVME_ABORT_OP 0x2 /* Abort WQE issued on exchange */
+#define LPFC_NVME_XBUSY 0x4 /* XB bit set on IO cmpl */
+#define LPFC_NVME_CTX_RLS 0x8 /* ctx free requested */
+#define LPFC_NVME_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVME_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
+#define LPFC_NVME_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
+#define LPFC_NVME_TNOTIFY 0x80 /* notify transport of abts */
+
+struct lpfc_async_xchg_ctx {
+ union {
+ struct nvmefc_tgt_fcp_req fcp_req;
+ } hdlrctx;
+ struct list_head list;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ struct nvmefc_ls_req *ls_req;
+ struct nvmefc_ls_rsp ls_rsp;
+ struct lpfc_iocbq *wqeq;
+ struct lpfc_iocbq *abort_wqeq;
+ spinlock_t ctxlock; /* protect flag access */
+ uint32_t sid;
+ uint32_t offset;
+ uint16_t oxid;
+ uint16_t size;
+ uint16_t entry_cnt;
+ uint16_t cpu;
+ uint16_t idx;
+ uint16_t state;
+ uint16_t flag;
+ void *payload;
+ struct rqb_dmabuf *rqb_buffer;
+ struct lpfc_nvmet_ctxbuf *ctxbuf;
+ struct lpfc_sli4_hdw_queue *hdwq;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint64_t ts_isr_cmd;
+ uint64_t ts_cmd_nvme;
+ uint64_t ts_nvme_data;
+ uint64_t ts_data_wqput;
+ uint64_t ts_isr_data;
+ uint64_t ts_data_nvme;
+ uint64_t ts_nvme_status;
+ uint64_t ts_status_wqput;
+ uint64_t ts_isr_status;
+ uint64_t ts_status_nvme;
+#endif
+};
+
+
+/* routines found in lpfc_nvme.c */
+int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct nvmefc_ls_req *pnvme_lsreq,
+ void (*gen_req_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe));
+void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
+int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
+
+/* routines found in lpfc_nvmet.c */
+int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
+ uint16_t xri);
+int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
+ struct nvmefc_ls_rsp *ls_rsp,
+ void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe));
+void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9dc9afe1c255..f7cfac0da9b6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
- * Fibre Channsel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -36,10 +36,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme.h>
-#include <linux/nvme-fc-driver.h>
-#include <linux/nvme-fc.h>
-
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -50,29 +46,25 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
+ struct lpfc_async_xchg_ctx *,
dma_addr_t rspbuf,
uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *);
+ struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
+ struct lpfc_async_xchg_ctx *,
uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
+ struct lpfc_async_xchg_ctx *,
uint32_t, uint16_t);
-static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
- uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_nvmet_rcv_ctx *);
+ struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
@@ -113,7 +105,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 9 - reqtag, rcvoxid is variable */
/* Word 10 - wqes, xc is variable */
- bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
@@ -161,7 +153,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 10 - xc is variable */
bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
@@ -203,7 +195,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 10 wqes, xc is variable */
bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
- bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
@@ -221,10 +213,10 @@ lpfc_nvmet_cmd_template(void)
}
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
-static struct lpfc_nvmet_rcv_ctx *
+static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
unsigned long iflag;
bool found = false;
@@ -243,10 +235,10 @@ lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
return NULL;
}
-static struct lpfc_nvmet_rcv_ctx *
+static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
unsigned long iflag;
bool found = false;
@@ -267,7 +259,8 @@ lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
#endif
static void
-lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
+lpfc_nvmet_defer_release(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *ctxp)
{
lockdep_assert_held(&ctxp->ctxlock);
@@ -275,10 +268,10 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
ctxp->oxid, ctxp->flag);
- if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+ if (ctxp->flag & LPFC_NVME_CTX_RLS)
return;
- ctxp->flag |= LPFC_NVMET_CTX_RLS;
+ ctxp->flag |= LPFC_NVME_CTX_RLS;
spin_lock(&phba->sli4_hba.t_active_list_lock);
list_del(&ctxp->list);
spin_unlock(&phba->sli4_hba.t_active_list_lock);
@@ -288,40 +281,79 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
}
/**
- * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
+ * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
+ * transmission of an NVME LS response.
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
- * lock held. This function is the completion handler for NVME LS commands
- * The function frees memory resources used for the NVME commands.
+ * lock held. The function frees memory resources used for the command
+ * used to send the NVME LS RSP.
**/
-static void
-lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+void
+__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe)
{
- struct lpfc_nvmet_tgtport *tgtp;
- struct nvmefc_tgt_ls_req *rsp;
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
+ struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
uint32_t status, result;
- status = bf_get(lpfc_wcqe_c_status, wcqe);
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
result = wcqe->parameter;
- ctxp = cmdwqe->context2;
- if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6410 NVMET LS cmpl state mismatch IO x%x: "
+ if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6410 NVMEx LS cmpl state mismatch IO x%x: "
"%d %d\n",
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ axchg->oxid, axchg->state, axchg->entry_cnt);
}
+ lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
+ axchg->oxid, status, result);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
+ status, result, axchg->oxid);
+
+ lpfc_nlp_put(cmdwqe->ndlp);
+ cmdwqe->context_un.axchg = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+ ls_rsp->done(ls_rsp);
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
+ status, axchg->oxid);
+ kfree(axchg);
+}
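
For reference, the patch reworks completion callbacks so the WCQE no longer arrives as a bare pointer: it travels embedded in a response iocbq (rspwqe->wcqe_cmpl). A minimal sketch of the new callback shape, using the driver's structures but a hypothetical handler name:

static void example_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_iocbq *rspwqe)
{
	/* the completion WCQE is embedded in the response iocbq */
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
	uint32_t status = bf_get(lpfc_wcqe_c_status, wcqe) &
			  LPFC_IOCB_STATUS_MASK;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"example cmpl: status x%x oxid x%x\n",
			status, cmdwqe->context_un.axchg->oxid);
}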
+
+/**
+ * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @rspwqe: Pointer to driver response WQE object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME LS commands.
+ * The function updates any states and statistics, then calls the
+ * generic completion handler to free resources.
+ **/
+static void
+lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ uint32_t status, result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
+
if (!phba->targetport)
- goto out;
+ goto finish;
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ result = wcqe->parameter;
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (tgtp) {
if (status) {
atomic_inc(&tgtp->xmt_ls_rsp_error);
@@ -334,29 +366,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
}
-out:
- rsp = &ctxp->ctx.ls_req;
-
- lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
- ctxp->oxid, status, result);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
- status, result, ctxp->oxid);
-
- lpfc_nlp_put(cmdwqe->context1);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
- lpfc_sli_release_iocbq(phba, cmdwqe);
- rsp->done(rsp);
- kfree(ctxp);
+finish:
+ __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
}
/**
* lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
* @phba: HBA buffer is associated with
- * @ctxp: context to clean up
- * @mp: Buffer to free
+ * @ctx_buf: ctx buffer context
*
* Description: Frees the given DMA buffer in the appropriate way given by
* reposting it to its associated RQ so it can be reused.
@@ -369,7 +386,7 @@ void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
- struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
@@ -378,8 +395,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->state == LPFC_NVMET_STE_FREE) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ if (ctxp->state == LPFC_NVME_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6411 NVMET free, already free IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
@@ -390,8 +407,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* check if freed in another path whilst acquiring lock */
if (nvmebuf) {
ctxp->rqb_buffer = NULL;
- if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
- ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+ if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
+ ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
nvmebuf);
@@ -404,7 +421,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
}
- ctxp->state = LPFC_NVMET_STE_FREE;
+ ctxp->state = LPFC_NVME_STE_FREE;
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
if (phba->sli4_hba.nvmet_io_wait_cnt) {
@@ -421,14 +438,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
size = nvmebuf->bytes_recv;
sid = sli4_sid_from_fc_hdr(fc_hdr);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+ ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
ctxp->oxid = oxid;
ctxp->sid = sid;
- ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->state = LPFC_NVME_STE_RCV;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
@@ -453,12 +470,12 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* Indicate that a replacement buffer has been posted */
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
+ ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6181 Unable to queue deferred work "
"for oxid x%x. "
"FCP Drop IO [x%x x%x x%x]\n",
@@ -495,7 +512,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp)
+ struct lpfc_async_xchg_ctx *ctxp)
{
uint64_t seg1, seg2, seg3, seg4, seg5;
uint64_t seg6, seg7, seg8, seg9, seg10;
@@ -692,7 +709,7 @@ out:
* lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
@@ -700,20 +717,21 @@ out:
**/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
- struct lpfc_nvmet_rcv_ctx *ctxp;
- uint32_t status, result, op, start_clean, logerr;
+ struct lpfc_async_xchg_ctx *ctxp;
+ uint32_t status, result, op, logerr;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint32_t id;
+ int id;
#endif
- ctxp = cmdwqe->context2;
- ctxp->flag &= ~LPFC_NVMET_IO_INP;
+ ctxp = cmdwqe->context_un.axchg;
+ ctxp->flag &= ~LPFC_NVME_IO_INP;
- rsp = &ctxp->ctx.fcp_req;
+ rsp = &ctxp->hdlrctx.fcp_req;
op = rsp->op;
status = bf_get(lpfc_wcqe_c_status, wcqe);
@@ -740,13 +758,13 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 /* pick up SLI4 exchange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- ctxp->flag |= LPFC_NVMET_XBUSY;
+ ctxp->flag |= LPFC_NVME_XBUSY;
logerr |= LOG_NVME_ABTS;
if (tgtp)
atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
- ctxp->flag &= ~LPFC_NVMET_XBUSY;
+ ctxp->flag &= ~LPFC_NVME_XBUSY;
}
lpfc_printf_log(phba, KERN_INFO, logerr,
@@ -768,7 +786,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if ((op == NVMET_FCOP_READDATA_RSP) ||
(op == NVMET_FCOP_RSP)) {
/* Sanity check */
- ctxp->state = LPFC_NVMET_STE_DONE;
+ ctxp->state = LPFC_NVME_STE_DONE;
ctxp->entry_cnt++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -802,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
- memset(((char *)cmdwqe) + start_clean, 0,
- (sizeof(struct lpfc_iocbq) - start_clean));
+ memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
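
memset_startat(), from <linux/string.h>, zeroes an object from the named member through its end, which is exactly the offsetof() arithmetic it replaces above. A sketch of the equivalence, with a hypothetical helper name:

static inline void example_clear_tail(struct lpfc_iocbq *wqe)
{
	/* what memset_startat(wqe, 0, cmd_flag) expands to, roughly */
	memset(&wqe->cmd_flag, 0,
	       sizeof(*wqe) - offsetof(struct lpfc_iocbq, cmd_flag));
}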
@@ -814,31 +830,44 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
rsp->done(rsp);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
id = raw_smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT) {
- if (ctxp->cpu != id)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6704 CPU Check cmdcmpl: "
- "cpu %d expect %d\n",
- id, ctxp->cpu);
- phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ if (ctxp->cpu != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6704 CPU Check cmdcmpl: "
+ "cpu %d expect %d\n",
+ id, ctxp->cpu);
}
#endif
}
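
This hunk also moves the debugfs CPU-check accounting from fixed-size per-hardware-queue arrays (bounded by LPFC_CHECK_CPU_CNT) to per-CPU counters, so the range check disappears. A minimal sketch of the this_cpu_inc() pattern, with hypothetical names:

struct example_hdwq_stat {
	unsigned int rcv_io;
	unsigned int xmt_io;
	unsigned int cmpl_io;
};

static DEFINE_PER_CPU(struct example_hdwq_stat, example_stat);

static void example_count_cmpl(void)
{
	/* safe against preemption on the local CPU; any CPU id is valid */
	this_cpu_inc(example_stat.cmpl_io);
}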
-static int
-lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *rsp)
+/**
+ * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
+ * an NVME LS rsp for a prior NVME LS request that was received.
+ * @axchg: pointer to exchange context for the NVME LS request the response
+ * is for.
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
+ * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
+ *
+ * This routine is used to format and send a WQE to transmit an NVME LS
+ * Response. The response is for a prior NVME LS request that was
+ * received and posted to the transport.
+ *
+ * Returns:
+ * 0 : if the response was successfully transmitted
+ * non-zero : if the response failed to transmit, of the form -Exxx.
+ **/
+int
+__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
+ struct nvmefc_ls_rsp *ls_rsp,
+ void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe))
{
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
- struct lpfc_hba *phba = ctxp->phba;
- struct hbq_dmabuf *nvmebuf =
- (struct hbq_dmabuf *)ctxp->rqb_buffer;
+ struct lpfc_hba *phba = axchg->phba;
+ struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
struct lpfc_iocbq *nvmewqeq;
- struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
struct lpfc_dmabuf dmabuf;
struct ulp_bde64 bpl;
int rc;
@@ -846,76 +875,134 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
- if (phba->pport->load_flag & FC_UNLOADING)
- return -ENODEV;
-
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
+ "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
- if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
- (ctxp->entry_cnt != 1)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6412 NVMET LS rsp state mismatch "
+ if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6412 NVMEx LS rsp state mismatch "
"oxid x%x: %d %d\n",
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ axchg->oxid, axchg->state, axchg->entry_cnt);
+ return -EALREADY;
}
- ctxp->state = LPFC_NVMET_STE_LS_RSP;
- ctxp->entry_cnt++;
+ axchg->state = LPFC_NVME_STE_LS_RSP;
+ axchg->entry_cnt++;
- nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
- rsp->rsplen);
+ nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
+ ls_rsp->rsplen);
if (nvmewqeq == NULL) {
- atomic_inc(&nvmep->xmt_ls_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6150 LS Drop IO x%x: Prep\n",
- ctxp->oxid);
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- atomic_inc(&nvmep->xmt_ls_abort);
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
- ctxp->sid, ctxp->oxid);
- return -ENOMEM;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6150 NVMEx LS Drop Rsp x%x: Prep\n",
+ axchg->oxid);
+ rc = -ENOMEM;
+ goto out_free_buf;
}
/* Save numBdes for bpl2sgl */
- nvmewqeq->rsvd2 = 1;
+ nvmewqeq->num_bdes = 1;
nvmewqeq->hba_wqidx = 0;
- nvmewqeq->context3 = &dmabuf;
+ nvmewqeq->bpl_dmabuf = &dmabuf;
dmabuf.virt = &bpl;
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
- bpl.tus.f.bdeSize = rsp->rsplen;
+ bpl.tus.f.bdeSize = ls_rsp->rsplen;
bpl.tus.f.bdeFlags = 0;
bpl.tus.w = le32_to_cpu(bpl.tus.w);
+ /*
+ * Note: although we're using stack space for the dmabuf, the
+ * call to lpfc_sli4_issue_wqe is synchronous, so it will not
+ * be referenced after it returns back to this routine.
+ */
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
- nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = ctxp;
+ nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
+ nvmewqeq->context_un.axchg = axchg;
- lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
- ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
+ lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
+ axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
+
+ rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
+
+ /* clear to be sure there's no reference */
+ nvmewqeq->bpl_dmabuf = NULL;
- rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
/*
* Okay to repost buffer here, but wait till cmpl
* before freeing ctxp and iocbq.
*/
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- atomic_inc(&nvmep->xmt_ls_rsp);
return 0;
}
- /* Give back resources */
- atomic_inc(&nvmep->xmt_ls_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6151 LS Drop IO x%x: Issue %d\n",
- ctxp->oxid, rc);
- lpfc_nlp_put(nvmewqeq->context1);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
+ axchg->oxid, rc);
+
+ rc = -ENXIO;
+
+ lpfc_nlp_put(nvmewqeq->ndlp);
+out_free_buf:
+ /* Give back resources */
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- atomic_inc(&nvmep->xmt_ls_abort);
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
- return -ENXIO;
+
+ /*
+ * As the transport doesn't track completions of responses, if the rsp
+ * fails to send, the transport will effectively ignore the rsp
+ * and consider the LS done. However, the driver has an active
+ * exchange open for the LS - so be sure to abort the exchange
+ * if the response isn't sent.
+ */
+ lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
+ return rc;
+}
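
The point of the factoring is that __lpfc_nvme_xmt_ls_rsp() is completion-agnostic; each consumer supplies its own handler. A hypothetical caller that reuses the generic completion directly:

static int example_send_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
			       struct nvmefc_ls_rsp *ls_rsp)
{
	/* generic path: no target-port statistics to update */
	return __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp,
				      __lpfc_nvme_xmt_ls_rsp_cmp);
}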
+
+/**
+ * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
+ * @tgtport: pointer to target port that NVME LS is to be transmit from.
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
+ *
+ * Driver registers this routine to transmit responses for received NVME
+ * LS requests.
+ *
+ * This routine is used to format and send a WQE to transmit an NVME LS
+ * Response. The ls_rsp is used to reverse-map the LS to the original
+ * NVME LS request sequence, which provides addressing information for
+ * the remote port the LS is to be sent to, as well as the exchange id
+ * that the LS is bound to.
+ *
+ * Returns:
+ * 0 : if the response was successfully transmitted
+ * non-zero : if the response failed to transmit, of the form -Exxx.
+ **/
+static int
+lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_ls_rsp *ls_rsp)
+{
+ struct lpfc_async_xchg_ctx *axchg =
+ container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
+ struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
+ int rc;
+
+ if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
+ rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
+
+ if (rc) {
+ atomic_inc(&nvmep->xmt_ls_drop);
+ /*
+ * unless the failure is due to having already sent
+ * the response, an abort will be generated for the
+ * exchange if the rsp can't be sent.
+ */
+ if (rc != -EALREADY)
+ atomic_inc(&nvmep->xmt_ls_abort);
+ return rc;
+ }
+
+ atomic_inc(&nvmep->xmt_ls_rsp);
+ return 0;
}
static int
@@ -923,19 +1010,17 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_queue *wq;
struct lpfc_iocbq *nvmewqeq;
struct lpfc_sli_ring *pring;
unsigned long iflags;
int rc;
-
- if (phba->pport->load_flag & FC_UNLOADING) {
- rc = -ENODEV;
- goto aerr;
- }
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int id;
+#endif
if (phba->pport->load_flag & FC_UNLOADING) {
rc = -ENODEV;
@@ -954,25 +1039,23 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- int id = raw_smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT) {
- if (rsp->hwqid != id)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6705 CPU Check OP: "
- "cpu %d expect %d\n",
- id, rsp->hwqid);
- phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
- }
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ id = raw_smp_processor_id();
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ if (rsp->hwqid != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6705 CPU Check OP: "
+ "cpu %d expect %d\n",
+ id, rsp->hwqid);
ctxp->cpu = id; /* Setup cpu for cmpl check */
}
#endif
/* Sanity check */
- if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
- (ctxp->state == LPFC_NVMET_STE_ABORT)) {
+ if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
+ (ctxp->state == LPFC_NVME_STE_ABORT)) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6102 IO oxid x%x aborted\n",
ctxp->oxid);
rc = -ENXIO;
@@ -982,23 +1065,22 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
if (nvmewqeq == NULL) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6152 FCP Drop IO x%x: Prep\n",
ctxp->oxid);
rc = -ENXIO;
goto aerr;
}
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
- nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = ctxp;
- nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
+ nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
+ nvmewqeq->context_un.axchg = ctxp;
+ nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
ctxp->wqeq->hba_wqidx = rsp->hwqid;
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
ctxp->oxid, rsp->op, rsp->rsplen);
- ctxp->flag |= LPFC_NVMET_IO_INP;
+ ctxp->flag |= LPFC_NVME_IO_INP;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1017,7 +1099,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQ was full, so queue nvmewqeq to be sent after
* WQE release CQE
*/
- ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
+ ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -1030,13 +1112,13 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
/* Give back resources */
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6153 FCP Drop IO x%x: Issue: %d\n",
ctxp->oxid, rc);
ctxp->wqeq->hba_wqidx = 0;
- nvmewqeq->context2 = NULL;
- nvmewqeq->context3 = NULL;
+ nvmewqeq->context_un.axchg = NULL;
+ nvmewqeq->bpl_dmabuf = NULL;
rc = -EBUSY;
aerr:
return rc;
@@ -1057,8 +1139,8 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *req)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_queue *wq;
unsigned long flags;
@@ -1066,9 +1148,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
- if (phba->pport->load_flag & FC_UNLOADING)
- return;
-
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
@@ -1086,13 +1165,13 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in process of aborting IO
*/
- if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
+ if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return;
}
- ctxp->flag |= LPFC_NVMET_ABORT_OP;
+ ctxp->flag |= LPFC_NVME_ABORT_OP;
- if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
+ if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
@@ -1102,11 +1181,11 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
}
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
- /* An state of LPFC_NVMET_STE_RCV means we have just received
+ /* A state of LPFC_NVME_STE_RCV means we have just received
* the NVME command and have not started processing it.
* (by issuing any IO WQEs on this exchange yet)
*/
- if (ctxp->state == LPFC_NVMET_STE_RCV)
+ if (ctxp->state == LPFC_NVME_STE_RCV)
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
else
@@ -1119,26 +1198,26 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
unsigned long flags;
bool aborting = false;
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_XBUSY)
+ if (ctxp->flag & LPFC_NVME_XBUSY)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6027 NVMET release with XBUSY flag x%x"
" oxid x%x\n",
ctxp->flag, ctxp->oxid);
- else if (ctxp->state != LPFC_NVMET_STE_DONE &&
- ctxp->state != LPFC_NVMET_STE_ABORT)
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ else if (ctxp->state != LPFC_NVME_STE_DONE &&
+ ctxp->state != LPFC_NVME_STE_ABORT)
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6413 NVMET release bad state %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
- if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
- (ctxp->flag & LPFC_NVMET_XBUSY)) {
+ if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
+ (ctxp->flag & LPFC_NVME_XBUSY)) {
aborting = true;
/* let the abort path do the real release */
lpfc_nvmet_defer_release(phba, ctxp);
@@ -1149,7 +1228,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
ctxp->state, aborting);
atomic_inc(&lpfc_nvmep->xmt_fcp_release);
- ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
+ ctxp->flag &= ~LPFC_NVME_TNOTIFY;
if (aborting)
return;
@@ -1162,8 +1241,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *tgtp;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_hba *phba = ctxp->phba;
unsigned long iflag;
@@ -1191,6 +1270,122 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
+/**
+ * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
+ * @phba: Pointer to HBA context object
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @rspwqe: Pointer to driver response WQE object.
+ *
+ * This function is the completion handler for NVME LS requests.
+ * The function updates any states and statistics, then calls the
+ * generic completion handler to finish completion of the request.
+ **/
+static void
+lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_iocbq *rspwqe)
+{
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
+ __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
+}
+
+/**
+ * lpfc_nvmet_ls_req - Issue a Link Service request
+ * @targetport: pointer to target instance registered with nvmet transport.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
+ * Driver sets this value to the ndlp pointer.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to handle any link service request
+ * from the nvme_fc transport to a remote nvme-aware port.
+ *
+ * Return value :
+ * 0 - Success
+ * non-zero: various error codes, in form of -Exxx
+ **/
+static int
+lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
+ void *hosthandle,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ int ret;
+ u32 hstate;
+
+ if (!lpfc_nvmet)
+ return -EINVAL;
+
+ phba = lpfc_nvmet->phba;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return -EINVAL;
+
+ hstate = atomic_read(&lpfc_nvmet->state);
+ if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
+ return -EACCES;
+
+ ndlp = (struct lpfc_nodelist *)hosthandle;
+
+ ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
+ lpfc_nvmet_ls_req_cmp);
+
+ return ret;
+}
+
+/**
+ * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
+ * @targetport: Transport targetport, that LS was issued from.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
+ * Driver sets this value to the ndlp pointer.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
+ *
+ * Driver registers this routine to abort an NVME LS request that is
+ * in progress (from the transport's perspective).
+ **/
+static void
+lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ int ret;
+
+ phba = lpfc_nvmet->phba;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
+ ndlp = (struct lpfc_nodelist *)hosthandle;
+
+ ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
+ if (!ret)
+ atomic_inc(&lpfc_nvmet->xmt_ls_abort);
+}
+
+static void
+lpfc_nvmet_host_release(void *hosthandle)
+{
+ struct lpfc_nodelist *ndlp = hosthandle;
+ struct lpfc_hba *phba = ndlp->phba;
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ if (!phba->targetport || !phba->targetport->private)
+ return;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6202 NVMET XPT releasing hosthandle x%px "
+ "DID x%x xflags x%x refcnt %d\n",
+ hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ spin_lock_irq(&ndlp->lock);
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ atomic_set(&tgtp->state, 0);
+}
+
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
@@ -1202,7 +1397,7 @@ lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
phba = tgtp->phba;
rc = lpfc_issue_els_rscn(phba->pport, 0);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6420 NVMET subsystem change: Notification %s\n",
(rc) ? "Failed" : "Sent");
}
@@ -1215,6 +1410,9 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
.defer_rcv = lpfc_nvmet_defer_rcv,
.discovery_event = lpfc_nvmet_discovery_event,
+ .ls_req = lpfc_nvmet_ls_req,
+ .ls_abort = lpfc_nvmet_ls_abort,
+ .host_release = lpfc_nvmet_host_release,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1225,6 +1423,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
+ .lsrqst_priv_sz = 0,
};
static void
@@ -1241,7 +1440,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
list_del_init(&ctx_buf->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock(&phba->hbalock);
__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
+ spin_unlock(&phba->hbalock);
+
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
@@ -1296,7 +1498,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
if (!phba->sli4_hba.nvmet_ctx_info) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6419 Failed allocate memory for "
"nvmet context lists\n");
return -ENOMEM;
@@ -1354,7 +1556,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
if (!ctx_buf) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6404 Ran out of memory for NVMET\n");
return -ENOMEM;
}
@@ -1363,30 +1565,30 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
GFP_KERNEL);
if (!ctx_buf->context) {
kfree(ctx_buf);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6405 Ran out of NVMET "
"context memory\n");
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
- ctx_buf->context->state = LPFC_NVMET_STE_FREE;
+ ctx_buf->context->state = LPFC_NVME_STE_FREE;
ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
kfree(ctx_buf->context);
kfree(ctx_buf);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6406 Ran out of NVMET iocb/WQEs\n");
return -ENOMEM;
}
- ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+ ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
nvmewqe = ctx_buf->iocbq;
wqe = &nvmewqe->wqe;
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
- ctx_buf->iocbq->context1 = NULL;
+ ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
@@ -1394,7 +1596,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
kfree(ctx_buf);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
@@ -1473,7 +1675,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
error = -ENOENT;
#endif
if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6025 Cannot register NVME targetport x%x: "
"portnm %llx nodenm %llx segs %d qs %d\n",
error,
@@ -1569,7 +1771,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *req = NULL;
struct lpfc_nodelist *ndlp;
@@ -1588,31 +1790,33 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
}
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
- spin_lock(&ctxp->ctxlock);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
- !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
+ if (ctxp->flag & LPFC_NVME_CTX_RLS &&
+ !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~LPFC_NVMET_XBUSY;
- spin_unlock(&ctxp->ctxlock);
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ ctxp->flag &= ~LPFC_NVME_XBUSY;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
lpfc_set_rrq_active(phba, ndlp,
@@ -1631,9 +1835,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
lpfc_worker_wake_up(phba);
return;
}
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
if (ctxp) {
/*
@@ -1647,15 +1849,15 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
rxid);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= LPFC_NVMET_ABTS_RCV;
- ctxp->state = LPFC_NVMET_STE_ABORT;
+ ctxp->flag |= LPFC_NVME_ABTS_RCV;
+ ctxp->state = LPFC_NVME_STE_ABORT;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_nvmeio_data(phba,
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
xri, raw_smp_processor_id(), 0);
- req = &ctxp->ctx.fcp_req;
+ req = &ctxp->hdlrctx.fcp_req;
if (req)
nvmet_fc_rcv_fcp_abort(phba->targetport, req);
}
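
The locking rework in the hunks above stops wrapping the abort-list search in hbalock: the list lock is dropped before the per-context lock is taken, and any further list manipulation nests inside the context lock. A reduced sketch of that ordering, with hypothetical names:

static void example_release_ctx(spinlock_t *list_lock, spinlock_t *ctx_lock,
				struct list_head *entry)
{
	unsigned long iflag;

	spin_lock_irqsave(ctx_lock, iflag);
	/* list manipulation nests inside the per-context lock */
	spin_lock(list_lock);
	list_del_init(entry);
	spin_unlock(list_lock);
	spin_unlock_irqrestore(ctx_lock, iflag);
}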
@@ -1668,7 +1870,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_hba *phba = vport->phba;
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
struct nvmefc_tgt_fcp_req *rsp;
uint32_t sid;
uint16_t oxid, xri;
@@ -1677,8 +1879,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
sid = sli4_sid_from_fc_hdr(fc_hdr);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
@@ -1687,11 +1888,10 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
xri = ctxp->ctxbuf->sglq->sli4_xritag;
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+ ctxp->flag |= LPFC_NVME_ABTS_RCV;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_nvmeio_data(phba,
@@ -1701,16 +1901,14 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
- rsp = &ctxp->ctx.fcp_req;
+ rsp = &ctxp->hdlrctx.fcp_req;
nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
/* Respond with BA_ACC accordingly */
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
}
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
/* check the wait list */
if (phba->sli4_hba.nvmet_io_wait_cnt) {
struct rqb_dmabuf *nvmebuf;
@@ -1760,7 +1958,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
xri = ctxp->ctxbuf->sglq->sli4_xritag;
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+ ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_nvmeio_data(phba,
@@ -1772,10 +1970,10 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
"flag x%x state x%x\n",
ctxp->oxid, xri, ctxp->flag, ctxp->state);
- if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+ if (ctxp->flag & LPFC_NVME_TNOTIFY) {
/* Notify the transport */
nvmet_fc_rcv_fcp_abort(phba->targetport,
- &ctxp->ctx.fcp_req);
+ &ctxp->hdlrctx.fcp_req);
} else {
cancel_work_sync(&ctxp->ctxbuf->defer_work);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
@@ -1803,7 +2001,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
- struct lpfc_nvmet_rcv_ctx *ctxp)
+ struct lpfc_async_xchg_ctx *ctxp)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *nvmewqeq;
@@ -1825,12 +2023,14 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
&wq->wqfull_list, list) {
if (ctxp) {
/* Checking for a specific IO to flush */
- if (nvmewqeq->context2 == ctxp) {
+ if (nvmewqeq->context_un.axchg == ctxp) {
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock,
iflags);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
+ sizeof(*wcqep));
lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
- wcqep);
+ nvmewqeq);
return;
}
continue;
@@ -1838,7 +2038,8 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* Flush all IOs */
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
}
}
@@ -1854,7 +2055,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *nvmewqeq;
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
unsigned long iflags;
int rc;
@@ -1868,7 +2069,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
+ ctxp = nvmewqeq->context_un.axchg;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
@@ -1880,7 +2081,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
- if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+ if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
ctxp->ts_status_wqput = ktime_get_ns();
else
ctxp->ts_data_wqput = ktime_get_ns();
@@ -1915,9 +2116,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+ if (!wait_for_completion_timeout(&tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6179 Unreg targetport x%px timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
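
The wait above now references the on-stack completion variable directly instead of going back through the tgtp pointer. A reduced sketch of the on-stack completion idiom, with a hypothetical function and timeout:

static void example_unreg_wait(struct lpfc_hba *phba)
{
	DECLARE_COMPLETION_ONSTACK(unreg_cmp);

	/* publish &unreg_cmp where the teardown path can call
	 * complete() on it, then wait on the local variable itself
	 */
	if (!wait_for_completion_timeout(&unreg_cmp,
					 msecs_to_jiffies(5000)))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"example unreg timeout\n");
}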
@@ -1927,114 +2128,61 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
/**
- * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
+ * lpfc_nvmet_handle_lsreq - Process an NVME LS request
* @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
- * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
*
- * This routine is used for processing the WQE associated with a unsolicited
- * event. It first determines whether there is an existing ndlp that matches
- * the DID from the unsolicited WQE. If not, it will create a new one with
- * the DID from the unsolicited WQE. The ELS command from the unsolicited
- * WQE is then used to invoke the proper routine and to set up proper state
- * of the discovery state machine.
- **/
-static void
-lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct hbq_dmabuf *nvmebuf)
+ * This routine is used for processing an asynchronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
+ *
+ * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
+ * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
+ * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
- struct lpfc_nvmet_tgtport *tgtp;
- struct fc_frame_header *fc_hdr;
- struct lpfc_nvmet_rcv_ctx *ctxp;
- uint32_t *payload;
- uint32_t size, oxid, sid, rc;
-
-
- if (!nvmebuf || !phba->targetport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO\n");
- oxid = 0;
- size = 0;
- sid = 0;
- ctxp = NULL;
- goto dropit;
- }
-
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
-
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
- size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
- sid = sli4_sid_from_fc_hdr(fc_hdr);
+ struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
+ uint32_t *payload = axchg->payload;
+ int rc;
- ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
- if (ctxp == NULL) {
- atomic_inc(&tgtp->rcv_ls_req_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6155 LS Drop IO x%x: Alloc\n",
- oxid);
-dropit:
- lpfc_nvmeio_data(phba, "NVMET LS DROP: "
- "xri x%x sz %d from %06x\n",
- oxid, size, sid);
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- return;
- }
- ctxp->phba = phba;
- ctxp->size = size;
- ctxp->oxid = oxid;
- ctxp->sid = sid;
- ctxp->wqeq = NULL;
- ctxp->state = LPFC_NVMET_STE_LS_RCV;
- ctxp->entry_cnt = 1;
- ctxp->rqb_buffer = (void *)nvmebuf;
- ctxp->hdwq = &phba->sli4_hba.hdwq[0];
+ atomic_inc(&tgtp->rcv_ls_req_in);
- lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
- oxid, size, sid);
/*
- * The calling sequence should be:
- * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
- * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
+ * Driver passes the ndlp as the hosthandle argument, allowing
+ * the transport to generate LS requests for any associations
+ * that are created.
*/
- atomic_inc(&tgtp->rcv_ls_req_in);
- rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
- payload, size);
+ rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
+ axchg->payload, axchg->size);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
- "%08x %08x %08x\n", size, rc,
+ "%08x %08x %08x\n", axchg->size, rc,
*payload, *(payload+1), *(payload+2),
*(payload+3), *(payload+4), *(payload+5));
- if (rc == 0) {
+ if (!rc) {
atomic_inc(&tgtp->rcv_ls_req_out);
- return;
+ return 0;
}
- lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
- oxid, size, sid);
-
atomic_inc(&tgtp->rcv_ls_req_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
- ctxp->oxid, rc);
-
- /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-
- atomic_inc(&tgtp->xmt_ls_abort);
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
+ return 1;
}
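
The ndlp handed to nvmet_fc_rcv_ls_req() above is the hosthandle the transport later echoes back into ls_req(), ls_abort() and host_release(), letting those handlers recover the node without a DID lookup. A sketch with a hypothetical helper:

static struct lpfc_nodelist *example_node_from_handle(void *hosthandle)
{
	/* the driver registered the ndlp pointer as its hosthandle */
	return (struct lpfc_nodelist *)hosthandle;
}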
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
- struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
struct lpfc_hba *phba = ctxp->phba;
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_nvmet_tgtport *tgtp;
@@ -2043,7 +2191,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
unsigned long iflags;
if (!nvmebuf) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6159 process_rcv_fcp_req, nvmebuf is NULL, "
"oxid: x%x flg: x%x state: x%x\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -2055,8 +2203,8 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
- if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6324 IO oxid x%x aborted\n",
ctxp->oxid);
return;
@@ -2064,7 +2212,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
payload = (uint32_t *)(nvmebuf->dbuf.virt);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- ctxp->flag |= LPFC_NVMET_TNOTIFY;
+ ctxp->flag |= LPFC_NVME_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_isr_cmd)
ctxp->ts_cmd_nvme = ktime_get_ns();
@@ -2078,13 +2226,13 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
* A buffer has already been reposted for this IO, so just free
* the nvmebuf.
*/
- rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
payload, ctxp->size);
/* Process FCP command */
if (rc == 0) {
atomic_inc(&tgtp->rcv_fcp_cmd_out);
spin_lock_irqsave(&ctxp->ctxlock, iflags);
- if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
+ if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
(nvmebuf != ctxp->rqb_buffer)) {
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
return;
@@ -2103,7 +2251,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
atomic_inc(&tgtp->rcv_fcp_cmd_out);
atomic_inc(&tgtp->defer_fod);
spin_lock_irqsave(&ctxp->ctxlock, iflags);
- if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+ if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
return;
}
@@ -2118,9 +2266,9 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
return;
}
- ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
+ ctxp->flag &= ~LPFC_NVME_TNOTIFY;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
ctxp->oxid, rc,
atomic_read(&tgtp->rcv_fcp_cmd_in),
@@ -2225,7 +2373,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_ctxbuf *ctx_buf;
@@ -2239,7 +2387,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctx_buf = NULL;
if (!nvmebuf || !phba->targetport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6157 NVMET FCP Drop IO\n");
if (nvmebuf)
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
@@ -2270,15 +2418,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
size = nvmebuf->bytes_recv;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
- if (current_cpu < LPFC_CHECK_CPU_CNT) {
- if (idx != current_cpu)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6703 CPU Check rcv: "
- "cpu %d expect %d\n",
- current_cpu, idx);
- phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
- }
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
+ if (idx != current_cpu)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6703 CPU Check rcv: "
+ "cpu %d expect %d\n",
+ current_cpu, idx);
}
#endif
@@ -2309,12 +2455,12 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+ ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
- if (ctxp->state != LPFC_NVMET_STE_FREE) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ if (ctxp->state != LPFC_NVME_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6414 NVMET Context corrupt %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
@@ -2325,7 +2471,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->idx = idx;
- ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->state = LPFC_NVME_STE_RCV;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
@@ -2356,7 +2502,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6325 Unable to queue work for oxid x%x. "
"FCP Drop IO [x%x x%x x%x]\n",
ctxp->oxid,
@@ -2372,40 +2518,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
}
/**
- * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
- * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
- * @nvmebuf: pointer to received nvme data structure.
- *
- * This routine is used to process an unsolicited event received from a SLI
- * (Service Level Interface) ring. The actual processing of the data buffer
- * associated with the unsolicited event is done by invoking the routine
- * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
- * SLI RQ on which the unsolicited event was received.
- **/
-void
-lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocb)
-{
- struct lpfc_dmabuf *d_buf;
- struct hbq_dmabuf *nvmebuf;
-
- d_buf = piocb->context2;
- nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
-
- if (!nvmebuf) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "3015 LS Drop IO\n");
- return;
- }
- if (phba->nvmet_support == 0) {
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- return;
- }
- lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
-}
-
-/**
* lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
* @phba: pointer to lpfc hba data structure.
* @idx: relative index of MRQ vector
@@ -2427,7 +2539,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint8_t cqflag)
{
if (!nvmebuf) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3167 NVMET FCP Drop IO\n");
return;
}
@@ -2465,7 +2577,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
**/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
dma_addr_t rspbuf, uint16_t rspsize)
{
struct lpfc_nodelist *ndlp;
@@ -2473,7 +2585,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
union lpfc_wqe128 *wqe;
if (!lpfc_is_link_up(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6104 NVMET prep LS wqe: link err: "
"NPORT x%x oxid:x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@@ -2483,7 +2595,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
/* Allocate buffer for command wqe */
nvmewqe = lpfc_sli_get_iocbq(phba);
if (nvmewqe == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6105 NVMET prep LS wqe: No WQE: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@@ -2491,10 +2603,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6106 NVMET prep LS wqe: No ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@@ -2503,10 +2615,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
ctxp->wqeq = nvmewqe;
/* prevent preparing wqe with NULL ndlp reference */
- nvmewqe->context1 = lpfc_nlp_get(ndlp);
- if (nvmewqe->context1 == NULL)
+ nvmewqe->ndlp = lpfc_nlp_get(ndlp);
+ if (!nvmewqe->ndlp)
goto nvme_wqe_free_wqeq_exit;
- nvmewqe->context2 = ctxp;
+ nvmewqe->context_un.axchg = ctxp;
wqe = &nvmewqe->wqe;
memset(wqe, 0, sizeof(union lpfc_wqe));
@@ -2567,7 +2679,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
+ nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -2578,8 +2690,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
return nvmewqe;
nvme_wqe_free_wqeq_exit:
- nvmewqe->context2 = NULL;
- nvmewqe->context3 = NULL;
+ nvmewqe->context_un.axchg = NULL;
+ nvmewqe->ndlp = NULL;
+ nvmewqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, nvmewqe);
return NULL;
}
@@ -2587,9 +2700,9 @@ nvme_wqe_free_wqeq_exit:
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp)
+ struct lpfc_async_xchg_ctx *ctxp)
{
- struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
+ struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
struct lpfc_nvmet_tgtport *tgtp;
struct sli4_sge *sgl;
struct lpfc_nodelist *ndlp;
@@ -2598,12 +2711,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
dma_addr_t physaddr;
- int i, cnt;
- int do_pbde;
+ int i, cnt, nsegs;
+ bool use_pbde = false;
int xc = 1;
if (!lpfc_is_link_up(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6107 NVMET prep FCP wqe: link err:"
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@@ -2611,10 +2724,10 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6108 NVMET prep FCP wqe: no ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@@ -2622,13 +2735,14 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
ctxp->sid, ctxp->oxid, ctxp->state,
phba->cfg_nvme_seg_cnt);
return NULL;
}
+ nsegs = rsp->sg_cnt;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
nvmewqe = ctxp->wqeq;
@@ -2636,7 +2750,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Allocate buffer for command wqe */
nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6110 NVMET prep FCP wqe: No "
"WQE: NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@@ -2649,12 +2763,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
/* Sanity check */
- if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
+ if (((ctxp->state == LPFC_NVME_STE_RCV) &&
(ctxp->entry_cnt == 1)) ||
- (ctxp->state == LPFC_NVMET_STE_DATA)) {
+ (ctxp->state == LPFC_NVME_STE_DATA)) {
wqe = &nvmewqe->wqe;
} else {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6111 Wrong state NVMET FCP: %d cnt %d\n",
ctxp->state, ctxp->entry_cnt);
return NULL;
@@ -2706,9 +2820,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!xc)
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
- /* Word 11 - set sup, irsp, irsplen later */
- do_pbde = 0;
-
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
@@ -2786,12 +2897,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!xc)
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
- /* Word 11 - set pbde later */
- if (phba->cfg_enable_pbde) {
- do_pbde = 1;
+ /* Word 11 - check for pbde */
+ if (nsegs == 1 && phba->cfg_enable_pbde) {
+ use_pbde = true;
+ /* Word 11 - PBDE bit already preset by template */
} else {
+ /* Overwrite default template setting */
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
- do_pbde = 0;
}
/* Word 12 */
@@ -2862,13 +2974,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((rsp->rsplen >> 2) - 1));
memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
}
- do_pbde = 0;
/* Word 12 */
wqe->fcp_trsp.rsvd_12_15[0] = 0;
/* Use rspbuf, NOT sg list */
- rsp->sg_cnt = 0;
+ nsegs = 0;
sgl->word2 = 0;
atomic_inc(&tgtp->xmt_fcp_rsp);
break;
@@ -2883,9 +2994,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->context1 = ndlp;
+ nvmewqe->ndlp = ndlp;
- for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
+ for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
cnt = sg_dma_len(sgel);
sgl->addr_hi = putPaddrHigh(physaddr);
@@ -2897,24 +3008,25 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(cnt);
- if (i == 0) {
- bde = (struct ulp_bde64 *)&wqe->words[13];
- if (do_pbde) {
- /* Words 13-15 (PBDE) */
- bde->addrLow = sgl->addr_lo;
- bde->addrHigh = sgl->addr_hi;
- bde->tus.f.bdeSize =
- le32_to_cpu(sgl->sge_len);
- bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bde->tus.w = cpu_to_le32(bde->tus.w);
- } else {
- memset(bde, 0, sizeof(struct ulp_bde64));
- }
- }
sgl++;
ctxp->offset += cnt;
}
- ctxp->state = LPFC_NVMET_STE_DATA;
+
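+	/*
+	 * Single-segment fast path: when PBDE is enabled and the I/O has
+	 * exactly one data segment, the lone SGE is mirrored into WQE
+	 * words 13-15 as an embedded BDE so the adapter can use it
+	 * directly; otherwise words 13-15 are zeroed so no stale BDE is
+	 * carried over from a prior use of the WQE.
+	 */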
+ bde = (struct ulp_bde64 *)&wqe->words[13];
+ if (use_pbde) {
+		/* step the sgl pointer back one entry to the first data sge */
+ sgl--;
+
+ /* Words 13-15 (PBDE) */
+ bde->addrLow = sgl->addr_lo;
+ bde->addrHigh = sgl->addr_hi;
+ bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ } else {
+ memset(bde, 0, sizeof(struct ulp_bde64));
+ }
+ ctxp->state = LPFC_NVME_STE_DATA;
ctxp->entry_cnt++;
return nvmewqe;
}
@@ -2923,7 +3035,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
* lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
@@ -2931,35 +3043,36 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
**/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
unsigned long flags;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ if (ctxp->flag & LPFC_NVME_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->state = LPFC_NVMET_STE_DONE;
+ ctxp->state = LPFC_NVME_STE_DONE;
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
- !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
+ !(ctxp->flag & LPFC_NVME_XBUSY)) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
@@ -2970,8 +3083,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -2983,7 +3096,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
lpfc_sli_release_iocbq(phba, cmdwqe);
/* Since iaab/iaar are NOT set, there is no work left.
- * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+ * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
* should have been called already.
*/
}
@@ -2992,7 +3105,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
@@ -3000,15 +3113,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
unsigned long flags;
uint32_t result;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (!ctxp) {
@@ -3022,12 +3136,12 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ if (ctxp->flag & LPFC_NVME_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
/* Sanity check */
- if (ctxp->state != LPFC_NVMET_STE_ABORT) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ if (ctxp->state != LPFC_NVME_STE_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6112 ABTS Wrong state:%d oxid x%x\n",
ctxp->state, ctxp->oxid);
}
@@ -3035,15 +3149,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- ctxp->state = LPFC_NVMET_STE_DONE;
- if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
- !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ ctxp->state = LPFC_NVME_STE_DONE;
+ if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
+ !(ctxp->flag & LPFC_NVME_XBUSY)) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
@@ -3054,8 +3168,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3064,7 +3178,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
/* Since iaab/iaar are NOT set, there is no work left.
- * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+ * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
* should have been called already.
*/
}
@@ -3073,7 +3187,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for LS cmds
@@ -3081,17 +3195,20 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- atomic_inc(&tgtp->xmt_ls_abort_cmpl);
+ if (phba->nvmet_support) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_ls_abort_cmpl);
+ }
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
@@ -3099,7 +3216,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result, wcqe->word3);
if (!ctxp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6415 NVMET LS Abort No ctx: WCQE: "
"%08x %08x %08x %08x\n",
wcqe->word0, wcqe->total_data_placed,
@@ -3109,25 +3226,25 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
return;
}
- if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6416 NVMET LS abort cmpl state mismatch: "
"oxid x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
}
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
- struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_tgtport *tgtp = NULL;
struct lpfc_iocbq *abts_wqeq;
union lpfc_wqe128 *wqe_abts;
struct lpfc_nodelist *ndlp;
@@ -3136,14 +3253,16 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
"6067 ABTS: sid %x xri x%x/x%x\n",
sid, xri, ctxp->wqeq->sli4_xritag);
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (phba->nvmet_support && phba->targetport)
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
- atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@@ -3189,7 +3308,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
/* Word 10 */
- bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
LPFC_WQE_LENLOC_WORD12);
@@ -3203,10 +3321,10 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
OTHER_COMMAND);
abts_wqeq->vport = phba->pport;
- abts_wqeq->context1 = ndlp;
- abts_wqeq->context2 = ctxp;
- abts_wqeq->context3 = NULL;
- abts_wqeq->rsvd2 = 0;
+ abts_wqeq->ndlp = ndlp;
+ abts_wqeq->context_un.axchg = ctxp;
+ abts_wqeq->bpl_dmabuf = NULL;
+ abts_wqeq->num_bdes = 0;
/* hba_wqidx should already be setup from command we are aborting */
abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
abts_wqeq->iocb.ulpLe = 1;
@@ -3219,14 +3337,14 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
struct lpfc_nodelist *ndlp;
unsigned long flags;
- u8 opt;
+ bool ia;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3236,17 +3354,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
@@ -3256,17 +3374,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&ctxp->ctxlock, flags);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- ctxp->state = LPFC_NVMET_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ctxp->state = LPFC_NVME_STE_ABORT;
+ ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3283,43 +3401,44 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6163 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x oxid x%x\n",
phba->hba_flag, ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
/* Outstanding abort is in progress */
- if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6164 Outstanding NVME I/O Abort Request "
"still pending on oxid x%x\n",
ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
/* Ready - mark outstanding as aborted by driver. */
- abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
+ abts_wqeq->iotag, CLASS3,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, true);
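+	/*
+	 * The ia ("inhibit abort") argument is true when an ABTS has
+	 * already been received on the exchange (LPFC_NVME_ABTS_RCV),
+	 * mirroring the INHIBIT_ABORT opt previously passed to
+	 * lpfc_nvme_prep_abort_wqe().
+	 */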
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME;
- abts_wqeq->context2 = ctxp;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME;
+ abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->vport = phba->pport;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3333,10 +3452,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_sli_release_iocbq(phba, abts_wqeq);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6166 Failed ABORT issue_wqe with status x%x "
"for oxid x%x.\n",
rc, ctxp->oxid);
@@ -3345,7 +3464,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
struct lpfc_nvmet_tgtport *tgtp;
@@ -3360,14 +3479,14 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->wqeq->hba_wqidx = 0;
}
- if (ctxp->state == LPFC_NVMET_STE_FREE) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ if (ctxp->state == LPFC_NVME_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
rc = WQE_BUSY;
goto aerr;
}
- ctxp->state = LPFC_NVMET_STE_ABORT;
+ ctxp->state = LPFC_NVME_STE_ABORT;
ctxp->entry_cnt++;
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (rc == 0)
@@ -3375,9 +3494,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq = ctxp->wqeq;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3389,17 +3507,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
aerr:
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
+ if (ctxp->flag & LPFC_NVME_CTX_RLS) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
+ ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
"(%x)\n",
ctxp->oxid, rc, released);
@@ -3408,34 +3526,44 @@ aerr:
return 1;
}
-static int
-lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+/**
+ * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
+ * via async frame receive where the frame is not handled.
+ * @phba: pointer to adapter structure
+ * @ctxp: pointer to the asynchronously received sequence
+ * @sid: address of the remote port to send the ABTS to
+ * @xri: oxid value for the ABTS (the other side's exchange id).
+ **/
+int
+lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
- struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_tgtport *tgtp = NULL;
struct lpfc_iocbq *abts_wqeq;
unsigned long flags;
int rc;
- if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
- (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
- ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
+ (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
+ ctxp->state = LPFC_NVME_STE_LS_ABORT;
ctxp->entry_cnt++;
} else {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6418 NVMET LS abort state mismatch "
"IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
- ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ ctxp->state = LPFC_NVME_STE_LS_ABORT;
}
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (phba->nvmet_support && phba->targetport)
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
if (!ctxp->wqeq) {
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->wqeq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6068 Abort failed: No wqeqs: "
"xri: x%x\n", xri);
/* No failure to an ABTS request. */
@@ -3451,22 +3579,67 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
}
spin_lock_irqsave(&phba->hbalock, flags);
- abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
- atomic_inc(&tgtp->xmt_abort_unsol);
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}
out:
- atomic_inc(&tgtp->xmt_abort_rsp_error);
- abts_wqeq->context2 = NULL;
- abts_wqeq->context3 = NULL;
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
+ abts_wqeq->rsp_dmabuf = NULL;
+ abts_wqeq->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
- kfree(ctxp);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
- return 0;
+ return 1;
+}
+
+/**
+ * lpfc_nvmet_invalidate_host - invalidate connections to an NVME host
+ *
+ * @phba: pointer to the driver instance bound to an adapter port.
+ * @ndlp: pointer to an lpfc_nodelist type
+ *
+ * This routine upcalls the nvmet transport to invalidate an NVME
+ * host to which this target instance had active connections.
+ */
+void
+lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ u32 ndlp_has_hh;
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
+ "6203 Invalidating hosthandle x%px\n",
+ ndlp);
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
+ spin_unlock_irq(&ndlp->lock);
+
+ /* Do not invalidate any nodes that do not have a hosthandle.
+	 * The host_release callback will cause a node reference
+ * count imbalance and a crash.
+ */
+ if (!ndlp_has_hh) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
+ "6204 Skip invalidate on node x%px DID x%x\n",
+ ndlp, ndlp->nlp_DID);
+ return;
+ }
+
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	/* Need to get the nvmet_fc_target_port pointer here. */
+ nvmet_fc_invalidate_host(phba->targetport, ndlp);
+#endif
}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
deleted file mode 100644
index b80b1639b9a7..000000000000
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*******************************************************************
- * This file is part of the Emulex Linux Device Driver for *
- * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
- * Copyright (C) 2004-2016 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.broadcom.com *
- * Portions Copyright (C) 2004-2005 Christoph Hellwig *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
- ********************************************************************/
-
-#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVMET_RQE_MIN_POST 128
-#define LPFC_NVMET_RQE_DEF_POST 512
-#define LPFC_NVMET_RQE_DEF_COUNT 2048
-#define LPFC_NVMET_SUCCESS_LEN 12
-
-#define LPFC_NVMET_MRQ_AUTO 0
-#define LPFC_NVMET_MRQ_MAX 16
-
-#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
-
-/* Used for NVME Target */
-struct lpfc_nvmet_tgtport {
- struct lpfc_hba *phba;
- struct completion *tport_unreg_cmp;
-
- /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
- atomic_t rcv_ls_req_in;
- atomic_t rcv_ls_req_out;
- atomic_t rcv_ls_req_drop;
- atomic_t xmt_ls_abort;
- atomic_t xmt_ls_abort_cmpl;
-
- /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
- atomic_t xmt_ls_rsp;
- atomic_t xmt_ls_drop;
-
- /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
- atomic_t xmt_ls_rsp_error;
- atomic_t xmt_ls_rsp_aborted;
- atomic_t xmt_ls_rsp_xb_set;
- atomic_t xmt_ls_rsp_cmpl;
-
- /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
- atomic_t rcv_fcp_cmd_in;
- atomic_t rcv_fcp_cmd_out;
- atomic_t rcv_fcp_cmd_drop;
- atomic_t rcv_fcp_cmd_defer;
- atomic_t xmt_fcp_release;
-
- /* Stats counters - lpfc_nvmet_xmt_fcp_op */
- atomic_t xmt_fcp_drop;
- atomic_t xmt_fcp_read_rsp;
- atomic_t xmt_fcp_read;
- atomic_t xmt_fcp_write;
- atomic_t xmt_fcp_rsp;
-
- /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
- atomic_t xmt_fcp_rsp_xb_set;
- atomic_t xmt_fcp_rsp_cmpl;
- atomic_t xmt_fcp_rsp_error;
- atomic_t xmt_fcp_rsp_aborted;
- atomic_t xmt_fcp_rsp_drop;
-
- /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
- atomic_t xmt_fcp_xri_abort_cqe;
- atomic_t xmt_fcp_abort;
- atomic_t xmt_fcp_abort_cmpl;
- atomic_t xmt_abort_sol;
- atomic_t xmt_abort_unsol;
- atomic_t xmt_abort_rsp;
- atomic_t xmt_abort_rsp_error;
-
- /* Stats counters - defer IO */
- atomic_t defer_ctx;
- atomic_t defer_fod;
- atomic_t defer_wqfull;
-};
-
-struct lpfc_nvmet_ctx_info {
- struct list_head nvmet_ctx_list;
- spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
- struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
- struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
- uint16_t nvmet_ctx_list_cnt;
- char pad[16]; /* pad to a cache-line */
-};
-
-/* This retrieves the context info associated with the specified cpu / mrq */
-#define lpfc_get_ctx_list(phba, cpu, mrq) \
- (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
-
-struct lpfc_nvmet_rcv_ctx {
- union {
- struct nvmefc_tgt_ls_req ls_req;
- struct nvmefc_tgt_fcp_req fcp_req;
- } ctx;
- struct list_head list;
- struct lpfc_hba *phba;
- struct lpfc_iocbq *wqeq;
- struct lpfc_iocbq *abort_wqeq;
- spinlock_t ctxlock; /* protect flag access */
- uint32_t sid;
- uint32_t offset;
- uint16_t oxid;
- uint16_t size;
- uint16_t entry_cnt;
- uint16_t cpu;
- uint16_t idx;
- uint16_t state;
- /* States */
-#define LPFC_NVMET_STE_LS_RCV 1
-#define LPFC_NVMET_STE_LS_ABORT 2
-#define LPFC_NVMET_STE_LS_RSP 3
-#define LPFC_NVMET_STE_RCV 4
-#define LPFC_NVMET_STE_DATA 5
-#define LPFC_NVMET_STE_ABORT 6
-#define LPFC_NVMET_STE_DONE 7
-#define LPFC_NVMET_STE_FREE 0xff
- uint16_t flag;
-#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
-#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */
-#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
-#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
-#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
-#define LPFC_NVMET_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
-#define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
-#define LPFC_NVMET_TNOTIFY 0x80 /* notify transport of abts */
- struct rqb_dmabuf *rqb_buffer;
- struct lpfc_nvmet_ctxbuf *ctxbuf;
- struct lpfc_sli4_hdw_queue *hdwq;
-
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint64_t ts_isr_cmd;
- uint64_t ts_cmd_nvme;
- uint64_t ts_nvme_data;
- uint64_t ts_data_wqput;
- uint64_t ts_isr_data;
- uint64_t ts_data_nvme;
- uint64_t ts_nvme_status;
- uint64_t ts_status_wqput;
- uint64_t ts_isr_status;
- uint64_t ts_status_nvme;
-#endif
-};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2c7e0b22db2f..7a1563564df7 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -28,6 +28,7 @@
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
+#include <linux/blk-cgroup.h>
#include <net/checksum.h>
#include <scsi/scsi.h>
@@ -87,30 +88,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static inline unsigned
-lpfc_cmd_blksize(struct scsi_cmnd *sc)
-{
- return sc->device->sector_size;
-}
-
-#define LPFC_CHECK_PROTECT_GUARD 1
-#define LPFC_CHECK_PROTECT_REF 2
-static inline unsigned
-lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
-{
- return 1;
-}
-
-static inline unsigned
-lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
-{
- if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
- return 0;
- if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
- return 1;
- return 0;
-}
-
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
* @phba: Pointer to HBA object.
@@ -132,61 +109,7 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
}
}
-/**
- * lpfc_update_stats - Update statistical data for the command completion
- * @vport: The virtual port on which this call is executing.
- * @lpfc_cmd: lpfc scsi command object pointer.
- *
- * This function is called when there is a command completion and this
- * function updates the statistical data for the command completion.
- **/
-static void
-lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- unsigned long flags;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long latency;
- int i;
-
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- (cmd->result))
- return;
-
- latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
- rdata = lpfc_cmd->rdata;
- pnode = rdata->pnode;
-
- spin_lock_irqsave(shost->host_lock, flags);
- if (!pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return;
- }
-
- if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
- i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
- phba->bucket_step;
- /* check array subscript bounds */
- if (i < 0)
- i = 0;
- else if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT - 1;
- } else {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
- if (latency <= (phba->bucket_base +
- ((1<<i)*phba->bucket_step)))
- break;
- }
-
- pnode->lat_data[i].cmd_count++;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
+#define LPFC_INVALID_REFTAG ((u32)-1)
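+/* All-ones sentinel: when scsi_prot_ref_tag() yields this value the
+ * BlockGuard setup paths treat the command as having no usable
+ * reference tag and bail out.
+ */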
/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
@@ -313,7 +236,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
/**
* lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
* @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
+ * @num_to_alloc: The requested number of buffers to allocate.
*
* This routine allocates a scsi buffer for device with SLI-3 interface spec,
* the scsi buffer contains all the necessary information needed to initiate
@@ -375,7 +298,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
kfree(psb);
break;
}
- psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
@@ -446,7 +369,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->ulpClass = CLASS3;
psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
- psb->cur_iocbq.context1 = psb;
+ psb->cur_iocbq.io_buf = psb;
spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);
@@ -481,7 +404,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
@@ -497,6 +420,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
* lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
+ * @idx: index into hdwq
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* FCP or NVME aborted xri.
@@ -505,8 +429,8 @@ void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
{
- uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ u16 xri = 0;
+ u16 rxid = 0;
struct lpfc_io_buf *psb, *next_psb;
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
@@ -515,25 +439,40 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
int rrq_empty = 0;
struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
+ struct scsi_cmnd *cmd;
+ int offline = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
-
+ offline = pci_channel_offline(phba->pcidev);
+ if (!offline) {
+ xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ }
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
+ if (offline)
+ xri = psb->cur_iocbq.sli4_xritag;
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
- return;
+ if (!offline) {
+ lpfc_sli4_nvme_xri_aborted(phba, axri,
+ psb);
+ return;
+ }
+ lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&qp->abts_io_buf_list_lock);
+ continue;
}
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
@@ -545,33 +484,63 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (ndlp) {
+ if (ndlp && !offline) {
lpfc_set_rrq_active(phba, ndlp,
psb->cur_iocbq.sli4_lxritag, rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
+
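+			/*
+			 * With cfg_fcp_wait_abts_rsp set, or with the PCI
+			 * channel offline, the midlayer completion was
+			 * deferred until this XRI-aborted event: finish the
+			 * command now and wake any abort thread blocked on
+			 * the buffer.
+			 */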
+ if (phba->cfg_fcp_wait_abts_rsp || offline) {
+ spin_lock_irqsave(&psb->buf_lock, iflag);
+ cmd = psb->pCmd;
+ psb->pCmd = NULL;
+ spin_unlock_irqrestore(&psb->buf_lock, iflag);
+
+ /* The sdev is not guaranteed to be valid post
+ * scsi_done upcall.
+ */
+ if (cmd)
+ scsi_done(cmd);
+
+ /*
+ * We expect there is an abort thread waiting
+			 * for command completion; wake up the thread.
+ */
+ spin_lock_irqsave(&psb->buf_lock, iflag);
+ psb->cur_iocbq.cmd_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ if (psb->waitq)
+ wake_up(psb->waitq);
+ spin_unlock_irqrestore(&psb->buf_lock, iflag);
+ }
+
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
- return;
+ if (!offline)
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&qp->abts_io_buf_list_lock);
+ continue;
}
}
spin_unlock(&qp->abts_io_buf_list_lock);
- for (i = 1; i <= phba->sli.last_iotag; i++) {
- iocbq = phba->sli.iocbq_lookup[i];
-
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
- continue;
- if (iocbq->sli4_xritag != xri)
- continue;
- psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->flags &= ~LPFC_SBUF_XBUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (!list_empty(&pring->txq))
- lpfc_worker_wake_up(phba);
- return;
+ if (!offline) {
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ (iocbq->cmd_flag & LPFC_IO_LIBDFC))
+ continue;
+ if (iocbq->sli4_xritag != xri)
+ continue;
+ psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
+ psb->flags &= ~LPFC_SBUF_XBUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (!list_empty(&pring->txq))
+ lpfc_worker_wake_up(phba);
+ return;
+ }
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
@@ -579,6 +548,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
/**
* lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -618,6 +589,8 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/**
* lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @hdwq io_buf_list
* and returns to caller.
@@ -633,7 +606,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_sli4_hdw_queue *qp;
struct sli4_sge *sgl;
- IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
uint32_t cpu, idx;
@@ -642,7 +614,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
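+		/*
+		 * SCHED_BY_HDWQ: derive the hardware-queue index from the
+		 * block layer's unique tag so the buffer comes from the
+		 * same hdwq that blk-mq dispatched the request on.
+		 */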
- tag = blk_mq_unique_tag(cmnd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
idx = blk_mq_unique_tag_to_hwq(tag);
} else {
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
@@ -659,7 +631,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
lpfc_cmd->prot_seg_cnt = 0;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->timeout = 0;
@@ -671,8 +643,10 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_cmd->prot_data_type = 0;
#endif
tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
- if (!tmp)
+ if (!tmp) {
+ lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
return NULL;
+ }
lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
@@ -701,24 +675,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
- /*
- * Since the IOCB for the FCP I/O is built into this
- * lpfc_io_buf, initialize it with all known data now.
- */
- iocb = &lpfc_cmd->cur_iocbq.iocb;
- iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- /* setting the BLP size to 2 * sizeof BDE may not be correct.
- * We are setting the bpl to point to out sgl. An sgl's
- * entries are 16 bytes, a bpl entries are 12 bytes.
- */
- iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
- iocb->ulpBdeCount = 1;
- iocb->ulpLe = 1;
- iocb->ulpClass = CLASS3;
-
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
@@ -728,6 +684,8 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -744,7 +702,7 @@ lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
/**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
* @phba: The Hba for which this call is being executed.
* @psb: The scsi buffer which is being released.
*
@@ -761,7 +719,7 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
psb->pCmd = NULL;
- psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
@@ -788,7 +746,8 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq;
if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
- psb->pCmd = NULL;
+ if (!phba->cfg_fcp_wait_abts_rsp)
+ psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
@@ -816,6 +775,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
}
/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
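+
+/*
+ * Caller note: lpfc_fcpcmd_to_iocb() is used below by
+ * lpfc_scsi_prep_dma_buf_s3() so the wire-format (big-endian)
+ * FCP_CMND travels in the IOCB's immediate-data area.
+ */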
+
+/**
* lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -852,7 +830,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
bpl += 2;
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
	 * because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -865,11 +843,11 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9064 BLKGRD: %s: Too many sg segments from "
- "dma_map_sg. Config %d, seg_cnt %d\n",
- __func__, phba->cfg_sg_seg_cnt,
- lpfc_cmd->seg_cnt);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "9064 BLKGRD: %s: Too many sg segments"
+ " from dma_map_sg. Config %d, seg_cnt"
+ " %d\n", __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
@@ -889,7 +867,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
physaddr = sg_dma_address(sgel);
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+ !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -917,7 +895,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+ !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDE or a BPL.
@@ -951,6 +929,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
return 0;
}
@@ -962,10 +941,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
#define BG_ERR_TGT 0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
-/**
+/*
* Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
* error injection
- **/
+ */
#define BG_ERR_CHECK 0x20
/**
@@ -974,7 +953,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* @sc: The SCSI command to examine
* @reftag: (out) BlockGuard reference tag for transmitted data
* @apptag: (out) BlockGuard application tag for transmitted data
- * @new_guard (in) Value to replace CRC with if needed
+ * @new_guard: (in) Value to replace CRC with if needed
*
* Returns BG_ERR_* bit mask or 0 if request ignored
**/
@@ -990,7 +969,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t op = scsi_get_prot_op(sc);
uint32_t blksize;
uint32_t numblks;
- sector_t lba;
+ u32 lba;
int rc = 0;
int blockoff = 0;
@@ -998,19 +977,21 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
sgpe = scsi_prot_sglist(sc);
- lba = scsi_get_lba(sc);
+ lba = scsi_prot_ref_tag(sc);
+ if (lba == LPFC_INVALID_REFTAG)
+ return 0;
/* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
- blksize = lpfc_cmd_blksize(sc);
+ blksize = scsi_prot_interval(sc);
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
/* Make sure we have the right LBA if one is specified */
- if ((phba->lpfc_injerr_lba < lba) ||
- (phba->lpfc_injerr_lba >= (lba + numblks)))
+ if (phba->lpfc_injerr_lba < (u64)lba ||
+ (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
return 0;
if (sgpe) {
- blockoff = phba->lpfc_injerr_lba - lba;
+ blockoff = phba->lpfc_injerr_lba - (u64)lba;
numblks = sg_dma_len(sgpe) /
sizeof(struct scsi_dif_tuple);
if (numblks < blockoff)
@@ -1059,7 +1040,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	 * inserted in the middle of the IO.
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"9076 BLKGRD: Injecting reftag error: "
"write lba x%lx + x%x oldrefTag x%x\n",
(unsigned long)lba, blockoff,
@@ -1090,7 +1072,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
break;
}
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the error
@@ -1109,7 +1091,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9078 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@@ -1130,7 +1112,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9077 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@@ -1157,7 +1139,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9079 BLKGRD: Injecting reftag error: "
"read lba x%lx\n", (unsigned long)lba);
break;
@@ -1179,7 +1161,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	 * inserted in the middle of the IO.
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"9080 BLKGRD: Injecting apptag error: "
"write lba x%lx + x%x oldappTag x%x\n",
(unsigned long)lba, blockoff,
@@ -1209,7 +1192,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_TGT | BG_ERR_CHECK;
break;
}
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the
@@ -1228,7 +1211,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0813 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@@ -1249,7 +1232,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0812 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@@ -1276,7 +1259,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0814 BLKGRD: Injecting apptag error: "
"read lba x%lx\n", (unsigned long)lba);
break;
@@ -1291,7 +1274,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (op) {
case SCSI_PROT_WRITE_PASS:
rc = BG_ERR_CHECK;
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
@@ -1311,7 +1294,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc |= BG_ERR_TGT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0817 BLKGRD: Injecting guard error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@@ -1333,7 +1316,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0816 BLKGRD: Injecting guard error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@@ -1361,7 +1344,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0818 BLKGRD: Injecting guard error: "
"read lba x%lx\n", (unsigned long)lba);
}
@@ -1377,8 +1360,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* the specified SCSI command.
* @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
- * @txopt: (out) BlockGuard operation for transmitted data
- * @rxopt: (out) BlockGuard operation for received data
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
@@ -1389,7 +1372,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
uint8_t ret = 0;
- if (lpfc_cmd_guard_csum(sc)) {
+ if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
@@ -1411,7 +1394,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_NORMAL:
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
scsi_get_prot_op(sc));
ret = 1;
@@ -1440,7 +1423,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_NORMAL:
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
scsi_get_prot_op(sc));
ret = 1;
@@ -1457,8 +1440,8 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* the specified SCSI command in order to force a guard tag error.
* @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
- * @txopt: (out) BlockGuard operation for transmitted data
- * @rxopt: (out) BlockGuard operation for received data
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
@@ -1467,9 +1450,8 @@ static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint8_t *txop, uint8_t *rxop)
{
- uint8_t ret = 0;
- if (lpfc_cmd_guard_csum(sc)) {
+ if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
@@ -1520,7 +1502,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
}
- return ret;
+ return 0;
}
#endif
@@ -1529,7 +1511,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* @phba: The Hba for which this call is being executed.
* @sc: pointer to scsi command we're working on
* @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
+ * @datasegcnt: number of segments of data that have been dma mapped
*
* This function sets up BPL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1577,7 +1559,9 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde */
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ reftag = scsi_prot_ref_tag(sc);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1614,12 +1598,12 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* protection data is automatically generated, not checked.
*/
if (datadir == DMA_FROM_DEVICE) {
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(pde6_ce, pde6, checking);
else
bf_set(pde6_ce, pde6, 0);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(pde6_re, pde6, checking);
else
bf_set(pde6_re, pde6, 0);
@@ -1726,7 +1710,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9020 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
@@ -1737,8 +1721,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command */
- blksize = lpfc_cmd_blksize(sc);
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ blksize = scsi_prot_interval(sc);
+ reftag = scsi_prot_ref_tag(sc);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1776,12 +1762,12 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_optx, pde6, txop);
bf_set(pde6_oprx, pde6, rxop);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(pde6_ce, pde6, checking);
else
bf_set(pde6_ce, pde6, 0);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(pde6_re, pde6, checking);
else
bf_set(pde6_re, pde6, 0);
@@ -1838,7 +1824,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return num_bde + 1;
if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9065 BLKGRD:%s Invalid data segment\n",
__func__);
return 0;
@@ -1901,8 +1887,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag += protgrp_blks;
} else {
/* if we're here, we have a bug */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9054 BLKGRD: bug in %s\n", __func__);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "9054 BLKGRD: bug in %s\n", __func__);
}
} while (!alldone);
@@ -1916,7 +1902,8 @@ out:
* @phba: The Hba for which this call is being executed.
* @sc: pointer to scsi command we're working on
* @sgl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
+ * @datasegcnt: number of segments of data that have been dma mapped
+ * @lpfc_cmd: lpfc scsi command object pointer.
*
* This function sets up SGL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1966,7 +1953,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde */
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ reftag = scsi_prot_ref_tag(sc);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1992,12 +1981,12 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* protection data is automatically generated, not checked.
*/
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
else
bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
else
bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2090,6 +2079,7 @@ out:
* @sgl: pointer to buffer list for protection groups
* @datacnt: number of segments of data that have been dma mapped
* @protcnt: number of segment of protection data that have been dma mapped
+ * @lpfc_cmd: lpfc scsi command object pointer.
*
* This function sets up SGL buffer list for protection groups of
* type LPFC_PG_TYPE_DIF
@@ -2152,7 +2142,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9082 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
@@ -2163,8 +2153,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command */
- blksize = lpfc_cmd_blksize(sc);
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ blksize = scsi_prot_interval(sc);
+ reftag = scsi_prot_ref_tag(sc);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2219,9 +2211,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
diseed->ref_tag = cpu_to_le32(reftag);
diseed->ref_tag_tran = diseed->ref_tag;
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-
} else {
bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
/*
@@ -2238,7 +2229,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
- if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
else
bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2305,7 +2296,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return num_sge + 1;
if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9086 BLKGRD:%s Invalid data segment\n",
__func__);
return 0;
@@ -2410,8 +2401,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag += protgrp_blks;
} else {
/* if we're here, we have a bug */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9085 BLKGRD: bug in %s\n", __func__);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "9085 BLKGRD: bug in %s\n", __func__);
}
} while (!alldone);
@@ -2451,7 +2442,7 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
break;
default:
if (phba)
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9021 Unsupported protection op:%d\n",
op);
break;
@@ -2495,7 +2486,7 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
* DIF (trailer) attached to it. Must adjust FCP data length
* to account for the protection data.
*/
- fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+ fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
return fcpdl;
}
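
The adjustment above is easy to sanity-check: each protection interval of payload gains one 8-byte DIF tuple on the wire. A standalone sketch of the same arithmetic, assuming a 512-byte interval purely as an example:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of lpfc_bg_scsi_adjust_dl(): add 8 bytes of DIF per
     * protection interval of payload. */
    static uint32_t adjust_dl(uint32_t fcpdl, uint32_t interval)
    {
            return fcpdl + (fcpdl / interval) * 8;
    }

    int main(void)
    {
            /* 4 KiB at a 512-byte interval -> 8 tuples -> 4160 bytes. */
            printf("%u\n", adjust_dl(4096, 512));
            return 0;
    }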
@@ -2534,7 +2525,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
bpl += 2;
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -2615,7 +2606,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
scsi_dma_unmap(scsi_cmnd);
lpfc_cmd->seg_cnt = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9022 Unexpected protection group %i\n",
prot_group_type);
return 2;
@@ -2659,7 +2650,7 @@ err:
scsi_prot_sg_count(scsi_cmnd),
scsi_cmnd->sc_data_direction);
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9023 Cannot setup S/G List for HBA"
"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
@@ -2749,14 +2740,16 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* data length is a multiple of the blksize.
*/
sgde = scsi_sglist(cmd);
- blksize = lpfc_cmd_blksize(cmd);
+ blksize = scsi_prot_interval(cmd);
data_src = (uint8_t *)sg_virt(sgde);
data_len = sgde->length;
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+ start_ref_tag = scsi_prot_ref_tag(cmd);
+ if (start_ref_tag == LPFC_INVALID_REFTAG)
+ goto out;
start_app_tag = src->app_tag;
len = sgpe->length;
while (src && protsegcnt) {
@@ -2775,7 +2768,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
/* First Guard Tag checking */
if (chk_guard) {
guard_tag = src->guard_tag;
- if (lpfc_cmd_guard_csum(cmd))
+ if (cmd->prot_flags
+ & SCSI_PROT_IP_CHECKSUM)
sum = lpfc_bg_csum(data_src,
blksize);
else
@@ -2841,43 +2835,36 @@ skipit:
}
out:
if (err_type == BGS_GUARD_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
- (unsigned long)scsi_get_lba(cmd),
+ "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
+ scsi_prot_ref_tag(cmd),
sum, guard_tag);
} else if (err_type == BGS_REFTAG_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
- (unsigned long)scsi_get_lba(cmd),
+ "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
+ scsi_prot_ref_tag(cmd),
ref_tag, start_ref_tag);
} else if (err_type == BGS_APPTAG_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
- (unsigned long)scsi_get_lba(cmd),
+ "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
+ scsi_prot_ref_tag(cmd),
app_tag, start_app_tag);
}
}
-
/*
* This function checks for BlockGuard errors detected by
* the HBA. In case of errors, the ASC/ASCQ fields in the
@@ -2895,20 +2882,63 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
struct lpfc_iocbq *pIocbOut)
{
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ struct sli3_bg_fields *bgf;
int ret = 0;
- uint32_t bghm = bgf->bghm;
- uint32_t bgstat = bgf->bgstat;
- uint64_t failing_sector = 0;
+ struct lpfc_wcqe_complete *wcqe;
+ u32 status;
+ u32 bghm = 0;
+ u32 bgstat = 0;
+ u64 failing_sector = 0;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wcqe = &pIocbOut->wcqe_cmpl;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+
+ if (status == CQE_STATUS_DI_ERROR) {
+ /* Guard Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
+ bgstat |= BGS_GUARD_ERR_MASK;
+
+ /* AppTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
+ bgstat |= BGS_APPTAG_ERR_MASK;
+
+ /* RefTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
+ bgstat |= BGS_REFTAG_ERR_MASK;
+
+ /* Check to see if there was any good data before the
+ * error
+ */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
+ bghm = wcqe->total_data_placed;
+ }
+
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!bgstat)
+ bgstat |= (BGS_REFTAG_ERR_MASK |
+ BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
+ }
+
+ } else {
+ bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ bghm = bgf->bghm;
+ bgstat = bgf->bgstat;
+ }
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9072 BLKGRD: Invalid BG Profile in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9072 BLKGRD: Invalid BG Profile in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
ret = (-1);
goto out;
}
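
The SLI-4 branch above folds the per-check CQE flags into the legacy bgstat bitmask so one parser serves both interface generations, and if the CQE reports a DI error without any individual check bit, all three bits are set so the software fallback can re-derive the real failure. A compact model of that folding (the flag parameters are illustrative, not the real WCQE accessors):

    #include <stdio.h>
    #include <stdint.h>

    #define GUARD_ERR  0x1
    #define APPTAG_ERR 0x2
    #define REFTAG_ERR 0x4

    /* Fold individual check failures into one status mask; when the
     * hardware only reports "DI error" with no detail, claim all
     * three so a software pass can classify the failure. */
    static uint32_t fold_bgstat(int guard, int app, int ref)
    {
            uint32_t bgstat = 0;

            if (guard)
                    bgstat |= GUARD_ERR;
            if (app)
                    bgstat |= APPTAG_ERR;
            if (ref)
                    bgstat |= REFTAG_ERR;
            if (!bgstat)
                    bgstat = GUARD_ERR | APPTAG_ERR | REFTAG_ERR;
            return bgstat;
    }

    int main(void)
    {
            printf("0x%x 0x%x\n", fold_bgstat(1, 0, 0), fold_bgstat(0, 0, 0));
            return 0;
    }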
@@ -2916,63 +2946,52 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9073 BLKGRD: Invalid BG PDIF Block in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9073 BLKGRD: Invalid BG PDIF Block in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
ret = (-1);
goto out;
}
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
-
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9055 BLKGRD: Guard Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9055 BLKGRD: Guard Tag error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
-
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
-
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9056 BLKGRD: Ref Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9056 BLKGRD: Ref Tag error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
-
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
-
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9061 BLKGRD: App Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9061 BLKGRD: App Tag error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
}
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3012,13 +3031,13 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (!ret) {
/* No error was reported - problem in FW? */
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9057 BLKGRD: Unknown error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9057 BLKGRD: Unknown error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- blk_rq_sectors(cmd->request), bgstat, bghm);
+ scsi_prot_ref_tag(cmd),
+ scsi_logical_block_count(cmd), bgstat, bghm);
- /* Calcuate what type of error it was */
+ /* Calculate what type of error it was */
lpfc_calc_bg_err(phba, lpfc_cmd);
}
out:
@@ -3046,9 +3065,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
struct sli4_sge *first_data_sgl;
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ struct lpfc_vport *vport = phba->pport;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
dma_addr_t physaddr;
- uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg, i, j;
@@ -3064,7 +3084,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -3083,11 +3103,12 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
lpfc_cmd->seg_cnt = nseg;
if (!phba->cfg_xpsgl &&
lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
- " %s: Too many sg segments from "
- "dma_map_sg. Config %d, seg_cnt %d\n",
- __func__, phba->cfg_sg_seg_cnt,
- lpfc_cmd->seg_cnt);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "9074 BLKGRD:"
+ " %s: Too many sg segments from "
+ "dma_map_sg. Config %d, seg_cnt %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
@@ -3109,7 +3130,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
j = 2;
for (i = 0; i < nseg; i++) {
sgl->word2 = 0;
- if ((num_bde + 1) == nseg) {
+ if (nseg == 1) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
@@ -3178,25 +3199,33 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
j++;
}
- /*
- * Setup the first Payload BDE. For FCoE we just key off
- * Performance Hints, for FC we use lpfc_enable_pbde.
- * We populate words 13-15 of IOCB/WQE.
+
+ /* PBDE support for first data SGE only.
+ * For FCoE, we key off Performance Hints.
+ * For FC, we key off lpfc_enable_pbde.
*/
- if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
- phba->cfg_enable_pbde) {
+ if (nseg == 1 &&
+ ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
+ phba->cfg_enable_pbde)) {
+ /* Words 13-15 */
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &wqe->words[13];
bde->addrLow = first_data_sgl->addr_lo;
bde->addrHigh = first_data_sgl->addr_hi;
bde->tus.f.bdeSize =
le32_to_cpu(first_data_sgl->sge_len);
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
+
+ /* Word 11 - set PBDE bit */
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
+ } else {
+ memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
+ /* Word 11 - PBDE bit disabled by default template */
}
} else {
sgl += 1;
- /* clear the last flag in the fcp_rsp map entry */
+ /* set the last flag in the fcp_rsp map entry */
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
@@ -3204,7 +3233,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &wqe->words[13];
memset(bde, 0, (sizeof(uint32_t) * 3));
}
}
@@ -3216,12 +3245,23 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* all iocb memory resources are reused.
*/
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
-
- /*
- * Due to difference in data length between DIF/non-DIF paths,
- * we need to set word 4 of IOCB here
- */
- iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ /* Set first-burst provided it was successfully negotiated */
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ vport->cfg_first_burst_size &&
+ scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ u32 init_len, total_len;
+
+ total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ init_len = min(total_len, vport->cfg_first_burst_size);
+
+ /* Word 4 & 5 */
+ wqe->fcp_iwrite.initial_xfer_len = init_len;
+ wqe->fcp_iwrite.total_xfer_len = total_len;
+ } else {
+ /* Word 4 */
+ wqe->fcp_iwrite.total_xfer_len =
+ be32_to_cpu(fcp_cmnd->fcpDl);
+ }
/*
* If the OAS driver feature is enabled and the lun is enabled for
@@ -3229,9 +3269,20 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
+
+ /* Word 10 */
+ bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
+
+ if (lpfc_cmd->cur_iocbq.priority)
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (lpfc_cmd->cur_iocbq.priority << 1));
+ else
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (phba->cfg_XLanePriority << 1));
}
return 0;
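
The first-burst setup above is a simple clamp: the initial transfer length is the smaller of the payload and the negotiated first-burst size, while the total length always carries the full payload. A sketch of just that clamp, with example sizes:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t min_u32(uint32_t a, uint32_t b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            uint32_t total_len = 1048576;  /* 1 MiB write */
            uint32_t first_burst = 65536;  /* negotiated 64 KiB */
            uint32_t init_len = min_u32(total_len, first_burst);

            /* init_len seeds the first burst; total_len is unchanged. */
            printf("init=%u total=%u\n", init_len, total_len);
            return 0;
    }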
@@ -3257,7 +3308,8 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t num_sge = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
@@ -3271,7 +3323,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
*/
if (scsi_sg_count(scsi_cmnd)) {
/*
- * The driver stores the segment count returned from pci_map_sg
+ * The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
@@ -3364,7 +3416,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
scsi_dma_unmap(scsi_cmnd);
lpfc_cmd->seg_cnt = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9083 Unexpected protection group %i\n",
prot_group_type);
return 2;
@@ -3374,42 +3426,64 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
switch (scsi_get_prot_op(scsi_cmnd)) {
case SCSI_PROT_WRITE_STRIP:
case SCSI_PROT_READ_STRIP:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
break;
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_INSERT:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
break;
case SCSI_PROT_WRITE_PASS:
case SCSI_PROT_READ_PASS:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
break;
}
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
- /*
- * Due to difference in data length between DIF/non-DIF paths,
- * we need to set word 4 of IOCB here
- */
- iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ /* Set first-burst provided it was successfully negotiated */
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ vport->cfg_first_burst_size &&
+ scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ u32 init_len, total_len;
- /*
- * For First burst, we may need to adjust the initial transfer
- * length for DIF
- */
- if (iocb_cmd->un.fcpi.fcpi_XRdy &&
- (fcpdl < vport->cfg_first_burst_size))
- iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+ total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ init_len = min(total_len, vport->cfg_first_burst_size);
+
+ /* Word 4 & 5 */
+ wqe->fcp_iwrite.initial_xfer_len = init_len;
+ wqe->fcp_iwrite.total_xfer_len = total_len;
+ } else {
+ /* Word 4 */
+ wqe->fcp_iwrite.total_xfer_len =
+ be32_to_cpu(fcp_cmnd->fcpDl);
+ }
/*
* If the OAS driver feature is enabled and the lun is enabled for
* OAS, set the oas iocb related flags.
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
- scsi_cmnd->device->hostdata)->oas_enabled)
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ scsi_cmnd->device->hostdata)->oas_enabled) {
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+
+ /* Word 10 */
+ bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+
+ /* Word 7. DIF Flags */
+ if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
+ LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
return 0;
err:
@@ -3420,7 +3494,7 @@ err:
scsi_prot_sg_count(scsi_cmnd),
scsi_cmnd->sc_data_direction);
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9084 Cannot setup S/G List for HBA"
"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
@@ -3470,28 +3544,47 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
}
/**
+ * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
+ * buffer
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ * @tmo: Timeout value for IO
+ *
+ * This routine initializes IOCB/WQE data structure from scsi command
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
+{
+ return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
+}
+
+/**
* lpfc_send_scsi_error_event - Posts an event when there is SCSI error
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
* @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
- * @rsp_iocb: Pointer to response iocb object which reported error.
+ * @fcpi_parm: FCP Initiator parameter.
*
* This function posts an event when there is a SCSI command reporting
* error from the scsi device.
**/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
- uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
struct lpfc_fast_path_event *fast_path_evt = NULL;
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
unsigned long flags;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!pnode)
return;
/* If there is queuefull or busy condition send a scsi event */
@@ -3590,10 +3683,148 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
}
/**
- * lpfc_handler_fcp_err - FCP response handler
+ * lpfc_unblock_requests - allow further commands to be queued.
+ * @phba: pointer to phba object
+ *
+ * For single vport, just call scsi_unblock_requests on physical port.
+ * For multiple vports, send scsi_unblock_requests for all the vports.
+ */
+void
+lpfc_unblock_requests(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ int i;
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !phba->sli4_hba.max_cfg_param.vpi_used) {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ return;
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_block_requests - prevent further commands from being queued.
+ * @phba: pointer to phba object
+ *
+ * For single vport, just call scsi_block_requests on physical port.
+ * For multiple vports, send scsi_block_requests for all the vports.
+ */
+void
+lpfc_block_requests(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ int i;
+
+ if (atomic_read(&phba->cmf_stop_io))
+ return;
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !phba->sli4_hba.max_cfg_param.vpi_used) {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ return;
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
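
Both helpers above share one shape: a fast path when only the physical port exists, otherwise a fan-out applying the same SCSI-host operation to every vport. A minimal model of the fan-out, with a plain array standing in for the vport work array:

    #include <stdio.h>
    #include <stddef.h>

    struct host { int blocked; };

    /* Apply one operation to every host, as the vport loops in
     * lpfc_block_requests()/lpfc_unblock_requests() do. */
    static void for_each_host(struct host *hosts, size_t n,
                              void (*op)(struct host *))
    {
            size_t i;

            for (i = 0; i < n; i++)
                    op(&hosts[i]);
    }

    static void block_host(struct host *h)   { h->blocked = 1; }
    static void unblock_host(struct host *h) { h->blocked = 0; }

    int main(void)
    {
            struct host hosts[3] = { {0}, {0}, {0} };

            for_each_host(hosts, 3, block_host);
            for_each_host(hosts, 3, unblock_host);
            printf("host0 blocked=%d\n", hosts[0].blocked);
            return 0;
    }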
+
+/**
+ * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
+ * @phba: The HBA for which this call is being executed.
+ * @time: The latency of the IO that completed (in ns)
+ * @size: The size of the IO that completed
+ * @shost: SCSI host the IO completed on (NULL for a NVME IO)
+ *
+ * The routine adjusts the various Burst and Bandwidth counters used in
+ * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
+ * that means the IO was never issued to the HBA, so this routine is
+ * just being called to clean up the counter from a previous
+ * lpfc_update_cmf_cmd call.
+ */
+int
+lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
+ uint64_t time, uint32_t size, struct Scsi_Host *shost)
+{
+ struct lpfc_cgn_stat *cgs;
+
+ if (time != LPFC_CGN_NOT_SENT) {
+ /* lat is ns coming in, save latency in us */
+ if (time < 1000)
+ time = 1;
+ else
+ time = div_u64(time + 500, 1000); /* round it */
+
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
+ atomic64_add(size, &cgs->rcv_bytes);
+ atomic64_add(time, &cgs->rx_latency);
+ atomic_inc(&cgs->rx_io_cnt);
+ }
+ return 0;
+}
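
The latency conversion above rounds nanoseconds to the nearest microsecond by adding half a microsecond before dividing, and clamps sub-microsecond completions to 1 so they still contribute to the averages. The same arithmetic in standalone form:

    #include <stdio.h>
    #include <stdint.h>

    /* Round ns to the nearest us; sub-us latencies become 1 so a
     * very fast completion is still counted. */
    static uint64_t ns_to_us_rounded(uint64_t ns)
    {
            if (ns < 1000)
                    return 1;
            return (ns + 500) / 1000;
    }

    int main(void)
    {
            printf("%llu %llu %llu\n",
                   (unsigned long long)ns_to_us_rounded(400),
                   (unsigned long long)ns_to_us_rounded(1499),
                   (unsigned long long)ns_to_us_rounded(1500));
            return 0; /* prints: 1 1 2 */
    }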
+
+/**
+ * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
+ * @phba: The HBA for which this call is being executed.
+ * @size: The size of the IO that will be issued
+ *
+ * The routine adjusts the various Burst and Bandwidth counters used in
+ * Congestion management and E2E.
+ */
+int
+lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
+{
+ uint64_t total;
+ struct lpfc_cgn_stat *cgs;
+ int cpu;
+
+ /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
+ phba->cmf_max_bytes_per_interval) {
+ total = 0;
+ for_each_present_cpu(cpu) {
+ cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+ total += atomic64_read(&cgs->total_bytes);
+ }
+ if (total >= phba->cmf_max_bytes_per_interval) {
+ if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
+ lpfc_block_requests(phba);
+ phba->cmf_last_ts =
+ lpfc_calc_cmf_latency(phba);
+ }
+ atomic_inc(&phba->cmf_busy);
+ return -EBUSY;
+ }
+ if (size > atomic_read(&phba->rx_max_read_cnt))
+ atomic_set(&phba->rx_max_read_cnt, size);
+ }
+
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
+ atomic64_add(size, &cgs->total_bytes);
+ return 0;
+}
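
On the submission side, the per-CPU byte counters are summed against the per-interval budget and the IO is refused with -EBUSY once the budget is spent; keeping the counters per CPU avoids shared atomics on the hot path until the check itself. A userspace model with an array in place of the per-CPU data:

    #include <errno.h>
    #include <stdio.h>
    #include <stdint.h>

    #define NCPU 4

    static uint64_t cpu_bytes[NCPU]; /* stand-in for per-CPU counters */

    /* Charge an IO against the interval budget: sum the per-CPU
     * totals and refuse once the budget is exhausted, as the
     * managed mode above does. */
    static int charge_io(int cpu, uint32_t size, uint64_t budget)
    {
            uint64_t total = 0;
            int i;

            for (i = 0; i < NCPU; i++)
                    total += cpu_bytes[i];
            if (total >= budget)
                    return -EBUSY;
            cpu_bytes[cpu] += size;
            return 0;
    }

    int main(void)
    {
            int rc;

            while ((rc = charge_io(0, 4096, 16384)) == 0)
                    ;
            printf("rejected with %d once the budget was spent\n", rc);
            return 0;
    }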
+
+/**
+ * lpfc_handle_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
- * @rsp_iocb: The response IOCB which contains FCP error.
+ * @fcpi_parm: FCP Initiator parameter.
*
* This routine is called to process response IOCB with status field
* IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
@@ -3601,13 +3832,11 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
**/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_iocbq *rsp_iocb)
+ uint32_t fcpi_parm)
{
- struct lpfc_hba *phba = vport->phba;
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
- uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@@ -3630,17 +3859,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
if (resp_info & RSP_LEN_VALID) {
rsplen = be32_to_cpu(fcprsp->rspRspLen);
if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "2719 Invalid response length: "
- "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
- cmnd->device->id,
- cmnd->device->lun, cmnd->cmnd[0],
- rsplen);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2719 Invalid response length: "
+ "tgt x%x lun x%llx cmnd x%x rsplen "
+ "x%x\n", cmnd->device->id,
+ cmnd->device->lun, cmnd->cmnd[0],
+ rsplen);
host_status = DID_ERROR;
goto out;
}
if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2757 Protocol failure detected during "
"processing of FCP I/O op: "
"tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
@@ -3742,13 +3971,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
*/
} else if (fcpi_parm) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "9029 FCP %s Check Error xri x%x Data: "
+ "9029 FCP %s Check Error Data: "
"x%x x%x x%x x%x x%x\n",
((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
"Read" : "Write"),
- ((phba->sli_rev == LPFC_SLI_REV4) ?
- lpfc_cmd->cur_iocbq.sli4_xritag :
- rsp_iocb->iocb.ulpContext),
fcpDl, be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0], scsi_status);
@@ -3775,7 +4001,342 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
out:
cmnd->result = host_status << 16 | scsi_status;
- lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
+}
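
The closing line composes the 32-bit result the SCSI midlayer expects: the host byte in bits 16-23 and the SCSI status in the low byte. A tiny demonstration of the packing, using the conventional DID_ERROR and CHECK CONDITION values:

    #include <stdio.h>
    #include <stdint.h>

    #define DID_ERROR 0x07                 /* host byte: internal error */
    #define SAM_STAT_CHECK_CONDITION 0x02  /* SCSI status byte */

    int main(void)
    {
            /* host_status << 16 | scsi_status, as in the handler above */
            uint32_t result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;

            printf("result=0x%08x host=0x%x status=0x%x\n",
                   result, (result >> 16) & 0xff, result & 0xff);
            return 0;
    }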
+
+/**
+ * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
+ * @phba: The hba for which this call is being executed.
+ * @pwqeIn: The command WQE for the scsi cmnd.
+ * @pwqeOut: Pointer to driver response WQE object.
+ *
+ * This routine assigns scsi command result by looking into response WQE
+ * status field appropriately. This routine handles QUEUE FULL condition as
+ * well by ramping down device queue depth.
+ **/
+static void
+lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_iocbq *pwqeOut)
+{
+ struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
+ struct lpfc_vport *vport = pwqeIn->vport;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct scsi_cmnd *cmd;
+ unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
+ struct Scsi_Host *shost;
+ u32 logit = LOG_FCP;
+ u32 status, idx;
+ u32 lat;
+ u8 wait_xb_clr = 0;
+
+ /* Sanity check on return of outstanding command */
+ if (!lpfc_cmd) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "9032 Null lpfc_cmd pointer. No "
+ "release, skip completion\n");
+ return;
+ }
+
+ rdata = lpfc_cmd->rdata;
+ ndlp = rdata->pnode;
+
+ /* Sanity check on return of outstanding command */
+ cmd = lpfc_cmd->pCmd;
+ if (!cmd) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "9042 I/O completion: Not an active IO\n");
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return;
+ }
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
+ idx = lpfc_cmd->cur_iocbq.hba_wqidx;
+ if (phba->sli4_hba.hdwq)
+ phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+#endif
+ shost = cmd->device->host;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
+ lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
+
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ if (phba->cfg_fcp_wait_abts_rsp)
+ wait_xb_clr = 1;
+ }
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->prot_data_type) {
+ struct scsi_dif_tuple *src = NULL;
+
+ src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+ /*
+ * Used to restore any changes to protection
+ * data for error injection.
+ */
+ switch (lpfc_cmd->prot_data_type) {
+ case LPFC_INJERR_REFTAG:
+ src->ref_tag =
+ lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_APPTAG:
+ src->app_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_GUARD:
+ src->guard_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ default:
+ break;
+ }
+
+ lpfc_cmd->prot_data = 0;
+ lpfc_cmd->prot_data_type = 0;
+ lpfc_cmd->prot_data_segment = NULL;
+ }
+#endif
+ if (unlikely(lpfc_cmd->status)) {
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+ (lpfc_cmd->result & IOERR_DRVR_MASK))
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ else if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+ !lpfc_cmd->fcp_rsp->rspStatus3 &&
+ (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+ !(vport->cfg_log_verbose & LOG_FCP_UNDER))
+ logit = 0;
+ else
+ logit = LOG_FCP | LOG_FCP_UNDER;
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9034 FCP cmd x%x failed <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter, wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iotag);
+ }
+
+ switch (lpfc_cmd->status) {
+ case IOSTAT_SUCCESS:
+ cmd->result = DID_OK << 16;
+ break;
+ case IOSTAT_FCP_RSP_ERROR:
+ lpfc_handle_fcp_err(vport, lpfc_cmd,
+ pwqeIn->wqe.fcp_iread.total_xfer_len -
+ wcqe->total_data_placed);
+ break;
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (ndlp) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9035 Fabric/Node busy FCP cmd x%x failed"
+ " <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter,
+ wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ break;
+ case IOSTAT_REMOTE_STOP:
+ if (ndlp) {
+ /* This I/O was aborted by the target, we don't
+ * know the rxid and because we did not send the
+ * ABTS we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ lpfc_cmd->cur_iocbq.sli4_lxritag,
+ 0, 0);
+ }
+ fallthrough;
+ case IOSTAT_LOCAL_REJECT:
+ if (lpfc_cmd->result & IOERR_DRVR_MASK)
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = DID_NO_CONNECT << 16;
+ break;
+ }
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+ lpfc_cmd->result == IOERR_LINK_DOWN ||
+ lpfc_cmd->result == IOERR_NO_RESOURCES ||
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
+ lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ break;
+ }
+ if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+ lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+ status == CQE_STATUS_DI_ERROR) {
+ if (scsi_get_prot_op(cmd) !=
+ SCSI_PROT_NORMAL) {
+ /*
+ * This is a response for a BG enabled
+ * cmd. Parse BG error
+ */
+ lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
+ break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9040 non-zero BGSTAT "
+ "on unprotected cmd\n");
+ }
+ }
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9036 Local Reject FCP cmd x%x failed"
+ " <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter,
+ wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ fallthrough;
+ default:
+ if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ cmd->result = DID_ERROR << 16;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "9037 FCP Completion Error: xri %x "
+ "status x%x result x%x [x%x] "
+ "placed x%x\n",
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ lpfc_cmd->status, lpfc_cmd->result,
+ wcqe->parameter,
+ wcqe->total_data_placed);
+ }
+ if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+ u32 *lp = (u32 *)cmd->sense_buffer;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "9039 Iodone <%d/%llu> cmd x%px, error "
+ "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
+ cmd->device->id, cmd->device->lun, cmd,
+ cmd->result, *lp, *(lp + 3),
+ (u64)scsi_get_lba(cmd),
+ cmd->retries, scsi_get_resid(cmd));
+ }
+
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (ndlp) {
+ if (ndlp->cmd_qdepth >
+ atomic_read(&ndlp->cmd_pending) &&
+ (atomic_read(&ndlp->cmd_pending) >
+ LPFC_MIN_TGT_QDEPTH) &&
+ (cmd->cmnd[0] == READ_10 ||
+ cmd->cmnd[0] == WRITE_10))
+ ndlp->cmd_qdepth =
+ atomic_read(&ndlp->cmd_pending);
+
+ ndlp->last_change_time = jiffies;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->ts_cmd_start) {
+ lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
+ lpfc_cmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_cmd);
+ }
+#endif
+ if (likely(!wait_xb_clr))
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock(&lpfc_cmd->buf_lock);
+
+ /* Check if IO qualified for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ cmd->sc_data_direction == DMA_FROM_DEVICE &&
+ (scsi_sg_count(cmd))) {
+ /* Used when calculating average latency */
+ lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
+ lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
+ }
+
+ if (wait_xb_clr)
+ goto out;
+
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ scsi_done(cmd);
+
+ /*
+ * If there is an abort thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock(&lpfc_cmd->buf_lock);
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock(&lpfc_cmd->buf_lock);
+out:
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
}
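
Worth noting in the completion path above: on IOSTAT_FCP_RSP_ERROR the SLI-4 handler reconstructs the FCP initiator parameter that the IOCB used to carry, as the requested transfer length minus the bytes the adapter actually placed. The derivation in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* Reconstruct the FCP initiator parameter (residual) from the
     * WQE request length and the completion's placed-byte count. */
    static uint32_t fcpi_parm(uint32_t total_xfer_len, uint32_t data_placed)
    {
            return total_xfer_len - data_placed;
    }

    int main(void)
    {
            /* 64 KiB requested, 60 KiB placed -> 4 KiB underrun */
            printf("residual=%u\n", fcpi_parm(65536, 61440));
            return 0;
    }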
/**
@@ -3793,7 +4354,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
{
struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *) pIocbIn->context1;
+ (struct lpfc_io_buf *) pIocbIn->io_buf;
struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
@@ -3803,9 +4364,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
int idx;
uint32_t logit = LOG_FCP;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- int cpu;
-#endif
/* Guard against abort handler being called at same time */
spin_lock(&lpfc_cmd->buf_lock);
@@ -3813,7 +4371,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd || !phba) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
return;
@@ -3824,21 +4382,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
- cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
- phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
- }
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
shost = cmd->device->host;
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
- /* pick up SLI4 exhange busy status from HBA */
- if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ /* pick up SLI4 exchange busy status from HBA */
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
+ if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
- else
- lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -3904,7 +4458,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
/* Call FCP RSP handler to determine result */
- lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
+ lpfc_handle_fcp_err(vport, lpfc_cmd,
+ pIocbOut->iocb.un.fcpi.fcpi_parm);
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
@@ -3917,7 +4472,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
fast_path_evt->un.fabric_evt.subcategory =
(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
- if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode) {
memcpy(&fast_path_evt->un.fabric_evt.wwpn,
&pnode->nlp_portname,
sizeof(struct lpfc_name));
@@ -3949,7 +4504,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -3972,7 +4527,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
&& (phba->sli_rev == LPFC_SLI_REV4)
- && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ && pnode) {
/* This IO was aborted by the target, we don't
* know the rxid and because we did not send the
* ABTS we cannot generate an RRQ.
@@ -3981,14 +4536,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->cur_iocbq.sli4_lxritag,
0, 0);
}
- /* fall through */
+ fallthrough;
default:
cmd->result = DID_ERROR << 16;
break;
}
- if (!pnode || !NLP_CHK_NODE_ACT(pnode)
- || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
SAM_STAT_BUSY;
} else
@@ -4005,12 +4559,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
- if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode) {
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4029,15 +4582,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->ts_cmd_start) {
+ lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
+ lpfc_cmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_cmd);
+ }
+#endif
+
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/*
* If there is an abort thread waiting for command completion
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4046,72 +4608,30 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ * @tmo: timeout value for the IO
*
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
- int i, j;
- for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
- i += sizeof(uint32_t), j++) {
- ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
- }
-}
-
-/**
- * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: The scsi command which needs to send.
- * @pnode: Pointer to lpfc_nodelist.
+ * Based on the data-direction of the command, initialize IOCB
+ * in the I/O buffer. Fill in the IOCB fields which are independent
+ * of the scsi buffer.
*
- * This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer for device with SLI3 interface spec.
+ * RETURNS 0 - SUCCESS
**/
-static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_nodelist *pnode)
+static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
{
- struct lpfc_hba *phba = vport->phba;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
- struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
- struct lpfc_sli4_hdw_queue *hdwq = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
int datadir = scsi_cmnd->sc_data_direction;
- int idx;
- uint8_t *ptr;
- bool sli4;
- uint32_t fcpdl;
-
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
- return;
-
- lpfc_cmd->fcp_rsp->rspSnsLen = 0;
- /* clear task management bits */
- lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
-
- int_to_scsilun(lpfc_cmd->pCmd->device->lun,
- &lpfc_cmd->fcp_cmnd->fcp_lun);
-
- ptr = &fcp_cmnd->fcpCdb[0];
- memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
- if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
- ptr += scsi_cmnd->cmd_len;
- memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
- }
-
- fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+ u32 fcpdl;
- sli4 = (phba->sli_rev == LPFC_SLI_REV4);
piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
- idx = lpfc_cmd->hdwq_no;
- if (phba->sli4_hba.hdwq)
- hdwq = &phba->sli4_hba.hdwq[idx];
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -4125,56 +4645,199 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
iocb_cmd->ulpPU = PARM_READ_CHECK;
if (vport->cfg_first_burst_size &&
(pnode->nlp_flag & NLP_FIRSTBURST)) {
+ u32 xrdy_len;
+
fcpdl = scsi_bufflen(scsi_cmnd);
- if (fcpdl < vport->cfg_first_burst_size)
- piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
- else
- piocbq->iocb.un.fcpi.fcpi_XRdy =
- vport->cfg_first_burst_size;
+ xrdy_len = min(fcpdl,
+ vport->cfg_first_burst_size);
+ piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- if (hdwq)
- hdwq->scsi_cstat.output_requests++;
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- if (hdwq)
- hdwq->scsi_cstat.input_requests++;
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- if (hdwq)
- hdwq->scsi_cstat.control_requests++;
}
- if (phba->sli_rev == 3 &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
- lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
- if (sli4)
- piocbq->iocb.ulpContext =
- phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
piocbq->iocb.ulpFCP2Rcvy = 0;
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
- piocbq->context1 = lpfc_cmd;
- piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
- piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+ piocbq->io_buf = lpfc_cmd;
+ if (!piocbq->cmd_cmpl)
+ piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->iocb.ulpTimeout = tmo;
piocbq->vport = vport;
+ return 0;
+}
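
The SLI-3 setup reduces to a three-way choice on the data phase: writes take the IWRITE command with WRITE_DATA control, reads the IREAD command with READ_DATA, and commands without a data phase the ICMND form. Sketched as a plain decision function (the enum values are illustrative, not the driver's CMD_FCP_* opcodes):

    #include <stdio.h>

    enum fcp_cmd { ICMND, IREAD, IWRITE };

    /* Map (has data phase, direction) to the FCP command variant the
     * IOCB is initialized with. */
    static enum fcp_cmd pick_cmd(int has_data, int to_device)
    {
            if (!has_data)
                    return ICMND;
            return to_device ? IWRITE : IREAD;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   pick_cmd(0, 0), pick_cmd(1, 0), pick_cmd(1, 1));
            return 0;
    }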
+
+/**
+ * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ * @tmo: timeout value for the IO
+ *
+ * Based on the data-direction of the command, copy the WQE template
+ * to I/O buffer WQE. Fill in the WQE fields which are independent
+ * of the scsi buffer.
+ *
+ * RETURNS 0 - SUCCESS
+ **/
+static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct lpfc_sli4_hdw_queue *hdwq = NULL;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ u16 idx = lpfc_cmd->hdwq_no;
+ int datadir = scsi_cmnd->sc_data_direction;
+
+ hdwq = &phba->sli4_hba.hdwq[idx];
+
+ /* Initialize 64 bytes only */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither.
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ if (datadir == DMA_TO_DEVICE) {
+ /* From the iwrite template, initialize words 7 - 11 */
+ memcpy(&wqe->words[7],
+ &lpfc_iwrite_cmd_template.words[7],
+ sizeof(uint32_t) * 5);
+
+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
+ if (hdwq)
+ hdwq->scsi_cstat.output_requests++;
+ } else {
+ /* From the iread template, initialize words 7 - 11 */
+ memcpy(&wqe->words[7],
+ &lpfc_iread_cmd_template.words[7],
+ sizeof(uint32_t) * 5);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
+
+ fcp_cmnd->fcpCntl3 = READ_DATA;
+ if (hdwq)
+ hdwq->scsi_cstat.input_requests++;
+
+ /* For a CMF Managed port, iod must be zero'ed */
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_IOD_NONE);
+ }
+ } else {
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
+
+ fcp_cmnd->fcpCntl3 = 0;
+ if (hdwq)
+ hdwq->scsi_cstat.control_requests++;
+ }
+
+ /*
+ * Finish initializing those WQE fields that are independent
+ * of the request_buffer
+ */
+
+ /* Word 3 */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+
+	/* Word 7 */
+ if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+
+ bf_set(wqe_class, &wqe->generic.wqe_com,
+ (pnode->nlp_fcp_info & 0x0f));
+
+ /* Word 8 */
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ pwqeq->vport = vport;
+ pwqeq->io_buf = lpfc_cmd;
+ pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
+ pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
+
+ return 0;
}
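
The template copy above is the heart of the SLI-4 fast path: the
direction-independent WQE words are prebuilt once and memcpy'd per I/O.
A minimal user-space sketch of that pattern, with a simplified 32-word
WQE and made-up bit values standing in for the real lpfc_wqe128 layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQE_WORDS 32                     /* stand-in for the 128-byte WQE */

static uint32_t iread_template[WQE_WORDS];
static uint32_t iwrite_template[WQE_WORDS];

/* One-time init: fill the direction-independent words of each template. */
static void init_templates(void)
{
	iread_template[7] = 0x11111111;   /* e.g. command, class, CT bits */
	iwrite_template[7] = 0x22222222;
}

/* Per-I/O prep: zero the WQE, copy words 7-11 from the matching
 * template, then set only the fields that vary per command. */
static void prep_wqe(uint32_t *wqe, int is_write, uint32_t tmo)
{
	const uint32_t *tmpl = is_write ? iwrite_template : iread_template;

	memset(wqe, 0, sizeof(uint32_t) * WQE_WORDS);
	memcpy(&wqe[7], &tmpl[7], sizeof(uint32_t) * 5);
	wqe[7] |= (tmo & 0xff) << 24;     /* hypothetical timeout field */
}

int main(void)
{
	uint32_t wqe[WQE_WORDS];

	init_templates();
	prep_wqe(wqe, 1, 30);
	printf("word7 = 0x%08x\n", wqe[7]);
	return 0;
}
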
/**
- * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_cmnd - Wrapper func to convert a scsi cmnd to an FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to be sent.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd and iocb data structures from the
+ * scsi command, then calls the SLI-3 or SLI-4 specific routine to finish
+ * preparing the command buffer.
+ **/
+static int
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ u8 *ptr;
+
+ if (!pnode)
+ return 0;
+
+ lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+ /* clear task management bits */
+ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
+
+ int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+ &lpfc_cmd->fcp_cmnd->fcp_lun);
+
+ ptr = &fcp_cmnd->fcpCdb[0];
+ memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+ ptr += scsi_cmnd->cmd_len;
+ memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+ }
+
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+
+ lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
+
+ return 0;
+}
+
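The CDB handling above copies a variable-length CDB into the fixed-size
fcpCdb area and zeroes the tail. A standalone sketch of the same
copy-and-pad idiom, assuming a 16-byte field as a stand-in for
LPFC_FCP_CDB_LEN:

#include <stdio.h>
#include <string.h>

#define FCP_CDB_LEN 16   /* stand-in for LPFC_FCP_CDB_LEN */

static void copy_cdb(unsigned char *dst, const unsigned char *cdb,
		     size_t cdb_len)
{
	/* Copy the live CDB bytes, then zero the tail so stale bytes from
	 * a previous command can never leak into the FCP_CMND payload. */
	memcpy(dst, cdb, cdb_len);
	if (cdb_len < FCP_CDB_LEN)
		memset(dst + cdb_len, 0, FCP_CDB_LEN - cdb_len);
}

int main(void)
{
	unsigned char fcp_cdb[FCP_CDB_LEN];
	unsigned char read10[10] = { 0x28 };   /* READ(10) opcode */

	copy_cdb(fcp_cdb, read10, sizeof(read10));
	printf("last byte: %u\n", fcp_cdb[FCP_CDB_LEN - 1]);   /* prints 0 */
	return 0;
}
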
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
* @lun: Logical unit number.
@@ -4188,10 +4851,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* 1 - Success
**/
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd,
- uint64_t lun,
- uint8_t task_mgmt_cmd)
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
{
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
@@ -4199,8 +4861,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
- ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
return 0;
piocbq = &(lpfc_cmd->cur_iocbq);
@@ -4213,15 +4874,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
- if (vport->phba->sli_rev == 3 &&
- !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- piocb->ulpContext =
- vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- }
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
piocb->ulpPU = 0;
@@ -4237,8 +4893,79 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
} else
piocb->ulpTimeout = lpfc_cmd->timeout;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+ return 1;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
+{
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ struct fcp_cmnd *fcp_cmnd;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ return 0;
+
+ pwqeq->vport = vport;
+	/* Zero the full 128-byte WQE */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
+ fcp_cmnd->fcpCntl3 = 0;
+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+ ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
+ (ndlp->nlp_fcp_info & 0x0f));
+
+ /* ulpTimeout is only one byte */
+ if (lpfc_cmd->timeout > 0xff) {
+ /*
+ * Do not timeout the command at the firmware level.
+ * The driver will provide the timeout mechanism.
+ */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
+ } else {
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
+ }
+
+ lpfc_prep_embed_io(vport->phba, lpfc_cmd);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
return 1;
}
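
The one-byte wqe_tmo clamp above generalizes to any firmware field
narrower than the driver's value. A tiny sketch of the rule: values
above 0xff disable the firmware timer and leave timeout handling to the
driver:

#include <stdint.h>
#include <stdio.h>

/* Value to program into the one-byte firmware timeout field: anything
 * that does not fit becomes 0, meaning "no firmware timeout, the
 * driver's own timer covers the command". */
static uint8_t fw_timeout(unsigned int driver_tmo_secs)
{
	return driver_tmo_secs > 0xff ? 0 : (uint8_t)driver_tmo_secs;
}

int main(void)
{
	printf("%u %u\n", fw_timeout(60), fw_timeout(300));   /* 60 0 */
	return 0;
}
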
@@ -4257,7 +4984,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
- phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
@@ -4265,19 +4991,24 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+ phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+ phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1418 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
@@ -4285,7 +5016,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
}
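
lpfc_scsi_api_table_setup() binds SLI-3 or SLI-4 handlers once so the
hot paths make an indirect call instead of testing sli_rev per I/O. A
compact sketch of that dispatch-table pattern, with hypothetical
handler names:

#include <stdio.h>
#include <errno.h>

enum dev_grp { PCI_DEV_LP, PCI_DEV_OC };

struct hba {
	int (*prep_cmnd_buf)(int io);
	int (*prep_task_mgmt)(int lun);
};

static int prep_cmnd_buf_s3(int io) { return printf("s3 io %d\n", io); }
static int prep_cmnd_buf_s4(int io) { return printf("s4 io %d\n", io); }
static int prep_task_mgmt_s3(int l) { return printf("s3 tm %d\n", l); }
static int prep_task_mgmt_s4(int l) { return printf("s4 tm %d\n", l); }

/* Bind the per-revision handlers once at setup time. */
static int api_table_setup(struct hba *hba, enum dev_grp grp)
{
	switch (grp) {
	case PCI_DEV_LP:
		hba->prep_cmnd_buf = prep_cmnd_buf_s3;
		hba->prep_task_mgmt = prep_task_mgmt_s3;
		break;
	case PCI_DEV_OC:
		hba->prep_cmnd_buf = prep_cmnd_buf_s4;
		hba->prep_task_mgmt = prep_task_mgmt_s4;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

int main(void)
{
	struct hba hba;

	if (!api_table_setup(&hba, PCI_DEV_OC))
		hba.prep_cmnd_buf(1);
	return 0;
}
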
/**
- * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
+ * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
* @phba: The Hba for which this call is being executed.
* @cmdiocbq: Pointer to lpfc_iocbq data structure.
* @rspiocbq: Pointer to lpfc_iocbq data structure.
@@ -4298,8 +5029,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *) cmdiocbq->context1;
+ struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
@@ -4320,7 +5050,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
* 0, successful
*/
int
-lpfc_check_pci_resettable(const struct lpfc_hba *phba)
+lpfc_check_pci_resettable(struct lpfc_hba *phba)
{
const struct pci_dev *pdev = phba->pcidev;
struct pci_dev *ptr = NULL;
@@ -4337,14 +5067,10 @@ lpfc_check_pci_resettable(const struct lpfc_hba *phba)
}
/* Check for valid Emulex Device ID */
- switch (ptr->device) {
- case PCI_DEVICE_ID_LANCER_FC:
- case PCI_DEVICE_ID_LANCER_G6_FC:
- case PCI_DEVICE_ID_LANCER_G7_FC:
- break;
- default:
+ if (phba->sli_rev != LPFC_SLI_REV4 ||
+ phba->hba_flag & HBA_FCOE_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "8347 Invalid device found: "
+ "8347 Incapable PCI reset device: "
"0x%04x\n", ptr->device);
return -EBADSLT;
}
@@ -4430,7 +5156,7 @@ buffer_done:
}
/**
- * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
+ * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
* @phba: The Hba for which this call is being executed.
*
* This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
@@ -4459,12 +5185,11 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
/**
* lpfc_poll_timeout - Restart polling timer
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer construct where lpfc_hba data structure pointer is obtained.
*
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
**/
-
void lpfc_poll_timeout(struct timer_list *t)
{
struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
@@ -4478,10 +5203,24 @@ void lpfc_poll_timeout(struct timer_list *t)
}
}
+/*
+ * lpfc_is_command_vm_io - get the UUID from blk cgroup
+ * @cmd: Pointer to scsi_cmnd data structure
+ * Returns UUID if present, otherwise NULL
+ */
+static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
+{
+ struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
+
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
+ return NULL;
+ return blkcg_get_fc_appid(bio);
+}
+
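lpfc_is_command_vm_io() leans on IS_ENABLED() so the appid lookup
compiles away when CONFIG_BLK_CGROUP_FC_APPID is off. A user-space
sketch of the same compile-time gate, with a made-up FEATURE_FC_APPID
macro standing in for the kconfig test:

#include <stdio.h>

/* Compile-time feature gate, standing in for IS_ENABLED(CONFIG_...);
 * set to 1 to emulate CONFIG_BLK_CGROUP_FC_APPID=y. */
#define FEATURE_FC_APPID 0

static const char *get_app_id(const char *bio_uuid)
{
	/* With the feature compiled out the condition is constant, so the
	 * compiler folds the function down to "return NULL" and the lookup
	 * below becomes dead code. */
	if (!FEATURE_FC_APPID || !bio_uuid)
		return NULL;
	return bio_uuid;   /* placeholder for the real blkcg lookup */
}

int main(void)
{
	printf("%s\n", get_app_id("a-uuid") ? "tagged" : "untagged");
	return 0;
}
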
/**
* lpfc_queuecommand - scsi_host_template queuecommand entry point
+ * @shost: kernel scsi host pointer.
* @cmnd: Pointer to scsi_cmnd data structure.
- * @done: Pointer to done routine.
*
 * Driver registers this routine with the scsi midlayer to submit a @cmnd to
 * process. This routine prepares an IOCB from the scsi command and provides
 * it to the firmware.
@@ -4496,15 +5235,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cur_iocbq = NULL;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_io_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- int cpu;
-#endif
+ u8 *uuid = NULL;
+ uint64_t start;
+ start = ktime_get_ns();
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
/* sanity check on references */
@@ -4521,7 +5261,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
" op:%02x str=%s without registering for"
" BlockGuard - Rejecting command\n",
@@ -4534,8 +5274,19 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- goto out_tgt_busy;
+ if (!ndlp)
+ goto out_tgt_busy1;
+
+ /* Check if IO qualifies for CMF */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+ cmnd->sc_data_direction == DMA_FROM_DEVICE &&
+ (scsi_sg_count(cmnd))) {
+ /* Latency start time saved in rx_cmd_start later in routine */
+ err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
+ if (err)
+ goto out_tgt_busy1;
+ }
+
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
@@ -4563,7 +5314,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
ndlp->nlp_portname.u.wwn[5],
ndlp->nlp_portname.u.wwn[6],
ndlp->nlp_portname.u.wwn[7]);
- goto out_tgt_busy;
+ goto out_tgt_busy2;
}
}
@@ -4576,7 +5327,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"IO busied\n");
goto out_host_busy;
}
+ lpfc_cmd->rx_cmd_start = start;
+ cur_iocbq = &lpfc_cmd->cur_iocbq;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@@ -4584,18 +5337,23 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
+ cur_iocbq->cmd_cmpl = NULL;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+ err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ if (err)
+ goto out_host_busy_release_buf;
+
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport,
KERN_INFO, LOG_SCSI_CMD,
"9033 BLKGRD: rcvd %s cmd:x%x "
- "sector x%llx cnt %u pt %x\n",
+ "reftag x%x cnt %u pt %x\n",
dif_op_str[scsi_get_prot_op(cmnd)],
cmnd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request),
+ scsi_prot_ref_tag(cmnd),
+ scsi_logical_block_count(cmnd),
(cmnd->cmnd[1]>>5));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -4604,10 +5362,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_SCSI_CMD,
"9038 BLKGRD: rcvd PROT_NORMAL cmd: "
- "x%x sector x%llx cnt %u pt %x\n",
+ "x%x reftag x%x cnt %u pt %x\n",
cmnd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request),
+ scsi_prot_ref_tag(cmnd),
+ scsi_logical_block_count(cmnd),
(cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -4621,40 +5379,66 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
goto out_host_busy_free_buf;
}
- lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* is the I/O generated by a VM, get the associated virtual */
+ /* entity id */
+ uuid = lpfc_is_command_vm_io(cmnd);
+
+ if (uuid) {
+ err = lpfc_vmid_get_appid(vport, uuid,
+ cmnd->sc_data_direction,
+ (union lpfc_vmid_io_tag *)
+ &cur_iocbq->vmid_tag);
+ if (!err)
+ cur_iocbq->cmd_flag |= LPFC_IO_VMID;
+ }
+ }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
- cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- struct lpfc_sli4_hdw_queue *hdwq =
- &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
- hdwq->cpucheck_xmt_io[cpu]++;
- }
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+#endif
+ /* Issue I/O to adapter */
+ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
+ SLI_IOCB_RET_IOCB);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (start) {
+ lpfc_cmd->ts_cmd_start = start;
+ lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
+ lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
+ } else {
+ lpfc_cmd->ts_cmd_start = 0;
}
#endif
- err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
- &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "3376 FCP could not issue IOCB err %x"
+ "3376 FCP could not issue iocb err %x "
"FCP cmd x%x <%d/%llu> "
"sid: x%x did: x%x oxid: x%x "
"Data: x%x x%x x%x x%x\n",
err, cmnd->cmnd[0],
cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : (u64) -1,
+ cmnd->device ? cmnd->device->lun : (u64)-1,
vport->fc_myDID, ndlp->nlp_DID,
phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
- lpfc_cmd->cur_iocbq.iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
- lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)
- (cmnd->request->timeout / 1000));
+ cur_iocbq->sli4_xritag : 0xffff,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
+ cur_iocbq->iocb.ulpContext,
+ cur_iocbq->iotag,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ bf_get(wqe_tmo,
+ &cur_iocbq->wqe.generic.wqe_com) :
+ cur_iocbq->iocb.ulpTimeout,
+ (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
goto out_host_busy_free_buf;
}
+
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
@@ -4683,21 +5467,54 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
}
}
+ out_host_busy_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+ shost);
return SCSI_MLQUEUE_HOST_BUSY;
- out_tgt_busy:
+ out_tgt_busy2:
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+ shost);
+ out_tgt_busy1:
return SCSI_MLQUEUE_TARGET_BUSY;
out_fail_command_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
+ lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+ shost);
out_fail_command:
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
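
The out_tgt_busy1/out_tgt_busy2 label split above exists so the CMF
byte charge taken by lpfc_update_cmf_cmd() is undone on exactly the
paths that took it. A reduced sketch of that charge/unwind shape, with
invented names and a plain counter standing in for the congestion
stats:

#include <stdio.h>

static long inflight_bytes;   /* stand-in for the CMF byte counter */

static int submit(int qualifies, int bytes, int prep_ok, int issue_ok)
{
	if (!qualifies)
		goto out_busy1;          /* nothing was charged yet */

	inflight_bytes += bytes;         /* charge the I/O up front */

	if (!prep_ok || !issue_ok)
		goto out_busy2;          /* must undo the charge */

	return 0;   /* the completion handler undoes the charge later */

out_busy2:
	inflight_bytes -= bytes;         /* mirror of the charge above */
out_busy1:
	return -1;
}

int main(void)
{
	submit(1, 4096, 1, 0);
	printf("inflight after failed submit: %ld\n", inflight_bytes); /* 0 */
	return 0;
}
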
+/*
+ * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
+ * @vport: The virtual port for which this call is being executed.
+ */
+void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
+{
+ u32 bucket;
+ struct lpfc_vmid *cur;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ del_timer_sync(&vport->phba->inactive_vmid_poll);
+
+ kfree(vport->qfpa_res);
+ kfree(vport->vmid_priority.vmid_range);
+ kfree(vport->vmid);
+
+ if (!hash_empty(vport->hash_table))
+ hash_for_each(vport->hash_table, bucket, cur, hnode)
+ hash_del(&cur->hnode);
+
+ vport->qfpa_res = NULL;
+ vport->vmid_priority.vmid_range = NULL;
+ vport->vmid = NULL;
+ vport->cur_vmid_cnt = 0;
+}
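
The drain in lpfc_vmid_vport_cleanup() visits every hash bucket and
unlinks every entry. An equivalent standalone sketch using a plain
chained table in place of the kernel's hashtable helpers:

#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 8

struct node {
	struct node *next;
	int key;
};

static struct node *table[NR_BUCKETS];

/* Walk every bucket and unlink every entry -- the same shape as the
 * hash_for_each()/hash_del() drain above, minus the kernel helpers. */
static void drain(void)
{
	for (int b = 0; b < NR_BUCKETS; b++) {
		struct node *cur = table[b];

		while (cur) {
			struct node *next = cur->next;

			free(cur);
			cur = next;
		}
		table[b] = NULL;
	}
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	n->next = NULL;
	n->key = 42;
	table[n->key % NR_BUCKETS] = n;
	drain();
	printf("bucket empty: %d\n", table[42 % NR_BUCKETS] == NULL);
	return 0;
}
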
/**
* lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
@@ -4713,19 +5530,19 @@ static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
- struct lpfc_iocbq *abtsiocb;
struct lpfc_io_buf *lpfc_cmd;
- IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4 = NULL;
+ struct lpfc_sli_ring *pring = NULL;
int ret_val;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
@@ -4733,25 +5550,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (!lpfc_cmd)
return ret;
- spin_lock_irqsave(&phba->hbalock, flags);
+ /* Guard against IO completion being called at same time */
+ spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
+
+ spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
ret = FAILED;
- goto out_unlock;
+ goto out_unlock_hba;
}
- /* Guard against IO completion being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
if (!lpfc_cmd->pCmd) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
iocb = &lpfc_cmd->cur_iocbq;
@@ -4759,12 +5576,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
spin_lock(&pring_s4->ring_lock);
}
/* the command is in process of being cancelled */
- if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
"cancelled by LLD.\n");
@@ -4784,91 +5601,53 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock_ring;
}
- BUG_ON(iocb->context1 != lpfc_cmd);
+ WARN_ON(iocb->io_buf != lpfc_cmd);
/* abort issued in recovery is still in progress */
- if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
goto wait_for_cmpl;
}
- abtsiocb = __lpfc_sli_get_iocbq(phba);
- if (abtsiocb == NULL) {
- ret = FAILED;
- goto out_unlock_ring;
- }
-
- /* Indicate the IO is being aborted by the driver. */
- iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- /*
- * The scsi command can not be in txq and it is in flight because the
- * pCmd is still pointig at the SCSI command we have to abort. There
- * is no need to search the txcmplq. Just send an abort to the FW.
- */
-
- cmd = &iocb->iocb;
- icmd = &abtsiocb->iocb;
- icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
- icmd->un.acxri.abortContextTag = cmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
- else
- icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-
- icmd->ulpLe = 1;
- icmd->ulpClass = cmd->ulpClass;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->hba_wqidx = iocb->hba_wqidx;
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocb->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
-
- if (lpfc_is_link_up(phba))
- icmd->ulpCommand = CMD_ABORT_XRI_CN;
- else
- icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-
- abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- abtsiocb->vport = vport;
lpfc_cmd->waitq = &waitq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- /* Note: both hbalock and ring_lock must be set here */
- ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
- abtsiocb, 0);
spin_unlock(&pring_s4->ring_lock);
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+ lpfc_sli_abort_fcp_cmpl);
} else {
- ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
- abtsiocb, 0);
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
}
- if (ret_val == IOCB_ERROR) {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
+ if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
- iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
- goto out;
+ goto out_unlock_hba;
}
/* no longer need the lock after this point */
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
- /* Wait for abort to complete */
+ /*
+ * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
+ * for abort to complete.
+ */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
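
The reworked abort path above takes lpfc_cmd->buf_lock before
phba->hbalock and releases in the reverse order on every exit. A
user-space sketch of that single consistent lock order, with pthread
mutexes standing in for the kernel spinlocks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every path takes the per-buffer lock first and the adapter-wide lock
 * second, releasing in reverse order; one consistent order across the
 * abort and completion paths is what prevents an AB-BA deadlock. */
static void abort_path(void)
{
	pthread_mutex_lock(&buf_lock);
	pthread_mutex_lock(&hba_lock);
	/* ... inspect and claim the in-flight command ... */
	pthread_mutex_unlock(&hba_lock);
	pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
	abort_path();
	printf("ordered acquire/release done\n");
	return 0;
}
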
@@ -4877,7 +5656,7 @@ wait_for_cmpl:
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0748 abort handler timed out waiting "
"for aborting I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %llu\n",
@@ -4893,10 +5672,9 @@ wait_for_cmpl:
out_unlock_ring:
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
-out_unlock_buf:
- spin_unlock(&lpfc_cmd->buf_lock);
-out_unlock:
- spin_unlock_irqrestore(&phba->hbalock, flags);
+out_unlock_hba:
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
@@ -5003,7 +5781,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
- * @rdata: Pointer to remote port local data
+ * @rport: Pointer to remote port
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
@@ -5016,7 +5794,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
unsigned int tgt_id, uint64_t lun_id,
uint8_t task_mgmt_cmd)
{
@@ -5029,21 +5807,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
int ret;
int status;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
- if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+ rdata = rport->dd_data;
+ if (!rdata || !rdata->pnode)
return FAILED;
pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
- lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->pCmd = NULL;
lpfc_cmd->ndlp = pnode;
- status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
- task_mgmt_cmd);
+ status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+ task_mgmt_cmd);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
@@ -5055,38 +5833,41 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->vport = vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %llu "
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
- iocbq->iocb_flag);
+ iocbq->cmd_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if ((status != IOCB_SUCCESS) ||
- (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
+ (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
if (status != IOCB_SUCCESS ||
- iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0727 TMF %s to TGT %d LUN %llu "
- "failed (%d, %d) iocb_flag x%x\n",
+ "failed (%d, %d) cmd_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id,
- iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4],
- iocbq->iocb_flag);
+ get_job_ulpstatus(phba, iocbqrsp),
+ get_job_word4(phba, iocbqrsp),
+ iocbq->cmd_flag);
		/* if status == IOCB_SUCCESS here, ulpStatus was not IOSTAT_SUCCESS */
if (status == IOCB_SUCCESS) {
- if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ if (get_job_ulpstatus(phba, iocbqrsp) ==
+ IOSTAT_FCP_RSP_ERROR)
/* Something in the FCP_RSP was invalid.
* Check conditions */
ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
else
ret = FAILED;
- } else if (status == IOCB_TIMEDOUT) {
+ } else if ((status == IOCB_TIMEDOUT) ||
+ (status == IOCB_ABORTED)) {
ret = TIMEOUT_ERROR;
} else {
ret = FAILED;
@@ -5096,7 +5877,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_sli_release_iocbq(phba, iocbqrsp);
- if (ret != TIMEOUT_ERROR)
+ if (status != IOCB_TIMEDOUT)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return ret;
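
The tail of lpfc_send_taskmgmt() folds two status sources, the issue
status and the ULP status from the response, into one eh result. A
condensed sketch of that triage with invented enums; the real code
additionally inspects the FCP_RSP via lpfc_check_fcp_rsp() in the
TMF_CHECK_RSP case:

#include <stdio.h>

enum issue_status { S_SUCCESS, S_TIMEDOUT, S_ABORTED, S_ERROR };
enum ulp_status   { U_SUCCESS, U_FCP_RSP_ERROR, U_OTHER };
enum tmf_result   { TMF_OK, TMF_FAILED, TMF_TIMEOUT, TMF_CHECK_RSP };

static enum tmf_result tmf_triage(enum issue_status st, enum ulp_status ulp)
{
	if (st == S_SUCCESS && ulp == U_SUCCESS)
		return TMF_OK;
	if (st == S_SUCCESS)	/* issued fine, response carried an error */
		return ulp == U_FCP_RSP_ERROR ? TMF_CHECK_RSP : TMF_FAILED;
	if (st == S_TIMEDOUT || st == S_ABORTED)
		return TMF_TIMEOUT;
	return TMF_FAILED;
}

int main(void)
{
	printf("%d %d\n", tmf_triage(S_SUCCESS, U_FCP_RSP_ERROR),
	       tmf_triage(S_ABORTED, U_OTHER));   /* 3 2 */
	return 0;
}
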
@@ -5105,7 +5886,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
/**
 * lpfc_chk_tgt_mapped - Wait for the scsi target to become present and mapped
* @vport: The virtual port to check on
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to fc_rport data structure.
*
* This routine delays until the scsi target (aka rport) for the
* command exists (is present and logged in) or we declare it non-existent.
@@ -5115,37 +5896,37 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
* 0x2002 - Success
**/
static int
-lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
+ struct lpfc_nodelist *pnode = NULL;
unsigned long later;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
+
/*
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
*/
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies)) {
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!pnode)
return FAILED;
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata)
return FAILED;
pnode = rdata->pnode;
}
- if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
- (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
return FAILED;
return SUCCESS;
}
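
lpfc_chk_tgt_mapped() is a bounded poll: re-check the node state every
500 ms until it is MAPPED or 2 x devloss_tmo expires. The same loop
shape in user space, with wall-clock time standing in for
jiffies/time_after():

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Poll a condition until it holds or a deadline passes, sleeping
 * between checks. */
static int wait_mapped(int (*mapped)(void), int timeout_secs)
{
	time_t later = time(NULL) + timeout_secs;

	while (time(NULL) < later) {
		if (mapped())
			return 0;            /* SUCCESS */
		usleep(500 * 1000);          /* 500 ms between polls */
	}
	return mapped() ? 0 : -1;            /* final re-check, then FAILED */
}

static int is_mapped(void)
{
	return 1;
}

int main(void)
{
	printf("%d\n", wait_mapped(is_mapped, 2));
	return 0;
}
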
@@ -5185,7 +5966,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
}
if (cnt) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0724 I/O flush failure for context %s : cnt x%x\n",
((context == LPFC_CTX_LUN) ? "LUN" :
((context == LPFC_CTX_TGT) ? "TGT" :
@@ -5211,6 +5992,7 @@ static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -5218,22 +6000,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
uint64_t lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ u32 logit = LOG_FCP;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rport)
+ return FAILED;
+
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0798 Device Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0721 Device Reset rport failure: rdata x%px\n", rdata);
return FAILED;
}
@@ -5247,10 +6033,12 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_LUN_RESET);
+ if (status != SUCCESS)
+ logit = LOG_TRACE_EVENT;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
"0713 SCSI layer issued Device Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
@@ -5282,6 +6070,7 @@ static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -5289,28 +6078,35 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
uint64_t lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ u32 logit = LOG_FCP;
+ u32 dev_loss_tmo = vport->cfg_devloss_tmo;
+ unsigned long flags;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rport)
+ return FAILED;
+
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0799 Target Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&pnode->lock, flags);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&pnode->lock, flags);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
@@ -5326,10 +6122,49 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_TARGET_RESET);
+ if (status != SUCCESS) {
+ logit = LOG_TRACE_EVENT;
+
+ /* Issue LOGO, if no LOGO is outstanding */
+ spin_lock_irqsave(&pnode->lock, flags);
+ if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
+ !pnode->logo_waitq) {
+ pnode->logo_waitq = &waitq;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ pnode->nlp_flag |= NLP_ISSUE_LOGO;
+ pnode->save_flags |= NLP_WAIT_FOR_LOGO;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ lpfc_unreg_rpi(vport, pnode);
+ wait_event_timeout(waitq,
+ (!(pnode->save_flags &
+ NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(dev_loss_tmo *
+ 1000));
+
+ if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
+ "0725 SCSI layer TGTRST "
+ "failed & LOGO TMO (%d, %llu) "
+ "return x%x\n",
+ tgt_id, lun_id, status);
+ spin_lock_irqsave(&pnode->lock, flags);
+ pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ } else {
+ spin_lock_irqsave(&pnode->lock, flags);
+ }
+ pnode->logo_waitq = NULL;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = SUCCESS;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ } else {
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = FAILED;
+ }
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
"0723 SCSI layer issued Target Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
@@ -5346,93 +6181,6 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
}
/**
- * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
- * @cmnd: Pointer to scsi_cmnd data structure.
- *
- * This routine does target reset to all targets on @cmnd->device->host.
- * This emulates Parallel SCSI Bus Reset Semantics.
- *
- * Return code :
- * 0x2003 - Error
- * 0x2002 - Success
- **/
-static int
-lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
-{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_scsi_event_header scsi_event;
- int match;
- int ret = SUCCESS, status, i;
-
- scsi_event.event_type = FC_REG_SCSI_EVENT;
- scsi_event.subcategory = LPFC_EVENT_BUSRESET;
- scsi_event.lun = 0;
- memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
- memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
-
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
-
- status = fc_block_scsi_eh(cmnd);
- if (status != 0 && status != SUCCESS)
- return status;
-
- /*
- * Since the driver manages a single bus device, reset all
- * targets known to the driver. Should any target reset
- * fail, this routine returns failure to the midlayer.
- */
- for (i = 0; i < LPFC_MAX_TARGET; i++) {
- /* Search for mapped node by target ID */
- match = 0;
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (vport->phba->cfg_fcp2_no_tgt_reset &&
- (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
- continue;
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
- ndlp->nlp_sid == i &&
- ndlp->rport &&
- ndlp->nlp_type & NLP_FCP_TARGET) {
- match = 1;
- break;
- }
- }
- spin_unlock_irq(shost->host_lock);
- if (!match)
- continue;
-
- status = lpfc_send_taskmgmt(vport, cmnd,
- i, 0, FCP_TARGET_RESET);
-
- if (status != SUCCESS) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0700 Bus Reset on target %d failed\n",
- i);
- ret = FAILED;
- }
- }
- /*
- * We have to clean up i/o as : they may be orphaned by the TMFs
- * above; or if any of the TMFs failed, they may be in an
- * indeterminate state.
- * We will report success if all the i/o aborts successfully.
- */
-
- status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
- if (status != SUCCESS)
- ret = FAILED;
-
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
- return ret;
-}
-
-/**
* lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
* @cmnd: Pointer to scsi_cmnd data structure.
*
@@ -5465,6 +6213,13 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
if (rc)
goto error;
+ /* Wait for successful restart of adapter */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ goto error;
+ }
+
rc = lpfc_online(phba);
if (rc)
goto error;
@@ -5473,7 +6228,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
return ret;
error:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3323 Failed host reset\n");
lpfc_unblock_mgmt_io(phba);
return FAILED;
@@ -5584,7 +6339,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
if (num_to_alloc != num_allocated) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0708 Allocation request of %d "
"command buffers did not succeed. "
"Allocated %d buffers.\n",
@@ -5652,10 +6407,11 @@ lpfc_slave_destroy(struct scsi_device *sdev)
/**
* lpfc_create_device_data - creates and initializes device data structure for OAS
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun on target
+ * @pri: Priority
* @atomic_create: Flag to indicate if memory should be allocated using the
* GFP_ATOMIC flag or not.
*
@@ -5705,7 +6461,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_delete_device_data - frees a device data structure for OAS
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @lun_info: Pointer to device data structure to free.
*
* This routine frees the previously allocated device data structure passed.
@@ -5728,7 +6484,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
/**
* __lpfc_get_device_data - returns the device data for the specified lun
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @list: Point to list to search.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
@@ -5770,7 +6526,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
/**
* lpfc_find_next_oas_lun - searches for the next oas lun
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @starting_lun: Pointer to the lun to start searching for
@@ -5778,6 +6534,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
* @found_target_wwpn: Pointer to the found lun's target wwpn information
* @found_lun: Pointer to the found lun.
* @found_lun_status: Pointer to status of the found lun.
+ * @found_lun_pri: Pointer to priority of the found lun.
*
* This routine searches the luns list for the specified lun
* or the first lun for the vport/target. If the vport wwpn contains
@@ -5872,10 +6629,11 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_enable_oas_lun - enables a lun for OAS operations
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun
+ * @pri: Priority
*
 * This routine enables a lun for oas operations. The routine does so by
 * doing the following:
@@ -5932,10 +6690,11 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_disable_oas_lun - disables a lun for OAS operations
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun
+ * @pri: Priority
*
 * This routine disables a lun for oas operations. The routine does so by
 * doing the following:
@@ -5987,12 +6746,6 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
static int
-lpfc_no_handler(struct scsi_cmnd *cmnd)
-{
- return FAILED;
-}
-
-static int
lpfc_no_slave(struct scsi_device *sdev)
{
return -ENODEV;
@@ -6004,46 +6757,16 @@ struct scsi_host_template lpfc_template_nvme = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .eh_abort_handler = lpfc_no_handler,
- .eh_device_reset_handler = lpfc_no_handler,
- .eh_target_reset_handler = lpfc_no_handler,
- .eh_bus_reset_handler = lpfc_no_handler,
- .eh_host_reset_handler = lpfc_no_handler,
.slave_alloc = lpfc_no_slave,
.slave_configure = lpfc_no_slave,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = 1,
.cmd_per_lun = 1,
- .shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
- .vendor_id = LPFC_NL_VENDOR_ID,
- .track_queue_depth = 0,
-};
-
-struct scsi_host_template lpfc_template_no_hr = {
- .module = THIS_MODULE,
- .name = LPFC_DRIVER_NAME,
- .proc_name = LPFC_DRIVER_NAME,
- .info = lpfc_info,
- .queuecommand = lpfc_queuecommand,
- .eh_timed_out = fc_eh_timed_out,
- .eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler = lpfc_device_reset_handler,
- .eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
- .scan_finished = lpfc_scan_finished,
- .this_id = -1,
- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
- .cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_hba_attrs,
+ .shost_groups = lpfc_hba_groups,
.max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
- .change_queue_depth = scsi_change_queue_depth,
- .track_queue_depth = 1,
+ .track_queue_depth = 0,
};
struct scsi_host_template lpfc_template = {
@@ -6053,10 +6776,10 @@ struct scsi_host_template lpfc_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
@@ -6065,8 +6788,8 @@ struct scsi_host_template lpfc_template = {
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .shost_groups = lpfc_hba_groups,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
@@ -6079,9 +6802,12 @@ struct scsi_host_template lpfc_vport_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = NULL,
+ .eh_host_reset_handler = NULL,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
@@ -6089,8 +6815,9 @@ struct scsi_host_template lpfc_vport_template = {
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_vport_attrs,
- .max_sectors = 0xFFFF,
+ .shost_groups = lpfc_vport_groups,
+ .max_sectors = 0xFFFFFFFF,
+ .vendor_id = 0,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index f76667b7da7b..eae56944f31b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -126,10 +126,6 @@ struct fcp_cmnd {
};
-struct lpfc_scsicmd_bkt {
- uint32_t cmd_count;
-};
-
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
@@ -142,6 +138,10 @@ struct lpfc_scsicmd_bkt {
#define FC_PORTSPEED_128GBIT 0x2000
#endif
+#ifndef FC_PORTSPEED_256GBIT
+#define FC_PORTSPEED_256GBIT 0x4000
+#endif
+
#define TXRDY_PAYLOAD_LEN 12
/* For sysfs/debugfs tmp string max len */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 64002b0cb02d..99d06dc7ddf6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -35,12 +35,11 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
+#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -50,7 +49,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
@@ -72,8 +70,9 @@ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint8_t *, uint32_t *);
-static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
- struct lpfc_iocbq *);
+static struct lpfc_iocbq *
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
@@ -91,11 +90,134 @@ static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct lpfc_cqe *cqe);
+static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pwqeq,
+ struct lpfc_sglq *sglq);
-static IOCB_t *
-lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+union lpfc_wqe128 lpfc_iread_cmd_template;
+union lpfc_wqe128 lpfc_iwrite_cmd_template;
+union lpfc_wqe128 lpfc_icmnd_cmd_template;
+
+/* Setup WQE templates for IOs */
+void lpfc_wqe_cmd_template(void)
{
- return &iocbq->iocb;
+ union lpfc_wqe128 *wqe;
+
+ /* IREAD template */
+ wqe = &lpfc_iread_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - cmd_buff_len, payload_offset_len is zero */
+
+ /* Word 4 - total_xfer_len is variable */
+
+ /* Word 5 - is zero */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
+ bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
+
+ /* Word 11 - pbde is variable */
+ bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
+ bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
+
+ /* Word 12 - is zero */
+
+ /* Word 13, 14, 15 - PBDE is variable */
+
+ /* IWRITE template */
+ wqe = &lpfc_iwrite_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - cmd_buff_len, payload_offset_len is zero */
+
+ /* Word 4 - total_xfer_len is variable */
+
+ /* Word 5 - initial_xfer_len is variable */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
+ bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+
+ /* Word 11 - pbde is variable */
+ bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
+ bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
+
+ /* Word 12 - is zero */
+
+ /* Word 13, 14, 15 - PBDE is variable */
+
+ /* ICMND template */
+ wqe = &lpfc_icmnd_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - payload_offset_len is variable */
+
+ /* Word 4, 5 - is zero */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
+ bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
+
+ /* Word 12, 13, 14, 15 - is zero */
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
@@ -152,6 +274,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
/* sanity check on queue memory */
if (unlikely(!q))
return -ENOMEM;
+
temp_wqe = lpfc_sli4_qe(q, q->host_index);
/* If the host has not yet processed the next entry then we are done */
@@ -230,31 +353,22 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
* This routine will update the HBA index of a queue to reflect consumption of
* Work Queue Entries by the HBA. When the HBA indicates that it has consumed
* an entry the host calls this function to update the queue's internal
- * pointers. This routine returns the number of entries that were consumed by
- * the HBA.
+ * pointers.
**/
-static uint32_t
+static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
- uint32_t released = 0;
-
/* sanity check on queue memory */
if (unlikely(!q))
- return 0;
+ return;
- if (q->hba_index == index)
- return 0;
- do {
- q->hba_index = ((q->hba_index + 1) % q->entry_count);
- released++;
- } while (q->hba_index != index);
- return released;
+ q->hba_index = index;
}
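
The lpfc_sli4_wq_release() rewrite above replaces a step-and-count loop
with a direct index assignment, which is safe because no caller
consumed the count. A tiny sketch of the resulting O(1) release:

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t hba_index;
	uint32_t entry_count;
};

/* The old release stepped hba_index forward one slot at a time just to
 * count consumed entries; adopting the hardware-reported index directly
 * is equivalent when the count is unused. */
static void wq_release(struct ring *q, uint32_t index)
{
	q->hba_index = index;   /* caller guarantees index < entry_count */
}

int main(void)
{
	struct ring q = { .hba_index = 3, .entry_count = 64 };

	wq_release(&q, 17);
	printf("hba_index = %u\n", q.hba_index);
	return 0;
}
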
/**
* lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
* @q: The Mailbox Queue to operate on.
- * @wqe: The Mailbox Queue Entry to put on the Work queue.
+ * @mqe: The Mailbox Queue Entry to put on the Work queue.
*
* This routine will copy the contents of @mqe to the next available entry on
* the @q. This function will then ring the Work Queue Doorbell to signal the
@@ -547,7 +661,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
if (count > eq->EQ_max_eqe)
eq->EQ_max_eqe = count;
- eq->queue_claimed = 0;
+ xchg(&eq->queue_claimed, 0);
rearm_and_exit:
/* Always clear the EQ. */
@@ -668,10 +782,8 @@ lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
-/**
+/*
* lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
- * @q: The Header Receive Queue to operate on.
- * @wqe: The Receive Queue Entry to put on the Receive queue.
*
* This routine will copy the contents of @wqe to the next available entry on
* the @q. This function will then ring the Receive Queue Doorbell to signal the
@@ -734,9 +846,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
return hq_put_index;
}
-/**
+/*
* lpfc_sli4_rq_release - Updates internal hba index for RQ
- * @q: The Header Receive Queue to operate on.
*
* This routine will update the HBA index of a queue to reflect consumption of
* one Receive Queue Entry by the HBA. When the HBA indicates that it has
@@ -822,7 +933,7 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* @phba: Pointer to HBA context object.
* @xritag: XRI value.
*
- * This function clears the sglq pointer from the array of acive
+ * This function clears the sglq pointer from the array of active
* sglq's. The xritag that is passed in is used to index into the
* array. Before the xritag can be used it needs to be adjusted
* by subtracting the xribase.
@@ -844,7 +955,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
* @phba: Pointer to HBA context object.
* @xritag: XRI value.
*
- * This function returns the sglq pointer from the array of acive
+ * This function returns the sglq pointer from the array of active
* sglq's. The xritag that is passed in is used to index into the
* array. Before the xritag can be used it needs to be adjusted
* by subtracting the xribase.
@@ -874,16 +985,10 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
{
struct lpfc_nodelist *ndlp = NULL;
- if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ /* Look up the DID to verify it is still active on this vport */
+ if (rrq->vport)
ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
- /* The target DID could have been swapped (cable swap)
- * we should use the ndlp from the findnode if it is
- * available.
- */
- if ((!ndlp) && rrq->ndlp)
- ndlp = rrq->ndlp;
-
if (!ndlp)
goto out;
@@ -1005,9 +1110,14 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
spin_lock_irqsave(&phba->hbalock, iflags);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
- if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ if (rrq->vport != vport)
+ continue;
+
+ if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
list_move(&rrq->list, &rrq_list);
+
+ }
spin_unlock_irqrestore(&phba->hbalock, iflags);
list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
@@ -1020,7 +1130,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
+ * @xritag: the xri in the bitmap to test.
*
* This function returns:
* 0 = rrq not active for this xri
@@ -1075,12 +1185,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
goto out;
}
- /*
- * set the active bit even if there is no mem available.
- */
- if (NLP_CHK_FREE_REQ(ndlp))
- goto out;
-
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
@@ -1091,7 +1195,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
- rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+ rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
if (!rrq) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
@@ -1106,7 +1210,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
- rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
@@ -1130,7 +1233,7 @@ out:
/**
* __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
- * @piocb: Pointer to the iocbq.
+ * @piocbq: Pointer to the iocbq.
*
* The driver calls this function with either the nvme ls ring lock
* or the fc els ring lock held depending on the iocb usage. This function
@@ -1146,29 +1249,24 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct lpfc_sglq *start_sglq = NULL;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
- struct lpfc_sli_ring *pring = NULL;
int found = 0;
+ u8 cmnd;
- if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
- pring = phba->sli4_hba.nvmels_wq->pring;
- else
- pring = lpfc_phba_elsring(phba);
-
- lockdep_assert_held(&pring->ring_lock);
+ cmnd = get_job_cmnd(phba, piocbq);
- if (piocbq->iocb_flag & LPFC_IO_FCP) {
- lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
+ if (piocbq->cmd_flag & LPFC_IO_FCP) {
+ lpfc_cmd = piocbq->io_buf;
ndlp = lpfc_cmd->rdata->pnode;
- } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
- !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
- ndlp = piocbq->context_un.ndlp;
- } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
- if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
+ !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
+ ndlp = piocbq->ndlp;
+ } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
+ if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
ndlp = NULL;
else
- ndlp = piocbq->context_un.ndlp;
+ ndlp = piocbq->ndlp;
} else {
- ndlp = piocbq->context1;
+ ndlp = piocbq->ndlp;
}
spin_lock(&phba->sli4_hba.sgl_list_lock);
@@ -1206,7 +1304,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
/**
* __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
- * @piocb: Pointer to the iocbq.
+ * @piocbq: Pointer to the iocbq.
*
* This function is called with the sgl_list lock held. This function
* gets a new driver sglq object from the sglq list. If the
@@ -1257,8 +1355,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
+ * This function is called to release the driver iocb object
+ * to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
* The sglq structure that holds the xritag and phys and virtual
@@ -1268,18 +1366,17 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* this IO was aborted then the sglq entry is put on the
* lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
* IO has good status or fails for any other reason then the sglq
- * entry is added to the free list (lpfc_els_sgl_list).
+ * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
+ * asserted held in the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_sglq *sglq;
- size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;
- lockdep_assert_held(&phba->hbalock);
-
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
@@ -1287,7 +1384,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (sglq) {
- if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+ if (iocbq->cmd_flag & LPFC_IO_NVMET) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
sglq->state = SGL_FREED;
@@ -1299,11 +1396,16 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
goto out;
}
- pring = phba->sli4_hba.els_wq->pring;
- if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
- (sglq->state != SGL_XRI_ABORTED)) {
+ if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
+ (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
+ sglq->state != SGL_XRI_ABORTED) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
+
+ /* Check if we can get a reference on ndlp */
+ if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
+ sglq->ndlp = NULL;
+
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
@@ -1317,9 +1419,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
&phba->sli4_hba.lpfc_els_sgl_list);
spin_unlock_irqrestore(
&phba->sli4_hba.sgl_list_lock, iflag);
-
+ pring = lpfc_phba_elsring(phba);
/* Check if TXQ queue needs to be serviced */
- if (!list_empty(&pring->txq))
+ if (pring && (!list_empty(&pring->txq)))
lpfc_worker_wake_up(phba);
}
}
@@ -1331,7 +1433,7 @@ out:
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
- iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
+ iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -1342,18 +1444,17 @@ out:
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
- * does not change for each use of the iocb object. This function
- * clears all other fields of the iocb object when it is freed.
+ * This function is called to release the driver iocb object to the
+ * iocb pool. The iotag in the iocb object does not change for each
+ * use of the iocb object. This function clears all other fields of
+ * the iocb object when it is freed. The hbalock is asserted held in
+ * the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
- lockdep_assert_held(&phba->hbalock);
-
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@@ -1422,15 +1523,23 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (!piocb->iocb_cmpl) {
- if (piocb->iocb_flag & LPFC_IO_NVME)
- lpfc_nvme_cancel_iocb(phba, piocb);
- else
- lpfc_sli_release_iocbq(phba, piocb);
+ if (piocb->cmd_cmpl) {
+ if (piocb->cmd_flag & LPFC_IO_NVME) {
+ lpfc_nvme_cancel_iocb(phba, piocb,
+ ulpstatus, ulpWord4);
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status,
+ &piocb->wcqe_cmpl, ulpstatus);
+ piocb->wcqe_cmpl.parameter = ulpWord4;
+ } else {
+ piocb->iocb.ulpStatus = ulpstatus;
+ piocb->iocb.un.ulpWord[4] = ulpWord4;
+ }
+ (piocb->cmd_cmpl) (phba, piocb, piocb);
+ }
} else {
- piocb->iocb.ulpStatus = ulpstatus;
- piocb->iocb.un.ulpWord[4] = ulpWord4;
- (piocb->iocb_cmpl) (phba, piocb, piocb);
+ lpfc_sli_release_iocbq(phba, piocb);
}
}
return;
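
In the reworked lpfc_sli_cancel_iocbs() above, an iocb with a completion handler gets the failure code stuffed into whichever layout its SLI revision uses (WCQE fields for SLI4, IOCB words for SLI3) before the handler runs; an iocb without one is simply released. A simplified sketch of the non-NVME branch, with illustrative types standing in for the driver's structures:

    #include <stdint.h>

    struct req;
    typedef void (*cmpl_fn)(struct req *);

    struct req {
            int      sli4;           /* nonzero: SLI4-style completion layout */
            uint32_t wcqe_status;    /* SLI4: WCQE status field */
            uint32_t wcqe_parameter; /* SLI4: WCQE parameter */
            uint32_t ulp_status;     /* SLI3: IOCB ulpStatus */
            uint32_t ulp_word4;      /* SLI3: IOCB ulpWord[4] */
            cmpl_fn  cmpl;           /* completion handler, may be NULL */
    };

    static void cancel_req(struct req *r, uint32_t status, uint32_t word4,
                           void (*release)(struct req *))
    {
            if (r->cmpl) {
                    /* Report the cancel reason in the rev-appropriate
                     * place, then let the normal completion path clean up. */
                    if (r->sli4) {
                            r->wcqe_status = status;
                            r->wcqe_parameter = word4;
                    } else {
                            r->ulp_status = status;
                            r->ulp_word4 = word4;
                    }
                    r->cmpl(r);
            } else {
                    release(r);
            }
    }
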
@@ -1505,6 +1614,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case DSSCMD_IWRITE64_CX:
case DSSCMD_IREAD64_CR:
case DSSCMD_IREAD64_CX:
+ case CMD_SEND_FRAME:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
@@ -1579,7 +1689,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
lpfc_config_ring(phba, i, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0446 Adapter failed to init (%d), "
"mbxCmd x%x CFG_RING, mbxStatus x%x, "
"ring %d\n",
@@ -1611,20 +1721,18 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- if (phba->sli_rev == LPFC_SLI_REV4)
- lockdep_assert_held(&pring->ring_lock);
- else
- lockdep_assert_held(&phba->hbalock);
+ u32 ulp_command = 0;
BUG_ON(!piocb);
+ ulp_command = get_job_cmnd(phba, piocb);
list_add_tail(&piocb->list, &pring->txcmplq);
- piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt++;
-
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
- (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (ulp_command != CMD_ABORT_XRI_WQE) &&
+ (ulp_command != CMD_ABORT_XRI_CN) &&
+ (ulp_command != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
if (!(piocb->vport->load_flag & FC_UNLOADING))
mod_timer(&piocb->vport->els_tmofunc,
@@ -1657,6 +1765,262 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
+ * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * This routine will inform the driver of any BW adjustments we need
+ * to make. These changes will be picked up during the next CMF
+ * timer interrupt. In addition, any BW changes will be logged
+ * with LOG_CGN_MGMT.
+ **/
+static void
+lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ union lpfc_wqe128 *wqe;
+ uint32_t status, info;
+ struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
+ uint64_t bw, bwdif, slop;
+ uint64_t pcent, bwpcent;
+ int asig, afpin, sigcnt, fpincnt;
+ int wsigmax, wfpinmax, cg, tdp;
+ char *s;
+
+ /* First check for error */
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (status) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6211 CMF_SYNC_WQE Error "
+ "req_tag x%x status x%x hwstatus x%x "
+ "tdatap x%x parm x%x\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe),
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ bf_get(lpfc_wcqe_c_hw_status, wcqe),
+ wcqe->total_data_placed,
+ wcqe->parameter);
+ goto out;
+ }
+
+ /* Gather congestion information on a successful cmpl */
+ info = wcqe->parameter;
+ phba->cmf_active_info = info;
+
+ /* See if firmware info count is valid or has changed */
+ if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
+ info = 0;
+ else
+ phba->cmf_info_per_interval = info;
+
+ tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
+ cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
+
+ /* Get BW requirement from firmware */
+ bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
+ if (!bw) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6212 CMF_SYNC_WQE x%x: NULL bw\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ goto out;
+ }
+
+ /* Gather information needed for logging if a BW change is required */
+ wqe = &cmdiocb->wqe;
+ asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
+ afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
+ fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
+ sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
+ if (phba->cmf_max_bytes_per_interval != bw ||
+ (asig || afpin || sigcnt || fpincnt)) {
+ /* Are we increasing or decreasing BW */
+ if (phba->cmf_max_bytes_per_interval < bw) {
+ bwdif = bw - phba->cmf_max_bytes_per_interval;
+ s = "Increase";
+ } else {
+ bwdif = phba->cmf_max_bytes_per_interval - bw;
+ s = "Decrease";
+ }
+
+ /* What is the change percentage */
+ slop = div_u64(phba->cmf_link_byte_count, 200); /* For rounding */
+ pcent = div64_u64(bwdif * 100 + slop,
+ phba->cmf_link_byte_count);
+ bwpcent = div64_u64(bw * 100 + slop,
+ phba->cmf_link_byte_count);
+ if (asig) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6237 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: Signal Alarm: cg:%d "
+ "Info:%u\n",
+ bwpcent, bw, pcent, s, cg,
+ phba->cmf_active_info);
+ } else if (afpin) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6238 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: FPIN Alarm: cg:%d "
+ "Info:%u\n",
+ bwpcent, bw, pcent, s, cg,
+ phba->cmf_active_info);
+ } else if (sigcnt) {
+ wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6239 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: Signal Warning: "
+ "Cnt %d Max %d: cg:%d Info:%u\n",
+ bwpcent, bw, pcent, s, sigcnt,
+ wsigmax, cg, phba->cmf_active_info);
+ } else if (fpincnt) {
+ wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6240 BW Threshold %lld%% (%lld): "
+ "%lld%% %s: FPIN Warning: "
+ "Cnt %d Max %d: cg:%d Info:%u\n",
+ bwpcent, bw, pcent, s, fpincnt,
+ wfpinmax, cg, phba->cmf_active_info);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6241 BW Threshold %lld%% (%lld): "
+ "CMF %lld%% %s: cg:%d Info:%u\n",
+ bwpcent, bw, pcent, s, cg,
+ phba->cmf_active_info);
+ }
+ } else if (info) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6246 Info Threshold %u\n", info);
+ }
+
+ /* Save BW change to be picked up during next timer interrupt */
+ phba->cmf_last_sync_bw = bw;
+out:
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
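
The slop term above is worth a second look: slop = cmf_link_byte_count / 200 adds 1/200 of a percentage point (0.005%) to the quotient, which compensates for byte counts that land a hair under an integer percentage boundary after earlier integer truncation. A standalone sketch of the arithmetic (illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Percentage with the same slop correction as the BW logging above:
     * adding whole/200 before the truncating division bumps a result
     * that is within 0.005% of an integer boundary up to that boundary. */
    static uint64_t pct_with_slop(uint64_t part, uint64_t whole)
    {
            uint64_t slop = whole / 200;

            return (part * 100 + slop) / whole;
    }

    int main(void)
    {
            uint64_t whole = 100000;

            /* 99999 of 100000 bytes is 99.999%: plain truncation reports
             * 99%, the slop-corrected division reports 100%. */
            printf("plain: %llu%%\n",
                   (unsigned long long)(99999ULL * 100 / whole));
            printf("slop:  %llu%%\n",
                   (unsigned long long)pct_with_slop(99999, whole));
            return 0;
    }
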
+/**
+ * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
+ * @phba: Pointer to HBA context object.
+ * @ms: ms to set in WQE interval, 0 means use init op
+ * @total: Total rcv bytes for this interval
+ *
+ * This routine is called every CMF timer interrupt. Its purpose is
+ * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
+ * that may indicate we have congestion (FPINs or Signals). Upon
+ * completion, the firmware will indicate any BW restrictions the
+ * driver may need to apply.
+ **/
+int
+lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
+{
+ union lpfc_wqe128 *wqe;
+ struct lpfc_iocbq *sync_buf;
+ unsigned long iflags;
+ u32 ret_val;
+ u32 atot, wtot, max;
+ u16 warn_sync_period = 0;
+
+ /* First address any alarm / warning activity */
+ atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
+ wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
+
+ /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
+ if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
+ phba->link_state == LPFC_LINK_DOWN)
+ return 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ sync_buf = __lpfc_sli_get_iocbq(phba);
+ if (!sync_buf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+ "6244 No available WQEs for CMF_SYNC_WQE\n");
+ ret_val = ENOMEM;
+ goto out_unlock;
+ }
+
+ wqe = &sync_buf->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to zero */
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* If this is the very first CMF_SYNC_WQE, issue an init operation */
+ if (!ms) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6441 CMF Init %d - CMF_SYNC_WQE\n",
+ phba->fc_eventTag);
+ bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
+ bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
+ goto initpath;
+ }
+
+ bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
+ bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
+
+ /* Check for alarms / warnings */
+ if (atot) {
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* We hit a Signal alarm condition */
+ bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
+ } else {
+ /* We hit a FPIN alarm condition */
+ bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
+ }
+ } else if (wtot) {
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+ phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ /* We hit a Signal warning condition */
+ max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
+ lpfc_acqe_cgn_frequency;
+ bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
+ bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+ warn_sync_period = lpfc_acqe_cgn_frequency;
+ } else {
+ /* We hit a FPIN warning condition */
+ bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+ if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
+ warn_sync_period =
+ LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
+ }
+ }
+
+ /* Update total read blocks during previous timer interval */
+ wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
+
+initpath:
+ bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
+ wqe->cmf_sync.event_tag = phba->fc_eventTag;
+ bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
+
+ /* Setup reqtag to match the wqe completion. */
+ bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
+
+ bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
+
+ bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
+ bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
+
+ sync_buf->vport = phba->pport;
+ sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
+ sync_buf->cmd_dmabuf = NULL;
+ sync_buf->rsp_dmabuf = NULL;
+ sync_buf->bpl_dmabuf = NULL;
+ sync_buf->sli4_xritag = NO_XRI;
+
+ sync_buf->cmd_flag |= LPFC_IO_CMF;
+ ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
+ if (ret_val) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
+ ret_val);
+ __lpfc_sli_release_iocbq(phba, sync_buf);
+ }
+out_unlock:
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return ret_val;
+}
+
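
Note how the routine above drains the congestion counters with atomic_xchg() rather than a read followed by a store of zero: the exchange is a single atomic operation, so a signal or FPIN that fires concurrently is either included in the value returned for this interval or lands in the freshly zeroed counter for the next one, never lost. A C11 sketch of the same read-and-reset pattern (illustrative, not the kernel's atomic API):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t warn_cnt;

    /* Producer side, e.g. a notification path: count an event. */
    static void note_warning(void)
    {
            atomic_fetch_add_explicit(&warn_cnt, 1, memory_order_relaxed);
    }

    /* Periodic consumer: read and zero the counter in one atomic step. */
    static uint32_t drain_warnings(void)
    {
            return atomic_exchange_explicit(&warn_cnt, 0,
                                            memory_order_relaxed);
    }
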
+/**
* lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -1688,7 +2052,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0315 Ring %d issue: portCmdGet %d "
"is bigger than cmd ring %d\n",
pring->ringno,
@@ -1798,21 +2162,21 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* @nextiocb: Pointer to driver iocb object which need to be
* posted to firmware.
*
- * This function is called with hbalock held to post a new iocb to
- * the firmware. This function copies the new iocb to ring iocb slot and
- * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * This function is called to post a new iocb to the firmware. This
+ * function copies the new iocb to ring iocb slot and updates the
+ * ring pointers. It adds the new iocb to txcmplq if there is
* a completion call back for this iocb else the function will free the
- * iocb object.
+ * iocb object. The hbalock is asserted held in the code path calling
+ * this routine.
**/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
- lockdep_assert_held(&phba->hbalock);
/*
* Set up an iotag
*/
- nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+ nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
@@ -1833,9 +2197,9 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* If there is no completion routine to call, we can release the
* IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
- * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ * that have no rsp ring completion, cmd_cmpl MUST be NULL.
*/
- if (nextiocb->iocb_cmpl)
+ if (nextiocb->cmd_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
__lpfc_sli_release_iocbq(phba, nextiocb);
@@ -1974,8 +2338,7 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
hbqp->local_hbqGetIdx = getidx;
if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
- lpfc_printf_log(phba, KERN_ERR,
- LOG_SLI | LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1802 HBQ %d: local_hbqGetIdx "
"%u is > than hbqp->entry_count %u\n",
hbqno, hbqp->local_hbqGetIdx,
@@ -2243,10 +2606,8 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
lpfc_hbq_defs[qno]->init_count);
}
-/**
+/*
* lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
- * @phba: Pointer to HBA context object.
- * @hbqno: HBQ number.
*
* This function removes the first hbq buffer on an hbq list and returns a
* pointer to that buffer. If it finds no buffers on the list it returns NULL.
@@ -2265,7 +2626,7 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list)
/**
* lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
* @phba: Pointer to HBA context object.
- * @hbqno: HBQ number.
+ * @hrq: Pointer to the driver receive queue object.
*
* This function removes the first RQ buffer on an RQ buffer list and returns a
* pointer to that buffer. If it finds no buffers on the list it returns NULL.
@@ -2314,7 +2675,7 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
}
}
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1803 Bad hbq tag. Data: x%x x%x\n",
tag, phba->hbqs[tag >> 16].buffer_count);
return NULL;
@@ -2467,14 +2828,20 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
- spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ __lpfc_sli_rpi_release(vport, ndlp);
+}
+
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
@@ -2495,13 +2862,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t rpi, vpi;
int rc;
- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
-
/*
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery, the driver needs to clean up the RPI.
@@ -2509,8 +2869,16 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!(phba->pport->load_flag & FC_UNLOADING) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ if (mp) {
+ pmb->ctx_buf = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2532,8 +2900,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
- pmb->ctx_buf = NULL;
- pmb->ctx_ndlp = NULL;
}
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
@@ -2545,9 +2911,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %px\n",
+ "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
+ ndlp->nlp_flag, ndlp->nlp_defer_did,
+ ndlp, vport->load_flag, kref_read(&ndlp->kref));
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
@@ -2557,23 +2924,33 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
- if (vport->load_flag & FC_UNLOADING)
- lpfc_nlp_put(ndlp);
+
+ /* The unreg_login mailbox is complete and had a
+ * reference that has to be released. The PLOGI
+ * got its own ref.
+ */
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
+ /* This nlp_put pairs with lpfc_sli4_resume_rpi */
+ if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ lpfc_nlp_put(ndlp);
+ }
+
/* Check security permission status on INIT_LINK mailbox command */
if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
(pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2860 SLI authentication is required "
"for INIT_LINK but has not done yet\n");
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
* lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
@@ -2582,7 +2959,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*
* This function is the unreg rpi mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
- * command. An additional refrenece is put on the ndlp to prevent
+ * command. An additional reference is put on the ndlp to prevent
* lpfc_nlp_release from freeing the rpi bit in the bitmask before
* the unreg mailbox command completes; this routine then puts the
* reference back.
@@ -2602,16 +2979,15 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
lpfc_printf_vlog(
- vport, KERN_INFO, LOG_MBOX | LOG_SLI,
+ vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "map:%x %px\n",
+ "x%px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- lpfc_nlp_put(ndlp);
/* Check to see if there are any deferred
* events to process
@@ -2634,6 +3010,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+ lpfc_nlp_put(ndlp);
}
}
}
@@ -2702,7 +3079,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
MBX_SHUTDOWN) {
/* Unknown mailbox command compl */
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x/x%x) Cmpl\n",
pmb->vport ? pmb->vport->vpi :
@@ -2803,6 +3180,144 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
}
/**
+ * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
+ * containing an NVME LS request.
+ * @phba: pointer to lpfc hba data structure.
+ * @piocb: pointer to the iocbq struct representing the sequence starting
+ * frame.
+ *
+ * This routine initially validates the NVME LS, validates there is a login
+ * with the port that sent the LS, and then calls the appropriate nvme host
+ * or target LS request handler.
+ **/
+static void
+lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *nvmebuf;
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_async_xchg_ctx *axchg = NULL;
+ char *failwhy = NULL;
+ uint32_t oxid, sid, did, fctl, size;
+ int ret = 1;
+
+ d_buf = piocb->cmd_dmabuf;
+
+ nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ fc_hdr = nvmebuf->hbuf.virt;
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+ did = sli4_did_from_fc_hdr(fc_hdr);
+ fctl = (fc_hdr->fh_f_ctl[0] << 16 |
+ fc_hdr->fh_f_ctl[1] << 8 |
+ fc_hdr->fh_f_ctl[2]);
+ size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
+
+ lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ failwhy = "Driver Unloading";
+ } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+ failwhy = "NVME FC4 Disabled";
+ } else if (!phba->nvmet_support && !phba->pport->localport) {
+ failwhy = "No Localport";
+ } else if (phba->nvmet_support && !phba->targetport) {
+ failwhy = "No Targetport";
+ } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
+ failwhy = "Bad NVME LS R_CTL";
+ } else if (unlikely((fctl & 0x00FF0000) !=
+ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
+ failwhy = "Bad NVME LS F_CTL";
+ } else {
+ axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
+ if (!axchg)
+ failwhy = "No CTX memory";
+ }
+
+ if (unlikely(failwhy)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
+ sid, oxid, failwhy);
+ goto out_fail;
+ }
+
+ /* validate the source of the LS is logged in */
+ ndlp = lpfc_findnode_did(phba->pport, sid);
+ if (!ndlp ||
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6216 NVME Unsol rcv: No ndlp: "
+ "NPort_ID x%x oxid x%x\n",
+ sid, oxid);
+ goto out_fail;
+ }
+
+ axchg->phba = phba;
+ axchg->ndlp = ndlp;
+ axchg->size = size;
+ axchg->oxid = oxid;
+ axchg->sid = sid;
+ axchg->wqeq = NULL;
+ axchg->state = LPFC_NVME_STE_LS_RCV;
+ axchg->entry_cnt = 1;
+ axchg->rqb_buffer = (void *)nvmebuf;
+ axchg->hdwq = &phba->sli4_hba.hdwq[0];
+ axchg->payload = nvmebuf->dbuf.virt;
+ INIT_LIST_HEAD(&axchg->list);
+
+ if (phba->nvmet_support) {
+ ret = lpfc_nvmet_handle_lsreq(phba, axchg);
+ spin_lock_irq(&ndlp->lock);
+ if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
+ ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
+ spin_unlock_irq(&ndlp->lock);
+
+ /* This reference is a single occurrence to hold the
+ * node valid until the nvmet transport calls
+ * host_release.
+ */
+ if (!lpfc_nlp_get(ndlp))
+ goto out_fail;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
+ "6206 NVMET unsol ls_req ndlp x%px "
+ "DID x%x xflags x%x refcnt %d\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
+ } else {
+ spin_unlock_irq(&ndlp->lock);
+ }
+ } else {
+ ret = lpfc_nvme_handle_lsreq(phba, axchg);
+ }
+
+ /* if zero, LS was successfully handled. If non-zero, LS not handled */
+ if (!ret)
+ return;
+
+out_fail:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
+ "NVMe%s handler failed %d\n",
+ did, sid, oxid,
+ (phba->nvmet_support) ? "T" : "I", ret);
+
+ /* recycle receive buffer */
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+
+ /* If start of new exchange, abort it */
+ if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
+ ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
+
+ if (ret)
+ kfree(axchg);
+}
+
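
The F_CTL test above deserves a note: F_CTL is a 24-bit field carried as three bytes in the FC frame header, so the handler reassembles it by shifting, then masks off the middle byte (bits 16-23) and requires FIRST_SEQ, END_SEQ and SEQ_INIT all set -- a complete first sequence that hands over sequence initiative. A sketch using the bit definitions from include/scsi/fc/fc_fs.h:

    #include <stdint.h>
    #include <stdbool.h>

    /* F_CTL bits, per include/scsi/fc/fc_fs.h */
    #define FC_FC_EX_CTX    (1 << 23)  /* sent by exchange responder */
    #define FC_FC_FIRST_SEQ (1 << 21)  /* first sequence of exchange */
    #define FC_FC_END_SEQ   (1 << 19)  /* last frame of sequence */
    #define FC_FC_SEQ_INIT  (1 << 16)  /* transfer sequence initiative */

    /* Reassemble the 24-bit F_CTL from its three header bytes. */
    static uint32_t fctl_from_hdr(const uint8_t fh_f_ctl[3])
    {
            return (uint32_t)fh_f_ctl[0] << 16 |
                   (uint32_t)fh_f_ctl[1] << 8 |
                   (uint32_t)fh_f_ctl[2];
    }

    /* Accept the LS only when the middle F_CTL byte shows a complete,
     * initiative-transferring first sequence. */
    static bool fctl_ok_for_ls(uint32_t fctl)
    {
            return (fctl & 0x00FF0000) ==
                   (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
    }
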
+/**
* lpfc_complete_unsol_iocb - Complete an unsolicited sequence
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -2823,7 +3338,7 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
switch (fch_type) {
case FC_TYPE_NVME:
- lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
+ lpfc_nvme_unsol_ls_handler(phba, saveq);
return 1;
default:
break;
@@ -2850,6 +3365,56 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+static void
+lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
+ struct lpfc_iocbq *saveq)
+{
+ IOCB_t *irsp;
+ union lpfc_wqe128 *wqe;
+ u16 i = 0;
+
+ irsp = &saveq->iocb;
+ wqe = &saveq->wqe;
+
+ /* Fill wcqe with the IOCB status fields */
+ bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
+ saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
+ saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
+ saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
+
+ /* Source ID */
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
+
+ /* rx-id of the response frame */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
+
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ irsp->unsli3.rcvsli3.ox_id);
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+ irsp->un.rcvels.remoteID);
+
+ /* unsol data len */
+ for (i = 0; i < irsp->ulpBdeCount; i++) {
+ struct lpfc_hbq_entry *hbqe = NULL;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (i == 0) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->un.ulpWord[0];
+ saveq->wqe.gen_req.bde.tus.f.bdeSize =
+ hbqe->bde.tus.f.bdeSize;
+ } else if (i == 1) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->unsli3.sli3Words[4];
+ saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
+ }
+ }
+ }
+}
+
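
lpfc_sli_prep_unsol_wqe() above is written almost entirely with bf_set(), the driver's accessor for named bit-fields inside WQE/WCQE words. A generic mask-and-shift illustration of what such accessors do (the real macros derive the shift, mask and word from per-field definitions in lpfc_hw4.h):

    #include <stdint.h>

    /* An 8-bit field occupying bits 8..15 of a 32-bit word. */
    #define FIELD_SHIFT 8
    #define FIELD_MASK  0xffu

    static inline uint32_t field_get(uint32_t word)
    {
            return (word >> FIELD_SHIFT) & FIELD_MASK;
    }

    static inline uint32_t field_set(uint32_t word, uint32_t val)
    {
            word &= ~(FIELD_MASK << FIELD_SHIFT);
            word |= (val & FIELD_MASK) << FIELD_SHIFT;
            return word;
    }
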
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
* @phba: Pointer to HBA context object.
@@ -2870,11 +3435,13 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
IOCB_t * irsp;
WORD5 * w5p;
+ dma_addr_t paddr;
uint32_t Rctl, Type;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
- irsp = &(saveq->iocb);
+ irsp = &saveq->iocb;
+ saveq->vport = phba->pport;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
@@ -2892,22 +3459,22 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
- (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
if (irsp->ulpBdeCount > 0) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->un.ulpWord[3]);
+ irsp->un.ulpWord[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 1) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[3]);
+ irsp->unsli3.sli3Words[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 2) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[7]);
+ irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
@@ -2916,9 +3483,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) {
- saveq->context2 = lpfc_sli_get_buff(phba, pring,
+ saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
- if (!saveq->context2)
+ if (!saveq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -2928,9 +3495,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- saveq->context3 = lpfc_sli_get_buff(phba, pring,
+ saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
- if (!saveq->context3)
+ if (!saveq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -2940,11 +3507,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->unsli3.sli3Words[7]);
}
list_for_each_entry(iocbq, &saveq->list, list) {
- irsp = &(iocbq->iocb);
+ irsp = &iocbq->iocb;
if (irsp->ulpBdeCount != 0) {
- iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+ iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
+ pring,
irsp->un.ulpWord[3]);
- if (!iocbq->context2)
+ if (!iocbq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -2954,9 +3522,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+ iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
+ pring,
irsp->unsli3.sli3Words[7]);
- if (!iocbq->context3)
+ if (!iocbq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -2967,7 +3536,20 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->unsli3.sli3Words[7]);
}
}
+ } else {
+ paddr = getPaddr(irsp->un.cont64[0].addrHigh,
+ irsp->un.cont64[0].addrLow);
+ saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (irsp->ulpBdeCount == 2) {
+ paddr = getPaddr(irsp->un.cont64[1].addrHigh,
+ irsp->un.cont64[1].addrLow);
+ saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
+
if (irsp->ulpBdeCount != 0 &&
(irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
@@ -2985,12 +3567,14 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!found)
list_add_tail(&saveq->clist,
&pring->iocb_continue_saveq);
+
if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
list_del_init(&iocbq->clist);
saveq = iocbq;
- irsp = &(saveq->iocb);
- } else
+ irsp = &saveq->iocb;
+ } else {
return 0;
+ }
}
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
(irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
@@ -3013,6 +3597,19 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (irsp->unsli3.rcvsli3.vpi == 0xffff)
+ saveq->vport = phba->pport;
+ else
+ saveq->vport = lpfc_find_vport_by_vpid(phba,
+ irsp->unsli3.rcvsli3.vpi);
+ }
+
+ /* Prepare WQE with Unsol frame */
+ lpfc_sli_prep_unsol_wqe(phba, saveq);
+
if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0313 Ring %d handler: unexpected Rctl x%x "
@@ -3041,36 +3638,28 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- uint16_t iotag;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
+ u16 iotag;
if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
+ iotag = get_wqe_reqtag(prspiocb);
else
- temp_lock = &phba->hbalock;
-
- spin_lock_irqsave(temp_lock, iflag);
- iotag = prspiocb->iocb.ulpIoTag;
+ iotag = prspiocb->iocb.ulpIoTag;
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0317 iotag x%x is out of "
- "range: max iotag x%x wd0 x%x\n",
- iotag, phba->sli.last_iotag,
- *(((uint32_t *) &prspiocb->iocb) + 7));
+ "range: max iotag x%x\n",
+ iotag, phba->sli.last_iotag);
return NULL;
}
@@ -3091,33 +3680,23 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint16_t iotag)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
-
- if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
- else
- temp_lock = &phba->hbalock;
- spin_lock_irqsave(temp_lock, iflag);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0372 iotag x%x lookup error: max iotag (x%x) "
- "iocb_flag x%x\n",
+ "cmd_flag x%x\n",
iotag, phba->sli.last_iotag,
- cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
+ cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
return NULL;
}
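
Both lookup routines above share one shape: the iotag indexes a flat array of command pointers, and a hit counts only if the command is still marked as being on the txcmplq, in which case it is unlinked and handed back. The internal locking was removed because callers (see lpfc_sli_process_sol_iocb below) now take the appropriate ring or hba lock around the call. A simplified sketch with illustrative types:

    #include <stdint.h>
    #include <stddef.h>

    #define ON_TXCMPLQ 0x1u

    struct cmd {
            uint32_t flags;
            /* ... list linkage, payload ... */
    };

    struct iotag_table {
            struct cmd **slot; /* iotag -> command; slot 0 unused */
            uint16_t last;     /* highest iotag ever issued */
    };

    /* Caller must hold the ring lock, mirroring how the driver hoisted
     * locking out of the lookup itself. */
    static struct cmd *lookup_iotag(struct iotag_table *t, uint16_t iotag)
    {
            struct cmd *c;

            if (iotag == 0 || iotag > t->last)
                    return NULL;            /* out of range */
            c = t->slot[iotag];
            if (!(c->flags & ON_TXCMPLQ))
                    return NULL;            /* no longer in flight */
            c->flags &= ~ON_TXCMPLQ;        /* leaving the completion queue */
            return c;
    }
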
@@ -3143,20 +3722,38 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
struct lpfc_iocbq *cmdiocbp;
- int rc = 1;
unsigned long iflag;
+ u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ ulp_command = get_job_cmnd(phba, saveq);
+ ulp_status = get_job_ulpstatus(phba, saveq);
+ ulp_word4 = get_job_word4(phba, saveq);
+ ulp_context = get_job_ulpcontext(phba, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ iotag = get_wqe_reqtag(saveq);
+ else
+ iotag = saveq->iocb.ulpIoTag;
+
if (cmdiocbp) {
- if (cmdiocbp->iocb_cmpl) {
+ ulp_command = get_job_cmnd(phba, cmdiocbp);
+ if (cmdiocbp->cmd_cmpl) {
/*
* If an ELS command failed send an event to mgmt
* application.
*/
- if (saveq->iocb.ulpStatus &&
+ if (ulp_status &&
(pring->ringno == LPFC_ELS_RING) &&
- (cmdiocbp->iocb.ulpCommand ==
- CMD_ELS_REQUEST64_CR))
+ (ulp_command == CMD_ELS_REQUEST64_CR))
lpfc_send_els_failure_event(phba,
cmdiocbp, saveq);
@@ -3166,11 +3763,11 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
if (pring->ringno == LPFC_ELS_RING) {
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- (cmdiocbp->iocb_flag &
+ (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED)) {
spin_lock_irqsave(&phba->hbalock,
iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
@@ -3185,12 +3782,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(&phba->hbalock,
iflag);
- saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (saveq->iocb_flag &
+ if (saveq->cmd_flag &
LPFC_EXCHANGE_BUSY) {
/* Set cmdiocb flag for the
* exchange busy so sgl (xri)
@@ -3200,12 +3797,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag |=
+ cmdiocbp->cmd_flag |=
LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
- if (cmdiocbp->iocb_flag &
+ if (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED) {
/*
* Clear LPFC_DRIVER_ABORTED
@@ -3214,34 +3811,34 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
- cmdiocbp->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- cmdiocbp->iocb.un.ulpWord[4] =
- IOERR_ABORT_REQUESTED;
+ set_job_ulpstatus(cmdiocbp,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(cmdiocbp,
+ IOERR_ABORT_REQUESTED);
/*
- * For SLI4, irsiocb contains
+ * For SLI4, irspiocb contains
* NO_XRI in sli_xritag, it
* shall not affect releasing
* sgl (xri) process.
*/
- saveq->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- saveq->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
+ set_job_ulpstatus(saveq,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(saveq,
+ IOERR_SLI_ABORTED);
spin_lock_irqsave(
&phba->hbalock, iflag);
- saveq->iocb_flag |=
+ saveq->cmd_flag |=
LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
}
}
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
@@ -3259,16 +3856,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
- pring->ringno,
- saveq->iocb.ulpIoTag,
- saveq->iocb.ulpStatus,
- saveq->iocb.un.ulpWord[4],
- saveq->iocb.ulpCommand,
- saveq->iocb.ulpContext);
+ pring->ringno, iotag, ulp_status,
+ ulp_word4, ulp_command, ulp_context);
}
}
- return rc;
+ return 1;
}
/**
@@ -3289,7 +3882,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0312 Ring %d handler: portRspPut %d "
"is bigger than rsp ring %d\n",
pring->ringno, le32_to_cpu(pgp->rspPutInx),
@@ -3311,7 +3904,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/**
* lpfc_poll_eratt - Error attention polling timer timeout handler
- * @ptr: Pointer to address of HBA context object.
+ * @t: Timer context used to fetch the pointer to the HBA context object.
*
* This function is invoked by the Error Attention polling timer when the
* timer times out. It will check the SLI Error Attention register for
@@ -3477,18 +4070,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}
- spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (cmdiocbq->iocb_cmpl) {
+ if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag);
}
break;
@@ -3508,7 +4098,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
type, irsp->ulpCommand,
@@ -3579,155 +4169,159 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *rspiocbp)
{
struct lpfc_iocbq *saveq;
- struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *cmdiocb;
struct lpfc_iocbq *next_iocb;
- IOCB_t *irsp = NULL;
+ IOCB_t *irsp;
uint32_t free_saveq;
- uint8_t iocb_cmd_type;
+ u8 cmd_type;
lpfc_iocb_type type;
unsigned long iflag;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ u32 ulp_word4 = get_job_word4(phba, rspiocbp);
+ u32 ulp_command = get_job_cmnd(phba, rspiocbp);
int rc;
spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
- list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+ list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
pring->iocb_continueq_cnt++;
- /* Now, determine whether the list is completed for processing */
- irsp = &rspiocbp->iocb;
- if (irsp->ulpLe) {
- /*
- * By default, the driver expects to free all resources
- * associated with this iocb completion.
- */
- free_saveq = 1;
- saveq = list_get_first(&pring->iocb_continueq,
- struct lpfc_iocbq, list);
- irsp = &(saveq->iocb);
- list_del_init(&pring->iocb_continueq);
- pring->iocb_continueq_cnt = 0;
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
- pring->stats.iocb_rsp++;
+ pring->stats.iocb_rsp++;
- /*
- * If resource errors reported from HBA, reduce
- * queuedepths of the SCSI device.
- */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_NO_RESOURCES)) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- phba->lpfc_rampdown_queue_depth(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- }
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
- if (irsp->ulpStatus) {
- /* Rsp ring <ringno> error: IOCB */
+ if (ulp_status) {
+ /* Rsp ring <ringno> error: IOCB */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ irsp = &rspiocbp->iocb;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0328 Rsp Ring %d error: ulp_status x%x "
+ "IOCB Data: "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x\n",
+ pring->ringno, ulp_status,
+ get_job_ulpword(rspiocbp, 0),
+ get_job_ulpword(rspiocbp, 1),
+ get_job_ulpword(rspiocbp, 2),
+ get_job_ulpword(rspiocbp, 3),
+ get_job_ulpword(rspiocbp, 4),
+ get_job_ulpword(rspiocbp, 5),
+ *(((uint32_t *)irsp) + 6),
+ *(((uint32_t *)irsp) + 7),
+ *(((uint32_t *)irsp) + 8),
+ *(((uint32_t *)irsp) + 9),
+ *(((uint32_t *)irsp) + 10),
+ *(((uint32_t *)irsp) + 11),
+ *(((uint32_t *)irsp) + 12),
+ *(((uint32_t *)irsp) + 13),
+ *(((uint32_t *)irsp) + 14),
+ *(((uint32_t *)irsp) + 15));
+ } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0328 Rsp Ring %d error: "
+ "0321 Rsp Ring %d error: "
"IOCB Data: "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
"x%x x%x x%x x%x\n",
pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7),
- *(((uint32_t *) irsp) + 8),
- *(((uint32_t *) irsp) + 9),
- *(((uint32_t *) irsp) + 10),
- *(((uint32_t *) irsp) + 11),
- *(((uint32_t *) irsp) + 12),
- *(((uint32_t *) irsp) + 13),
- *(((uint32_t *) irsp) + 14),
- *(((uint32_t *) irsp) + 15));
+ rspiocbp->wcqe_cmpl.word0,
+ rspiocbp->wcqe_cmpl.total_data_placed,
+ rspiocbp->wcqe_cmpl.parameter,
+ rspiocbp->wcqe_cmpl.word3);
}
+ }
- /*
- * Fetch the IOCB command type and call the correct completion
- * routine. Solicited and Unsolicited IOCBs on the ELS ring
- * get freed back to the lpfc_iocb_list by the discovery
- * kernel thread.
- */
- iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
- type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
- switch (type) {
- case LPFC_SOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- break;
-
- case LPFC_UNSOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (!rc)
- free_saveq = 0;
- break;
- case LPFC_ABORT_IOCB:
- cmdiocbp = NULL;
- if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
+ /*
+ * Fetch the iocb command type and call the correct completion
+ * routine. Solicited and Unsolicited IOCBs on the ELS ring
+ * get freed back to the lpfc_iocb_list by the discovery
+ * kernel thread.
+ */
+ cmd_type = ulp_command & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(cmd_type);
+ switch (type) {
+ case LPFC_SOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
+ break;
+ case LPFC_ABORT_IOCB:
+ cmdiocb = NULL;
+ if (ulp_command != CMD_XRI_ABORTED_CX)
+ cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
+ saveq);
+ if (cmdiocb) {
+ /* Call the specified completion routine */
+ if (cmdiocb->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
- saveq);
+ cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
- }
- if (cmdiocbp) {
- /* Call the specified completion routine */
- if (cmdiocbp->iocb_cmpl) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
- saveq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- } else
- __lpfc_sli_release_iocbq(phba,
- cmdiocbp);
- }
- break;
-
- case LPFC_UNKNOWN_IOCB:
- if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
- char adaptermsg[LPFC_MAX_ADPTMSG];
- memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
- memcpy(&adaptermsg[0], (uint8_t *)irsp,
- MAX_MSG_DATA);
- dev_warn(&((phba->pcidev)->dev),
- "lpfc%d: %s\n",
- phba->brd_no, adaptermsg);
} else {
- /* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0335 Unknown IOCB "
- "command Data: x%x "
- "x%x x%x x%x\n",
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
+ __lpfc_sli_release_iocbq(phba, cmdiocb);
}
- break;
}
+ break;
+ case LPFC_UNKNOWN_IOCB:
+ if (ulp_command == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ ulp_command,
+ ulp_status,
+ get_wqe_reqtag(rspiocbp),
+ get_job_ulpcontext(phba, rspiocbp));
+ }
+ break;
+ }
- if (free_saveq) {
- list_for_each_entry_safe(rspiocbp, next_iocb,
- &saveq->list, list) {
- list_del_init(&rspiocbp->list);
- __lpfc_sli_release_iocbq(phba, rspiocbp);
- }
- __lpfc_sli_release_iocbq(phba, saveq);
+ if (free_saveq) {
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del_init(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba, rspiocbp);
}
- rspiocbp = NULL;
+ __lpfc_sli_release_iocbq(phba, saveq);
}
+ rspiocbp = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rspiocbp;
}
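
A pattern repeated throughout the rewritten function above: the hbalock is dropped before every completion handler invocation and retaken afterwards, because handlers routinely call back into the SLI layer and would deadlock trying to acquire the same lock. A userspace analogue of the discipline, sketched with pthreads:

    #include <pthread.h>

    struct ctx {
            pthread_mutex_t lock;
            /* ... state protected by lock ... */
    };

    /* Caller holds ctx->lock on entry and on return. Never invoke
     * externally supplied completion code while holding it. */
    static void complete_one(struct ctx *c, void (*cmpl)(void *), void *arg)
    {
            pthread_mutex_unlock(&c->lock);
            cmpl(arg);                   /* may re-enter and take the lock */
            pthread_mutex_lock(&c->lock);
    }
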
@@ -3786,7 +4380,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0303 Ring %d handler: portRspPut %d "
"is bigger than rsp ring %d\n",
pring->ringno, portRspPut, portRspMax);
@@ -3920,8 +4514,8 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
irspiocbq = container_of(cq_event, struct lpfc_iocbq,
cq_event);
/* Translate ELS WCQE to response IOCBQ */
- irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
- irspiocbq);
+ irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
+ irspiocbq);
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
@@ -3957,47 +4551,68 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- LIST_HEAD(completions);
+ LIST_HEAD(tx_completions);
+ LIST_HEAD(txcmplq_completions);
struct lpfc_iocbq *iocb, *next_iocb;
+ int offline;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
+ offline = pci_channel_offline(phba->pcidev);
/* Error everything on txq and txcmplq
* First do the txq.
*/
if (phba->sli_rev >= LPFC_SLI_REV4) {
spin_lock_irq(&pring->ring_lock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
- spin_unlock_irq(&phba->hbalock);
+ if (offline) {
+ list_splice_init(&pring->txcmplq,
+ &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
+ spin_unlock_irq(&pring->ring_lock);
} else {
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ if (offline) {
+ list_splice_init(&pring->txcmplq, &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
spin_unlock_irq(&phba->hbalock);
}
+ if (offline) {
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ } else {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
/**
* lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
* @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
*
* This function aborts all iocbs in FCP rings and frees all the iocb
* objects in txq. This function issues an abort iocb for all the iocb commands
@@ -4058,7 +4673,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
@@ -4069,10 +4684,12 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &txq,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
- /* Flush the txcmpq */
+ /* Flush the txcmplq */
lpfc_sli_cancel_iocbs(phba, &txcmplq,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ lpfc_sli4_io_xri_aborted(phba, NULL, 0);
}
} else {
pring = &psli->sli3_ring[LPFC_FCP_RING];
@@ -4082,7 +4699,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
@@ -4122,6 +4739,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
if (lpfc_readl(phba->HSregaddr, &status))
return 1;
+ phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+
/*
* Check status register every 100ms for 5 retries, then every
* 500ms for 5, then every 2.5 sec for 5, then reset board and
@@ -4153,7 +4772,7 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2751 Adapter failed to restart, "
"status reg x%x, FW Data: A8 x%x AC x%x\n",
status,
@@ -4199,6 +4818,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
} else
phba->sli4_hba.intr_enable = 0;
+ phba->hba_flag &= ~HBA_SETUP;
return retval;
}
@@ -4229,7 +4849,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
- volatile uint32_t mbox;
+ volatile struct MAILBOX_word0 mbox;
uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -4263,13 +4883,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
phba->pport->stopped = 1;
}
- mbox = 0;
- ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+ mbox.word0 = 0;
+ mbox.mbxCommand = MBX_KILL_BOARD;
+ mbox.mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
- writel(mbox, mbox_buf);
+ writel(mbox.word0, mbox_buf);
for (i = 0; i < 50; i++) {
if (lpfc_readl((resp_buf + 1), &resp_data))
@@ -4290,12 +4910,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
goto clear_errat;
}
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+ mbox.mbxOwner = OWN_HOST;
resp_data = 0;
for (i = 0; i < 500; i++) {
if (lpfc_readl(resp_buf, &resp_data))
return;
- if (resp_data != mbox)
+ if (resp_data != mbox.word0)
mdelay(1);
else
break;
@@ -4375,7 +4995,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2752 KILL_BOARD command failed retval %d\n",
retval);
spin_lock_irq(&phba->hbalock);
@@ -4450,6 +5070,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
/* perform board reset */
phba->fc_eventTag = 0;
phba->link_events = 0;
+ phba->hba_flag |= HBA_NEEDS_CFG_PORT;
if (phba->pport) {
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
@@ -4518,18 +5139,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
+ phba->hba_flag &= ~HBA_SETUP;
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
- /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
- if (phba->hba_flag & HBA_FW_DUMP_OP) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return rc;
- }
-
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -4569,9 +5185,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
- MAILBOX_t *mb;
+ volatile struct MAILBOX_word0 mb;
struct lpfc_sli *psli;
- volatile uint32_t word0;
void __iomem *to_slim;
uint32_t hba_aer_enabled;
@@ -4588,24 +5203,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
(phba->pport) ? phba->pport->port_state : 0,
psli->sli_flag);
- word0 = 0;
- mb = (MAILBOX_t *) &word0;
- mb->mbxCommand = MBX_RESTART;
- mb->mbxHc = 1;
+ mb.word0 = 0;
+ mb.mbxCommand = MBX_RESTART;
+ mb.mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport && phba->pport->port_state)
- word0 = 1; /* This is really setting up word1 */
+ mb.word0 = 1; /* This is really setting up word1 */
else
- word0 = 0; /* This is really setting up word1 */
+ mb.word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
@@ -4664,6 +5278,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
+ /* Preserve FA-PWWN expectation */
+ phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -4727,7 +5343,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (i++ >= 200) {
/* Adapter failed to init, timeout, status reg
<status> */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0436 Adapter failed to init, "
"timeout, status reg x%x, "
"FW Data: A8 x%x AC x%x\n", status,
@@ -4742,7 +5358,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg
<status> */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0437 Adapter failed to init, "
"chipset, status reg x%x, "
"FW Data: A8 x%x AC x%x\n", status,
@@ -4773,7 +5389,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (status & HS_FFERM) {
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg <status> */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0438 Adapter failed to init, chipset, "
"status reg x%x, "
"FW Data: A8 x%x AC x%x\n", status,
@@ -4783,6 +5399,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return -EIO;
}
+ phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -4996,7 +5614,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
LPFC_SLI3_CRP_ENABLED |
LPFC_SLI3_DSS_ENABLED);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
@@ -5034,23 +5652,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else
phba->max_vpi = 0;
- phba->fips_level = 0;
- phba->fips_spec_rev = 0;
- if (pmb->u.mb.un.varCfgPort.gdss) {
- phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
- phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
- phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2850 Security Crypto Active. FIPS x%d "
- "(Spec Rev: x%d)",
- phba->fips_level, phba->fips_spec_rev);
- }
- if (pmb->u.mb.un.varCfgPort.sec_err) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2856 Config Port Security Crypto "
- "Error: x%x ",
- pmb->u.mb.un.varCfgPort.sec_err);
- }
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
@@ -5063,7 +5664,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
if (pmb->u.mb.un.varCfgPort.gbg == 0) {
phba->cfg_enable_bg = 0;
phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0443 Adapter did not grant "
"BlockGuard\n");
}
@@ -5096,45 +5697,18 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3, i;
+ int i;
int longs;
- switch (phba->cfg_sli_mode) {
- case 2:
- if (phba->cfg_enable_npiv) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
- "1824 NPIV enabled: Override sli_mode "
- "parameter (%d) to auto (0).\n",
- phba->cfg_sli_mode);
- break;
- }
- mode = 2;
- break;
- case 0:
- case 3:
- break;
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
- "1819 Unrecognized sli_mode parameter: %d.\n",
- phba->cfg_sli_mode);
-
- break;
+ /* Enabling the ISR already does config_port because of the config_msi mbx */
+ if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
+ rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ if (rc)
+ return -EIO;
+ phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
}
phba->fcp_embed_io = 0; /* SLI4 FC support only */
- rc = lpfc_sli_config_port(phba, mode);
-
- if (rc && phba->cfg_sli_mode == 3)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
- "1820 Unable to select SLI-3. "
- "Not supported by adapter.\n");
- if (rc && mode != 2)
- rc = lpfc_sli_config_port(phba, 2);
- else if (rc && mode == 2)
- rc = lpfc_sli_config_port(phba, 3);
- if (rc)
- goto lpfc_sli_hba_setup_error;
-
/* Enable PCIe device Advanced Error Reporting (AER) if configured */
if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
rc = pci_enable_pcie_error_reporting(phba->pcidev);
@@ -5219,7 +5793,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0445 Firmware initialization failed\n");
return rc;
}
@@ -5227,7 +5801,7 @@ lpfc_sli_hba_setup_error:
/**
* lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
* @phba: Pointer to HBA context object.
- * @mboxq: mailbox pointer.
+ *
* This function issue a dump mailbox command to read config region
* 23 and parse the records in the region and populate driver
* data structure.
@@ -5282,26 +5856,20 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
mboxq->mcqe.trailer);
if (rc) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = 0;
out_free_mboxq:
- mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return rc;
}
@@ -5416,7 +5984,7 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
LPFC_SLI4_MBX_NEMBED);
if (alloclen < reqlen) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3084 Allocated DMA memory size (%d) is "
"less than the requested DMA memory size "
"(%d)\n", alloclen, reqlen);
@@ -5447,23 +6015,25 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
phba->sli4_hba.lnk_info.lnk_no =
bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+ phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
+ phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
sizeof(phba->BIOSVersion));
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
+ "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
+ "flash_id: x%02x, asic_rev: x%02x\n",
phba->sli4_hba.lnk_info.lnk_tp,
phba->sli4_hba.lnk_info.lnk_no,
- phba->BIOSVersion);
+ phba->BIOSVersion, phba->sli4_hba.flash_id,
+ phba->sli4_hba.asic_rev);
out_free_mboxq:
- if (rc != MBX_TIMEOUT) {
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- else
- mempool_free(mboxq, phba->mbox_mem_pool);
- }
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
}
@@ -5498,6 +6068,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
/* obtain link type and link number via READ_CONFIG */
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
lpfc_sli4_read_config(phba);
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
goto retrieve_ppname;
@@ -5564,12 +6138,10 @@ retrieve_ppname:
}
out_free_mboxq:
- if (rc != MBX_TIMEOUT) {
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- else
- mempool_free(mboxq, phba->mbox_mem_pool);
- }
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
}
@@ -5643,6 +6215,9 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
LPFC_MBOXQ_t *mbox;
+ *extnt_count = 0;
+ *extnt_size = 0;
+
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -5676,7 +6251,7 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
if (bf_get(lpfc_mbox_hdr_status,
&rsrc_info->header.cfg_shdr.response)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2930 Failed to get resource extents "
"Status 0x%x Add'l Status 0x%x\n",
bf_get(lpfc_mbox_hdr_status,
@@ -5765,10 +6340,10 @@ lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
/**
* lpfc_sli4_cfg_post_extnts -
* @phba: Pointer to HBA context object.
- * @extnt_cnt - number of available extents.
- * @type - the extent type (rpi, xri, vfi, vpi).
- * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
- * @mbox - pointer to the caller's allocated mailbox structure.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
*
* This function executes the extents allocation request. It also
* takes care of the amount of memory needed to allocate or get the
@@ -5814,7 +6389,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
req_len, *emb);
if (alloc_len < req_len) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2982 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
@@ -5870,7 +6445,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
return -EIO;
if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3009 No available Resource Extents "
"for resource type 0x%x: Count: 0x%x, "
"Size 0x%x\n", type, rsrc_cnt,
@@ -6121,7 +6696,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
if (bf_get(lpfc_mbox_hdr_status,
&dealloc_rsrc->header.cfg_shdr.response)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2919 Failed to release resource extents "
"for type %d - Status 0x%x Add'l Status 0x%x. "
"Resource memory not released.\n",
@@ -6190,6 +6765,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
uint32_t feature)
{
uint32_t len;
+ u32 sig_freq = 0;
len = sizeof(struct lpfc_mbx_set_feature) -
sizeof(struct lpfc_sli4_cfg_mhdr);
@@ -6212,6 +6788,35 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_CGN_SIGNAL:
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ sig_freq = 0;
+ else
+ sig_freq = phba->cgn_sig_freq;
+
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+ bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+ bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+ }
+
+ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
+ bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+
+ if (phba->cmf_active_mode == LPFC_CFG_OFF ||
+ phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
+ sig_freq = 0;
+ else
+ sig_freq = lpfc_acqe_cgn_frequency;
+
+ bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
+ &mbox->u.mqe.un.set_feature, sig_freq);
+
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 12;
+ break;
case LPFC_SET_DUAL_DUMP:
bf_set(lpfc_mbx_set_feature_dd,
&mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
@@ -6220,8 +6825,27 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
mbox->u.mqe.un.set_feature.param_len = 4;
break;
+ case LPFC_SET_ENABLE_MI:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
+ phba->pport->cfg_lun_queue_depth);
+ bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
+ phba->sli4_hba.pc_sli4_params.mi_ver);
+ break;
+ case LPFC_SET_LD_SIGNAL:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 16;
+ bf_set(lpfc_mbx_set_feature_lds_qry,
+ &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
+ break;
+ case LPFC_SET_ENABLE_CMF:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ bf_set(lpfc_mbx_set_feature_cmf,
+ &mbox->u.mqe.un.set_feature, 1);
+ break;
}
-
return;
}
@@ -6315,7 +6939,7 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
&ras_fwlog->lwpd.phys,
GFP_KERNEL);
if (!ras_fwlog->lwpd.virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6185 LWPD Memory Alloc Failed\n");
return -ENOMEM;
@@ -6356,7 +6980,7 @@ free_mem:
/**
* lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
* @phba: pointer to lpfc hba data structure.
- * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
*
* Completion handler for driver's RAS MBX command to the device.
**/
@@ -6376,7 +7000,7 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6188 FW LOG mailbox "
"completed with status x%x add_status x%x,"
" mbx status x%x\n",
@@ -6444,7 +7068,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
/* Setup Mailbox command */
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6190 RAS MBX Alloc Failed");
rc = -ENOMEM;
goto mem_free;
@@ -6492,7 +7116,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6191 FW-Log Mailbox failed. "
"status %d mbxStatus : x%x", rc,
bf_get(lpfc_mqe_status, &mbox->u.mqe));
@@ -6628,7 +7252,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
if (count <= 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3279 Invalid provisioning of "
"rpi:%d\n", count);
rc = -EINVAL;
@@ -6656,7 +7280,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
if (count <= 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3280 Invalid provisioning of "
"vpi:%d\n", count);
rc = -EINVAL;
@@ -6683,7 +7307,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* XRIs. */
count = phba->sli4_hba.max_cfg_param.max_xri;
if (count <= 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3281 Invalid provisioning of "
"xri:%d\n", count);
rc = -EINVAL;
@@ -6712,7 +7336,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VFIs. */
count = phba->sli4_hba.max_cfg_param.max_vfi;
if (count <= 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3282 Invalid provisioning of "
"vfi:%d\n", count);
rc = -EINVAL;
@@ -6806,7 +7430,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
* lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
* @phba: Pointer to HBA context object.
* @type: The resource extent type.
- * @extnt_count: buffer to hold port extent count response
+ * @extnt_cnt: buffer to hold port extent count response
* @extnt_size: buffer to hold port extent size response.
*
* This function calls the port to read the host allocated extents
@@ -6890,7 +7514,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
req_len, emb);
if (alloc_len < req_len) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2983 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
@@ -6933,7 +7557,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
}
if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2984 Failed to read allocated resources "
"for type %d - Status 0x%x Add'l Status 0x%x.\n",
type,
@@ -6950,7 +7574,6 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
/**
* lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
* @phba: pointer to lpfc hba data structure.
- * @pring: Pointer to driver SLI ring object.
* @sgl_list: linked link of sgl buffers to post
* @cnt: number of linked list buffers
*
@@ -7088,7 +7711,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
} else {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3161 Failure to post sgl to port.\n");
return -EIO;
}
@@ -7143,7 +7766,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
mbox->u.mqe.un.set_host_data.param_len =
LPFC_HOST_OS_DRIVER_VERSION_SIZE;
- snprintf(mbox->u.mqe.un.set_host_data.data,
+ snprintf(mbox->u.mqe.un.set_host_data.un.data,
LPFC_HOST_OS_DRIVER_VERSION_SIZE,
"Linux %s v"LPFC_DRIVER_VERSION,
(phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
@@ -7161,12 +7784,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct rqb_dmabuf *rqb_buffer;
LIST_HEAD(rqb_buf_list);
- spin_lock_irqsave(&phba->hbalock, flags);
rqbp = hrq->rqbp;
for (i = 0; i < count; i++) {
+ spin_lock_irqsave(&phba->hbalock, flags);
/* IF RQ is already full, don't bother */
- if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
break;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
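+ /* Allocate outside hbalock; rqb_alloc_buffer may sleep */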
rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
@@ -7175,6 +7802,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rqb_buffer->idx = idx;
list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
}
+
+ spin_lock_irqsave(&phba->hbalock, flags);
while (!list_empty(&rqb_buf_list)) {
list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
hbuf.list);
@@ -7185,7 +7814,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
if (rc < 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6421 Cannot post to HRQ %d: %x %x %x "
"DRQ %x %x\n",
hrq->queue_id,
@@ -7205,6 +7834,619 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
return 1;
}
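+/**
+ * lpfc_mbx_cmpl_read_lds_params - Completion handler for LD signal query
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * Caches the link degrade activate/deactivate thresholds and the FEC
+ * degrade interval from the SET_FEATURE response; all three are cleared
+ * if the mailbox completed in error.
+ **/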
+static void
+lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
+ "4622 SET_FEATURE (x%x) mbox failed, "
+ "status x%x add_status x%x, mbx status x%x\n",
+ LPFC_SET_LD_SIGNAL, shdr_status,
+ shdr_add_status, pmb->u.mb.mbxStatus);
+ phba->degrade_activate_threshold = 0;
+ phba->degrade_deactivate_threshold = 0;
+ phba->fec_degrade_interval = 0;
+ goto out;
+ }
+
+ phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
+ phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
+ phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
+ "4624 Success: da x%x dd x%x interval x%x\n",
+ phba->degrade_activate_threshold,
+ phba->degrade_deactivate_threshold,
+ phba->fec_degrade_interval);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
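+/**
+ * lpfc_read_lds_params - Query link degrade signaling parameters
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Issues a no-wait SET_FEATURE (LPFC_SET_LD_SIGNAL) mailbox command whose
+ * results are parsed by lpfc_mbx_cmpl_read_lds_params.
+ *
+ * Return: 0 if the mailbox was issued, -ENOMEM or -EIO otherwise.
+ **/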
+int
+lpfc_read_lds_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
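+/**
+ * lpfc_mbx_cmpl_cgn_set_ftrs - Completion handler for CGN SET_FEATURES
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ **/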
+static void
+lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+ u32 sig, acqe;
+
+ /* Two outcomes. (1) Set features was successful and EDC negotiation
+ * is done. (2) Mailbox failed, so fall back to FPIN support only.
+ */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+ "2516 CGN SET_FEATURE mbox failed with "
+ "status x%x add_status x%x, mbx status x%x "
+ "Reset Congestion to FPINs only\n",
+ shdr_status, shdr_add_status,
+ pmb->u.mb.mbxStatus);
+ /* If there is a mbox error, move on to RDF */
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ goto out;
+ }
+
+ /* Zero out Congestion Signal ACQE counter */
+ phba->cgn_acqe_cnt = 0;
+
+ acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
+ &pmb->u.mqe.un.set_feature);
+ sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
+ &pmb->u.mqe.un.set_feature);
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4620 SET_FEATURES Success: Freq: %ds %dms "
+ " Reg: x%x x%x\n", acqe, sig,
+ phba->cgn_reg_signal, phba->cgn_reg_fpin);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Register for FPIN events from the fabric now that the
+ * EDC common_set_features has completed.
+ */
+ lpfc_issue_els_rdf(vport, 0);
+}
+
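+/**
+ * lpfc_config_cgn_signal - Configure congestion signal frequencies
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Issues the LPFC_SET_CGN_SIGNAL SET_FEATURES mailbox command. On any
+ * failure, congestion reporting is reset to FPINs only and an RDF is
+ * issued immediately.
+ *
+ * Return: 0 if the mailbox was issued, -EIO otherwise.
+ **/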
+int
+lpfc_config_cgn_signal(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ u32 rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ goto out_rdf;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
+ "Reg: x%x x%x\n",
+ phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
+ phba->cgn_reg_signal, phba->cgn_reg_fpin);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
+ return 0;
+
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+out_rdf:
+ /* If there is a mbox error, move on to RDF */
+ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+ lpfc_issue_els_rdf(phba->pport, 0);
+ return -EIO;
+}
+
+/**
+ * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the per-cq idle_stat to dynamically dictate
+ * polling decisions.
+ *
+ * Return codes:
+ * None
+ **/
+static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
+{
+ int i;
+ struct lpfc_sli4_hdw_queue *hdwq;
+ struct lpfc_queue *cq;
+ struct lpfc_idle_stat *idle_stat;
+ u64 wall;
+
+ for_each_present_cpu(i) {
+ hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
+ cq = hdwq->io_cq;
+
+ /* Skip if we've already handled this cq's primary CPU */
+ if (cq->chann != i)
+ continue;
+
+ idle_stat = &phba->sli4_hba.idle_stat[i];
+
+ idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
+ idle_stat->prev_wall = wall;
+
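+ /* nvmet and active CMF keep CQ servicing in workqueue context;
+ * all other configurations use irq polling.
+ */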
+ if (phba->nvmet_support ||
+ phba->cmf_active_mode != LPFC_CFG_OFF)
+ cq->poll_mode = LPFC_QUEUE_WORK;
+ else
+ cq->poll_mode = LPFC_IRQ_POLL;
+ }
+
+ if (!phba->nvmet_support)
+ schedule_delayed_work(&phba->idle_stat_delay_work,
+ msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
+}
+
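+/**
+ * lpfc_sli4_dip - Log presence of a firmware dump image
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * On if_type 2 and 6 ports, reads the SLIPORT status register and logs
+ * a message when the dump-image-present bit is set.
+ **/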
+static void lpfc_sli4_dip(struct lpfc_hba *phba)
+{
+ uint32_t if_type;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
+ if_type == LPFC_SLI_INTF_IF_TYPE_6) {
+ struct lpfc_register reg_data;
+
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &reg_data.word0))
+ return;
+
+ if (bf_get(lpfc_sliport_status_dip, &reg_data))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2904 Firmware Dump Image Present"
+ " on Adapter");
+ }
+}
+
+/**
+ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entries: Number of rx_info_entry objects to allocate in ring
+ *
+ * Return:
+ * 0 - Success
+ * -ENOMEM - Failed to allocate the ring with kmalloc_array
+ **/
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries)
+{
+ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
+ GFP_KERNEL);
+ if (!rx_monitor->ring)
+ return -ENOMEM;
+
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_lock_init(&rx_monitor->lock);
+ rx_monitor->entries = entries;
+
+ return 0;
+}
+
+/**
+ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ **/
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
+{
+ spin_lock(&rx_monitor->lock);
+ kfree(rx_monitor->ring);
+ rx_monitor->ring = NULL;
+ rx_monitor->entries = 0;
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_unlock(&rx_monitor->lock);
+}
+
+/**
+ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entry: Pointer to rx_info_entry
+ *
+ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
+ * deep copy of the rx_info_entry, not a shallow copy of the rx_info_entry ptr.
+ *
+ * This is called from lpfc_cmf_timer, which is in timer/softirq context.
+ *
+ * On overflow, the oldest entries are overwritten, preserving FIFO order
+ * on a best-effort basis.
+ **/
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+
+ spin_lock(ring_lock);
+ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
+ *tail_idx = (*tail_idx + 1) % ring_size;
+
+ /* Best-effort FIFO: overwrite the oldest entry on overflow */
+ if (*tail_idx == *head_idx)
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ spin_unlock(ring_lock);
+}
+
+/**
+ * lpfc_rx_monitor_report - Read out rx_monitor's ring
+ * @phba: Pointer to lpfc_hba object
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @buf: Pointer to char buffer that will contain rx monitor info data
+ * @buf_len: Length of buf including the null char
+ * @max_read_entries: Maximum number of entries to read out of ring
+ *
+ * Used to dump/read what's in rx_monitor's ring buffer.
+ *
+ * If buf is NULL || buf_len == 0, then it is implied that we want to log the
+ * information to kmsg instead of filling out buf.
+ *
+ * Return:
+ * Number of entries read out of the ring
+ **/
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ struct rx_info_entry *entry;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+ u32 cnt = 0;
+ char tmp[DBG_LOG_STR_SZ] = {0};
+ bool log_to_kmsg = !buf || !buf_len;
+
+ if (!log_to_kmsg) {
+ /* clear the buffer to be sure */
+ memset(buf, 0, buf_len);
+
+ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
+ "%-8s%-8s%-8s%-16s\n",
+ "MaxBPI", "Tot_Data_CMF",
+ "Tot_Data_Cmd", "Tot_Data_Cmpl",
+ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
+ "IO_cnt", "Info", "BWutil(ms)");
+ }
+
+ /* Needs to be _bh because record is called from timer interrupt
+ * context
+ */
+ spin_lock_bh(ring_lock);
+ while (*head_idx != *tail_idx) {
+ entry = &ring[*head_idx];
+
+ /* Read out this entry's data. */
+ if (!log_to_kmsg) {
+ /* If !log_to_kmsg, then store to buf. */
+ scnprintf(tmp, sizeof(tmp),
+ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
+ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
+ *head_idx, entry->max_bytes_per_interval,
+ entry->cmf_bytes, entry->total_bytes,
+ entry->rcv_bytes, entry->avg_io_latency,
+ entry->avg_io_size, entry->max_read_cnt,
+ entry->cmf_busy, entry->io_cnt,
+ entry->cmf_info, entry->timer_utilization,
+ entry->timer_interval);
+
+ /* Check for buffer overflow */
+ if ((strlen(buf) + strlen(tmp)) >= buf_len)
+ break;
+
+ /* Append entry's data to buffer */
+ strlcat(buf, tmp, buf_len);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4410 %02u: MBPI %llu Xmit %llu "
+ "Cmpl %llu Lat %llu ASz %llu Info %02u "
+ "BWUtil %u Int %u slot %u\n",
+ cnt, entry->max_bytes_per_interval,
+ entry->total_bytes, entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size, entry->cmf_info,
+ entry->timer_utilization,
+ entry->timer_interval, *head_idx);
+ }
+
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ /* Don't feed more than max_read_entries */
+ cnt++;
+ if (cnt >= max_read_entries)
+ break;
+ }
+ spin_unlock_bh(ring_lock);
+
+ return cnt;
+}
+
+/**
+ * lpfc_cmf_setup - Initialize CMF and MI support
+ * @phba: Pointer to HBA context object.
+ *
+ * This is called from HBA setup during driver load or when the HBA
+ * comes online. It does all the initialization needed to support CMF and MI.
+ **/
+static int
+lpfc_cmf_setup(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_pc_sli4_params *sli4_params;
+ int rc, cmf, mi_ver;
+
+ rc = lpfc_sli4_refresh_params(phba);
+ if (unlikely(rc))
+ return rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+
+ /* Always try to enable MI feature if we can */
+ if (sli4_params->mi_ver) {
+ lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ mi_ver = bf_get(lpfc_mbx_set_feature_mi,
+ &mboxq->u.mqe.un.set_feature);
+
+ if (rc == MBX_SUCCESS) {
+ if (mi_ver) {
+ lpfc_printf_log(phba,
+ KERN_WARNING, LOG_CGN_MGMT,
+ "6215 MI is enabled\n");
+ sli4_params->mi_ver = mi_ver;
+ } else {
+ lpfc_printf_log(phba,
+ KERN_WARNING, LOG_CGN_MGMT,
+ "6338 MI is disabled\n");
+ sli4_params->mi_ver = 0;
+ }
+ } else {
+ /* mi_ver is already set from GET_SLI4_PARAMETERS */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_CGN_MGMT | LOG_INIT,
+ "6245 Enable MI Mailbox x%x (x%x/x%x) "
+ "failed, rc:x%x mi:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get
+ (phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get
+ (phba, mboxq),
+ rc, sli4_params->mi_ver);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6217 MI is disabled\n");
+ }
+
+ /* Ensure FDMI is enabled for MI if enable_mi is set */
+ if (sli4_params->mi_ver)
+ phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
+
+ /* Always try to enable CMF feature if we can */
+ if (sli4_params->cmf) {
+ lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ cmf = bf_get(lpfc_mbx_set_feature_cmf,
+ &mboxq->u.mqe.un.set_feature);
+ if (rc == MBX_SUCCESS && cmf) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6218 CMF is enabled: mode %d\n",
+ phba->cmf_active_mode);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_CGN_MGMT | LOG_INIT,
+ "6219 Enable CMF Mailbox x%x (x%x/x%x) "
+ "failed, rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get
+ (phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get
+ (phba, mboxq),
+ rc, cmf);
+ sli4_params->cmf = 0;
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ goto no_cmf;
+ }
+
+ /* Allocate Congestion Information Buffer */
+ if (!phba->cgn_i) {
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (mp)
+ mp->virt = dma_alloc_coherent
+ (&phba->pcidev->dev,
+ sizeof(struct lpfc_cgn_info),
+ &mp->phys, GFP_KERNEL);
+ if (!mp || !mp->virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2640 Failed to alloc memory "
+ "for Congestion Info\n");
+ kfree(mp);
+ sli4_params->cmf = 0;
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ goto no_cmf;
+ }
+ phba->cgn_i = mp;
+
+ /* initialize congestion buffer info */
+ lpfc_init_congestion_buf(phba);
+ lpfc_init_congestion_stat(phba);
+
+ /* Zero out Congestion Signal counters */
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+ }
+
+ rc = lpfc_sli4_cgn_params_read(phba);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "6242 Error reading Cgn Params (%d)\n",
+ rc);
+ /* Ensure CGN Mode is off */
+ sli4_params->cmf = 0;
+ } else if (!rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+ "6243 CGN Event empty object.\n");
+ /* Ensure CGN Mode is off */
+ sli4_params->cmf = 0;
+ }
+ } else {
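+ /* no_cmf is also the jump target for the enable-CMF failure
+ * paths above; both converge on the disabled message below.
+ */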
+no_cmf:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ "6220 CMF is disabled\n");
+ }
+
+ /* Only register congestion buffer with firmware if BOTH
+ * CMF and E2E are enabled.
+ */
+ if (sli4_params->cmf && sli4_params->mi_ver) {
+ rc = lpfc_reg_congestion_buf(phba);
+ if (rc) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(struct lpfc_cgn_info),
+ phba->cgn_i->virt, phba->cgn_i->phys);
+ kfree(phba->cgn_i);
+ phba->cgn_i = NULL;
+ /* Ensure CGN Mode is off */
+ phba->cmf_active_mode = LPFC_CFG_OFF;
+ return 0;
+ }
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6470 Setup MI version %d CMF %d mode %d\n",
+ sli4_params->mi_ver, sli4_params->cmf,
+ phba->cmf_active_mode);
+
+ mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Initialize atomic counters */
+ atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+ atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+ atomic_set(&phba->cgn_sync_warn_cnt, 0);
+ atomic_set(&phba->cgn_driver_evt_cnt, 0);
+ atomic_set(&phba->cgn_latency_evt_cnt, 0);
+ atomic64_set(&phba->cgn_latency_evt, 0);
+
+ phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+ /* Allocate RX Monitor Buffer */
+ if (!phba->rx_monitor) {
+ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
+ GFP_KERNEL);
+
+ if (!phba->rx_monitor) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2644 Failed to alloc memory "
+ "for RX Monitor Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Instruct the rx_monitor object to instantiate its ring */
+ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
+ LPFC_MAX_RXMONITOR_ENTRY)) {
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2645 Failed to alloc memory "
+ "for RX Monitor's Ring\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
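+/**
+ * lpfc_set_host_tm - Send the current host date and time to the adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Builds a SET_HOST_DATA mailbox command carrying the broken-down wall
+ * clock time and issues it in polled mode.
+ *
+ * Return: mailbox completion status, or -ENOMEM on allocation failure.
+ **/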
+static int
+lpfc_set_host_tm(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t len, rc;
+ struct timespec64 cur_time;
+ struct tm broken;
+ uint32_t month, day, year;
+ uint32_t hour, minute, second;
+ struct lpfc_mbx_set_host_date_time *tm;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ len = sizeof(struct lpfc_mbx_set_host_data) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
+ mboxq->u.mqe.un.set_host_data.param_len =
+ sizeof(struct lpfc_mbx_set_host_date_time);
+ tm = &mboxq->u.mqe.un.set_host_data.un.tm;
+ ktime_get_real_ts64(&cur_time);
+ time64_to_tm(cur_time.tv_sec, 0, &broken);
+ month = broken.tm_mon + 1;
+ day = broken.tm_mday;
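+ /* tm_year counts from 1900; convert to years since 2000 */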
+ year = broken.tm_year - 100;
+ hour = broken.tm_hour;
+ minute = broken.tm_min;
+ second = broken.tm_sec;
+ bf_set(lpfc_mbx_set_host_month, tm, month);
+ bf_set(lpfc_mbx_set_host_day, tm, day);
+ bf_set(lpfc_mbx_set_host_year, tm, year);
+ bf_set(lpfc_mbx_set_host_hour, tm, hour);
+ bf_set(lpfc_mbx_set_host_min, tm, minute);
+ bf_set(lpfc_mbx_set_host_sec, tm, second);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
@@ -7227,6 +8469,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
struct lpfc_rqb *rqbp;
+ u32 flg;
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
@@ -7240,9 +8483,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+ flg = phba->sli.sli_flag;
spin_unlock_irq(&phba->hbalock);
+ /* Allow a little time after setting SLI_ACTIVE for any polled
+ * MBX commands to complete via BSG.
+ */
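+ /* Up to 50 polls at 20ms each, about one second total */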
+ for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
+ msleep(20);
+ spin_lock_irq(&phba->hbalock);
+ flg = phba->sli.sli_flag;
+ spin_unlock_irq(&phba->hbalock);
+ }
}
+ lpfc_sli4_dip(phba);
+
/*
* Allocate a single mailbox container for initializing the
* port.
@@ -7283,7 +8538,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->hba_flag &= ~HBA_IOQ_FLUSH;
if (phba->sli_rev != LPFC_SLI_REV4) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0376 READ_REV Error. SLI Level %d "
"FCoE enabled %d\n",
phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
@@ -7292,6 +8547,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ rc = lpfc_set_host_tm(phba);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "6468 Set host date / time: Status x%x:\n", rc);
+
/*
* Continue initialization with default values even if driver failed
* to read FCoE param config regions, only read parameters if the
@@ -7325,7 +8584,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
*/
rc = lpfc_parse_vpd(phba, vpd, vpd_size);
if (unlikely(!rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0377 Error %d parsing vpd. "
"Using defaults.\n", rc);
rc = 0;
@@ -7371,15 +8630,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3362 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, rc);
- phba->pport->cfg_lun_queue_depth = rc;
- }
-
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
@@ -7416,6 +8666,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ /* Disable VMID if app header is not supported */
+ if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
+ &mqe->un.req_ftrs))) {
+ bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
+ phba->cfg_vmid_app_header = 0;
+ lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
+ "1242 vmid feature not supported\n");
+ }
+
/*
* The port must support FCP initiator mode as this is the
* only mode running in the host.
@@ -7473,7 +8732,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6448 Dual Dump is enabled\n");
else
lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
@@ -7491,7 +8750,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
*/
rc = lpfc_sli4_alloc_resource_identifiers(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2920 Failed to alloc Resource IDs "
"rc = x%x\n", rc);
goto out_free_mbox;
@@ -7523,14 +8782,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/*
- * This memory was allocated by the lpfc_read_sparam routine. Release
- * it to the mbuf pool.
+ * This memory was allocated by the lpfc_read_sparam routine but is
+ * no longer needed. It is released and ctx_buf NULLed to prevent
+ * unintended pointer access as the mbox is reused.
*/
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mboxq->ctx_buf = NULL;
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0382 READ_SPARAM command failed "
"status %d, mbxStatus x%x\n",
rc, bf_get(lpfc_mqe_status, mqe));
@@ -7548,7 +8808,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3089 Failed to allocate queues\n");
rc = -ENODEV;
goto out_free_mbox;
@@ -7556,7 +8816,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Set up all the queues to the device */
rc = lpfc_sli4_queue_setup(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0381 Error %d during queue setup.\n ", rc);
goto out_stop_timers;
}
@@ -7567,7 +8827,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* update host els xri-sgl sizes and mappings */
rc = lpfc_sli4_els_sgl_update(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1400 Failed to update xri-sgl size and "
"mapping: %d\n", rc);
goto out_destroy_queue;
@@ -7577,7 +8837,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
phba->sli4_hba.els_xri_cnt);
if (unlikely(rc < 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0582 Error %d during els sgl post "
"operation\n", rc);
rc = -ENODEV;
@@ -7589,7 +8849,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* update host nvmet xri-sgl sizes and mappings */
rc = lpfc_sli4_nvmet_sgl_update(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6308 Failed to update nvmet-sgl size "
"and mapping: %d\n", rc);
goto out_destroy_queue;
@@ -7601,7 +8861,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
&phba->sli4_hba.lpfc_nvmet_sgl_list,
phba->sli4_hba.nvmet_xri_cnt);
if (unlikely(rc < 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3117 Error %d during nvmet "
"sgl post\n", rc);
rc = -ENODEV;
@@ -7618,7 +8878,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_io_sgl_update(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6082 Failed to update nvme-sgl size "
"and mapping: %d\n", rc);
goto out_destroy_queue;
@@ -7627,7 +8887,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* register the allocated common sgl pool to the port */
rc = lpfc_sli4_repost_io_sgl_list(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6116 Error %d during nvme sgl post "
"operation\n", rc);
/* Some NVME buffers were moved to abort nvme list */
@@ -7648,7 +8908,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}
@@ -7677,11 +8937,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Post the rpi header region to the device. */
rc = lpfc_sli4_post_all_rpi_hdrs(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0393 Error %d during rpi post operation\n",
rc);
rc = -ENODEV;
- goto out_destroy_queue;
+ goto out_free_iocblist;
}
lpfc_sli4_node_prep(phba);
@@ -7761,7 +9021,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
phba->last_completion_time = jiffies;
/* start eq_delay heartbeat */
@@ -7769,6 +9029,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
queue_delayed_work(phba->wq, &phba->eq_delay_work,
msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+ /* start per phba idle_stat_delay heartbeat */
+ lpfc_init_idle_stat_hb(phba);
+
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
@@ -7816,14 +9079,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Indicate device interrupt mode */
phba->sli4_hba.intr_enable = 1;
+ /* Setup CMF after HBA is initialized */
+ lpfc_cmf_setup(phba);
+
if (!(phba->hba_flag & HBA_FCOE_MODE) &&
(phba->hba_flag & LINK_DISABLED)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3103 Adapter Link is disabled.\n");
lpfc_down_link(phba, mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3104 Adapter failed to issue "
"DOWN_LINK mbox cmd, rc:x%x\n", rc);
goto out_io_buff_free;
@@ -7837,15 +9103,22 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Enable RAS FW log support */
+ lpfc_sli4_ras_setup(phba);
+
+ phba->hba_flag |= HBA_SETUP;
return rc;
+
out_io_buff_free:
/* Free allocated IO Buffers */
lpfc_io_free(phba);
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
lpfc_sli4_queue_unset(phba);
-out_destroy_queue:
+out_free_iocblist:
lpfc_free_iocb_list(phba);
+out_destroy_queue:
lpfc_sli4_queue_destroy(phba);
out_stop_timers:
lpfc_stop_hba_timers(phba);
@@ -7856,7 +9129,7 @@ out_free_mbox:
/**
* lpfc_mbox_timeout - Timeout call back function for mbox timer
- * @ptr: context object - pointer to hba structure.
+ * @t: Context to fetch pointer to hba structure from.
*
* This is the callback function for mailbox timer. The mailbox
* timer is armed when a new mailbox command is issued and the timer
@@ -8008,8 +9281,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
- /* If the mailbox completed, process the completion and return */
- if (lpfc_sli4_process_missed_mbox_completions(phba))
+ /* If the mailbox completed, process the completion */
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
+ if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
return;
if (pmbox != NULL)
@@ -8030,7 +9305,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
}
/* Mbox cmd <mbxCommand> timeout */
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
mb->mbxCommand,
phba->pport->port_state,
@@ -8050,9 +9325,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- lpfc_sli_abort_fcp_rings(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0345 Resetting board due to mailbox timeout\n");
/* Reset the HBA device */
@@ -8150,7 +9423,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):0311 Mailbox command x%x cannot "
"issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
@@ -8162,7 +9435,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2528 Mailbox command x%x cannot "
"issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
@@ -8181,7 +9454,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2529 Mailbox command x%x "
"cannot issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
@@ -8193,7 +9466,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2530 Mailbox command x%x "
"cannot issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
@@ -8246,7 +9519,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2531 Mailbox command x%x "
"cannot issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
@@ -8494,8 +9767,11 @@ static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mboxq;
int rc = 0;
unsigned long timeout = 0;
+ u32 sli_flag;
+ u8 cmd, subsys, opcode;
/* Mark the asynchronous mailbox command posting as blocked */
spin_lock_irq(&phba->hbalock);
@@ -8513,12 +9789,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
if (timeout)
lpfc_sli4_process_missed_mbox_completions(phba);
- /* Wait for the outstnading mailbox command to complete */
+ /* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
- /* Timeout, marked the outstanding cmd not complete */
+ /* Timeout, mark the outstanding cmd not complete */
+
+			/* Sanity check that sli.mbox_active has not completed
+			 * or been cancelled from another context during the
+			 * last 2ms sleep, so take hbalock to be sure before
+			 * logging.
+			 */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli.mbox_active) {
+ mboxq = phba->sli.mbox_active;
+ cmd = mboxq->u.mb.mbxCommand;
+ subsys = lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq);
+ opcode = lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq);
+ sli_flag = psli->sli_flag;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2352 Mailbox command x%x "
+ "(x%x/x%x) sli_flag x%x could "
+ "not complete\n",
+ cmd, subsys, opcode,
+ sli_flag);
+ } else {
+ spin_unlock_irq(&phba->hbalock);
+ }
+
rc = 1;
break;
}
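The timeout branch above illustrates a pattern worth calling out: state sampled outside a lock during a sleep loop must be re-checked under the lock before acting on it. Distilled to its essentials (illustrative only, using the field names from the hunk):

	spin_lock_irq(&phba->hbalock);
	if (phba->sli.mbox_active) {
		/* still pending: snapshot what the log message needs ... */
		spin_unlock_irq(&phba->hbalock);
		/* ... and log outside the lock */
	} else {
		/* raced with completion during msleep(2); nothing to log */
		spin_unlock_irq(&phba->hbalock);
	}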
@@ -8637,7 +9938,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irqsave(&phba->hbalock, iflag);
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2532 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -8736,7 +10037,7 @@ exit:
/**
* lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
* @phba: Pointer to HBA context object.
- * @pmbox: Pointer to mailbox object.
+ * @mboxq: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox need to be processed.
*
* This function is called by discovery code and HBA management code to submit
@@ -8758,7 +10059,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2544 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -8780,7 +10081,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
"(%d):2541 Mailbox command x%x "
"(x%x/x%x) failure: "
"mqe_sta: x%x mcqe_sta: x%x/x%x "
- "Data: x%x x%x\n,",
+ "Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
@@ -8814,7 +10115,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
"(%d):2597 Sync Mailbox command "
"x%x (x%x/x%x) failure: "
"mqe_sta: x%x mcqe_sta: x%x/x%x "
- "Data: x%x x%x\n,",
+ "Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
@@ -8835,7 +10136,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
/* Now, interrupt mode asynchronous mailbox command */
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2543 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -8903,7 +10204,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
}
if (unlikely(phba->sli.mbox_active)) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0384 There is pending active mailbox cmd\n");
return MBX_NOT_FINISHED;
}
@@ -8964,7 +10265,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Post the mailbox command to the port */
rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):2533 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -9044,7 +10345,6 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
"1420 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
return 0;
}
@@ -9138,11 +10438,10 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
lockdep_assert_held(&phba->hbalock);
- if (piocb->iocb_cmpl && (!piocb->vport) &&
+ if (piocb->cmd_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
- lpfc_printf_log(phba, KERN_ERR,
- LOG_SLI | LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1807 IOCB x%x failed. No vport\n",
piocb->iocb.ulpCommand);
dump_stack();
@@ -9177,25 +10476,15 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
- case CMD_GEN_REQUEST64_CR:
- case CMD_GEN_REQUEST64_CX:
- if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_RCTL_DD_UNSOL_CMD) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Type !=
- MENLO_TRANSPORT_TYPE))
-
- goto iocb_busy;
- break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
* For IOCBs, like QUE_RING_BUF, that have no rsp ring
- * completion, iocb_cmpl MUST be 0.
+ * completion, cmd_cmpl MUST be 0.
*/
- if (piocb->iocb_cmpl)
- piocb->iocb_cmpl = NULL;
- /*FALLTHROUGH*/
+ if (piocb->cmd_cmpl)
+ piocb->cmd_cmpl = NULL;
+ fallthrough;
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
case CMD_CLOSE_XRI_CX:
@@ -9241,711 +10530,110 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
}
/**
- * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
* @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue wqe on.
* @piocb: Pointer to command iocb.
- * @sglq: Pointer to the scatter gather queue object.
+ * @flag: Flag indicating if this command can be put into txq.
*
- * This routine converts the bpl or bde that is in the IOCB
- * to a sgl list for the sli4 hardware. The physical address
- * of the bpl/bde is converted back to a virtual address.
- * If the IOCB contains a BPL then the list of BDE's is
- * converted to sli4_sge's. If the IOCB contains a single
- * BDE then it is converted to a single sli_sge.
- * The IOCB is still in cpu endianess so the contents of
- * the bpl can be used without byte swapping.
+ * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
+ * routine to send an iocb command to an HBA with the SLI-3 interface spec.
*
- * Returns valid XRI = Success, NO_XRI = Failure.
-**/
-static uint16_t
-lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
- struct lpfc_sglq *sglq)
+ * This function takes the hbalock before invoking the lockless version.
+ * The function returns success after it successfully submits the iocb to
+ * the firmware or after adding it to the txq.
+ **/
+static int
+__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
{
- uint16_t xritag = NO_XRI;
- struct ulp_bde64 *bpl = NULL;
- struct ulp_bde64 bde;
- struct sli4_sge *sgl = NULL;
- struct lpfc_dmabuf *dmabuf;
- IOCB_t *icmd;
- int numBdes = 0;
- int i = 0;
- uint32_t offset = 0; /* accumulated offset in the sg request list */
- int inbound = 0; /* number of sg reply entries inbound from firmware */
-
- if (!piocbq || !sglq)
- return xritag;
-
- sgl = (struct sli4_sge *)sglq->sgl;
- icmd = &piocbq->iocb;
- if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
- return sglq->sli4_xritag;
- if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = icmd->un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- /* The addrHigh and addrLow fields within the IOCB
- * have not been byteswapped yet so there is no
- * need to swap them back.
- */
- if (piocbq->context3)
- dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
- else
- return xritag;
-
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- if (!bpl)
- return xritag;
+ unsigned long iflags;
+ int rc;
- for (i = 0; i < numBdes; i++) {
- /* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((i+1) == numBdes)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
- /* The offsets in the sgl need to be accumulated
- * separately for the request and reply lists.
- * The request is always first, the reply follows.
- */
- if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
- /* add up the reply sg entries */
- if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
- inbound++;
- /* first inbound? reset the offset */
- if (inbound == 1)
- offset = 0;
- bf_set(lpfc_sli4_sge_offset, sgl, offset);
- bf_set(lpfc_sli4_sge_type, sgl,
- LPFC_SGE_TYPE_DATA);
- offset += bde.tus.f.bdeSize;
- }
- sgl->word2 = cpu_to_le32(sgl->word2);
- bpl++;
- sgl++;
- }
- } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
- /* The addrHigh and addrLow fields of the BDE have not
- * been byteswapped yet so they need to be swapped
- * before putting them in the sgl.
- */
- sgl->addr_hi =
- cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
- sgl->addr_lo =
- cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len =
- cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
- }
- return sglq->sli4_xritag;
+ return rc;
}
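For reference, a caller is expected to reach this routine only through the jump table, never directly. A sketch of such a call, modeled on how the SCSI path submits an FCP command (the helper name is hypothetical):

static int example_submit_fcp(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_cmd)
{
	int err;

	/* dispatches to __lpfc_sli_issue_fcp_io_s3 or _s4 per sli_rev */
	err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
				    &lpfc_cmd->cur_iocbq,
				    SLI_IOCB_RET_IOCB);
	return err ? -EBUSY : 0;
}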
/**
- * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
+ * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
* @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue wqe on.
* @piocb: Pointer to command iocb.
- * @wqe: Pointer to the work queue entry.
+ * @flag: Flag indicating if this command can be put into txq.
*
- * This routine converts the iocb command to its Work Queue Entry
- * equivalent. The wqe pointer should not have any fields set when
- * this routine is called because it will memcpy over them.
- * This routine does not set the CQ_ID or the WQEC bits in the
- * wqe.
+ * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to
+ * issue a wqe command to an HBA with the SLI-4 interface spec.
*
- * Returns: 0 = Success, IOCB_ERROR = Failure.
+ * This function is a lockless version. The function returns success after
+ * it successfully submits the wqe to the firmware or after adding it to
+ * the txq.
**/
static int
-lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
- union lpfc_wqe128 *wqe)
-{
- uint32_t xmit_len = 0, total_len = 0;
- uint8_t ct = 0;
- uint32_t fip;
- uint32_t abort_tag;
- uint8_t command_type = ELS_COMMAND_NON_FIP;
- uint8_t cmnd;
- uint16_t xritag;
- uint16_t abrt_iotag;
- struct lpfc_iocbq *abrtiocbq;
- struct ulp_bde64 *bpl = NULL;
- uint32_t els_id = LPFC_ELS_ID_DEFAULT;
- int numBdes, i;
- struct ulp_bde64 bde;
- struct lpfc_nodelist *ndlp;
- uint32_t *pcmd;
- uint32_t if_type;
-
- fip = phba->hba_flag & HBA_FIP_SUPPORT;
- /* The fcp commands will set command type */
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- command_type = FCP_COMMAND;
- else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
- command_type = ELS_COMMAND_FIP;
- else
- command_type = ELS_COMMAND_NON_FIP;
-
- if (phba->fcp_embed_io)
- memset(wqe, 0, sizeof(union lpfc_wqe128));
- /* Some of the fields are in the right position already */
- memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
-
- abort_tag = (uint32_t) iocbq->iotag;
- xritag = iocbq->sli4_xritag;
- /* words0-2 bpl convert bde */
- if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)iocbq->context3)->virt;
- if (!bpl)
- return IOCB_ERROR;
+__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
- /* Should already be byte swapped. */
- wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
- wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
- xmit_len = wqe->generic.bde.tus.f.bdeSize;
- total_len = 0;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- total_len += bde.tus.f.bdeSize;
- }
- } else
- xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+ lpfc_prep_embed_io(phba, lpfc_cmd);
+ return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
+}
- iocbq->iocb.ulpIoTag = iocbq->iotag;
- cmnd = iocbq->iocb.ulpCommand;
+void
+lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+{
+ struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
+ struct sli4_sge *sgl;
- switch (iocbq->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
- ndlp = iocbq->context_un.ndlp;
- else
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- if (!iocbq->iocb.ulpLe) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2007 Only Limited Edition cmd Format"
- " supported 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
+ /* 128 byte wqe support here */
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- wqe->els_req.payload_len = xmit_len;
- /* Els_reguest64 has a TMO */
- bf_set(wqe_tmo, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpTimeout);
- /* Need a VF for word 4 set the vf bit*/
- bf_set(els_req64_vf, &wqe->els_req, 0);
- /* And a VFID for word 12 */
- bf_set(els_req64_vfid, &wqe->els_req, 0);
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
- bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
- /* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP)
- els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
- >> LPFC_FIP_ELS_ID_SHIFT);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
- *pcmd == ELS_CMD_SCR ||
- *pcmd == ELS_CMD_RSCN_XMT ||
- *pcmd == ELS_CMD_FDISC ||
- *pcmd == ELS_CMD_LOGO ||
- *pcmd == ELS_CMD_PLOGI)) {
- bf_set(els_req64_sp, &wqe->els_req, 1);
- bf_set(els_req64_sid, &wqe->els_req,
- iocbq->vport->fc_myDID);
- if ((*pcmd == ELS_CMD_FLOGI) &&
- !(phba->fc_topology ==
- LPFC_TOPOLOGY_LOOP))
- bf_set(els_req64_sid, &wqe->els_req, 0);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- } else if (pcmd && iocbq->context1) {
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- }
- }
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
- bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
- wqe->els_req.max_response_payload_len = total_len - xmit_len;
- break;
- case CMD_XMIT_SEQUENCE64_CX:
- bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.un.ulpWord[3]);
- bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- /* The entire sequence is transmitted for this IOCB */
- xmit_len = total_len;
- cmnd = CMD_XMIT_SEQUENCE64_CR;
- if (phba->link_flag & LS_LOOPBACK_MODE)
- bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
- /* fall through */
- case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=reserved */
- wqe->xmit_sequence.rsvd3 = 0;
- /* word4 relative_offset memcpy */
- /* word5 r_ctl/df_ctl memcpy */
- bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
- bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_LENLOC_WORD12);
- bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
- wqe->xmit_sequence.xmit_len = xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=seq_payload_len */
- wqe->xmit_bcast64.seq_payload_len = xmit_len;
- /* word4 iocb=rsvd wqe=rsvd */
- /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
- /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
- break;
- case CMD_FCP_IWRITE64_CR:
- command_type = FCP_COMMAND_DATA_OUT;
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iwrite,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iwrite,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
+ if (phba->fcp_embed_io) {
+ struct fcp_cmnd *fcp_cmnd;
+ u32 *ptr;
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
+ /* Word 0-2 - FCP_CMND */
+ wqe->generic.bde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_IMMED;
+ wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
+ wqe->generic.bde.addrHigh = 0;
+ wqe->generic.bde.addrLow = 88; /* Word 22 */
- /* 128 byte wqe support here */
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ /* Word 22-29 FCP CMND Payload */
+ ptr = &wqe->words[22];
+ memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ } else {
+ /* Word 0-2 - Inline BDE */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ wqe->generic.bde.addrHigh = sgl->addr_hi;
+ wqe->generic.bde.addrLow = sgl->addr_lo;
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
+ }
+ /* add the VMID tags as per switch response */
+ if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (piocb->vmid_tag.cs_ctl_vmid));
+ } else if (phba->cfg_vmid_app_header) {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_IREAD64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iread,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iread,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_ICMND64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_icmd,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_icmd,
- 0);
- /* word3 iocb=IO_TAG wqe=reserved */
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- /* Always open the exchange */
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_GEN_REQUEST64_CR:
- /* For this command calculate the xmit length of the
- * request bde.
- */
- xmit_len = 0;
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- break;
- xmit_len += bde.tus.f.bdeSize;
- }
- /* word3 iocb=IO_TAG wqe=request_payload_len */
- wqe->gen_req.request_payload_len = xmit_len;
- /* word4 iocb=parameter wqe=relative_offset memcpy */
- /* word5 [rctl, type, df_ctl, la] copied in memcpy */
- /* word6 context tag copied in memcpy */
- if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2015 Invalid CT %x command 0x%x\n",
- ct, iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
- bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
- bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
- wqe->gen_req.max_response_payload_len = total_len - xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_ELS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=response_payload_len */
- wqe->xmit_els_rsp.response_payload_len = xmit_len;
- /* word4 */
- wqe->xmit_els_rsp.word4 = 0;
- /* word5 iocb=rsvd wge=did */
- bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
- iocbq->iocb.un.xseq64.xmit_els_remoteID);
-
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (iocbq->vport->fc_flag & FC_PT2PT) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- if (iocbq->vport->fc_myDID == Fabric_DID) {
- bf_set(wqe_els_did,
- &wqe->xmit_els_rsp.wqe_dest, 0);
- }
- }
- }
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
+ wqe->words[31] = piocb->vmid_tag.app_id;
}
- command_type = OTHER_COMMAND;
- break;
- case CMD_CLOSE_XRI_CN:
- case CMD_ABORT_XRI_CN:
- case CMD_ABORT_XRI_CX:
- /* words 0-2 memcpy should be 0 rserved */
- /* port will send abts */
- abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
- if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
- abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
- fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
- } else
- fip = 0;
-
- if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
- /*
- * The link is down, or the command was ELS_FIP
- * so the fw does not need to send abts
- * on the wire.
- */
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- else
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
- wqe->abort_cmd.rsrvd5 = 0;
- bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- /*
- * The abort handler will send us CMD_ABORT_XRI_CN or
- * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
- */
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- cmnd = CMD_ABORT_XRI_CX;
- command_type = OTHER_COMMAND;
- xritag = 0;
- break;
- case CMD_XMIT_BLS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* As BLS ABTS RSP WQE is very different from other WQEs,
- * we re-construct this WQE here based on information in
- * iocbq from scratch.
- */
- memset(wqe, 0, sizeof(*wqe));
- /* OX_ID is invariable to who sent ABTS to CT exchange */
- bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
- if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
- LPFC_ABTS_UNSOL_INT) {
- /* ABTS sent by initiator to CT exchange, the
- * RX_ID field will be filled with the newly
- * allocated responder XRI.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- iocbq->sli4_xritag);
- } else {
- /* ABTS sent by responder to CT exchange, the
- * RX_ID field will be filled with the responder
- * RX_ID from ABTS.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
- }
- bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
- bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
-
- /* Use CT=VPI */
- bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
- ndlp->nlp_DID);
- bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- /* Overwrite the pre-set comnd type with OTHER_COMMAND */
- command_type = OTHER_COMMAND;
- if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
- bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
- bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
- }
-
- break;
- case CMD_SEND_FRAME:
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
- bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
- bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
- bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- return 0;
- case CMD_XRI_ABORTED_CX:
- case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
- case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
- case CMD_FCP_TRSP64_CX: /* Target mode rcv */
- case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2014 Invalid command 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- break;
}
-
- if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
- LPFC_IO_DIF_INSERT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- wqe->generic.wqe_com.abort_tag = abort_tag;
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
- bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- return 0;
}
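One detail of lpfc_prep_embed_io worth spelling out: in the embedded case the BDE does not point at DMA memory at all. Its type is BUFF_TYPE_BDE_IMMED, and its addrLow of 88 is the byte offset of word 22 within the 128-byte WQE (22 words x 4 bytes), which is where the FCP_CMND payload is memcpy'd. A compile-time check of that arithmetic would look like this (illustrative only):

	/* word 22 of the WQE starts 88 bytes in; the immediate BDE's
	 * "address" is that offset, not a DMA address.
	 */
	BUILD_BUG_ON(22 * sizeof(u32) != 88);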
/**
@@ -9967,13 +10655,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
+ union lpfc_wqe128 *wqe;
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring;
+ u32 ulp_command = get_job_cmnd(phba, piocb);
/* Get the WQ */
- if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if ((piocb->cmd_flag & LPFC_IO_FCP) ||
+ (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
@@ -9987,36 +10676,27 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
lockdep_assert_held(&pring->ring_lock);
-
+ wqe = &piocb->wqe;
if (piocb->sli4_xritag == NO_XRI) {
- if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if (ulp_command == CMD_ABORT_XRI_CX)
sglq = NULL;
else {
- if (!list_empty(&pring->txq)) {
+ sglq = __lpfc_sli_get_els_sglq(phba, piocb);
+ if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
- pring, piocb);
+ pring,
+ piocb);
return IOCB_SUCCESS;
} else {
return IOCB_BUSY;
}
- } else {
- sglq = __lpfc_sli_get_els_sglq(phba, piocb);
- if (!sglq) {
- if (!(flag & SLI_IOCB_RET_IOCB)) {
- __lpfc_sli_ringtx_put(phba,
- pring,
- piocb);
- return IOCB_SUCCESS;
- } else
- return IOCB_BUSY;
- }
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP)
+ } else if (piocb->cmd_flag & LPFC_IO_FCP) {
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
+ }
else {
/*
		 * This is a continuation of a command (CX), so this
@@ -10030,21 +10710,51 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (sglq) {
piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
+
+ /* ABTS sent by initiator to CT exchange, the
+ * RX_ID field will be filled with the newly
+ * allocated responder XRI.
+ */
+ if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
+ piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ piocb->sli4_xritag);
+
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
+ piocb->sli4_xritag);
+
+ if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
return IOCB_ERROR;
}
- if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ if (lpfc_sli4_wq_put(wq, wqe))
return IOCB_ERROR;
- if (lpfc_sli4_wq_put(wq, &wqe))
- return IOCB_ERROR;
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
return 0;
}
-/**
+/*
+ * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
+ *
+ * This routine wraps the actual FCP I/O function pointer from the lpfc_hba
+ * struct, issuing a WQE for SLI-4 or an IOCB for SLI-3.
+ *
+ * Return codes:
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
+ **/
+int
+lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
+}
+
+/*
* __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
*
 * This routine wraps the actual lockless version for issuing IOCB function
@@ -10062,6 +10772,411 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
+static void
+__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+
+ if (expect_rsp) {
+ cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ cmd->un.elsreq64.remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ cmd->ulpTimeout = tmo;
+ } else {
+ cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ cmd->ulpPU = PARM_NPIV_DID;
+ }
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (expect_rsp) {
+ cmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ cmd->ulpContext = phba->vpi_ids[vport->vpi];
+ }
+
+ cmd->ulpCt_h = 0;
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ cmd->ulpCt_l = 0; /* context = invalid RPI */
+ else
+ cmd->ulpCt_l = 1; /* context = VPI */
+ }
+}
+
+static void
+__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64_le *bde;
+ u8 els_id;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 0 - 2 BDE */
+ bde = (struct ulp_bde64_le *)&wqe->generic.bde;
+ bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
+ bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->type_size = cpu_to_le32(cmd_size);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ if (expect_rsp) {
+ bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
+
+ /* Transfer length */
+ wqe->els_req.payload_len = cmd_size;
+ wqe->els_req.max_response_payload_len = FCELSSIZE;
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
+
+ /* Word 11 - ELS_ID */
+ switch (elscmd) {
+ case ELS_CMD_PLOGI:
+ els_id = LPFC_ELS_ID_PLOGI;
+ break;
+ case ELS_CMD_FLOGI:
+ els_id = LPFC_ELS_ID_FLOGI;
+ break;
+ case ELS_CMD_LOGO:
+ els_id = LPFC_ELS_ID_LOGO;
+ break;
+ case ELS_CMD_FDISC:
+ if (!vport->fc_myDID) {
+ els_id = LPFC_ELS_ID_FDISC;
+ break;
+ }
+ fallthrough;
+ default:
+ els_id = LPFC_ELS_ID_DEFAULT;
+ break;
+ }
+
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ } else {
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
+
+ /* Transfer length */
+ wqe->xmit_els_rsp.response_payload_len = cmd_size;
+
+ bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
+ CMD_XMIT_ELS_RSP64_WQE);
+ }
+
+ bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
+ bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI.
+	 * For SLI4, since the driver controls VPIs, we also want to include
+	 * all ELS pt2pt protocol traffic.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+ (vport->fc_flag & FC_PT2PT)) {
+ if (expect_rsp) {
+ bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
+
+ /* For ELS_REQUEST64_WQE, use the VPI by default */
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[vport->vpi]);
+ }
+
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
+ else
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
+ }
+}
+
+void
+lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
+ u8 expect_rsp)
+{
+ phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
+ elscmd, tmo, expect_rsp);
+}
+
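A sketch of how the new generic wrapper is meant to be called when building an ELS request; the buffer sizing mirrors a PLOGI (command word plus service parameters), and the timeout value is illustrative:

	u16 cmdsize = sizeof(u32) + sizeof(struct serv_parm);

	/* expect_rsp = 1 selects the ELS_REQUEST64 (solicited) form */
	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmdsize,
				  ndlp->nlp_DID, ELS_CMD_PLOGI,
				  phba->fc_ratov * 2, 1);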
+static void
+__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
+
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+
+ cmd->ulpContext = rpi;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpOwner = OWN_CHIP;
+ cmd->ulpTimeout = tmo;
+}
+
+static void
+__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ union lpfc_wqe128 *cmdwqe;
+ struct ulp_bde64_le *bde, *bpl;
+ u32 xmit_len = 0, total_len = 0, size, type, i;
+
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
+
+ /* Calculate total_len and xmit_len */
+ bpl = (struct ulp_bde64_le *)bmp->virt;
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ total_len += size;
+ }
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
+ if (type != ULP_BDE64_TYPE_BDE_64)
+ break;
+ xmit_len += size;
+ }
+
+ /* Words 0 - 2 */
+ bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
+ bde->addr_low = bpl->addr_low;
+ bde->addr_high = bpl->addr_high;
+ bde->type_size = cpu_to_le32(xmit_len);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ /* Word 3 */
+ cmdwqe->gen_req.request_payload_len = xmit_len;
+
+ /* Word 5 */
+ bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
+ bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
+ bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
+ bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
+ bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
+ bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
+
+ /* Word 12 */
+ cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
+}
+
+void
+lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
+{
+ phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
+}
+
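The _s4 length scan above relies on the little-endian BPL layout: the low 24 bits of type_size hold the BDE byte count and the high 8 bits the BDE type, and the xmit_len scan stops at the first non-BDE_64 entry because reply buffers follow the request buffers in the list. For example, two 64-byte request BDEs followed by a 2048-byte reply BDE yield xmit_len = 128 and max_response_payload_len = total_len - xmit_len = 2048. The decode, distilled (illustrative):

	u32 ts   = le32_to_cpu(bpl[i].type_size);
	u32 size = ts & ULP_BDE64_SIZE_MASK;	/* bits 0-23 */
	u32 type = ts & ULP_BDE64_TYPE_MASK;	/* bits 24-31 */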
+static void
+__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ IOCB_t *icmd;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ icmd->un.xseq64.w5.hcsw.Fctl = LA;
+ if (last_seq)
+ icmd->un.xseq64.w5.hcsw.Fctl |= LS;
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = rctl;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ switch (cr_cx_cmd) {
+ case CMD_XMIT_SEQUENCE64_CR:
+ icmd->ulpContext = rpi;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ break;
+ case CMD_XMIT_SEQUENCE64_CX:
+ icmd->ulpContext = ox_id;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64 *bpl;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Words 0 - 2 */
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
+ wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
+ wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
+
+ /* Word 5 */
+ bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
+ bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
+ bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
+ bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
+
+ bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
+ CMD_XMIT_SEQUENCE64_WQE);
+
+ /* Word 7 */
+ bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
+
+ /* Word 9 */
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
+
+ /* Word 12 */
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+ wqe->xmit_sequence.xmit_len = full_size;
+ else
+ wqe->xmit_sequence.xmit_len =
+ wqe->xmit_sequence.bde.tus.f.bdeSize;
+}
+
+void
+lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
+ rctl, last_seq, cr_cx_cmd);
+}
+
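An illustrative call to the wrapper, with hypothetical values: transmitting the final sequence of a CT exchange identified by ox_id over a single BDE:

	lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, ndlp->nlp_rpi, ox_id,
				 1 /* num_entry */, FC_RCTL_DD_SOL_CTL,
				 1 /* last_seq */, CMD_XMIT_SEQUENCE64_CX);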
+static void
+__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
+{
+ IOCB_t *icmd = NULL;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ /* Word 5 */
+ icmd->un.acxri.abortContextTag = ulp_context;
+ icmd->un.acxri.abortIoTag = iotag;
+
+ if (ia) {
+ /* Word 7 */
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ } else {
+ /* Word 3 */
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+
+ /* Word 7 */
+ icmd->ulpClass = ulp_class;
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ }
+
+ /* Word 7 */
+ icmd->ulpLe = 1;
+}
+
+static void
+__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
+{
+ union lpfc_wqe128 *wqe;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 3 */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+ if (ia)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
+
+ /* Word 8 */
+ wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+
+ /* Word 11 */
+ if (wqec)
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+}
+
+void
+lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia, bool wqec)
+{
+ phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
+ cqid, ia, wqec);
+}
+
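A sketch of the intended use (values hypothetical): building an abort for an outstanding command. ia ("inhibit ABTS") is set when the link is down, so the port closes the exchange locally instead of putting an ABTS on the wire; wqec requests a completion entry for the abort WQE itself.

	bool ia = (phba->link_state < LPFC_LINK_UP);

	lpfc_sli_prep_abort_xri(phba, abtsiocbq, cmdiocb->sli4_xritag,
				abtsiocbq->iotag, cmdiocb->iocb.ulpClass,
				LPFC_WQE_CQ_ID_DEFAULT, ia, false);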
/**
* lpfc_sli_api_table_setup - Set up sli api function jump table
* @phba: The hba struct for which this call is being executed.
@@ -10079,19 +11194,27 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
case LPFC_PCI_DEV_LP:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+ phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+ phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1419 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
- phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
}
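The dev_grp switch above completes the pattern used throughout this patch: every SLI-rev-specific routine is bound once at probe time, and all hot-path callers dispatch through the stored pointer instead of branching on sli_rev. The shape of the idiom, reduced to hypothetical types:

struct hba;
struct req;

static int issue_s3(struct hba *h, struct req *r);
static int issue_s4(struct hba *h, struct req *r);

struct hba_ops {
	int (*issue)(struct hba *h, struct req *r);
};

static void ops_setup(struct hba_ops *ops, bool is_sli4)
{
	/* bind once; callers never test the revision again */
	ops->issue = is_sli4 ? issue_s4 : issue_s3;
}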
@@ -10110,15 +11233,15 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
struct lpfc_io_buf *lpfc_cmd;
- if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
if (unlikely(!phba->sli4_hba.hdwq))
return NULL;
/*
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
+ if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
+ lpfc_cmd = piocb->io_buf;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
@@ -10133,7 +11256,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
+ * @ring_number: Ring number
* @piocb: Pointer to command iocb.
* @flag: Flag indicating if this command can be put into txq.
*
@@ -10152,7 +11275,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
unsigned long iflags;
int rc;
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
pring = lpfc_sli4_calc_ring(phba, piocb);
@@ -10219,6 +11348,32 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+static void
+lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+ struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+
+ /* Incrementing the reference count until the queued work is done. */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (!evtp->evt_arg1) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+ evtp->evt = LPFC_EVT_RECOVER_PORT;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ lpfc_worker_wake_up(phba);
+}
+
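Three details of the helper above carry the pattern: the list_empty() check de-duplicates a recovery request that is already queued, lpfc_nlp_get() pins the node for the lifetime of the deferred work, and lpfc_worker_wake_up() kicks the worker that services work_list. A sketch of the consuming side (illustrative; the real worker handles many event types):

	/* worker context, after splicing phba->work_list to a local list */
	list_for_each_entry_safe(evtp, next_evtp, &local_list, evt_listp) {
		list_del_init(&evtp->evt_listp);
		if (evtp->evt == LPFC_EVT_RECOVER_PORT)
			recover_port(evtp->evt_arg1);	/* hypothetical */
		lpfc_nlp_put(evtp->evt_arg1);		/* drop the ref */
	}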
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to iocb object.
@@ -10252,7 +11407,7 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
if (!vport)
goto err_exit;
ndlp = lpfc_findnode_rpi(vport, rpi);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
goto err_exit;
if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
@@ -10263,8 +11418,8 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3095 Event Context not found, no "
"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
- iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
- vpi, rpi);
+ vpi, rpi, iocbq->iocb.ulpStatus,
+ iocbq->iocb.ulpContext);
}
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
@@ -10282,17 +11437,15 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp,
struct sli4_wcqe_xri_aborted *axri)
{
- struct lpfc_vport *vport;
uint32_t ext_status = 0;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3115 Node Context not found, driver "
"ignoring abts err event\n");
return;
}
- vport = ndlp->vport;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
@@ -10309,7 +11462,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
ext_status = axri->parameter & IOERR_PARAM_MASK;
if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
- lpfc_sli_abts_recover_port(vport, ndlp);
+ lpfc_sli_post_recovery_event(phba, ndlp);
}
/**
@@ -10345,13 +11498,13 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
if (evt_code == ASYNC_TEMP_WARN) {
temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
- lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0347 Adapter is very hot, please take "
"corrective action. temperature : %d Celsius\n",
(uint32_t) icmd->ulpContext);
} else {
temp_event_data.event_code = LPFC_NORMAL_TEMP;
- lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0340 Adapter temperature is OK now. "
"temperature : %d Celsius\n",
(uint32_t) icmd->ulpContext);
@@ -10368,7 +11521,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
break;
default:
iocb_w = (uint32_t *) icmd;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
" evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
@@ -10763,7 +11916,8 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
&pring->txcmplq, list) {
if (iocb->vport != vport)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
pring->flag = prev_pring_flag;
}
@@ -10790,13 +11944,17 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
&pring->txcmplq, list) {
if (iocb->vport != vport)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
pring->flag = prev_pring_flag;
}
}
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
@@ -11042,7 +12200,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0402 Cannot find virtual addr for buffer tag on "
"ring %d Data x%lx x%px x%px x%x\n",
pring->ringno, (unsigned long) tag,
@@ -11086,7 +12244,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0410 Cannot find virtual addr for mapped buf on "
"ring %d Data x%llx x%px x%px x%x\n",
pring->ringno, (unsigned long long)phys,
@@ -11109,47 +12267,33 @@ static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
- uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb = NULL;
-
- if (irsp->ulpStatus) {
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 cmnd = get_job_cmnd(phba, cmdiocb);
+ if (ulp_status) {
/*
* Assume that the port already completed and returned, or
* will return the iocb. Just log the message.
*/
- abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
- abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
-
- spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
- if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
- irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
- spin_unlock_irq(&phba->hbalock);
+ if (cmnd == CMD_ABORT_XRI_CX &&
+ ulp_status == IOSTAT_LOCAL_REJECT &&
+ ulp_word4 == IOERR_ABORT_REQUESTED) {
goto release_iocb;
}
- if (abort_iotag != 0 &&
- abort_iotag <= phba->sli.last_iotag)
- abort_iocb =
- phba->sli.iocbq_lookup[abort_iotag];
- } else
- /* For sli4 the abort_tag is the XRI,
- * so the abort routine puts the iotag of the iocb
- * being aborted in the context field of the abort
- * IOCB.
- */
- abort_iocb = phba->sli.iocbq_lookup[abort_context];
+ }
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb x%px "
- "with tag %x context %x, abort status %x, "
- "abort code %x\n",
- abort_iocb, abort_iotag, abort_context,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ "with io cmd xri %x abort tag : x%x, "
+ "abort status %x abort code %x\n",
+ cmdiocb, get_job_abtsiotag(phba, cmdiocb),
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ get_wqe_reqtag(cmdiocb) :
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ ulp_status, ulp_word4);
- spin_unlock_irq(&phba->hbalock);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
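This hunk swaps direct IOCB field reads for the get_job_ulpstatus()/get_job_word4()/get_job_cmnd() accessors, which select the SLI3 IOCB field or the SLI4 WQE/WCQE field behind one call. A minimal sketch of that accessor shape, with an invented layout:

    #include <stdint.h>

    enum sli_rev { SLI_REV3 = 3, SLI_REV4 = 4 };

    /* The field names below are invented for illustration only. */
    struct fake_job {
            enum sli_rev rev;
            union {
                    struct { uint32_t ulp_status; } iocb;   /* SLI3 view */
                    struct { uint32_t cqe_status; } wcqe;   /* SLI4 view */
            } u;
    };

    static uint32_t get_job_status(const struct fake_job *job)
    {
            /* One call site, two hardware generations */
            return job->rev == SLI_REV4 ? job->u.wcqe.cqe_status
                                        : job->u.iocb.ulp_status;
    }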
@@ -11167,114 +12311,169 @@ release_iocb:
* which are aborted. The function frees memory resources used for
* the aborted ELS commands.
**/
-static void
+void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ IOCB_t *irsp;
+ LPFC_MBOXQ_t *mbox;
+ u32 ulp_command, ulp_status, ulp_word4, iotag;
+
+ ulp_command = get_job_cmnd(phba, cmdiocb);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+
+ /* It is possible a PLOGI_RJT for NPIV ports to get aborted.
+ * The MBX_REG_LOGIN64 mbox command is freed back to the
+ * mbox_mem_pool here.
+ */
+ if (cmdiocb->context_un.mbox) {
+ mbox = cmdiocb->context_un.mbox;
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ cmdiocb->context_un.mbox = NULL;
+ }
+ }
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0139 Ignoring ELS cmd tag x%x completion Data: "
- "x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
- if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+ "0139 Ignoring ELS cmd code x%x completion Data: "
+ "x%x x%x x%x x%px\n",
+ ulp_command, ulp_status, ulp_word4, iotag,
+ cmdiocb->ndlp);
+ /*
+ * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
+ * if exchange is busy.
+ */
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
else
lpfc_els_free_iocb(phba, cmdiocb);
- return;
+
+ lpfc_nlp_put(ndlp);
}
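lpfc_ignore_els_cmpl() exists so teardown paths can retarget an in-flight command's completion rather than wait for it; the handler then only frees resources and drops the node reference. A reduced sketch of that callback-swap pattern, all names hypothetical:

    #include <stdlib.h>

    struct cmd;
    typedef void (*cmpl_fn)(struct cmd *);

    struct cmd {
            cmpl_fn cmpl;
            void *ref;              /* reference pinned for the normal handler */
    };

    extern void put_ref(void *ref);

    static void ignore_cmpl(struct cmd *c)
    {
            put_ref(c->ref);        /* drop the pinned reference... */
            free(c);                /* ...free the command, and do nothing else */
    }

    static void detach_for_unload(struct cmd *c)
    {
            /* completion will now run without discovery side effects */
            c->cmpl = ignore_cmpl;
    }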
/**
- * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
+ * @cmpl: completion function.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the completion callback of those commands is changed so
+ * that nothing happens when they finish. This function is called with the
+ * hbalock held and no ring_lock held (SLI4). The function returns
+ * IOCB_ABORTING when the command iocb is itself an abort request or is
+ * already being aborted.
*
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
**/
-static int
-lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb, void *cmpl)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
- IOCB_t *icmd = NULL;
- IOCB_t *iabt = NULL;
- int retval;
+ int retval = IOCB_ERROR;
unsigned long iflags;
- struct lpfc_nodelist *ndlp;
-
- lockdep_assert_held(&phba->hbalock);
+ struct lpfc_nodelist *ndlp = NULL;
+ u32 ulp_command = get_job_cmnd(phba, cmdiocb);
+ u16 ulp_context, iotag;
+ bool ia;
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
* being aborted.
*/
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
+ if (ulp_command == CMD_ABORT_XRI_WQE ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
+ return IOCB_ABORTING;
+
+ if (!pring) {
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
+ return retval;
+ }
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ pring->ringno == LPFC_ELS_RING) {
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
+ return retval;
+ }
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
- return 0;
+ return IOCB_NORESOURCE;
/* This signals the response to set the correct status
* before calling the completion handler
*/
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
- iabt = &abtsiocbp->iocb;
- iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
- iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4) {
- iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
- iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ ulp_context = cmdiocb->sli4_xritag;
+ iotag = abtsiocbp->iotag;
} else {
- iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ iotag = cmdiocb->iocb.ulpIoTag;
if (pring->ringno == LPFC_ELS_RING) {
- ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
- iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ ndlp = cmdiocb->ndlp;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = cmdiocb->iocb.ulpContext;
}
}
- iabt->ulpLe = 1;
- iabt->ulpClass = icmd->ulpClass;
+
+ if (phba->link_state < LPFC_LINK_UP ||
+ (phba->sli_rev == LPFC_SLI_REV4 &&
+ phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
+ (phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ ia = true;
+ else
+ ia = false;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
+ cmdiocb->iocb.ulpClass,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, false);
+
+ abtsiocbp->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocbp->iocb_flag |= LPFC_IO_FOF;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
- if (phba->link_state >= LPFC_LINK_UP)
- iabt->ulpCommand = CMD_ABORT_XRI_CN;
- else
- iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocbp->cmd_flag |= LPFC_IO_FOF;
- abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ if (cmpl)
+ abtsiocbp->cmd_cmpl = cmpl;
+ else
+ abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
abtsiocbp->vport = vport;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0339 Abort xri x%x, original iotag x%x, "
- "abort cmd iotag x%x\n",
- iabt->un.acxri.abortIoTag,
- iabt->un.acxri.abortContextTag,
- abtsiocbp->iotag);
-
if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
if (unlikely(pring == NULL))
- return 0;
+ goto abort_iotag_exit;
/* Note: both hbalock and ring_lock need to be set here */
spin_lock_irqsave(&pring->ring_lock, iflags);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -11285,76 +12484,20 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp, 0);
}
- if (retval)
- __lpfc_sli_release_iocbq(phba, abtsiocbp);
-
- /*
- * Caller to this routine should check for IOCB_ERROR
- * and handle it properly. This routine no longer removes
- * iocb off txcmplq and call compl in case of IOCB_ERROR.
- */
- return retval;
-}
-
-/**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb. In case
- * of unloading, the abort iocb will not be issued to commands on the ELS
- * ring. Instead, the callback function shall be changed to those commands
- * so that nothing happens when them finishes. This function is called with
- * hbalock held. The function returns 0 when the command iocb is an abort
- * request.
- **/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
-{
- struct lpfc_vport *vport = cmdiocb->vport;
- int retval = IOCB_ERROR;
- IOCB_t *icmd = NULL;
-
- lockdep_assert_held(&phba->hbalock);
-
- /*
- * There are certain command types we don't want to abort. And we
- * don't want to abort commands that are already in the process of
- * being aborted.
- */
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
-
- if (!pring) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
- }
+abort_iotag_exit:
- /*
- * If we're unloading, don't abort iocb on the ELS ring, but change
- * the callback so that nothing happens when it finishes.
- */
- if ((vport->load_flag & FC_UNLOADING) &&
- (pring->ringno == LPFC_ELS_RING)) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0339 Abort IO XRI x%x, Original iotag x%x, "
+ "abort tag x%x Cmdjob : x%px Abortjob : x%px "
+ "retval x%x\n",
+ ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
+ retval);
+ if (retval) {
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ __lpfc_sli_release_iocbq(phba, abtsiocbp);
}
- /* Now, we try to issue the abort to the cmdiocb out */
- retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
-
-abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
* and handle it properly. This routine no longer removes
@@ -11393,15 +12536,55 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
+ * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
+ * @iocbq: Pointer to iocb object.
+ * @vport: Pointer to driver virtual port object.
+ *
+ * This function acts as an iocb filter for functions which abort FCP iocbs.
+ *
+ * Return values
+ * -ENODEV, if a null iocb or vport ptr is encountered
+ * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
+ * already marked by the driver as being aborted, or is an abort iocb itself
+ * 0, if the iocb passes the criteria for aborting the FCP I/O
+ **/
+static int
+lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
+ struct lpfc_vport *vport)
+{
+ u8 ulp_command;
+
+ /* No null ptr vports */
+ if (!iocbq || iocbq->vport != vport)
+ return -ENODEV;
+
+ /* The iocb must be for FCP I/O, must already be on the TX cmpl queue,
+ * must not already be marked as driver aborted, and must not be an
+ * ABORT iocb itself
+ */
+ ulp_command = get_job_cmnd(vport->phba, iocbq);
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
+ (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE))
+ return -EINVAL;
+
+ return 0;
+}
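Returning distinct errno values lets callers separate "no such iocb for this vport" from "iocb exists but must not be aborted". A compact sketch of a filter with the same shape, using invented flag bits:

    #include <errno.h>
    #include <stdint.h>

    #define IO_FCP          0x1
    #define IO_ON_TXCMPLQ   0x2
    #define IO_ABORTED      0x4
    #define IO_IS_ABORT     0x8

    struct io { const void *owner; uint32_t flags; };

    static int io_abortable(const struct io *io, const void *owner)
    {
            if (!io || io->owner != owner)
                    return -ENODEV;         /* wrong or missing object */
            if (!(io->flags & IO_FCP) || !(io->flags & IO_ON_TXCMPLQ) ||
                (io->flags & (IO_ABORTED | IO_IS_ABORT)))
                    return -EINVAL;         /* exists, but must not be aborted */
            return 0;                       /* safe to abort */
    }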
+
+/**
+ * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
*
- * This function acts as an iocb filter for functions which abort or count
- * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
+ * host.
+ *
+ * It will return
* 0 if the filtering criteria are met for the given iocb, or
* 1 if the filtering criteria are not met.
* If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
@@ -11422,13 +12605,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
struct lpfc_io_buf *lpfc_cmd;
int rc = 1;
- if (iocbq->vport != vport)
- return rc;
-
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
- return rc;
-
lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
if (lpfc_cmd->pCmd == NULL)
@@ -11484,16 +12660,33 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
int sum, i;
+ unsigned long iflags;
+ u8 ulp_command;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
- ctx_cmd) == 0)
+ if (!iocbq || iocbq->vport != vport)
+ continue;
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
+ continue;
+
+ /* Include counting outstanding aborts */
+ ulp_command = get_job_cmnd(phba, iocbq);
+ if (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE) {
+ sum++;
+ continue;
+ }
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ ctx_cmd) == 0)
sum++;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return sum;
}
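Note the switch from spin_lock_irq() to spin_lock_irqsave(): the sum can now be taken from contexts where interrupts are already disabled without unconditionally re-enabling them on unlock. A kernel-style sketch of such a counting loop; the table and match() predicate are hypothetical:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define TBL_MAX 128

    struct tbl {
            spinlock_t lock;
            void *slot[TBL_MAX];
    };

    bool match(void *entry);        /* hypothetical filter predicate */

    int tbl_count_matching(struct tbl *t)
    {
            unsigned long flags;
            int i, sum = 0;

            /* irqsave preserves the caller's interrupt state, unlike
             * spin_lock_irq()/spin_unlock_irq() which force IRQs back on.
             */
            spin_lock_irqsave(&t->lock, flags);
            for (i = 0; i < TBL_MAX; i++)
                    if (t->slot[i] && match(t->slot[i]))
                            sum++;
            spin_unlock_irqrestore(&t->lock, flags);
            return sum;
    }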
@@ -11513,13 +12706,15 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3096 ABORT_XRI_CN completing on rpi x%x "
+ "3096 ABORT_XRI_CX completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"status 0x%x, reason 0x%x\n",
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->sli4_xritag :
cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4]);
+ get_job_abtsiotag(phba, cmdiocb),
+ cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
+ get_job_word4(phba, rspiocb));
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -11527,14 +12722,17 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
* @vport: Pointer to virtual port.
- * @pring: Pointer to driver SLI ring object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
- * filtered by lpfc_sli_validate_fcp_iocb function.
+ * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then by
+ * lpfc_sli_validate_fcp_iocb; the two filters must be applied in that
+ * order before an abort iocb is submitted.
+ *
* When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with lun specified by tgt_id and lun_id
* parameters
@@ -11542,19 +12740,20 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* FCP iocbs associated with SCSI target specified by tgt_id parameter.
* When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
* FCP iocbs associated with virtual port.
+ * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
+ * lpfc_sli4_calc_ring is used.
* This function returns the number of iocbs it failed to abort.
* This function is called with no locks held.
**/
int
-lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
- uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
+lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
+ lpfc_ctx_cmd abort_cmd)
{
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring = NULL;
struct lpfc_iocbq *iocbq;
- struct lpfc_iocbq *abtsiocb;
- struct lpfc_sli_ring *pring_s4;
- IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
+ unsigned long iflags;
int i;
/* all I/Os are in process of being flushed */
@@ -11564,66 +12763,24 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
- abort_cmd) != 0)
+ if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
continue;
- /*
- * If the iocbq is already being aborted, don't take a second
- * action, but do count it.
- */
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ abort_cmd) != 0)
continue;
- /* issue ABTS for this IOCB based on iotag */
- abtsiocb = lpfc_sli_get_iocbq(phba);
- if (abtsiocb == NULL) {
- errcnt++;
- continue;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ pring = lpfc_sli4_calc_ring(phba, iocbq);
}
-
- /* indicate the IO is being aborted by the driver. */
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- cmd = &iocbq->iocb;
- abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
- else
- abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
- abtsiocb->iocb.ulpLe = 1;
- abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
-
- if (lpfc_is_link_up(phba))
- abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
- else
- abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
-
- /* Setup callback routine and issue the command. */
- abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
- if (!pring_s4)
- continue;
- ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
- abtsiocb, 0);
- } else
- ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
- abtsiocb, 0);
- if (ret_val == IOCB_ERROR) {
- lpfc_sli_release_iocbq(phba, abtsiocb);
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
+ lpfc_sli_abort_fcp_cmpl);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (ret_val != IOCB_SUCCESS)
errcnt++;
- continue;
- }
}
return errcnt;
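Rewritten this way, the abort loop reduces to two filters and one issue call per iotag. A condensed sketch of that control flow, all helpers stand-ins:

    #include <stddef.h>

    struct io;

    extern struct io *io_lookup(int tag);           /* iocbq_lookup[] stand-in */
    extern int io_abortable(struct io *io);         /* filter stage 1 */
    extern int io_matches_scope(struct io *io);     /* filter stage 2 */
    extern int io_issue_abort(struct io *io);       /* returns 0 on success */

    static int abort_all(int last_tag)
    {
            int tag, errcnt = 0;

            for (tag = 1; tag <= last_tag; tag++) {
                    struct io *io = io_lookup(tag);

                    if (!io || io_abortable(io) != 0)
                            continue;               /* stage 1 must run first */
                    if (io_matches_scope(io) != 0)
                            continue;               /* stage 2: lun/tgt/host */
                    if (io_issue_abort(io) != 0)
                            errcnt++;               /* count failures, keep going */
            }
            return errcnt;                          /* number of failed aborts */
    }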
@@ -11635,11 +12792,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
* @pring: Pointer to driver SLI ring object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
- * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
- * filtered by lpfc_sli_validate_fcp_iocb function.
+ * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then by
+ * lpfc_sli_validate_fcp_iocb; the two filters must be applied in that
+ * order before an abort iocb is submitted.
+ *
* When cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with lun specified by tgt_id and lun_id
* parameters
@@ -11658,12 +12819,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_iocbq *abtsiocbq;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd;
int sum, i, ret_val;
unsigned long iflags;
struct lpfc_sli_ring *pring_s4 = NULL;
+ u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
+ bool ia;
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -11677,6 +12839,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
+ if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
+ continue;
+
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
cmd) != 0)
continue;
@@ -11705,8 +12870,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
* If the iocbq is already being aborted, don't take a second
* action, but do count it.
*/
- if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -11722,41 +12887,50 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
- icmd = &iocbq->iocb;
- abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocbq->iocb.un.acxri.abortIoTag =
- iocbq->sli4_xritag;
- else
- abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
- abtsiocbq->iocb.ulpLe = 1;
- abtsiocbq->iocb.ulpClass = icmd->ulpClass;
- abtsiocbq->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocbq->iocb_flag |= LPFC_IO_FOF;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = abtsiocbq->iotag;
+ ulp_context = iocbq->sli4_xritag;
+ cqid = lpfc_cmd->hdwq->io_cq_map;
+ } else {
+ iotag = iocbq->iocb.ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = iocbq->ndlp;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = iocbq->iocb.ulpContext;
+ }
+ }
ndlp = lpfc_cmd->rdata->pnode;
if (lpfc_is_link_up(phba) &&
- (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
- abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
+ !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ ia = false;
else
- abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+ ia = true;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
+ iocbq->iocb.ulpClass, cqid,
+ ia, false);
+
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
+ if (iocbq->cmd_flag & LPFC_IO_FCP)
+ abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->cmd_flag & LPFC_IO_FOF)
+ abtsiocbq->cmd_flag |= LPFC_IO_FOF;
/* Setup callback routine and issue the command. */
- abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
/*
* Indicate the IO is being aborted by the driver and set
* the caller's flag into the aborted IO.
*/
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@@ -11803,9 +12977,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
wait_queue_head_t *pdone_q;
unsigned long iflags;
struct lpfc_io_buf *lpfc_cmd;
+ size_t offset = offsetof(struct lpfc_iocbq, wqe);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
/*
* A time out has occurred for the iocb. If a time out
@@ -11814,26 +12989,27 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
*/
spin_unlock_irqrestore(&phba->hbalock, iflags);
- cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
- cmdiocbq->wait_iocb_cmpl = NULL;
- if (cmdiocbq->iocb_cmpl)
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
+ cmdiocbq->wait_cmd_cmpl = NULL;
+ if (cmdiocbq->cmd_cmpl)
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
else
lpfc_sli_release_iocbq(phba, cmdiocbq);
return;
}
- cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
- if (cmdiocbq->context2 && rspiocbq)
- memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
- &rspiocbq->iocb, sizeof(IOCB_t));
+ /* Copy the contents of the local rspiocb into the caller's buffer. */
+ cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->rsp_iocb && rspiocbq)
+ memcpy((char *)cmdiocbq->rsp_iocb + offset,
+ (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
/* Set the exchange busy flag for task management commands */
- if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
- cur_iocbq);
- if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ cur_iocbq);
+ if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
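The response copy above uses offsetof() so only the tail of the iocbq, from the wqe member onward, is overwritten and the waiter's bookkeeping fields survive. A runnable sketch of the technique on an invented struct:

    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    struct job {
            int private_state;      /* must survive the copy */
            int wqe;                /* copy starts here */
            int wcqe;
    };

    static void copy_response_tail(struct job *dst, const struct job *src)
    {
            size_t off = offsetof(struct job, wqe);

            /* copy everything from 'wqe' to the end of the struct */
            memcpy((char *)dst + off, (const char *)src + off,
                   sizeof(*dst) - off);
    }

    int main(void)
    {
            struct job rsp = { .wqe = 7, .wcqe = 9 };
            struct job mine = { .private_state = 42 };

            copy_response_tail(&mine, &rsp);
            printf("%d %d %d\n", mine.private_state, mine.wqe, mine.wcqe);
            return 0;       /* prints: 42 7 9 */
    }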
@@ -11852,7 +13028,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
- * This routine grabs the hbalock and then test the iocb_flag to
+ * This routine grabs the hbalock and then tests the cmd_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
@@ -11866,7 +13042,7 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
int ret;
spin_lock_irqsave(&phba->hbalock, iflags);
- ret = piocbq->iocb_flag & flag;
+ ret = piocbq->cmd_flag & flag;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
@@ -11875,20 +13051,20 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
/**
* lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
* @phba: Pointer to HBA context object..
- * @pring: Pointer to sli ring.
+ * @ring_number: Ring number
* @piocb: Pointer to command iocb.
* @prspiocbq: Pointer to response iocb.
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. The iocb_cmpl field of the shall be used
+ * iocb to complete. The cmd_cmpl field of the iocb shall be used
* to handle iocbs which time out. If the field is NULL, the
* function shall free the iocbq structure. If more clean up is
* needed, the caller is expected to provide a completion function
* that will provide the needed clean up. If the iocb command is
* not completed within timeout seconds, the function will either
- * free the iocbq structure (if iocb_cmpl == NULL) or execute the
- * completion function set in the iocb_cmpl field and then return
+ * free the iocbq structure (if cmd_cmpl == NULL) or execute the
+ * completion function set in the cmd_cmpl field and then return
* a status of IOCB_TIMEDOUT. The caller should not free the iocb
* resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
@@ -11900,7 +13076,7 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* This function assumes that the iocb completions occur while
* this function sleeps. So, this function cannot be called from
* the thread which processes iocb completion for this ring.
- * This function clears the iocb_flag of the iocb object before
+ * This function clears the cmd_flag of the iocb object before
* issuing the iocb and the iocb completion handler sets this
* flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq
@@ -11926,24 +13102,26 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
unsigned long iflags;
bool iocb_completed = true;
- if (phba->sli_rev >= LPFC_SLI_REV4)
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
pring = lpfc_sli4_calc_ring(phba, piocb);
- else
+ } else
pring = &phba->sli.sli3_ring[ring_number];
/*
- * If the caller has provided a response iocbq buffer, then context2
+ * If the caller has provided a response iocbq buffer, then rsp_iocb
* is NULL or its an error.
*/
if (prspiocbq) {
- if (piocb->context2)
+ if (piocb->rsp_iocb)
return IOCB_ERROR;
- piocb->context2 = prspiocbq;
+ piocb->rsp_iocb = prspiocbq;
}
- piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
- piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+ piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
+ piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
+ piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -11961,7 +13139,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+ if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
/*
* IOCB timed out. Inform the wake iocb wait
@@ -11969,7 +13147,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
*/
iocb_completed = false;
- piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb_completed) {
@@ -11981,12 +13159,12 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
* completed. Not that it completed successfully.
* */
} else if (timeleft == 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0338 IOCB wait timeout error - no "
"wake response Data x%x\n", timeout);
retval = IOCB_TIMEDOUT;
} else {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0330 IOCB wake NOT set, "
"Data x%x x%lx\n",
timeout, (timeleft / jiffies));
@@ -12021,10 +13199,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
if (prspiocbq)
- piocb->context2 = NULL;
+ piocb->rsp_iocb = NULL;
piocb->context_un.wait_queue = NULL;
- piocb->iocb_cmpl = NULL;
+ piocb->cmd_cmpl = NULL;
return retval;
}
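lpfc_sli_issue_iocb_wait() makes an asynchronous iocb synchronous by parking the caller's completion in wait_cmd_cmpl, installing a wake-up completion, sleeping, and restoring on exit. A userspace sketch of that wrap/wait/restore sequence using pthreads; the cmd struct and issue() are hypothetical:

    #include <pthread.h>
    #include <stdbool.h>

    struct cmd;
    typedef void (*cmpl_fn)(struct cmd *);

    struct cmd {
            cmpl_fn cmpl, saved_cmpl;
            pthread_mutex_t lock;
            pthread_cond_t done_cv;
            bool done;
    };

    extern int issue(struct cmd *c);        /* fires c->cmpl() asynchronously */

    static void wake_cmpl(struct cmd *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->done_cv);
            pthread_mutex_unlock(&c->lock);
    }

    static int issue_and_wait(struct cmd *c)
    {
            int rc;

            c->saved_cmpl = c->cmpl;        /* park the caller's handler */
            c->cmpl = wake_cmpl;
            c->done = false;

            rc = issue(c);
            if (rc == 0) {
                    pthread_mutex_lock(&c->lock);
                    while (!c->done)        /* a timed wait would go here */
                            pthread_cond_wait(&c->done_cv, &c->lock);
                    pthread_mutex_unlock(&c->lock);
            }
            c->cmpl = c->saved_cmpl;        /* restore before returning */
            return rc;
    }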
@@ -12095,6 +13273,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/**
* lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
* @phba: Pointer to HBA context.
+ * @mbx_action: Mailbox shutdown options.
*
* This function is called to shutdown the driver's mailbox sub-system.
* It first marks the mailbox sub-system as being in a blocked state to prevent
@@ -12229,6 +13408,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
@@ -12248,7 +13428,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
}
if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
(~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1423 HBA Unrecoverable error: "
"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
"ue_mask_lo_reg=0x%x, "
@@ -12279,7 +13459,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
@@ -12295,7 +13480,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2886 HBA Error Attention on unsupported "
"if type %d.", if_type);
return 1;
@@ -12359,7 +13544,7 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
ha_copy = lpfc_sli4_eratt_read(phba);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0299 Invalid SLI revision (%d)\n",
phba->sli_rev);
ha_copy = 0;
@@ -12592,8 +13777,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"(%d):0304 Stray Mailbox "
"Interrupt mbxCommand x%x "
"mbxStatus x%x\n",
@@ -12653,7 +13837,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
if (rc != MBX_BUSY)
lpfc_printf_log(phba,
KERN_ERR,
- LOG_MBOX | LOG_SLI,
+ LOG_TRACE_EVENT,
"0350 rc should have"
"been MBX_BUSY\n");
if (rc != MBX_NOT_FINISHED)
@@ -12668,7 +13852,21 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
spin_unlock_irqrestore(
&phba->pport->work_port_lock,
iflag);
- lpfc_mbox_cmpl_put(phba, pmb);
+
+ /* Do NOT queue MBX_HEARTBEAT to the worker
+ * thread for processing.
+ */
+ if (pmbox->mbxCommand == MBX_HEARTBEAT) {
+ /* Process mbox now */
+ phba->sli.mbox_active = NULL;
+ phba->sli.sli_flag &=
+ ~LPFC_SLI_MBOX_ACTIVE;
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba, pmb);
+ } else {
+ /* Queue to worker thread to process */
+ lpfc_mbox_cmpl_put(phba, pmb);
+ }
}
} else
spin_unlock_irqrestore(&phba->hbalock, iflag);
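The MBX_HEARTBEAT special case above runs the completion inline because bouncing it through the worker thread could stall the very heartbeat that is supposed to detect a stalled worker. A small sketch of that inline-versus-queued dispatch rule, with a hypothetical opcode:

    struct mbox {
            int cmd;
            void (*cmpl)(struct mbox *);
    };

    #define CMD_HEARTBEAT 0x31      /* hypothetical opcode value */

    extern void queue_to_worker(struct mbox *m);

    static void complete_mbox(struct mbox *m)
    {
            if (m->cmd == CMD_HEARTBEAT) {
                    /* latency-critical: run now, never wait on the worker */
                    if (m->cmpl)
                            m->cmpl(m);
            } else {
                    queue_to_worker(m);     /* normal deferred path */
            }
    }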
@@ -12682,8 +13880,9 @@ send_current_mbox:
MBX_NOWAIT);
} while (rc == MBX_NOT_FINISHED);
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
- LOG_SLI, "0349 rc should be "
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
+ "0349 rc should be "
"MBX_SUCCESS\n");
}
@@ -12917,155 +14116,46 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the els xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the els xri abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
/* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
/* Notify aborted XRI for ELS work queue */
lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+
/* Free the event processed back to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}
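The drain loop now holds the dedicated els_xri_abrt_list_lock only across list manipulation and drops it around each callout. A self-contained sketch of that drain-with-lock-drop pattern using pthreads:

    #include <pthread.h>
    #include <stddef.h>

    struct evt { struct evt *next; };

    extern void process(struct evt *e);     /* may sleep or take other locks */

    static void drain(pthread_mutex_t *lock, struct evt **head)
    {
            pthread_mutex_lock(lock);
            while (*head) {
                    struct evt *e = *head;

                    *head = e->next;                /* unlink under the lock */
                    pthread_mutex_unlock(lock);     /* never call out locked */
                    process(e);
                    pthread_mutex_lock(lock);       /* reacquire to pop again */
            }
            pthread_mutex_unlock(lock);
    }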
/**
- * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
- * @phba: pointer to lpfc hba data structure
- * @pIocbIn: pointer to the rspiocbq
- * @pIocbOut: pointer to the cmdiocbq
- * @wcqe: pointer to the complete wcqe
- *
- * This routine transfers the fields of a command iocbq to a response iocbq
- * by copying all the IOCB fields from command iocbq and transferring the
- * completion status information from the complete wcqe.
- **/
-static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
- struct lpfc_iocbq *pIocbIn,
- struct lpfc_iocbq *pIocbOut,
- struct lpfc_wcqe_complete *wcqe)
-{
- int numBdes, i;
- unsigned long iflags;
- uint32_t status, max_response;
- struct lpfc_dmabuf *dmabuf;
- struct ulp_bde64 *bpl, bde;
- size_t offset = offsetof(struct lpfc_iocbq, iocb);
-
- memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
- sizeof(struct lpfc_iocbq) - offset);
- /* Map WCQE parameters into irspiocb parameters */
- status = bf_get(lpfc_wcqe_c_status, wcqe);
- pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
- if (pIocbOut->iocb_flag & LPFC_IO_FCP)
- if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
- pIocbIn->iocb.un.fcpi.fcpi_parm =
- pIocbOut->iocb.un.fcpi.fcpi_parm -
- wcqe->total_data_placed;
- else
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else {
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- switch (pIocbOut->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- bde.tus.w = le32_to_cpu(bpl[1].tus.w);
- max_response = bde.tus.f.bdeSize;
- break;
- case CMD_GEN_REQUEST64_CR:
- max_response = 0;
- if (!pIocbOut->context3)
- break;
- numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
- sizeof(struct ulp_bde64);
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- max_response += bde.tus.f.bdeSize;
- }
- break;
- default:
- max_response = wcqe->total_data_placed;
- break;
- }
- if (max_response < wcqe->total_data_placed)
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
- else
- pIocbIn->iocb.un.genreq64.bdl.bdeSize =
- wcqe->total_data_placed;
- }
-
- /* Convert BG errors for completion status */
- if (status == CQE_STATUS_DI_ERROR) {
- pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
-
- if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
- pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
- else
- pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
-
- pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_HI_WATER_MARK_PRESENT_MASK;
- pIocbIn->iocb.unsli3.sli3_bg.bghm =
- wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- /* Pick up HBA exchange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-}
-
-/**
- * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
* @phba: Pointer to HBA context object.
- * @wcqe: Pointer to work-queue completion queue entry.
+ * @irspiocbq: Pointer to the response iocbq carrying the ELS WCQE.
*
* This routine handles an ELS work-queue completion event and constructs
- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
* discovery engine to handle.
*
* Return: Pointer to the receive IOCBQ, NULL otherwise.
**/
static struct lpfc_iocbq *
-lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
- struct lpfc_iocbq *irspiocbq)
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *irspiocbq)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
@@ -13077,11 +14167,13 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
@@ -13091,13 +14183,18 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
}
- spin_lock_irqsave(&pring->ring_lock, iflags);
+ memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
+ memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
+
/* Put the iocb back on the txcmplq */
lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- /* Fake the irspiocbq and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
return irspiocbq;
}
@@ -13110,7 +14207,7 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
/* Allocate a new internal CQ_EVENT entry */
cq_event = lpfc_sli4_cq_event_alloc(phba);
if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0602 Failed to alloc CQ_EVENT entry\n");
return NULL;
}
@@ -13123,7 +14220,7 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
- * @cqe: Pointer to mailbox completion queue entry.
+ * @mcqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry with an
* asynchronous event.
@@ -13144,9 +14241,13 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
if (!cq_event)
return false;
- spin_lock_irqsave(&phba->hbalock, iflags);
+
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
+
/* Set the async event flag */
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag |= ASYNC_EVENT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -13156,7 +14257,7 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
/**
* lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
* @phba: Pointer to HBA context object.
- * @cqe: Pointer to mailbox completion queue entry.
+ * @mcqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry with a mailbox
* completion event.
@@ -13185,7 +14286,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
spin_lock_irqsave(&phba->hbalock, iflags);
pmb = phba->sli.mbox_active;
if (unlikely(!pmb)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1832 No pending MBOX command to handle\n");
spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out_no_mqe_complete;
@@ -13223,19 +14324,32 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- /* Reg_LOGIN of dflt RPI was successful. Now lets get
- * RID of the PPI using the same mbox buffer.
+
+ /* Reg_LOGIN of dflt RPI was successful. Mark the
+ * node as having an UNREG_LOGIN in progress to stop
+ * an unsolicited PLOGI from the same NPortId from
+ * starting another mailbox transaction.
*/
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_unreg_login(phba, vport->vpi,
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
pmb->ctx_buf = mp;
+
+ /* No reference taken here. This is a default
+ * RPI reg/immediate unreg cycle. The reference was
+ * taken in the reg rpi path and is released when
+ * this mailbox completes.
+ */
pmb->ctx_ndlp = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_BUSY)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
- LOG_SLI, "0385 rc should "
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
+ "0385 rc should "
"have been MBX_BUSY\n");
if (rc != MBX_NOT_FINISHED)
goto send_current_mbox;
@@ -13245,7 +14359,26 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
- /* There is mailbox completion work to do */
+ /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
+ if (pmbox->mbxCommand == MBX_HEARTBEAT) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Release the mailbox command posting token */
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Post the next mbox command, if there is one */
+ lpfc_sli4_post_async_mbox(phba);
+
+ /* Process cmpl now */
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba, pmb);
+ return false;
+ }
+
+ /* There is mailbox completion work to queue to the worker thread */
spin_lock_irqsave(&phba->hbalock, iflags);
__lpfc_mbox_cmpl_put(phba, pmb);
phba->work_ha |= HA_MBATT;
@@ -13276,6 +14409,7 @@ out_no_mqe_complete:
/**
* lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
* @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
* @cqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry; it invokes the
@@ -13342,7 +14476,7 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txq_cnt++;
if (!list_empty(&pring->txcmplq))
txcmplq_cnt++;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
"els_txcmplq_cnt=%d\n",
txq_cnt, phba->iocb_cnt,
@@ -13419,21 +14553,24 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
- cq_event = lpfc_cq_event_setup(
- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
- if (!cq_event)
- return false;
+ cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
+ if (!cq_event) {
+ workposted = false;
+ break;
+ }
cq_event->hdwq = cq->hdwq;
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Set the els xri abort event flag */
phba->hba_flag |= ELS_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
workposted = true;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0603 Invalid CQ subtype %d: "
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
@@ -13481,9 +14618,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
status = bf_get(lpfc_rcqe_status, rcqe);
switch (status) {
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2537 Receive Frame Truncated!!\n");
- /* fall through */
+ fallthrough;
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -13503,7 +14640,11 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Handle MDS Loopback frames */
- lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli4_handle_mds_loopback(phba->pport,
+ dma_buf);
+ else
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
break;
}
@@ -13518,7 +14659,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_INSUFF_BUF_FRM_DISC:
if (phba->nvmet_support) {
tgtp = phba->targetport->private;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6402 RQE Error x%x, posted %d err_cnt "
"%d: %x %x %x\n",
status, hrq->RQ_buf_posted,
@@ -13527,7 +14668,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
- /* fallthrough */
+ fallthrough;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
@@ -13590,7 +14731,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
(struct lpfc_rcqe *)&cqevt);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0388 Not a valid WCQE code: x%x\n",
bf_get(lpfc_cqe_code, &cqevt));
break;
@@ -13602,6 +14743,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
* @phba: Pointer to HBA context object.
* @eqe: Pointer to fast-path event queue entry.
+ * @speq: Pointer to slow-path event queue.
*
* This routine processes an event queue entry from the slow-path event queue.
* It will check the MajorCode and MinorCode to determine whether this is for a
@@ -13617,6 +14759,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
{
struct lpfc_queue *cq = NULL, *childq;
uint16_t cqid;
+ int ret = 0;
/* Get the reference to the corresponding CQ */
cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
@@ -13629,7 +14772,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
if (unlikely(!cq)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0365 Slow-path CQ identifier "
"(%d) does not exist\n", cqid);
return;
@@ -13638,9 +14781,14 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Save EQ associated with this CQ */
cq->assoc_qp = speq;
- if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0390 Cannot schedule soft IRQ "
+ if (is_kdump_kernel())
+ ret = queue_work(phba->wq, &cq->spwork);
+ else
+ ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
+
+ if (!ret)
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0390 Cannot schedule queue work "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
cqid, cq->queue_id, raw_smp_processor_id());
}
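The scheduling change prefers the CPU-pinned queue_work_on() but falls back to plain queue_work() under kdump, where typically only one CPU is online and pinning could target an offline CPU. A module-style sketch of that choice; only the cq structure is invented, the workqueue calls are the stock kernel API:

    #include <linux/workqueue.h>
    #include <linux/crash_dump.h>

    struct my_cq {
            int chann;                      /* CPU this CQ is mapped to */
            struct work_struct work;
    };

    static bool schedule_cq_work(struct workqueue_struct *wq, struct my_cq *cq)
    {
            if (is_kdump_kernel())
                    return queue_work(wq, &cq->work);       /* any online CPU */

            /* normal kernel: keep CQ processing on its mapped CPU */
            return queue_work_on(cq->chann, wq, &cq->work);
    }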
@@ -13651,6 +14799,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
* @cq: Pointer to CQ to be processed
* @handler: Routine to process each cqe
* @delay: Pointer to usdelay to set in case of rescheduling of the handler
+ * @poll_mode: Polling mode we were called from
*
* This routine processes completion queue entries in a CQ. While a valid
* queue element is found, the handler is called. During processing checks
@@ -13668,7 +14817,8 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_cqe *), unsigned long *delay)
+ struct lpfc_cqe *), unsigned long *delay,
+ enum lpfc_poll_mode poll_mode)
{
struct lpfc_cqe *cqe;
bool workposted = false;
@@ -13709,6 +14859,10 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
arm = false;
}
+ /* Note: complete the irq_poll softirq before rearming CQ */
+ if (poll_mode == LPFC_IRQ_POLL)
+ irq_poll_complete(&cq->iop);
+
/* Track the max number of CQEs processed in 1 EQ */
if (count > cq->CQ_max_cqe)
cq->CQ_max_cqe = count;
@@ -13721,7 +14875,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
"0369 No entry from completion queue "
"qid=%d\n", cq->queue_id);
- cq->queue_claimed = 0;
+ xchg(&cq->queue_claimed, 0);
rearm_and_exit:
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
@@ -13731,7 +14885,7 @@ rearm_and_exit:
}
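Clearing queue_claimed with xchg() rather than a plain store gives the release full-barrier semantics, so CQE processing completed beforehand cannot be reordered past the clear. A userspace analogue of claim/release with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int queue_claimed;

    static bool try_claim(void)
    {
            /* true only for the winner of the race */
            return atomic_exchange(&queue_claimed, 1) == 0;
    }

    static void release_claim(void)
    {
            /* seq_cst exchange: prior work is visible before the claim clears */
            atomic_exchange(&queue_claimed, 0);
    }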
/**
- * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
+ * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
* @cq: pointer to CQ to process
*
* This routine calls the cq processing routine with a handler specific
@@ -13751,36 +14905,42 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
struct lpfc_hba *phba = cq->phba;
unsigned long delay;
bool workposted = false;
+ int ret = 0;
/* Process and rearm the CQ */
switch (cq->type) {
case LPFC_MCQ:
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_mcqe,
- &delay);
+ &delay, LPFC_QUEUE_WORK);
break;
case LPFC_WCQ:
if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
- &delay);
+ &delay, LPFC_QUEUE_WORK);
else
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_cqe,
- &delay);
+ &delay, LPFC_QUEUE_WORK);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0370 Invalid completion queue type (%d)\n",
cq->type);
return;
}
if (delay) {
- if (!queue_delayed_work_on(cq->chann, phba->wq,
- &cq->sched_spwork, delay))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Cannot schedule soft IRQ "
+ if (is_kdump_kernel())
+ ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
+ delay);
+ else
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_spwork, delay);
+ if (!ret)
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0394 Cannot schedule queue work "
"for cqid=%d on CPU %d\n",
cq->queue_id, cq->chann);
}
@@ -13835,7 +14995,6 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_sli_ring *pring = cq->pring;
struct lpfc_iocbq *cmdiocbq;
- struct lpfc_iocbq irspiocbq;
unsigned long iflags;
/* Check for response status */
@@ -13849,9 +15008,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
IOERR_NO_RESOURCES))
phba->lpfc_rampdown_queue_depth(phba);
- /* Log the error status */
+ /* Log the cmpl status */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0373 FCP CQE error: status=x%x: "
+ "0373 FCP CQE cmpl: status=x%x: "
"CQE: %08x %08x %08x %08x\n",
bf_get(lpfc_wcqe_c_status, wcqe),
wcqe->word0, wcqe->total_data_placed,
@@ -13861,9 +15020,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Look up the FCP command IOCB and create pseudo response IOCB */
spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
@@ -13874,36 +15033,31 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
- if (cmdiocbq->iocb_cmpl == NULL) {
- if (cmdiocbq->wqe_cmpl) {
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
- /* Pass the cmd_iocb and the wcqe to the upper layer */
- (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
- return;
+ if (cmdiocbq->cmd_cmpl) {
+ /* For FCP the flag is cleared in cmd_cmpl */
+ if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+
+ /* Pass the cmd_iocb and the wcqe to the upper layer */
+ memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
+ sizeof(struct lpfc_wcqe_complete));
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
+ } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
"iotag: (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- return;
- }
-
- /* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
-
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
}
-
- /* Pass the cmd_iocb and the rsp state to the upper layer */
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
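The earlier hunk in this function also tightens locking: the iotag lookup now happens while pring->ring_lock is held, so the command iocb cannot complete and be recycled between lookup and use. The shape of that fix, as a hedged sketch with a hypothetical wrapper:

	static struct lpfc_iocbq *lookup_cmd_iocb(struct lpfc_hba *phba,
						  struct lpfc_sli_ring *pring,
						  u16 tag)
	{
		struct lpfc_iocbq *cmd;
		unsigned long iflags;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		pring->stats.iocb_event++;
		/* lookup and the stats bump share one critical section */
		cmd = lpfc_sli_iocbq_lookup_by_tag(phba, pring, tag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		return cmd;
	}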
/**
@@ -13945,6 +15099,7 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/**
* lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
* @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
* @rcqe: Pointer to receive-queue completion queue entry.
*
* This routine process a receive-queue completion queue entry.
@@ -13989,9 +15144,9 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
status = bf_get(lpfc_rcqe_status, rcqe);
switch (status) {
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6126 Receive Frame Truncated!!\n");
- /* fall through */
+ fallthrough;
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -14008,8 +15163,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Just some basic sanity checks on FCP Command frame */
fctl = (fc_hdr->fh_f_ctl[0] << 16 |
- fc_hdr->fh_f_ctl[1] << 8 |
- fc_hdr->fh_f_ctl[2]);
+ fc_hdr->fh_f_ctl[1] << 8 |
+ fc_hdr->fh_f_ctl[2]);
if (((fctl &
(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
@@ -14029,7 +15184,7 @@ drop:
case FC_STATUS_INSUFF_BUF_FRM_DISC:
if (phba->nvmet_support) {
tgtp = phba->targetport->private;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6401 RQE Error x%x, posted %d err_cnt "
"%d: %x %x %x\n",
status, hrq->RQ_buf_posted,
@@ -14038,7 +15193,7 @@ drop:
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
- /* fallthrough */
+ fallthrough;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
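The /* fallthrough */ comments in this file are converted to the fallthrough pseudo-keyword (from include/linux/compiler_attributes.h), which -Wimplicit-fallthrough can actually verify. A self-contained sketch of the idiom; handle_rq_status() and its helpers are hypothetical:

	static void handle_rq_status(u32 status)
	{
		switch (status) {
		case FC_STATUS_INSUFF_BUF_FRM_DISC:
			note_frame_discarded();	/* hypothetical helper */
			fallthrough;	/* deliberate: both statuses repost */
		case FC_STATUS_INSUFF_BUF_NEED_BUF:
			repost_buffers();	/* hypothetical helper */
			break;
		default:
			break;
		}
	}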
@@ -14053,7 +15208,7 @@ out:
* lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
* @phba: adapter with cq
* @cq: Pointer to the completion queue.
- * @eqe: Pointer to fast-path completion queue entry.
+ * @cqe: Pointer to fast-path completion queue entry.
*
* This routine process a fast-path work queue completion entry from fast-path
* event queue for FCP command response completion.
@@ -14103,7 +15258,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0144 Not a valid CQE code: x%x\n",
bf_get(lpfc_wcqe_c_code, &wcqe));
break;
@@ -14112,8 +15267,51 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
/**
+ * lpfc_sli4_sched_cq_work - Schedules cq work
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to CQ
+ * @cqid: CQ ID
+ *
+ * This routine checks the poll mode of the CQ corresponding to
+ * cq->chann, then either schedules a softirq or queue_work to complete
+ * cq work.
+ *
+ * queue_work path is taken if in NVMET mode, or if poll_mode is in
+ * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
+ *
+ **/
+static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
+ struct lpfc_queue *cq, uint16_t cqid)
+{
+ int ret = 0;
+
+ switch (cq->poll_mode) {
+ case LPFC_IRQ_POLL:
+ /* CGN mgmt is mutually exclusive from softirq processing */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+ irq_poll_sched(&cq->iop);
+ break;
+ }
+ fallthrough;
+ case LPFC_QUEUE_WORK:
+ default:
+ if (is_kdump_kernel())
+ ret = queue_work(phba->wq, &cq->irqwork);
+ else
+ ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
+ if (!ret)
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0383 Cannot schedule queue work "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id,
+ raw_smp_processor_id());
+ }
+}
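lpfc_sli4_sched_cq_work() above is one half of the irq_poll integration; irq_poll_complete() in __lpfc_sli4_process_cq() and irq_poll_init() in lpfc_cq_create() (both elsewhere in this patch) are the others. A generic sketch of how the lib/irq_poll.c API fits together, with drain_some_completions() as a hypothetical stand-in for the CQ handler:

	#include <linux/irq_poll.h>

	static int my_poll(struct irq_poll *iop, int budget)
	{
		int done = drain_some_completions(budget);	/* hypothetical */

		/* under budget means the queue is drained: stop polling */
		if (done < budget)
			irq_poll_complete(iop);

		return done;
	}

	/* setup:    irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, my_poll); */
	/* hard IRQ: irq_poll_sched(&cq->iop);  -- runs my_poll in softirq */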
+
+/**
* lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
* @phba: Pointer to HBA context object.
+ * @eq: Pointer to the queue structure.
* @eqe: Pointer to fast-path event queue entry.
*
* This routine process a event queue entry from the fast-path event queue.
@@ -14132,7 +15330,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
uint16_t cqid, id;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0366 Not a valid completion "
"event: majorcode=x%x, minorcode=x%x\n",
bf_get_le32(lpfc_eqe_major_code, eqe),
@@ -14175,7 +15373,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
process_cq:
if (unlikely(cqid != cq->queue_id)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0368 Miss-matched fast-path completion "
"queue identifier: eqcqid=%d, fcpcqid=%d\n",
cqid, cq->queue_id);
@@ -14189,16 +15387,13 @@ work_cq:
else
cq->isr_timestamp = 0;
#endif
- if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0363 Cannot schedule soft IRQ "
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id, raw_smp_processor_id());
+ lpfc_sli4_sched_cq_work(phba, cq, cqid);
}
/**
* __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
* @cq: Pointer to CQ to be processed
+ * @poll_mode: Enum lpfc_poll_state to determine poll mode
*
* This routine calls the cq processing routine with the handler for
* fast path CQEs.
@@ -14212,23 +15407,30 @@ work_cq:
* the delay indicates when to reschedule it.
**/
static void
-__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
+ enum lpfc_poll_mode poll_mode)
{
struct lpfc_hba *phba = cq->phba;
unsigned long delay;
bool workposted = false;
+ int ret = 0;
/* process and rearm the CQ */
workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
- &delay);
+ &delay, poll_mode);
if (delay) {
- if (!queue_delayed_work_on(cq->chann, phba->wq,
- &cq->sched_irqwork, delay))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0367 Cannot schedule soft IRQ "
- "for cqid=%d on CPU %d\n",
- cq->queue_id, cq->chann);
+ if (is_kdump_kernel())
+ ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
+ delay);
+ else
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_irqwork, delay);
+ if (!ret)
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0367 Cannot schedule queue work "
+ "for cqid=%d on CPU %d\n",
+ cq->queue_id, cq->chann);
}
/* wake up worker thread if there are works to be done */
@@ -14248,11 +15450,11 @@ lpfc_sli4_hba_process_cq(struct work_struct *work)
{
struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
- __lpfc_sli4_hba_process_cq(cq);
+ __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}
/**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer
+ * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
* @work: pointer to work element
*
* translates from the work handler and calls the fast-path handler.
@@ -14263,7 +15465,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
struct lpfc_queue *cq = container_of(to_delayed_work(work),
struct lpfc_queue, sched_irqwork);
- __lpfc_sli4_hba_process_cq(cq);
+ __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}
/**
@@ -14302,7 +15504,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
int ecount = 0;
int hba_eqidx;
struct lpfc_eq_intr_info *eqi;
- uint32_t icnt;
/* Get the driver's phba structure from the dev_id */
hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14330,11 +15531,12 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = phba->sli4_hba.eq_info;
- icnt = this_cpu_inc_return(eqi->icnt);
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
+
fpeq->last_cpu = raw_smp_processor_id();
- if (icnt > LPFC_EQD_ISR_TRIGGER &&
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
@@ -14356,7 +15558,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
}
return IRQ_HANDLED;
-} /* lpfc_sli4_fp_intr_handler */
+} /* lpfc_sli4_hba_intr_handler */
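The interrupt-handler hunk above drops this_cpu_inc_return() in favor of a plain increment through this_cpu_ptr(): in hard-IRQ context preemption is already disabled, and the counter is only compared against LPFC_EQD_ISR_TRIGGER, never consumed atomically. A minimal sketch, assuming the lpfc field names:

	static void count_eq_interrupt(struct lpfc_hba *phba)
	{
		/* hard-IRQ context: preemption is off, a plain ++ suffices */
		struct lpfc_eq_intr_info *eqi =
			this_cpu_ptr(phba->sli4_hba.eq_info);

		eqi->icnt++;
	}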
/**
* lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
@@ -14450,12 +15652,10 @@ static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
- if (list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
- /* kickstart slowpath processing for this eq */
+ /* kickstart slowpath processing if needed */
+ if (list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
list_add_rcu(&eq->_poll_list, &phba->poll_list);
synchronize_rcu();
@@ -14586,7 +15786,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* @phba: The HBA that this queue is being created on.
* @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
- * @entry count: The number of entries that this queue will handle.
+ * @entry_count: The number of entries that this queue will handle.
* @cpu: The cpu that will primarily utilize this queue.
*
* This function allocates a queue structure and the DMAable memory used for
@@ -14758,7 +15958,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6428 Failed allocating mailbox cmd buffer."
" EQ delay was not set.\n");
return;
@@ -14793,14 +15993,13 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2512 MODIFY_EQ_DELAY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -14877,14 +16076,14 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
dmult);
switch (eq->entry_count) {
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0360 Unsupported EQ count. (%d)\n",
eq->entry_count);
if (eq->entry_count < 256) {
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
@@ -14921,7 +16120,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2500 EQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -14940,11 +16139,22 @@ out:
return status;
}
+static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
+{
+ struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
+
+ __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
+
+ return 1;
+}
+
/**
* lpfc_cq_create - Create a Completion Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @cq: The queue structure to use to create the completion queue.
* @eq: The event queue to bind this completion queue to.
+ * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
+ * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
*
* This function creates a completion queue, as detailed in @cq, on a port,
* described by @phba by sending a CQ_CREATE mailbox command to the HBA.
@@ -15015,9 +16225,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
LPFC_CQ_CNT_WORD7);
break;
}
- /* fall through */
+ fallthrough;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0361 Unsupported CQ count: "
"entry cnt %d sz %d pg cnt %d\n",
cq->entry_count, cq->entry_size,
@@ -15026,7 +16236,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
@@ -15053,7 +16263,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2501 CQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -15079,6 +16289,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (cq->queue_id > phba->sli4_hba.cq_max)
phba->sli4_hba.cq_max = cq->queue_id;
+
+ irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
@@ -15089,6 +16301,8 @@ out:
* @phba: HBA structure that indicates port to create a queue on.
* @cqp: The queue structure array to use to create the completion queues.
* @hdwq: The hardware queue array with the EQ to bind completion queues to.
+ * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
+ * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
*
* This function creates a set of completion queues to support MRQ
* as detailed in @cqp, on a port,
@@ -15138,7 +16352,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
LPFC_SLI4_MBX_NEMBED);
if (alloclen < length) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3098 Allocated DMA memory size (%d) is "
"less than the requested DMA memory size "
"(%d)\n", alloclen, length);
@@ -15190,16 +16404,16 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
LPFC_CQ_CNT_WORD7);
break;
}
- /* fall through */
+ fallthrough;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3118 Bad CQ count. (%d)\n",
cq->entry_count);
if (cq->entry_count < 256) {
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest */
+ fallthrough; /* otherwise default to smallest */
case 256:
bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
&cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15310,7 +16524,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3119 CQ_CREATE_SET mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -15468,14 +16682,14 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
cq->queue_id);
switch (mq->entry_count) {
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0362 Unsupported MQ count. (%d)\n",
mq->entry_count);
if (mq->entry_count < 16) {
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 16:
bf_set(lpfc_mq_context_ring_size,
&mq_create_ext->u.request.context,
@@ -15524,7 +16738,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2502 MQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -15587,8 +16801,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint16_t pci_barset;
uint8_t dpp_barset;
uint32_t dpp_offset;
- unsigned long pg_addr;
uint8_t wq_create_version;
+#ifdef CONFIG_X86
+ unsigned long pg_addr;
+#endif
/* sanity check on queue memory */
if (!wq || !cq)
@@ -15621,12 +16837,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
else
wq_create_version = LPFC_Q_CREATE_VERSION_0;
-
- if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
- wq_create_version = LPFC_Q_CREATE_VERSION_1;
- else
- wq_create_version = LPFC_Q_CREATE_VERSION_0;
-
switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
@@ -15673,7 +16883,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2503 WQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -15700,7 +16910,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
&wq_create->u.response);
if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
(wq->db_format != LPFC_DB_RING_FORMAT)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3265 WQ[%d] doorbell format "
"not supported: x%x\n",
wq->queue_id, wq->db_format);
@@ -15712,7 +16922,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
pci_barset);
if (!bar_memmap_p) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3263 WQ[%d] failed to memmap "
"pci barset:x%x\n",
wq->queue_id, pci_barset);
@@ -15722,7 +16932,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
db_offset = wq_create->u.response.doorbell_offset;
if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
(db_offset != LPFC_ULP1_WQ_DOORBELL)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3252 WQ[%d] doorbell offset "
"not supported: x%x\n",
wq->queue_id, db_offset);
@@ -15746,7 +16956,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
pci_barset);
if (!bar_memmap_p) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3267 WQ[%d] failed to memmap "
"pci barset:x%x\n",
wq->queue_id, pci_barset);
@@ -15762,7 +16972,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
dpp_barset);
if (!bar_memmap_p) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3268 WQ[%d] failed to memmap "
"pci barset:x%x\n",
wq->queue_id, dpp_barset);
@@ -15778,9 +16988,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->queue_id, pci_barset, db_offset,
wq->dpp_id, dpp_barset, dpp_offset);
+#ifdef CONFIG_X86
/* Enable combined writes for DPP aperture */
pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
-#ifdef CONFIG_X86
rc = set_memory_wc(pg_addr, 1);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
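The hunk above moves pg_addr under the same CONFIG_X86 guard as its only user, fixing an unused-variable warning on other architectures. For context, a hedged sketch of the write-combining setup it guards: set_memory_wc() works on whole pages, hence the PAGE_MASK alignment, and merged doorbell writes are only a best-effort optimization; the wrapper below is hypothetical:

	static int map_dpp_write_combining(struct lpfc_queue *wq)
	{
	#ifdef CONFIG_X86
		unsigned long pg_addr =
			(unsigned long)wq->dpp_regaddr & PAGE_MASK;

		/* page-granular: maps the whole DPP doorbell page as WC */
		return set_memory_wc(pg_addr, 1);
	#else
		return 0;	/* other arches keep the default mapping */
	#endif
	}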
@@ -15820,6 +17030,7 @@ out:
* @hrq: The queue structure to use to create the header receive queue.
* @drq: The queue structure to use to create the data receive queue.
* @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
*
* This function creates a receive buffer queue pair, as detailed in @hrq and
* @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
@@ -15886,14 +17097,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
} else {
switch (hrq->entry_count) {
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2535 Unsupported RQ count. (%d)\n",
hrq->entry_count);
if (hrq->entry_count < 512) {
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
@@ -15937,7 +17148,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2504 RQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -15955,7 +17166,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
&rq_create->u.response);
if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
(hrq->db_format != LPFC_DB_RING_FORMAT)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3262 RQ [%d] doorbell format not "
"supported: x%x\n", hrq->queue_id,
hrq->db_format);
@@ -15967,7 +17178,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
&rq_create->u.response);
bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
if (!bar_memmap_p) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3269 RQ[%d] failed to memmap pci "
"barset:x%x\n", hrq->queue_id,
pci_barset);
@@ -15978,7 +17189,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
db_offset = rq_create->u.response.doorbell_offset;
if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
(db_offset != LPFC_ULP1_RQ_DOORBELL)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3270 RQ[%d] doorbell offset not "
"supported: x%x\n", hrq->queue_id,
db_offset);
@@ -16023,14 +17234,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
} else {
switch (drq->entry_count) {
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2536 Unsupported RQ count. (%d)\n",
drq->entry_count);
if (drq->entry_count < 512) {
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
@@ -16109,6 +17320,7 @@ out:
* @hrqp: The queue structure array to use to create the header receive queues.
* @drqp: The queue structure array to use to create the data receive queues.
* @cqp: The completion queue array to bind these receive queues to.
+ * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
*
* This function creates a receive buffer queue pair, as detailed in @hrq and
* @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
@@ -16160,7 +17372,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
LPFC_SLI4_MBX_NEMBED);
if (alloclen < length) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3099 Allocated DMA memory size (%d) is "
"less than the requested DMA memory size "
"(%d)\n", alloclen, length);
@@ -16270,7 +17482,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3120 RQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16298,6 +17510,7 @@ out:
/**
* lpfc_eq_destroy - Destroy an event Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
* @eq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @eq, by sending a mailbox
@@ -16340,7 +17553,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2505 EQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16355,6 +17568,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
/**
* lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
* @cq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @cq, by sending a mailbox
@@ -16395,7 +17609,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2506 CQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16409,7 +17623,8 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
/**
* lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
- * @qm: The queue structure associated with the queue to destroy.
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @mq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @mq, by sending a mailbox
* command, specific to the type of queue, to the HBA.
@@ -16449,7 +17664,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2507 MQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16463,6 +17678,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
/**
* lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
* @wq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @wq, by sending a mailbox
@@ -16502,7 +17718,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2508 WQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16518,7 +17734,9 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
/**
* lpfc_rq_destroy - Destroy a Receive Queue on the HBA
- * @rq: The queue structure associated with the queue to destroy.
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @hrq: The queue structure associated with the queue to destroy.
+ * @drq: The queue structure associated with the queue to destroy.
*
* This function destroys a receive queue pair, as detailed in @hrq and @drq, by sending a mailbox
* command, specific to the type of queue, to the HBA.
@@ -16559,12 +17777,11 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2509 RQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
return -ENXIO;
}
bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
@@ -16575,7 +17792,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2510 RQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16623,7 +17840,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0364 Invalid param:\n");
return -EINVAL;
}
@@ -16661,10 +17878,12 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2511 POST_SGL mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16695,8 +17914,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
- phba->sli4_hba.max_cfg_param.max_xri, 0);
+ xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri);
if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
spin_unlock_irq(&phba->hbalock);
return NO_XRI;
@@ -16709,8 +17928,9 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
}
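find_first_zero_bit() is equivalent to find_next_zero_bit(..., 0) but cheaper on architectures with a dedicated first-bit search; the same substitution is applied to the rpi allocator later in this patch. The allocator idiom, as a generic sketch:

	static int alloc_id(unsigned long *bmask, unsigned int max,
			    spinlock_t *lock)
	{
		unsigned int id;

		spin_lock_irq(lock);
		id = find_first_zero_bit(bmask, max);	/* search from bit 0 */
		if (id >= max) {
			spin_unlock_irq(lock);
			return -1;	/* pool exhausted */
		}
		set_bit(id, bmask);	/* claim it before dropping the lock */
		spin_unlock_irq(lock);

		return id;
	}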
/**
- * lpfc_sli4_free_xri - Release an xri for reuse.
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
* @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
*
* This routine is invoked to release an xri to the pool of
* available rpis maintained by the driver.
@@ -16726,6 +17946,7 @@ __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
/**
* lpfc_sli4_free_xri - Release an xri for reuse.
* @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
*
* This routine is invoked to release an xri to the pool of
* available rpis maintained by the driver.
@@ -16768,7 +17989,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
* @post_sgl_list: pointer to els sgl entry list.
- * @count: number of els sgl entries on the list.
+ * @post_cnt: number of els sgl entries on the list.
*
* This routine is invoked to post a block of driver's sgl pages to the
* HBA using non-embedded mailbox command. No Lock is held. This routine
@@ -16795,7 +18016,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > SLI4_PAGE_SIZE) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
return -ENOMEM;
@@ -16811,7 +18032,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
LPFC_SLI4_MBX_NEMBED);
if (alloclen < reqlen) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0285 Allocated DMA memory size (%d) is "
"less than the requested DMA memory "
"size (%d)\n", alloclen, reqlen);
@@ -16856,10 +18077,12 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ else if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2513 POST_SGL_BLOCK mailbox command failed "
"status x%x add_status x%x mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16907,7 +18130,7 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6119 Failed to allocate mbox cmd memory\n");
return -ENOMEM;
}
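Several hunks here converge on one mailbox-ownership rule: when interrupts are disabled the command was issued in polled mode and the caller always owns (and frees) the mailbox, while in interrupt mode an MBX_TIMEOUT means the completion handler still owns it and will free it later. Condensed into a hypothetical helper under that assumed semantic:

	static void put_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, int rc)
	{
		/* polled mode: caller owns the mailbox unconditionally */
		if (!phba->sli4_hba.intr_enable)
			mempool_free(mbox, phba->mbox_mem_pool);
		/* interrupt mode: on timeout the completion path frees it */
		else if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
	}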
@@ -16918,7 +18141,7 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
reqlen, LPFC_SLI4_MBX_NEMBED);
if (alloclen < reqlen) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6120 Allocated DMA memory size (%d) is "
"less than the requested DMA memory "
"size (%d)\n", alloclen, reqlen);
@@ -16969,10 +18192,12 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ else if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6125 POST_SGL_BLOCK mailbox command failed "
"status x%x add_status x%x mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -16985,6 +18210,7 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
* lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_nblist: pointer to the nvme buffer list.
+ * @sb_count: number of nvme buffers.
*
* This routine walks a list of nvme buffers that was passed in. It attempts
* to construct blocks of nvme buffer sgls which contains contiguous xris and
@@ -17141,7 +18367,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
case FC_RCTL_ELS_REP: /* extended link services reply */
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
- case FC_RCTL_BA_NOP: /* basic link service NOP */
case FC_RCTL_BA_ABTS: /* basic link service abort */
case FC_RCTL_BA_RMC: /* remove connection */
case FC_RCTL_BA_ACC: /* basic accept */
@@ -17162,6 +18387,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
return lpfc_fc_frame_check(phba, fc_hdr);
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
default:
goto drop;
}
@@ -17218,6 +18444,7 @@ lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
* @phba: Pointer to the HBA structure to search for the vport on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
* @fcfi: The FC Fabric ID that the frame came from
+ * @did: Destination ID to match against
*
* This function searches the @phba for a vport that matches the content of the
* @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
@@ -17355,6 +18582,7 @@ lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
/**
* lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @vport: pointer to a virtual port
* @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
*
* This function searches through the existing incomplete sequences that have
@@ -17414,7 +18642,6 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
seq_dmabuf->time_stamp = jiffies;
lpfc_update_rcv_time_stamp(vport);
if (list_empty(&seq_dmabuf->dbuf.list)) {
- temp_hdr = dmabuf->hbuf.virt;
list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
return seq_dmabuf;
}
@@ -17545,21 +18772,17 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- struct lpfc_nodelist *ndlp;
-
if (cmd_iocbq) {
- ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
- lpfc_nlp_put(ndlp);
- lpfc_nlp_not_used(ndlp);
+ lpfc_nlp_put(cmd_iocbq->ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
}
/* Failure means BLS ABORT RSP did not get delivered to remote node*/
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
- rsp_iocbq->iocb.ulpStatus,
- rsp_iocbq->iocb.un.ulpWord[4]);
+ get_job_ulpstatus(phba, rsp_iocbq),
+ get_job_word4(phba, rsp_iocbq));
}
/**
@@ -17585,8 +18808,9 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
- * @phba: Pointer to HBA context object.
+ * @vport: pointer to a virtual port.
* @fc_hdr: pointer to a FC frame header.
+ * @aborted: was the partially assembled receive sequence successfully aborted
*
* This function sends a basic response to a previous unsol sequence abort
* event after aborting the sequence handling.
@@ -17600,7 +18824,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
uint32_t sid, fctl;
- IOCB_t *icmd;
+ union lpfc_wqe128 *icmd;
int rc;
if (!lpfc_is_link_up(phba))
@@ -17621,15 +18845,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
}
/* Put ndlp onto pport node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "3275 Failed to active ndlp found "
- "for oxid:x%x SID:x%x\n", oxid, sid);
- return;
- }
}
/* Allocate buffer for rsp iocb */
@@ -17637,28 +18852,22 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
if (!ctiocb)
return;
+ icmd = &ctiocb->wqe;
+
/* Extract the F_CTL field from FC_HDR */
fctl = sli4_fctl_from_fc_hdr(fc_hdr);
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.bdeSize = 0;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
-
- /* Fill in the rest of iocb fields */
- icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
- icmd->ulpBdeCount = 0;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = lpfc_nlp_get(ndlp);
+ ctiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!ctiocb->ndlp) {
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ return;
+ }
ctiocb->vport = phba->pport;
- ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
+ ctiocb->abort_rctl = FC_RCTL_BA_ACC;
if (fctl & FC_FC_EX_CTX)
/* Exchange responder sent the abort so we
@@ -17678,10 +18887,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
*/
if ((fctl & FC_FC_EX_CTX) &&
(lxri > lpfc_sli4_get_iocb_cnt(phba))) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
/* If BA_ABTS failed to abort a partially assembled receive sequence,
@@ -17689,10 +18900,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* the IOCB for a BA_RJT.
*/
if (aborted == false) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
if (fctl & FC_FC_EX_CTX) {
@@ -17700,31 +18913,41 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* of BA_ACC will use OX_ID from ABTS for the XRI_TAG
* field and RX_ID from ABTS for RX_ID field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
* XRI_TAG field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
}
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
- bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+ /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
+ bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
+
+ /* Use CT=VPI */
+ bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
+ ndlp->nlp_DID);
+ bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
/* Xmit CT abts response on exchange <xid> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ ctiocb->abort_rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2925 Failed to issue CT ABTS RSP x%x on "
"xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ ctiocb->abort_rctl, oxid,
phba->link_state);
lpfc_nlp_put(ndlp);
- ctiocb->context1 = NULL;
+ ctiocb->ndlp = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
@@ -17738,7 +18961,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* receive sequence is only partially assembed by the driver, it shall abort
* the partially assembled frames for the sequence. Otherwise, if the
* unsolicited receive sequence has been completely assembled and passed to
- * the Upper Layer Protocol (UPL), it then mark the per oxid status for the
+ * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
 * unsolicited sequence as aborted. After that, it will issue a basic
* accept to accept the abort.
**/
@@ -17825,7 +19048,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
/**
* lpfc_prep_seq - Prep sequence for ULP processing
* @vport: Pointer to the vport on which this sequence was received
- * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
*
* This function takes a sequence, described by a list of frames, and creates
* a list of iocbq structures to describe the sequence. This iocbq list will be
@@ -17844,7 +19067,6 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
struct fc_frame_header *fc_hdr;
uint32_t sid;
uint32_t len, tot_len;
- struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
@@ -17857,40 +19079,40 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
- first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+ first_iocbq->wcqe_cmpl.total_data_placed = 0;
+ bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
+ IOSTAT_SUCCESS);
first_iocbq->vport = vport;
/* Check FC Header to see what TYPE of frame we are rcv'ing */
if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
- first_iocbq->iocb.un.rcvels.parmRo =
- sli4_did_from_fc_hdr(fc_hdr);
- first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
- } else
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
- first_iocbq->iocb.ulpContext = NO_XRI;
- first_iocbq->iocb.unsli3.rcvsli3.ox_id =
- be16_to_cpu(fc_hdr->fh_ox_id);
- /* iocbq is prepped for internal consumption. Physical vpi. */
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->phba->vpi_ids[vport->vpi];
- /* put the first buffer into the first IOCBq */
+ bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
+ sli4_did_from_fc_hdr(fc_hdr));
+ }
+
+ bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ NO_XRI);
+ bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ be16_to_cpu(fc_hdr->fh_ox_id));
+
+ /* put the first buffer into the first iocb */
tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
+ first_iocbq->bpl_dmabuf = NULL;
+ /* Keep track of the BDE count */
+ first_iocbq->wcqe_cmpl.word3 = 1;
- first_iocbq->context2 = &seq_dmabuf->dbuf;
- first_iocbq->context3 = NULL;
- first_iocbq->iocb.ulpBdeCount = 1;
if (tot_len > LPFC_DATA_BUF_SIZE)
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
-
- first_iocbq->iocb.un.rcvels.remoteID = sid;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+ first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
}
iocbq = first_iocbq;
/*
@@ -17902,30 +19124,25 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
- if (!iocbq->context3) {
- iocbq->context3 = d_buf;
- iocbq->iocb.ulpBdeCount++;
+ if (!iocbq->bpl_dmabuf) {
+ iocbq->bpl_dmabuf = d_buf;
+ iocbq->wcqe_cmpl.word3++;
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- if (len > LPFC_DATA_BUF_SIZE)
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
- else
- pbde->tus.f.bdeSize = len;
-
- iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ iocbq->unsol_rcv_len = len;
+ iocbq->wcqe_cmpl.total_data_placed += len;
tot_len += len;
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
if (first_iocbq) {
- first_iocbq->iocb.ulpStatus =
- IOSTAT_FCP_RSP_ERROR;
- first_iocbq->iocb.un.ulpWord[4] =
- IOERR_NO_RESOURCES;
+ bf_set(lpfc_wcqe_c_status,
+ &first_iocbq->wcqe_cmpl,
+ IOSTAT_SUCCESS);
+ first_iocbq->wcqe_cmpl.parameter =
+ IOERR_NO_RESOURCES;
}
lpfc_in_buf_free(vport->phba, d_buf);
continue;
@@ -17934,22 +19151,28 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- iocbq->context2 = d_buf;
- iocbq->context3 = NULL;
- iocbq->iocb.ulpBdeCount = 1;
+ iocbq->cmd_dmabuf = d_buf;
+ iocbq->bpl_dmabuf = NULL;
+ iocbq->wcqe_cmpl.word3 = 1;
+
if (len > LPFC_DATA_BUF_SIZE)
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ len;
tot_len += len;
- iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
-
- iocbq->iocb.un.rcvels.remoteID = sid;
+ iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
+ /* Free the sequence's header buffer */
+ if (!first_iocbq)
+ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
+
return first_iocbq;
}
@@ -17964,7 +19187,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
iocbq = lpfc_prep_seq(vport, seq_dmabuf);
if (!iocbq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2707 Ring %d handler: Failed to allocate "
"iocb Rctl x%x Type x%x received\n",
LPFC_ELS_RING,
@@ -17974,16 +19197,18 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
if (!lpfc_complete_unsol_iocb(phba,
phba->sli4_hba.els_wq->pring,
iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ fc_hdr->fh_type)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
+ }
/* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb,
- &iocbq->list, list) {
+ &iocbq->list, list) {
list_del_init(&curr_iocb->list);
lpfc_sli_release_iocbq(phba, curr_iocb);
}
@@ -17994,7 +19219,7 @@ static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
if (pcmd && pcmd->virt)
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
@@ -18010,7 +19235,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq = NULL;
- union lpfc_wqe *wqe;
+ union lpfc_wqe128 *pwqe;
struct lpfc_dmabuf *pcmd = NULL;
uint32_t frame_len;
int rc;
@@ -18045,34 +19270,46 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
/* copyin the payload */
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
- /* fill in BDE's for command */
- iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
-
- iocbq->context2 = pcmd;
+ iocbq->cmd_dmabuf = pcmd;
iocbq->vport = vport;
- iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
- iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
+ iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->num_bdes = 0;
+
+ pwqe = &iocbq->wqe;
+ /* fill in BDE's for command */
+ pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
+ pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
+ pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
+ pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+
+ pwqe->send_frame.frame_len = frame_len;
+ pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
+ pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
+ pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
+ pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
+ pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
+ pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
+
+ pwqe->generic.wqe_com.word7 = 0;
+ pwqe->generic.wqe_com.word10 = 0;
+
+ bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
+ bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
+ bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
+ pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
+
+ iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
- /*
- * Setup rest of the iocb as though it were a WQE
- * Build the SEND_FRAME WQE
- */
- wqe = (union lpfc_wqe *)&iocbq->iocb;
-
- wqe->send_frame.frame_len = frame_len;
- wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
- wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
- wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
- wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
- wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
- wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
-
- iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
- iocbq->iocb.ulpLe = 1;
- iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
if (rc == IOCB_ERROR)
goto exit;
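The hunk above repacks the 24-byte FC frame header into the six fc_hdr_wd0..wd5 words of the SEND_FRAME WQE, converting each word from wire (big-endian) order to host order. A minimal userspace sketch of that packing, with ntohl() standing in for be32_to_cpu() (the helper name is illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    static void pack_fc_hdr_words(const uint8_t fc_hdr[24], uint32_t wd[6])
    {
            uint32_t be;
            int i;

            for (i = 0; i < 6; i++) {
                    /* memcpy avoids unaligned 32-bit loads from the frame */
                    memcpy(&be, fc_hdr + 4 * i, sizeof(be));
                    wd[i] = ntohl(be);      /* wire order -> host order */
            }
    }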
@@ -18094,6 +19331,7 @@ exit:
/**
* lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
* @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
*
* This function is called with no lock held. This function processes all
* the received buffers and gives it to upper layers when a received buffer
@@ -18119,7 +19357,10 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
vport = phba->pport;
/* Handle MDS Loopback frames */
- lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ else
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
@@ -18240,7 +19481,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2008 Error %d posting all rpi "
"headers\n", rc);
rc = -EIO;
@@ -18286,7 +19527,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2001 Unable to allocate memory for issuing "
"SLI_CONFIG_SPECIAL mailbox command\n");
return -ENOMEM;
@@ -18313,10 +19554,9 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2514 POST_RPI_HDR mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -18363,7 +19603,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
@@ -18406,7 +19646,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2002 Error Could not grow rpi "
"count\n");
} else {
@@ -18420,8 +19660,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
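The rpi allocation hunk replaces find_next_zero_bit(bmask, limit, 0) with the equivalent but cheaper find_first_zero_bit(bmask, limit). The allocator itself is a plain first-fit bitmap scan; a self-contained model of it (usage counts and the on-demand header-pool growth are omitted):

    #define ID_LIMIT 64                     /* stands in for next_rpi */

    static unsigned long long id_bmask;     /* bit n set => id n in use */

    static int alloc_id(void)
    {
            int id;

            for (id = 0; id < ID_LIMIT; id++)       /* find_first_zero_bit() */
                    if (!(id_bmask & (1ULL << id)))
                            break;
            if (id >= ID_LIMIT)
                    return -1;              /* LPFC_RPI_ALLOC_ERROR analogue */
            id_bmask |= 1ULL << id;         /* set_bit() */
            return id;
    }

    static void free_id(int id)
    {
            id_bmask &= ~(1ULL << id);      /* clear_bit() in the free path */
    }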
/**
- * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * __lpfc_sli4_free_rpi - Release an rpi for reuse.
* @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to free
*
* This routine is invoked to release an rpi to the pool of
* available rpis maintained by the driver.
@@ -18450,6 +19691,7 @@ __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
/**
* lpfc_sli4_free_rpi - Release an rpi for reuse.
* @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to free
*
* This routine is invoked to release an rpi to the pool of
* available rpis maintained by the driver.
@@ -18479,7 +19721,9 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
/**
 * lpfc_sli4_resume_rpi - Resume RPI
- * @phba: pointer to lpfc hba data structure.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ * @cmpl: completion call-back.
+ * @arg: data to load as MBox 'caller buffer information'
*
 * This routine is invoked to resume traffic on a paused remote port
 * identifier (RPI) via a RESUME_RPI mailbox command.
@@ -18497,21 +19741,36 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
if (!mboxq)
return -ENOMEM;
+ /* If cmpl assigned, then this nlp_get pairs with
+ * lpfc_mbx_cmpl_resume_rpi.
+ *
+ * Else cmpl is NULL, then this nlp_get pairs with
+ * lpfc_sli_def_mbox_cmpl.
+ */
+ if (!lpfc_nlp_get(ndlp)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2122 %s: Failed to get nlp ref\n",
+ __func__);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
/* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
mboxq->ctx_buf = arg;
- mboxq->ctx_ndlp = ndlp;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mboxq->ctx_ndlp = ndlp;
mboxq->vport = ndlp->vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2010 Resume RPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ lpfc_nlp_put(ndlp);
mempool_free(mboxq, phba->mbox_mem_pool);
return -EIO;
}
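The resume-RPI hunk pins the node with lpfc_nlp_get() before the mailbox is issued and drops that reference itself on every path where the completion handler will never run. A generic userspace model of this get/put pairing around asynchronous submission (all names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct node { atomic_int ref; };

    static struct node *node_get(struct node *n)
    {
            if (n)
                    atomic_fetch_add(&n->ref, 1);
            return n;
    }

    static void node_put(struct node *n)
    {
            if (n && atomic_fetch_sub(&n->ref, 1) == 1)
                    free(n);                /* last reference dropped */
    }

    static int submit_async(struct node *n, bool (*issue)(struct node *))
    {
            if (!node_get(n))               /* pairs with the completion's put */
                    return -1;
            if (!issue(n)) {
                    node_put(n);            /* completion won't run: undo here */
                    return -1;
            }
            return 0;                       /* completion handler drops the ref */
    }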
@@ -18543,7 +19802,7 @@ lpfc_sli4_init_vpi(struct lpfc_vport *vport)
mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
if (rc != MBX_SUCCESS) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2022 INIT VPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
@@ -18579,7 +19838,7 @@ lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if ((shdr_status || shdr_add_status) &&
(shdr_status != STATUS_FCF_IN_USE))
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2558 ADD_FCF_RECORD mailbox failed with "
"status x%x add_status x%x\n",
shdr_status, shdr_add_status);
@@ -18609,7 +19868,7 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2009 Failed to allocate mbox for ADD_FCF cmd\n");
return -ENOMEM;
}
@@ -18622,7 +19881,7 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
req_len, LPFC_SLI4_MBX_NEMBED);
if (alloc_len < req_len) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2523 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
@@ -18655,7 +19914,7 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2515 ADD_FCF_RECORD mailbox failed with "
"status 0x%x\n", rc);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -18728,7 +19987,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2000 Failed to allocate mbox for "
"READ_FCF cmd\n");
error = -ENOMEM;
@@ -18863,7 +20122,7 @@ fail_fcf_read:
/**
* lpfc_check_next_fcf_pri_level
- * phba pointer to the lpfc_hba struct for this port.
+ * @phba: pointer to the lpfc_hba struct for this port.
* This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
* rr_bmask based on their priority level. Starting from the highest priority
@@ -18987,8 +20246,8 @@ next_priority:
* have been tested so that we can detect when we should
* change the priority level.
*/
- next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
- LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
}
@@ -19028,6 +20287,7 @@ next_priority:
/**
* lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
* @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index into the FCF table to 'set'
*
* This routine sets the FCF record index in to the eligible bmask for
* roundrobin failover search. It checks to make sure that the index
@@ -19060,6 +20320,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
/**
* lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
* @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index into the FCF table to 'clear'
*
* This routine clears the FCF record index from the eligible bmask for
* roundrobin failover search. It checks to make sure that the index
@@ -19097,6 +20358,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
/**
* lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
* @phba: pointer to lpfc hba data structure.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
*
* This routine is the completion routine for the rediscover FCF table mailbox
* command. If the mailbox command returned failure, it will try to stop the
@@ -19171,7 +20433,7 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2745 Failed to allocate mbox for "
"requesting FCF rediscover.\n");
return -ENOMEM;
@@ -19246,7 +20508,7 @@ lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2600 failed to allocate mailbox memory\n");
return 0;
}
@@ -19269,6 +20531,7 @@ lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
*/
if (mb->un.varDmp.word_cnt == 0)
break;
+
if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
@@ -19305,7 +20568,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3105 failed to allocate mailbox memory\n");
return 0;
}
@@ -19326,11 +20589,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
}
lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return data_length;
}
@@ -19369,7 +20628,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
/* Check the region signature first */
if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2619 Config region 23 has bad signature\n");
goto out;
}
@@ -19377,7 +20636,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
/* Check the data structure version */
if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2620 Config region 23 has bad version\n");
goto out;
}
@@ -19433,6 +20692,91 @@ out:
}
/**
+ * lpfc_log_fw_write_cmpl - logs firmware write completion status
+ * @phba: pointer to lpfc hba data structure
+ * @shdr_status: wr_object rsp's status field
+ * @shdr_add_status: wr_object rsp's add_status field
+ * @shdr_add_status_2: wr_object rsp's add_status_2 field
+ * @shdr_change_status: wr_object rsp's change_status field
+ * @shdr_csf: wr_object rsp's csf bit
+ *
+ * This routine is intended to be called after a firmware write completes.
+ * It will log next action items to be performed by the user to instantiate
+ * the newly downloaded firmware or reason for incompatibility.
+ **/
+static void
+lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
+ u32 shdr_add_status, u32 shdr_add_status_2,
+ u32 shdr_change_status, u32 shdr_csf)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "4198 %s: flash_id x%02x, asic_rev x%02x, "
+ "status x%02x, add_status x%02x, add_status_2 x%02x, "
+ "change_status x%02x, csf %01x\n", __func__,
+ phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
+ shdr_status, shdr_add_status, shdr_add_status_2,
+ shdr_change_status, shdr_csf);
+
+ if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
+ switch (shdr_add_status_2) {
+ case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4199 Firmware write failed: "
+ "image incompatible with flash x%02x\n",
+ phba->sli4_hba.flash_id);
+ break;
+ case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4200 Firmware write failed: "
+ "image incompatible with ASIC "
+ "architecture x%02x\n",
+ phba->sli4_hba.asic_rev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "4210 Firmware write failed: "
+ "add_status_2 x%02x\n",
+ shdr_add_status_2);
+ break;
+ }
+ } else if (!shdr_status && !shdr_add_status) {
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+ if (shdr_csf)
+ shdr_change_status =
+ LPFC_CHANGE_STATUS_PCI_RESET;
+ }
+
+ switch (shdr_change_status) {
+ case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3198 Firmware write complete: System "
+ "reboot required to instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_FW_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3199 Firmware write complete: "
+ "Firmware reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3200 Firmware write complete: Port "
+ "Migration or PCI Reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PCI_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3201 Firmware write complete: PCI "
+ "Reset required to instantiate\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
* lpfc_wr_object - write an object to the firmware
* @phba: HBA structure that indicates port to create a queue on.
* @dmabuf_list: list of dmabufs to write to the port.
@@ -19458,7 +20802,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
+ uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
+ uint32_t shdr_change_status = 0, shdr_csf = 0;
uint32_t mbox_tmo;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
@@ -19512,56 +20857,36 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
&wr_object->header.cfg_shdr.response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&wr_object->header.cfg_shdr.response);
+ shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
+ &wr_object->header.cfg_shdr.response);
if (check_change_status) {
shdr_change_status = bf_get(lpfc_wr_object_change_status,
&wr_object->u.response);
-
- if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
- shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
- shdr_csf = bf_get(lpfc_wr_object_csf,
- &wr_object->u.response);
- if (shdr_csf)
- shdr_change_status =
- LPFC_CHANGE_STATUS_PCI_RESET;
- }
-
- switch (shdr_change_status) {
- case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3198 Firmware write complete: System "
- "reboot required to instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_FW_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3199 Firmware write complete: Firmware"
- " reset required to instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3200 Firmware write complete: Port "
- "Migration or PCI Reset required to "
- "instantiate\n");
- break;
- case (LPFC_CHANGE_STATUS_PCI_RESET):
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3201 Firmware write complete: PCI "
- "Reset required to instantiate\n");
- break;
- default:
- break;
- }
+ shdr_csf = bf_get(lpfc_wr_object_csf,
+ &wr_object->u.response);
}
- if (rc != MBX_TIMEOUT)
+
+ if (!phba->sli4_hba.intr_enable)
mempool_free(mbox, phba->mbox_mem_pool);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ else if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3025 Write Object mailbox failed with "
- "status x%x add_status x%x, mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
+ "status x%x add_status x%x, add_status_2 x%x, "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, shdr_add_status_2,
+ rc);
rc = -ENXIO;
*offset = shdr_add_status;
- } else
+ } else {
*offset += wr_object->u.response.actual_write_length;
+ }
+
+ if (rc || check_change_status)
+ lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
+ shdr_add_status_2, shdr_change_status,
+ shdr_csf);
return rc;
}
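The mailbox-free rework in lpfc_wr_object encodes an ownership rule: a polled mailbox (interrupts disabled) always belongs to the caller, even after MBX_TIMEOUT, while an interrupt-driven mailbox that timed out will be freed later by the completion path. Restated as a helper in the driver's own terms (the helper itself is hypothetical):

    static void mbox_release(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, int rc)
    {
            if (!phba->sli4_hba.intr_enable)
                    /* issued via MBX_POLL: caller always owns the mailbox */
                    mempool_free(mbox, phba->mbox_mem_pool);
            else if (rc != MBX_TIMEOUT)
                    /* on timeout the cmpl handler frees it; freeing here
                     * would be a double free
                     */
                    mempool_free(mbox, phba->mbox_mem_pool);
    }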
@@ -19579,10 +20904,8 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *act_mbx_ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LIST_HEAD(mbox_cmd_list);
uint8_t restart_loop;
@@ -19596,8 +20919,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand != MBX_REG_VPI))
continue;
- list_del(&mb->list);
- list_add_tail(&mb->list, &mbox_cmd_list);
+ list_move_tail(&mb->list, &mbox_cmd_list);
}
/* Clean up active mailbox command with the vport */
mb = phba->sli.mbox_active;
@@ -19607,8 +20929,12 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
- /* Put reference count for delayed processing */
+
+ /* This reference is local to this routine. The
+ * reference is removed at routine exit.
+ */
act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
}
@@ -19636,9 +20962,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
spin_unlock_irq(&phba->hbalock);
- spin_lock(shost->host_lock);
+ spin_lock(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&ndlp->lock);
spin_lock_irq(&phba->hbalock);
break;
}
@@ -19651,29 +20977,23 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mb->ctx_buf = NULL;
ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
- spin_lock(shost->host_lock);
+ spin_lock(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&ndlp->lock);
lpfc_nlp_put(ndlp);
}
}
- mempool_free(mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
}
/* Release the ndlp with the cleaned-up active mailbox command */
if (act_mbx_ndlp) {
- spin_lock(shost->host_lock);
+ spin_lock(&act_mbx_ndlp->lock);
act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&act_mbx_ndlp->lock);
lpfc_nlp_put(act_mbx_ndlp);
}
}
@@ -19697,10 +21017,9 @@ lpfc_drain_txq(struct lpfc_hba *phba)
struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
- struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
uint32_t txq_cnt = 0;
struct lpfc_queue *wq;
+ int ret = 0;
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
@@ -19734,48 +21053,38 @@ lpfc_drain_txq(struct lpfc_hba *phba)
piocbq = lpfc_sli_ringtx_get(phba, pring);
if (!piocbq) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2823 txq empty and txq_cnt is %d\n ",
txq_cnt);
break;
}
- sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
- if (!sglq) {
- __lpfc_sli_ringtx_put(phba, pring, piocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
- break;
- }
txq_cnt--;
- /* The xri and iocb resources secured,
- * attempt to issue request
- */
- piocbq->sli4_lxritag = sglq->sli4_lxritag;
- piocbq->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
- fail_msg = "to convert bpl to sgl";
- else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
- fail_msg = "to convert iocb to wqe";
- else if (lpfc_sli4_wq_put(wq, &wqe))
- fail_msg = " - Wq is full";
- else
- lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+ ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
+ if (ret && ret != IOCB_BUSY) {
+ fail_msg = " - Cannot send IO ";
+ piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ }
if (fail_msg) {
+ piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
/* Failed means we can't issue and need to cancel */
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2822 IOCB failed %s iotag 0x%x "
- "xri 0x%x\n",
- fail_msg,
- piocbq->iotag, piocbq->sli4_xritag);
+ "xri 0x%x %d flg x%x\n",
+ fail_msg, piocbq->iotag,
+ piocbq->sli4_xritag, ret,
+ piocbq->cmd_flag);
list_add_tail(&piocbq->list, &completions);
+ fail_msg = NULL;
}
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ if (txq_cnt == 0 || ret == IOCB_BUSY)
+ break;
}
-
/* Cancel all the IOCBs that cannot be issued */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ IOERR_SLI_ABORTED);
return txq_cnt;
}
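The reworked lpfc_drain_txq no longer grabs an sglq and hand-converts the iocb to a WQE; __lpfc_sli_issue_iocb now does all of that internally, and the loop stops early on IOCB_BUSY so the remaining entries stay queued for the next pass. The control flow, reduced to a standalone sketch (names hypothetical):

    struct req;

    enum { SUBMIT_OK, SUBMIT_BUSY, SUBMIT_ERROR };

    static int drain_queue(struct req *(*pop)(void),
                           int (*submit)(struct req *),
                           void (*cancel)(struct req *))
    {
            struct req *r;

            while ((r = pop()) != NULL) {
                    int rc = submit(r);

                    if (rc == SUBMIT_BUSY)
                            /* ring full: stop draining; a real driver
                             * would also requeue r for the next pass */
                            return 1;
                    if (rc == SUBMIT_ERROR)
                            cancel(r);      /* complete with error status */
            }
            return 0;
    }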
@@ -19783,7 +21092,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
/**
* lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
* @phba: Pointer to HBA context object.
- * @pwqe: Pointer to command WQE.
+ * @pwqeq: Pointer to command WQE.
* @sglq: Pointer to the scatter gather queue object.
*
* This routine converts the bpl or bde that is in the WQE
@@ -19823,14 +21132,14 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
if (cmd == CMD_XMIT_BLS_RSP64_WQE)
return sglq->sli4_xritag;
- numBdes = pwqeq->rsvd2;
+ numBdes = pwqeq->num_bdes;
if (numBdes) {
/* The addrHigh and addrLow fields within the WQE
* have not been byteswapped yet so there is no
* need to swap them back.
*/
- if (pwqeq->context3)
- dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
+ if (pwqeq->bpl_dmabuf)
+ dmabuf = pwqeq->bpl_dmabuf;
else
return xritag;
@@ -19908,7 +21217,7 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
/**
* lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
* @phba: Pointer to HBA context object.
- * @ring_number: Base sli ring number
+ * @qp: Pointer to HDW queue.
* @pwqe: Pointer to command WQE.
**/
int
@@ -19916,7 +21225,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe)
{
union lpfc_wqe128 *wqe = &pwqe->wqe;
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_queue *wq;
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
@@ -19924,7 +21233,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
uint32_t ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
- if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+ if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
pring = phba->sli4_hba.nvmels_wq->pring;
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19955,7 +21264,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & LPFC_IO_NVME) {
+ if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
@@ -19977,12 +21286,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVMET requests */
- if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+ if (pwqe->cmd_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
- ctxp = pwqe->context2;
+ ctxp = pwqe->context_un.axchg;
sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
pwqe->sli4_lxritag = sglq->sli4_lxritag;
@@ -20008,6 +21317,86 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
return WQE_ERROR;
}
+/**
+ * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @cmpl: completion function.
+ *
+ * Fill the appropriate fields for the abort WQE and call
+ * internal routine lpfc_sli4_issue_wqe to send the WQE
+ * This function is called with hbalock held and no ring_lock held.
+ *
+ * RETURNS 0 - SUCCESS
+ **/
+
+int
+lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ void *cmpl)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_iocbq *abtsiocb = NULL;
+ union lpfc_wqe128 *abtswqe;
+ struct lpfc_io_buf *lpfc_cmd;
+ int retval = IOCB_ERROR;
+ u16 xritag = cmdiocb->sli4_xritag;
+
+ /*
+ * The SCSI command cannot be in the txq; it is in flight, since pCmd
+ * still points at the SCSI command to be aborted. There is no need to
+ * search the txcmplq. Just send an abort to the FW.
+ */
+
+ abtsiocb = __lpfc_sli_get_iocbq(phba);
+ if (!abtsiocb)
+ return WQE_NORESOURCE;
+
+ /* Indicate the IO is being aborted by the driver. */
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
+
+ abtswqe = &abtsiocb->wqe;
+ memset(abtswqe, 0, sizeof(*abtswqe));
+
+ if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
+ bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
+ abtswqe->abort_cmd.rsrvd5 = 0;
+ abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
+ bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
+ bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
+ bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
+ abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocb->cmd_flag |= LPFC_IO_FCP;
+ if (cmdiocb->cmd_flag & LPFC_IO_NVME)
+ abtsiocb->cmd_flag |= LPFC_IO_NVME;
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocb->cmd_flag |= LPFC_IO_FOF;
+ abtsiocb->vport = vport;
+ abtsiocb->cmd_cmpl = cmpl;
+
+ lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
+ retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "0359 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x retval x%x\n",
+ xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
+
+ if (retval) {
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ __lpfc_sli_release_iocbq(phba, abtsiocb);
+ }
+
+ return retval;
+}
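lpfc_sli4_issue_abort_iotag follows a mark-then-rollback protocol: the victim I/O is flagged LPFC_DRIVER_ABORTED before the ABTS WQE is issued, and the flag is cleared again if the issue fails, so the victim's completion logic never sees a stale abort marker. The protocol in isolation, as a small sketch:

    #include <stdbool.h>

    struct io { unsigned int flags; };
    #define IO_DRIVER_ABORTED 0x1

    static int issue_abort(struct io *victim, bool (*send_abts)(struct io *))
    {
            victim->flags |= IO_DRIVER_ABORTED;         /* mark before sending */
            if (!send_abts(victim)) {
                    victim->flags &= ~IO_DRIVER_ABORTED; /* roll back on failure */
                    return -1;
            }
            return 0;       /* completion observes the abort marker */
    }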
+
#ifdef LPFC_MXP_STAT
/**
* lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
@@ -20190,6 +21579,7 @@ void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
/**
* _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
* @phba: pointer to lpfc hba data structure
+ * @qp: pointer to HDW queue
* @pbl_pool: specified public free XRI pool
* @pvt_pool: specified private free XRI pool
* @count: number of XRIs to move
@@ -20319,7 +21709,7 @@ void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
/**
* lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
* @phba: pointer to lpfc hba data structure.
- * @qp: belong to which HWQ.
+ * @hwqid: belong to which HWQ.
*
* This routine get a batch of XRIs from pbl_pool if pvt_pool is less than
* low watermark.
@@ -20362,8 +21752,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
/* MUST zero fields if buffer is reused by another protocol */
lpfc_ncmd->nvmeCmd = NULL;
- lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support &&
!list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
@@ -20441,6 +21830,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
/**
* lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
* @phba: pointer to lpfc hba data structure.
+ * @qp: pointer to HDW queue
* @pvt_pool: pointer to private pool data structure.
* @ndlp: pointer to lpfc nodelist data structure.
*
@@ -20546,8 +21936,26 @@ lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[hwqid];
lpfc_ncmd = NULL;
+ if (!qp) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5556 NULL qp for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5557 NULL multixri for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
pvt_pool = &multixri_pool->pvt_pool;
+ if (!pvt_pool) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
multixri_pool->io_req_count++;
/* If pvt_pool is empty, move some XRIs from public to private pool */
@@ -20623,6 +22031,12 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[hwqid];
lpfc_cmd = NULL;
+ if (!qp) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5555 NULL qp for hwqid x%x\n", hwqid);
+ return lpfc_cmd;
+ }
if (phba->cfg_xri_rebalancing)
lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
@@ -20652,6 +22066,119 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
}
/**
+ * lpfc_read_object - Retrieve object data from HBA
+ * @phba: The HBA for which this call is being executed.
+ * @rdobject: Pathname of object data we want to read.
+ * @datap: Pointer to where data will be copied to.
+ * @datasz: size of data area
+ *
+ * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
+ * The data will be truncated if datasz is not large enough.
+ * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
+ * Returns the actual bytes read from the object.
+ */
+int
+lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
+ uint32_t datasz)
+{
+ struct lpfc_mbx_read_object *read_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, eof, j, byte_cnt = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *pcmd;
+ u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
+
+ /* sanity check on queue memory */
+ if (!datap)
+ return -ENODEV;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_read_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_READ_OBJECT,
+ length, LPFC_SLI4_MBX_EMBED);
+ read_object = &mbox->u.mqe.un.read_object;
+ shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
+
+ bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
+ bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
+ read_object->u.request.rd_object_offset = 0;
+ read_object->u.request.rd_object_cnt = 1;
+
+ memset((void *)read_object->u.request.rd_object_name, 0,
+ LPFC_OBJ_NAME_SZ);
+ scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
+ for (j = 0; j < strlen(rdobject); j++)
+ read_object->u.request.rd_object_name[j] =
+ cpu_to_le32(rd_object_name[j]);
+
+ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
+ if (pcmd)
+ pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+ if (!pcmd || !pcmd->virt) {
+ kfree(pcmd);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+ memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
+ read_object->u.request.rd_object_hbuf[0].pa_lo =
+ putPaddrLow(pcmd->phys);
+ read_object->u.request.rd_object_hbuf[0].pa_hi =
+ putPaddrHigh(pcmd->phys);
+ read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->ctx_ndlp = NULL;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if (shdr_status == STATUS_FAILED &&
+ shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+ "4674 No port cfg file in FW.\n");
+ byte_cnt = -ENOENT;
+ } else if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+ "2625 READ_OBJECT mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ byte_cnt = -ENXIO;
+ } else {
+ /* Success */
+ length = read_object->u.response.rd_object_actual_rlen;
+ eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
+ "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
+ length, datasz, eof);
+
+ /* Detect the port config file exists but is empty */
+ if (!length && eof) {
+ byte_cnt = 0;
+ goto exit;
+ }
+
+ byte_cnt = length;
+ lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
+ }
+
+ exit:
+ /* This is an embedded SLI4 mailbox with an external buffer allocated.
+ * Free the pcmd and then cleanup with the correct routine.
+ */
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(pcmd);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return byte_cnt;
+}
+
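A hypothetical caller of lpfc_read_object, to show the return-value convention (the object path and buffer size are illustrative; the routine caps reads at LPFC_BPL_SIZE):

    static int read_cfg_object(struct lpfc_hba *phba)
    {
            u32 buf[LPFC_BPL_SIZE / sizeof(u32)] = {0};
            int n;

            n = lpfc_read_object(phba, "/driver/config.obj", buf, sizeof(buf));
            if (n == -ENOENT)
                    return 0;       /* object not present in FW: treat as empty */
            return n;               /* bytes read, or -ENXIO on mailbox error */
    }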
+/**
* lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
* @phba: The HBA for which this call is being executed.
* @lpfc_buf: IO buf structure to append the SGL chunk
@@ -20839,7 +22366,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
return NULL;
}
- tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+ tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
GFP_ATOMIC,
&tmp->fcp_cmd_rsp_dma_handle);
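Switching this allocation to dma_pool_zalloc() guarantees the FCP command/response buffer starts zeroed each time it is handed out, so bytes from a previous I/O cannot leak into a reused buffer. It is shorthand for the alloc-plus-memset pattern (names in the comparison are illustrative):

    buf = dma_pool_zalloc(pool, GFP_ATOMIC, &dma_handle);
    /* versus: buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma_handle);
     *         if (buf) memset(buf, 0, pool_block_size);
     */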
@@ -20941,3 +22468,180 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
+
+/**
+ * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
+ * @phba: phba object
+ * @job: job entry of the command to be posted.
+ *
+ * Fill the common fields of the wqe for each of the command.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
+{
+ u8 cmnd;
+ u32 *pcmd;
+ u32 if_type = 0;
+ u32 fip, abort_tag;
+ struct lpfc_nodelist *ndlp = NULL;
+ union lpfc_wqe128 *wqe = &job->wqe;
+ u8 command_type = ELS_COMMAND_NON_FIP;
+
+ fip = phba->hba_flag & HBA_FIP_SUPPORT;
+ /* The fcp commands will set command type */
+ if (job->cmd_flag & LPFC_IO_FCP)
+ command_type = FCP_COMMAND;
+ else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
+ abort_tag = job->iotag;
+ cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
+
+ switch (cmnd) {
+ case CMD_ELS_REQUEST64_WQE:
+ ndlp = job->ndlp;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ pcmd = (u32 *)job->cmd_dmabuf->virt;
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RDF ||
+ *pcmd == ELS_CMD_EDC ||
+ *pcmd == ELS_CMD_RSCN_XMT ||
+ *pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_QFPA ||
+ *pcmd == ELS_CMD_UVEM ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ job->vport->fc_myDID);
+
+ if ((*pcmd == ELS_CMD_FLOGI) &&
+ !(phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP))
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ } else if (pcmd) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
+ }
+
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ break;
+ case CMD_XMIT_ELS_RSP64_WQE:
+ ndlp = job->ndlp;
+
+ /* word4 */
+ wqe->xmit_els_rsp.word4 = 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ if (job->vport->fc_flag & FC_PT2PT) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ if (job->vport->fc_myDID == Fabric_DID) {
+ bf_set(wqe_els_did,
+ &wqe->xmit_els_rsp.wqe_dest, 0);
+ }
+ }
+ }
+
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ }
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_GEN_REQUEST64_WQE:
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_SEQUENCE64_WQE:
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+
+ wqe->xmit_sequence.rsvd3 = 0;
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_BLS_RSP64_WQE:
+ bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+ bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+ bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ /* Overwrite the pre-set command type with OTHER_COMMAND */
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_FCP_ICMND64_WQE: /* task mgmt commands */
+ case CMD_ABORT_XRI_WQE: /* abort iotag */
+ case CMD_SEND_FRAME: /* mds loopback */
+ /* cases already formatted for sli4 wqe - no chgs necessary */
+ return;
+ default:
+ dump_stack();
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6207 Invalid command 0x%x\n",
+ cmnd);
+ break;
+ }
+
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
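lpfc_sli_prep_wqe dispatches on the opcode already present in the WQE, so callers must set wqe_cmnd (and, for the ELS opcodes, job->ndlp and job->cmd_dmabuf) before invoking it. A hypothetical caller ordering, assumed from the dispatch above:

    bf_set(wqe_cmnd, &job->wqe.els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
    job->ndlp = ndlp;               /* ELS cases dereference job->ndlp */
    job->cmd_dmabuf = pcmd;         /* ELS_REQUEST64 peeks at the payload */
    lpfc_sli_prep_wqe(phba, job);   /* fills common tags, type and CQ id */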
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7bcf922a8be2..cd33dfec758c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,6 +35,18 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
+/* Enumeration to describe the thread lock context. */
+enum lpfc_mbox_ctx {
+ MBOX_THD_UNLOCKED,
+ MBOX_THD_LOCKED
+};
+
+union lpfc_vmid_tag {
+ uint32_t app_id;
+ uint8_t cs_ctl_vmid;
+ struct lpfc_vmid_context *vmid_context; /* UVEM context information */
+};
+
struct lpfc_cq_event {
struct list_head list;
uint16_t hdwq;
@@ -63,16 +75,25 @@ struct lpfc_iocbq {
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
- struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint64_t isr_timestamp;
union lpfc_wqe128 wqe; /* SLI-4 */
IOCB_t iocb; /* SLI-3 */
+ struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
+
+ u32 unsol_rcv_len; /* Receive len in unsol path */
- uint8_t rsvd2;
- uint8_t priority; /* OAS priority */
- uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint32_t iocb_flag;
+ /* Pack the u8's together so the group is a multiple of 4 bytes. */
+ u8 num_bdes; /* Number of BDEs */
+ u8 abort_bls; /* ABTS by initiator or responder */
+ u8 abort_rctl; /* ACC or RJT flag */
+ u8 priority; /* OAS priority */
+ u8 retry; /* retry counter for IOCB cmd - if needed */
+ u8 rsvd1; /* Pad for u32 */
+ u8 rsvd2; /* Pad for u32 */
+ u8 rsvd3; /* Pad for u32 */
+
+ u32 cmd_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -100,28 +121,34 @@ struct lpfc_iocbq {
#define LPFC_IO_NVME 0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */
#define LPFC_IO_NVMET 0x800000 /* NVMET command */
+#define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */
+#define LPFC_IO_CMF 0x4000000 /* CMF command */
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
- void *context1; /* caller context information */
- void *context2; /* caller context information */
- void *context3; /* caller context information */
+ struct lpfc_dmabuf *cmd_dmabuf;
+ struct lpfc_dmabuf *rsp_dmabuf;
+ struct lpfc_dmabuf *bpl_dmabuf;
+ uint32_t event_tag; /* LA Event tag */
union {
wait_queue_head_t *wait_queue;
- struct lpfc_iocbq *rsp_iocb;
struct lpfcMboxq *mbox;
- struct lpfc_nodelist *ndlp;
struct lpfc_node_rrq *rrq;
+ struct nvmefc_ls_req *nvme_lsreq;
+ struct lpfc_async_xchg_ctx *axchg;
+ struct bsg_job_data *dd_data;
} context_un;
- void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *);
+ struct lpfc_io_buf *io_buf;
+ struct lpfc_iocbq *rsp_iocb;
+ struct lpfc_nodelist *ndlp;
+ union lpfc_vmid_tag vmid_tag;
+ void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
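The struct changes above collapse the separate iocb_cmpl (SLI-3, response iocb) and wqe_cmpl (SLI-4, WCQE pointer) callbacks into a single cmd_cmpl signature; the SLI-4 completion data now travels inside the iocbq itself via wcqe_cmpl. A compact model of the unified shape (types are stand-ins, and in some lpfc paths the rsp argument may simply alias cmd):

    struct hba;
    struct cqe { unsigned int status; };    /* stand-in for lpfc_wcqe_complete */

    struct iocbq {
            struct cqe wcqe_cmpl;           /* SLI-4 completion data parked here */
            void (*cmd_cmpl)(struct hba *, struct iocbq *cmd, struct iocbq *rsp);
    };

    /* SLI-4 path: stash the WCQE in the command, then fire the one callback */
    static void complete_wqe(struct hba *h, struct iocbq *cmd,
                             const struct cqe *wcqe)
    {
            cmd->wcqe_cmpl = *wcqe;
            if (cmd->cmd_cmpl)
                    cmd->cmd_cmpl(h, cmd, cmd);
    }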
@@ -130,6 +157,9 @@ struct lpfc_iocbq {
#define IOCB_BUSY 1
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
+#define IOCB_ABORTED 4
+#define IOCB_ABORTING 5
+#define IOCB_NORESOURCE 6
#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */
@@ -138,6 +168,8 @@ struct lpfc_iocbq {
#define WQE_ERROR 2
#define WQE_TIMEDOUT 3
#define WQE_ABORTED 4
+#define WQE_ABORTING 5
+#define WQE_NORESOURCE 6
#define LPFC_MBX_WAKE 1
#define LPFC_MBX_IMED_UNREG 2
@@ -323,7 +355,6 @@ struct lpfc_sli {
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
@@ -446,6 +477,7 @@ struct lpfc_io_buf {
uint64_t ts_last_cmd;
uint64_t ts_cmd_wqput;
uint64_t ts_isr_cmpl;
- uint64_t ts_data_nvme;
+ uint64_t ts_data_io;
#endif
+ uint64_t rx_cmd_start;
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d963ca871383..cbb1aa1cf025 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,6 +20,9 @@
* included with this package. *
*******************************************************************/
+#include <linux/irq_poll.h>
+#include <linux/cpufreq.h>
+
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
#endif
@@ -135,6 +138,16 @@ struct lpfc_rqb {
struct rqb_dmabuf *);
};
+enum lpfc_poll_mode {
+ LPFC_QUEUE_WORK,
+ LPFC_IRQ_POLL
+};
+
+struct lpfc_idle_stat {
+ u64 prev_idle;
+ u64 prev_wall;
+};
+
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
@@ -265,6 +278,10 @@ struct lpfc_queue {
struct lpfc_queue *assoc_qp;
struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
+
+#define LPFC_IRQ_POLL_WEIGHT 256
+ struct irq_poll iop;
+ enum lpfc_poll_mode poll_mode;
};
struct lpfc_sli4_link {
@@ -472,7 +489,7 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
- uint16_t irq;
+ int irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
@@ -532,6 +549,15 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint32_t mib_size;
+ uint16_t mi_ver;
+#define LPFC_MIB1_SUPPORT 1
+#define LPFC_MIB2_SUPPORT 2
+#define LPFC_MIB3_SUPPORT 3
+ uint16_t mi_value;
+#define LPFC_DFLT_MIB_VAL 2
+ uint8_t mib_bde_cnt;
+ uint8_t cmf;
uint8_t cqv;
uint8_t mqv;
uint8_t wqv;
@@ -585,6 +611,8 @@ struct lpfc_vector_map_info {
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
+#define LPFC_IRQ_EMPTY 0xffffffff
+
/* Multi-XRI pool */
#define XRI_BATCH 8
@@ -697,13 +725,6 @@ struct lpfc_sli4_hdw_queue {
struct lpfc_lock_stat lock_conflict;
#endif
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-#define LPFC_CHECK_CPU_CNT 128
- uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
-#endif
-
/* Per HDWQ pool resources */
struct list_head sgl_list;
struct list_head cmd_rsp_buf_list;
@@ -740,6 +761,15 @@ struct lpfc_sli4_hdw_queue {
#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
#endif
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+struct lpfc_hdwq_stat {
+ u32 hdwq_no;
+ u32 rcv_io;
+ u32 xmt_io;
+ u32 cmpl_io;
+};
+#endif
+
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
@@ -901,8 +931,9 @@ struct lpfc_sli4_hba {
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
struct list_head sp_asynce_work_queue;
- struct list_head sp_fcp_xri_aborted_work_queue;
+ spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */
struct list_head sp_els_xri_aborted_work_queue;
+ spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -918,9 +949,13 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
- struct cpumask numa_mask;
+ struct cpumask irq_aff_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat __percpu *c_stat;
+#endif
+ struct lpfc_idle_stat *idle_stat;
uint32_t conf_trunk;
#define lpfc_conf_trunk_port0_WORD conf_trunk
#define lpfc_conf_trunk_port0_SHIFT 0
@@ -946,6 +981,11 @@ struct lpfc_sli4_hba {
#define lpfc_conf_trunk_port3_nd_WORD conf_trunk
#define lpfc_conf_trunk_port3_nd_SHIFT 7
#define lpfc_conf_trunk_port3_nd_MASK 0x1
+ uint8_t flash_id;
+ uint8_t asic_rev;
+ uint16_t fawwpn_flag; /* FA-WWPN support state */
+#define LPFC_FAWWPN_CONFIG 0x1 /* FA-PWWN is configured */
+#define LPFC_FAWWPN_FABRIC 0x2 /* FA-PWWN success with Fabric */
};
enum lpfc_sge_type {
@@ -1080,8 +1120,9 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
+void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_ncmd);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9563c49f36ab..192d5630a44d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.6.0.3"
+#define LPFC_DRIVER_VERSION "14.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
new file mode 100644
index 000000000000..ed1d7f7b88a3
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -0,0 +1,286 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/dma-direction.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Return the VMID entry associated with the UUID
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
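The hash folds each case-normalized byte into the accumulator with a shift in both directions and a multiply by 19, then masks down to the table size. A standalone copy for experimentation; the shift and mask values here are assumptions, not the driver's real LPFC_VMID_HASH_* constants:

    #include <stdio.h>
    #include <string.h>

    #define DEMO_HASH_SHIFT 6       /* assumed stand-in for LPFC_VMID_HASH_SHIFT */
    #define DEMO_HASH_MASK  0xff    /* assumed stand-in for LPFC_VMID_HASH_MASK */

    static int demo_hash(const char *vmid, int len)
    {
            int c, hash = 0;

            while (len--) {
                    c = *vmid++;
                    if (c >= 'A' && c <= 'Z')
                            c += 'a' - 'A';         /* case-fold */
                    hash = (hash + (c << DEMO_HASH_SHIFT) +
                            (c >> DEMO_HASH_SHIFT)) * 19;
            }
            return hash & DEMO_HASH_MASK;
    }

    int main(void)
    {
            const char *uuid = "4f6f-demo-uuid";

            printf("bucket = %d\n", demo_hash(uuid, (int)strlen(uuid)));
            return 0;
    }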
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @iodir: io direction
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+ enum dma_data_direction iodir,
+ struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else if (vport->phba->cfg_vmid_app_header)
+ tag->app_id = vmp->un.app_id;
+
+ if (iodir == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else if (iodir == DMA_FROM_DEVICE)
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @iodir: I/O direction
+ * @tag: VMID tag
+ * Returns 0 on success, or a negative error code on failure
+ */
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc = -EPERM, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) &&
+ !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+ (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* check if the UUID has already been mapped to a VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if it's already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* else a register or dereg request has already been sent, */
+ /* so the VMID tag will not be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. Drop the read */
+ /* lock before proceeding further */
+ read_unlock(&vport->vmid_lock);
+ /* then start the process to obtain a VMID, according to */
+ /* the type of VMID indicated */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* recheck: the entry may have been added by another context */
+ /* while the read lock was released */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per-CPU variable for holding the last */
+ /* access timestamp, if not already allocated */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC);
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete the transaction with the switch */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else if (vport->phba->cfg_vmid_app_header)
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
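
lpfc_vmid_get_appid() above follows a classic reader/writer lock-upgrade idiom: look up under the read lock; if the entry is missing, drop the read lock, take the write lock, and repeat the lookup before allocating, because another context may have inserted the entry in the window between the two locks. A condensed sketch of that control flow, with hypothetical lookup()/insert_new() helpers standing in for the driver's hash-table operations:

    #include <linux/spinlock.h>    /* rwlock_t, read_lock(), write_lock() */
    #include <linux/types.h>

    struct entry;                          /* opaque for this sketch */
    struct table { rwlock_t lock; /* ... hash table ... */ };

    /* assumed helpers, standing in for the hash-table operations */
    struct entry *lookup(struct table *t, const u8 *key);
    struct entry *insert_new(struct table *t, const u8 *key);

    static struct entry *get_or_create(struct table *t, const u8 *key)
    {
            struct entry *e;

            read_lock(&t->lock);
            e = lookup(t, key);
            read_unlock(&t->lock);
            if (e)
                    return e;

            write_lock(&t->lock);
            /* recheck: another context may have added the entry
             * while no lock was held
             */
            e = lookup(t, key);
            if (!e)
                    e = insert_new(t, key);    /* exclusive here */
            write_unlock(&t->lock);
            return e;
    }
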
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index b76646357980..4d171f5c213f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -135,47 +135,44 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
/*
- * Grab buffer pointer and clear context1 so we can use
- * lpfc_sli_issue_box_wait
+ * Wait for the read_sparams mailbox to complete. Driver needs
+ * this per vport to start the FDISC. If the mailbox fails,
+ * just cleanup and return an error unless the failure is a
+ * mailbox timeout. For MBX_TIMEOUT, allow the default
+ * mbox completion handler to take care of the cleanup. This
+ * is safe as the mailbox command isn't one that triggers
+ * another mailbox.
*/
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- pmb->ctx_buf = NULL;
-
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (signal_pending(current)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1830 Signal aborted mbxCmd x%x\n",
mb->mbxCommand);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb,
+ MBOX_THD_UNLOCKED);
return -EINTR;
} else {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
mb->mbxCommand, mb->mbxStatus, rc);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb,
+ MBOX_THD_UNLOCKED);
return -EIO;
}
}
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
-
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return 0;
}
@@ -284,11 +281,11 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
}
if (time_after(jiffies, wait_time_max))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
- "1835 Vport discovery quiesce failed:"
- " state x%x fc_flags x%x wait msecs x%x\n",
- vport->port_state, vport->fc_flag,
- jiffies_to_msecs(jiffies - start_time));
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "1835 Vport discovery quiesce failed:"
+ " state x%x fc_flags x%x wait msecs x%x\n",
+ vport->port_state, vport->fc_flag,
+ jiffies_to_msecs(jiffies - start_time));
}
int
@@ -305,7 +302,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
int status;
if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1808 Create VPORT failed: "
"NPIV is not enabled: SLImode:%d\n",
phba->sli_rev);
@@ -315,7 +312,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* NPIV is not supported if HBA has NVME Target enabled */
if (phba->nvmet_support) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3189 Create VPORT failed: "
"NPIV is not supported on NVME Target\n");
rc = VPORT_INVAL;
@@ -324,7 +321,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vpi = lpfc_alloc_vpi(phba);
if (vpi == 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1809 Create VPORT failed: "
"Max VPORTs (%d) exceeded\n",
phba->max_vpi);
@@ -334,7 +331,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1810 Create VPORT failed: Cannot get "
"instance number\n");
lpfc_free_vpi(phba, vpi);
@@ -344,7 +341,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport = lpfc_create_port(phba, instance, &fc_vport->dev);
if (!vport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1811 Create VPORT failed: vpi x%x\n", vpi);
lpfc_free_vpi(phba, vpi);
rc = VPORT_NORESOURCES;
@@ -356,11 +353,11 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
if ((status = lpfc_vport_sparm(phba, vport))) {
if (status == -EINTR) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1831 Create VPORT Interrupted.\n");
rc = VPORT_ERROR;
} else {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1813 Create VPORT failed. "
"Cannot get sparam\n");
rc = VPORT_NORESOURCES;
@@ -378,7 +375,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
!lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1821 Create VPORT failed. "
"Invalid WWN format\n");
lpfc_free_vpi(phba, vpi);
@@ -388,7 +385,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (!lpfc_unique_wwpn(phba, vport)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1823 Create VPORT failed. "
"Duplicate WWN on HBA\n");
lpfc_free_vpi(phba, vpi);
@@ -426,7 +423,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
(pport->fc_flag & FC_VFI_REGISTERED)) {
rc = lpfc_sli4_init_vpi(vport);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1838 Failed to INIT_VPI on vpi %d "
"status %d\n", vpi, rc);
rc = VPORT_NORESOURCES;
@@ -462,14 +459,14 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0262 No NPIV Fabric support\n");
}
} else {
@@ -479,30 +476,74 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
- "1825 Vport Created.\n");
+ "1825 Vport Created.\n");
lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
return rc;
}
static int
+lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int rc;
+ struct lpfc_hba *phba = vport->phba;
+
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ !ndlp->logo_waitq) {
+ ndlp->logo_waitq = &waitq;
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ rc = lpfc_issue_els_npiv_logo(vport, ndlp);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
+
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ goto logo_cmpl;
+ /* LOGO wait failed. Correct status. */
+ rc = -EINTR;
+ } else {
+ rc = -EIO;
+ }
+
+ /* Error - clean up node flags. */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+
+ logo_cmpl:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1824 Issue LOGO completes with status %d\n",
+ rc);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->logo_waitq = NULL;
+ spin_unlock_irq(&ndlp->lock);
+ return rc;
+}
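
The helper above replaces the old schedule_timeout() polling loop with an on-stack wait queue: the issuing thread arms a flag, sleeps in wait_event_timeout(), and the LOGO completion path clears the flag and wakes the queue. The same pattern in isolation, with a hypothetical request context and send_request() standing in for the ELS machinery:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct req_ctx {
            bool waiting;                   /* cleared by the completion handler */
            wait_queue_head_t *waitq;       /* completion handler wakes this */
    };

    void send_request(struct req_ctx *ctx); /* assumed async submit */

    static int issue_and_wait(struct req_ctx *ctx, unsigned int timeout_ms)
    {
            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

            ctx->waiting = true;
            ctx->waitq = &waitq;
            send_request(ctx);

            /* sleeps until the condition is true or the timeout expires;
             * the completion path does:
             *         ctx->waiting = false;
             *         wake_up(ctx->waitq);
             */
            wait_event_timeout(waitq, !ctx->waiting,
                               msecs_to_jiffies(timeout_ms));

            ctx->waitq = NULL;
            return ctx->waiting ? -ETIMEDOUT : 0;
    }

Because the wait queue lives on the issuer's stack, the completion side must never touch it after the waiter returns, which is why lpfc_send_npiv_logo() clears ndlp->logo_waitq under the node lock before returning.
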
+
+static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
- long timeout;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Can't disable during an outstanding delete. */
+ if (vport->load_flag & FC_UNLOADING)
+ return 0;
+
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && phba->link_state >= LPFC_LINK_UP) {
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
- }
+ if (ndlp && phba->link_state >= LPFC_LINK_UP)
+ (void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
@@ -510,8 +551,6 @@ disable_vport(struct fc_vport *fc_vport)
* calling lpfc_cleanup_rpis(vport, 1)
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -568,14 +607,13 @@ enable_vport(struct fc_vport *fc_vport)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0264 No NPIV Fabric support\n");
}
} else {
@@ -597,19 +635,17 @@ lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
return enable_vport(fc_vport);
}
-
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_hba *phba = vport->phba;
- long timeout;
- bool ns_ndlp_referenced = false;
+ struct lpfc_hba *phba = vport->phba;
+ int rc;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1812 vport_delete failed: Cannot delete "
"physical host\n");
return VPORT_ERROR;
@@ -618,14 +654,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* If the vport is a static vport fail the deletion. */
if ((vport->vport_flag & STATIC_VPORT) &&
!(phba->pport->load_flag & FC_UNLOADING)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1837 vport_delete failed: Cannot delete "
"static vport.\n");
return VPORT_ERROR;
}
+
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
+
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
@@ -642,125 +680,41 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
vport->port_state < LPFC_VPORT_READY)
return -EAGAIN;
}
+
/*
- * This is a bit of a mess. We want to ensure the shost doesn't get
- * torn down until we're done with the embedded lpfc_vport structure.
- *
- * Beyond holding a reference for this function, we also need a
- * reference for outstanding I/O requests we schedule during delete
- * processing. But once we scsi_remove_host() we can no longer obtain
- * a reference through scsi_host_get().
- *
- * So we take two references here. We release one reference at the
- * bottom of the function -- after delinking the vport. And we
- * release the other at the completion of the unreg_vpi that get's
- * initiated after we've disposed of all other resources associated
- * with the port.
+ * Take early refcount for outstanding I/O requests we schedule during
+ * delete processing for unreg_vpi. Always keep this before
+ * scsi_remove_host() as we can no longer obtain a reference through
+ * scsi_host_get() after scsi_remove_host() as shost is set to SHOST_DEL.
*/
if (!scsi_host_get(shost))
return VPORT_INVAL;
- if (!scsi_host_get(shost)) {
- scsi_host_put(shost);
- return VPORT_INVAL;
- }
- lpfc_free_sysfs_attr(vport);
+ lpfc_free_sysfs_attr(vport);
lpfc_debugfs_terminate(vport);
- /*
- * The call to fc_remove_host might release the NameServer ndlp. Since
- * we might need to use the ndlp to send the DA_ID CT command,
- * increment the reference for the NameServer ndlp to prevent it from
- * being released.
- */
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_get(ndlp);
- ns_ndlp_referenced = true;
- }
-
- /* Remove FC host and then SCSI host with the vport */
+ /* Remove FC host to break driver binding. */
fc_remove_host(shost);
scsi_remove_host(shost);
- ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
-
- /* In case of driver unload, we shall not perform fabric logo as the
- * worker thread already stopped at this stage and, in this case, we
- * can safely skip the fabric logo.
- */
- if (phba->pport->load_flag & FC_UNLOADING) {
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- phba->link_state >= LPFC_LINK_UP) {
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
- goto skip_logo;
- else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto skip_logo;
- }
- /* Remove ndlp from vport npld list */
- lpfc_dequeue_node(vport, ndlp);
-
- /* Indicate free memory when release */
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- /* Kick off release ndlp when it can be safely done */
- lpfc_nlp_put(ndlp);
- }
+ /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
goto skip_logo;
- }
- /* Otherwise, we will perform fabric logo as needed */
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
- while (vport->ct_flags && timeout)
- timeout = schedule_timeout(timeout);
- else
+ /* Send DA_ID and wait for a completion. */
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+ if (rc) {
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
- "delete objects on fabric\n");
- }
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp) {
- /* Cannot find existing Fabric ndlp, allocate one */
- ndlp = lpfc_nlp_init(vport, Fabric_DID);
- if (!ndlp)
- goto skip_logo;
- /* Indicate free memory when release */
- NLP_SET_FREE_REQ(ndlp);
- } else {
- if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto skip_logo;
+ "delete objects on fabric, "
+ "rc %d\n", rc);
}
-
- /* Remove ndlp from vport list */
- lpfc_dequeue_node(vport, ndlp);
- spin_lock_irq(&phba->ndlp_lock);
- if (!NLP_CHK_FREE_REQ(ndlp))
- /* Indicate free memory when release */
- NLP_SET_FREE_REQ(ndlp);
- else {
- /* Skip this if ndlp is already in free mode */
- spin_unlock_irq(&phba->ndlp_lock);
- goto skip_logo;
- }
- spin_unlock_irq(&phba->ndlp_lock);
}
/*
@@ -768,16 +722,17 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* exist and there is no need for a ELS LOGO. Just cleanup
* the ndlp.
*/
- if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
- lpfc_nlp_put(ndlp);
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
goto skip_logo;
- }
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
+ /* Issue a Fabric LOGO to cleanup fabric resources. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto skip_logo;
+
+ rc = lpfc_send_npiv_logo(vport, ndlp);
+ if (rc)
+ goto skip_logo;
}
if (!(phba->pport->load_flag & FC_UNLOADING))
@@ -785,18 +740,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
skip_logo:
- /*
- * If the NameServer ndlp has been incremented to allow the DA_ID CT
- * command to be sent, decrement the ndlp now.
- */
- if (ns_ndlp_referenced) {
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- lpfc_nlp_put(ndlp);
- }
-
lpfc_cleanup(vport);
- lpfc_sli_host_down(vport);
+ /* Remove scsi host now. The nodes are cleaned up. */
+ lpfc_sli_host_down(vport);
lpfc_stop_vport_timers(vport);
if (!(phba->pport->load_flag & FC_UNLOADING)) {
@@ -809,8 +756,9 @@ skip_logo:
if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
lpfc_mbx_unreg_vpi(vport))
scsi_host_put(shost);
- } else
+ } else {
scsi_host_put(shost);
+ }
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
@@ -838,7 +786,8 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
if (port_iterator->load_flag & FC_UNLOADING)
continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(port_iterator, KERN_ERR,
+ LOG_TRACE_EVENT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
@@ -860,79 +809,3 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
kfree(vports);
}
-
-/**
- * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
- * @vport: Pointer to vport object.
- *
- * This function resets the statistical data for the vport. This function
- * is called with the host_lock held
- **/
-void
-lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->lat_data)
- memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
- sizeof(struct lpfc_scsicmd_bkt));
- }
-}
-
-
-/**
- * lpfc_alloc_bucket - Allocate data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * This function allocates data buffer required for all the FC
- * nodes of the vport to collect statistical data.
- **/
-void
-lpfc_alloc_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
-
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_ATOMIC);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "0287 lpfc_alloc_bucket failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
- }
-}
-
-/**
- * lpfc_free_bucket - Free data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * Th function frees statistical data buffer of all the FC
- * nodes of the vport.
- **/
-void
-lpfc_free_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
- }
-}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index f4b8528dd2e7..fa60c146c169 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -115,8 +115,4 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
-void lpfc_vport_reset_stat_data(struct lpfc_vport *);
-void lpfc_alloc_bucket(struct lpfc_vport *);
-void lpfc_free_bucket(struct lpfc_vport *);
-
#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 35d3e322d6d5..f75928f7773e 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -20,9 +20,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pgtable.h>
#include <asm/dbdma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/macio.h>
@@ -66,8 +66,7 @@ static irqreturn_t do_mac53c94_interrupt(int, void *);
static void cmd_done(struct fsc_state *, int result);
static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);
-
-static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int mac53c94_queue_lck(struct scsi_cmnd *cmd)
{
struct fsc_state *state;
@@ -83,7 +82,6 @@ static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cm
}
#endif
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
state = (struct fsc_state *) cmd->device->host->hostdata;
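
This hunk is part of a tree-wide SCSI midlayer conversion: queuecommand implementations no longer receive and stash a done() callback; completion paths call scsi_done(cmd) instead (see cmd_done() later in this diff). The shape of the change, with an illustrative driver name:

    /* before: midlayer handed in a completion callback to stash */
    static int mydrv_queue_lck_old(struct scsi_cmnd *cmd,
                                   void (*done)(struct scsi_cmnd *))
    {
            cmd->scsi_done = done; /* completion later does (*cmd->scsi_done)(cmd) */
            /* ... build and issue the command ... */
            return 0;
    }

    /* after: no callback parameter; completion calls scsi_done(cmd) */
    static int mydrv_queue_lck(struct scsi_cmnd *cmd)
    {
            /* ... build and issue the command ... */
            return 0;
    }
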
@@ -127,7 +125,6 @@ static void mac53c94_init(struct fsc_state *state)
{
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
- int x;
writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1);
writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */
@@ -136,7 +133,7 @@ static void mac53c94_init(struct fsc_state *state)
writeb(0, &regs->config3);
writeb(0, &regs->sync_period);
writeb(0, &regs->sync_offset);
- x = readb(&regs->interrupt);
+ (void)readb(&regs->interrupt);
writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
}
@@ -196,7 +193,8 @@ static void mac53c94_interrupt(int irq, void *dev_id)
struct fsc_state *state = (struct fsc_state *) dev_id;
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
- struct scsi_cmnd *cmd = state->current_req;
+ struct scsi_cmnd *const cmd = state->current_req;
+ struct mac53c94_cmd_priv *const mcmd = mac53c94_priv(cmd);
int nb, stat, seq, intr;
static int mac53c94_errors;
@@ -236,7 +234,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
++mac53c94_errors;
writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
}
- if (cmd == 0) {
+ if (!cmd) {
printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
return;
}
@@ -266,10 +264,10 @@ static void mac53c94_interrupt(int irq, void *dev_id)
/* set DMA controller going if any data to transfer */
if ((stat & (STAT_MSG|STAT_CD)) == 0
&& (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
- nb = cmd->SCp.this_residual;
+ nb = mcmd->this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
- cmd->SCp.this_residual -= nb;
+ mcmd->this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
@@ -296,13 +294,13 @@ static void mac53c94_interrupt(int irq, void *dev_id)
cmd_done(state, DID_ERROR << 16);
return;
}
- if (cmd->SCp.this_residual != 0
+ if (mcmd->this_residual != 0
&& (stat & (STAT_MSG|STAT_CD)) == 0) {
/* Set up the count regs to transfer more */
- nb = cmd->SCp.this_residual;
+ nb = mcmd->this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
- cmd->SCp.this_residual -= nb;
+ mcmd->this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
@@ -324,9 +322,8 @@ static void mac53c94_interrupt(int irq, void *dev_id)
cmd_done(state, DID_ERROR << 16);
return;
}
- cmd->SCp.Status = readb(&regs->fifo);
- cmd->SCp.Message = readb(&regs->fifo);
- cmd->result = CMD_ACCEPT_MSG;
+ mcmd->status = readb(&regs->fifo);
+ mcmd->message = readb(&regs->fifo);
writeb(CMD_ACCEPT_MSG, &regs->command);
state->phase = busfreeing;
break;
@@ -334,8 +331,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
if (intr != INTR_DISCONNECT) {
printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr);
}
- cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8)
- + cmd->SCp.Status);
+ cmd_done(state, (DID_OK << 16) + (mcmd->message << 8) + mcmd->status);
break;
default:
printk(KERN_DEBUG "don't know about phase %d\n", state->phase);
@@ -347,9 +343,9 @@ static void cmd_done(struct fsc_state *state, int result)
struct scsi_cmnd *cmd;
cmd = state->current_req;
- if (cmd != 0) {
+ if (cmd) {
cmd->result = result;
- (*cmd->scsi_done)(cmd);
+ scsi_done(cmd);
state->current_req = NULL;
}
state->phase = idle;
@@ -393,7 +389,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
dcmds[-1].command = cpu_to_le16(dma_cmd);
dcmds->command = cpu_to_le16(DBDMA_STOP);
- cmd->SCp.this_residual = total;
+ mac53c94_priv(cmd)->this_residual = total;
}
static struct scsi_host_template mac53c94_template = {
@@ -405,6 +401,7 @@ static struct scsi_host_template mac53c94_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.max_segment_size = 65535,
+ .cmd_size = sizeof(struct mac53c94_cmd_priv),
};
static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
@@ -468,12 +465,13 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
dma_cmd_space = kmalloc_array(host->sg_tablesize + 2,
sizeof(struct dbdma_cmd),
GFP_KERNEL);
- if (dma_cmd_space == 0) {
- printk(KERN_ERR "mac53c94: couldn't allocate dma "
- "command space for %pOF\n", node);
+ if (!dma_cmd_space) {
+ printk(KERN_ERR "mac53c94: couldn't allocate dma "
+ "command space for %pOF\n", node);
rc = -ENOMEM;
- goto out_free;
- }
+ goto out_free;
+ }
+
state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
* sizeof(struct dbdma_cmd));
diff --git a/drivers/scsi/mac53c94.h b/drivers/scsi/mac53c94.h
index 5df6e81f78a8..b4093027f9c3 100644
--- a/drivers/scsi/mac53c94.h
+++ b/drivers/scsi/mac53c94.h
@@ -212,4 +212,15 @@ struct mac53c94_regs {
#define CF4_TEST 0x02
#define CF4_BBTE 0x01
+struct mac53c94_cmd_priv {
+ int this_residual;
+ int status;
+ int message;
+};
+
+static inline struct mac53c94_cmd_priv *mac53c94_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#endif /* _MAC53C94_H */
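
This header change replaces the removed scsi_pointer fields (cmd->SCp.*) with driver-private per-command data: the driver declares a struct, reports its size through .cmd_size in the host template, and the midlayer allocates it immediately after each scsi_cmnd, where scsi_cmd_priv() finds it. The pattern in miniature, with illustrative names:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct mydrv_cmd_priv {
            int this_residual;    /* bytes left in the current transfer */
    };

    static inline struct mydrv_cmd_priv *mydrv_priv(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);    /* memory directly follows the scsi_cmnd */
    }

    static struct scsi_host_template mydrv_template = {
            .name     = "mydrv",
            .cmd_size = sizeof(struct mydrv_cmd_priv),
    };
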
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 1c78bc10c790..6d23ab5aee56 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -439,22 +439,10 @@ static struct platform_driver esp_mac_driver = {
.name = DRV_MODULE_NAME,
},
};
-
-static int __init mac_esp_init(void)
-{
- return platform_driver_register(&esp_mac_driver);
-}
-
-static void __exit mac_esp_exit(void)
-{
- platform_driver_unregister(&esp_mac_driver);
-}
+module_platform_driver(esp_mac_driver);
MODULE_DESCRIPTION("Mac ESP SCSI driver");
MODULE_AUTHOR("Finn Thain");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
-
-module_init(mac_esp_init);
-module_exit(mac_esp_exit);
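
module_platform_driver() generates exactly the boilerplate removed above. For reference, the macro expands, in effect, to:

    /* simplified expansion of module_platform_driver(esp_mac_driver) */
    static int __init esp_mac_driver_init(void)
    {
            return platform_driver_register(&esp_mac_driver);
    }
    module_init(esp_mac_driver_init);

    static void __exit esp_mac_driver_exit(void)
    {
            platform_driver_unregister(&esp_mac_driver);
    }
    module_exit(esp_mac_driver_exit);
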
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index b5dde9d0d054..2e511697fce3 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -285,7 +285,7 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+ BASR_DRQ | BASR_PHASE_MATCH, 0)) {
int bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
@@ -304,7 +304,7 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, HZ / 64) < 0)
+ BASR_ACK, 0) < 0)
scmd_printk(KERN_DEBUG, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
@@ -344,7 +344,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+ BASR_DRQ | BASR_PHASE_MATCH, 0)) {
int bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
@@ -362,7 +362,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT,
- HZ / 64) < 0) {
+ 0) < 0) {
scmd_printk(KERN_ERR, hostdata->connected,
"%s: Last Byte Sent timeout\n", __func__);
result = -1;
@@ -372,7 +372,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, HZ / 64) < 0)
+ BASR_ACK, 0) < 0)
scmd_printk(KERN_DEBUG, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
@@ -404,11 +404,12 @@ out:
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
- cmd->SCp.this_residual < setup_use_pdma)
+ int resid = NCR5380_to_ncmd(cmd)->this_residual;
+
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA || resid < setup_use_pdma)
return 0;
- return cmd->SCp.this_residual;
+ return resid;
}
static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
@@ -434,7 +435,7 @@ static struct scsi_host_template mac_scsi_template = {
.sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ff6d4aa92421..bf491af9f0d6 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -44,10 +44,14 @@
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
#include "megaraid.h"
@@ -124,7 +128,7 @@ static int trace_level;
/**
* mega_setup_mailbox()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
 * Allocates an 8-byte-aligned buffer for the handshake mailbox.
*/
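
Most of the megaraid hunks below are kernel-doc repairs: scripts/kernel-doc only recognizes parameters written as '@name: description' (colon), not '@name - description', inside a /** comment. A conforming comment looks like this (the Return line is added here for illustration):

    /**
     * mega_setup_mailbox() - allocate the handshake mailbox
     * @adapter: pointer to our soft state
     *
     * Allocates an 8-byte-aligned buffer for the handshake mailbox.
     *
     * Return: 0 on success, -1 on allocation failure.
     */
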
@@ -133,8 +137,10 @@ mega_setup_mailbox(adapter_t *adapter)
{
unsigned long align;
- adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
- sizeof(mbox64_t), &adapter->una_mbox64_dma);
+ adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mbox64_t),
+ &adapter->una_mbox64_dma,
+ GFP_KERNEL);
if( !adapter->una_mbox64 ) return -1;
@@ -190,23 +196,21 @@ mega_query_adapter(adapter_t *adapter)
{
dma_addr_t prod_info_dma_handle;
mega_inquiry3 *inquiry3;
- u8 raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ u8 *raw_mbox = (u8 *)&mbox;
int retval;
/* Initialize adapter inquiry mailbox */
- mbox = (mbox_t *)raw_mbox;
-
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* Try to issue Inquiry3 command
* if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
* update enquiry3 structure
*/
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
@@ -222,17 +226,18 @@ mega_query_adapter(adapter_t *adapter)
mraid_inquiry *inq;
dma_addr_t dma_handle;
- ext_inq = pci_alloc_consistent(adapter->dev,
- sizeof(mraid_ext_inquiry), &dma_handle);
+ ext_inq = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mraid_ext_inquiry),
+ &dma_handle, GFP_KERNEL);
if( ext_inq == NULL ) return -1;
inq = &ext_inq->raid_inq;
- mbox->m_out.xferaddr = (u32)dma_handle;
+ mbox.xferaddr = (u32)dma_handle;
/*issue old 0x04 command to adapter */
- mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
+ mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
issue_scb_block(adapter, raw_mbox);
@@ -243,8 +248,9 @@ mega_query_adapter(adapter_t *adapter)
mega_8_to_40ld(inq, inquiry3,
(mega_product_info *)&adapter->product_info);
- pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
- ext_inq, dma_handle);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mraid_ext_inquiry), ext_inq,
+ dma_handle);
} else { /*adapter supports 40ld */
adapter->flag |= BOARD_40LD;
@@ -253,11 +259,12 @@ mega_query_adapter(adapter_t *adapter)
* get product_info, which is static information and will be
* unchanged
*/
- prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
- &adapter->product_info,
- sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
+ (void *)&adapter->product_info,
+ sizeof(mega_product_info),
+ DMA_FROM_DEVICE);
- mbox->m_out.xferaddr = prod_info_dma_handle;
+ mbox.xferaddr = prod_info_dma_handle;
raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
@@ -267,8 +274,8 @@ mega_query_adapter(adapter_t *adapter)
"Product_info cmd failed with error: %d\n",
retval);
- pci_unmap_single(adapter->dev, prod_info_dma_handle,
- sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
+ sizeof(mega_product_info), DMA_FROM_DEVICE);
}
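
This and the following hunks migrate the driver from the deprecated PCI DMA wrappers to the generic DMA API. The translation is mechanical: the pci_* calls take a struct pci_dev, the dma_* calls take the underlying struct device, coherent allocations gain an explicit gfp_t, and the PCI_DMA_* constants map to enum dma_data_direction values. Side by side, as fragments rather than a complete function:

    /* legacy wrappers (removed) */
    buf  = pci_alloc_consistent(pdev, len, &dma_handle);
    addr = pci_map_single(pdev, ptr, len, PCI_DMA_FROMDEVICE);
    pci_unmap_single(pdev, addr, len, PCI_DMA_FROMDEVICE);
    pci_free_consistent(pdev, len, buf, dma_handle);

    /* generic DMA API (added): same semantics */
    buf  = dma_alloc_coherent(&pdev->dev, len, &dma_handle, GFP_KERNEL);
    addr = dma_map_single(&pdev->dev, ptr, len, DMA_FROM_DEVICE);
    dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);
    dma_free_coherent(&pdev->dev, len, buf, dma_handle);
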
@@ -347,7 +354,7 @@ mega_query_adapter(adapter_t *adapter)
/**
* mega_runpendq()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Runs through the list of pending requests.
*/
@@ -365,8 +372,7 @@ mega_runpendq(adapter_t *adapter)
*
* The command queuing entry point for the mid-layer.
*/
-static int
-megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+static int megaraid_queue_lck(struct scsi_cmnd *scmd)
{
adapter_t *adapter;
scb_t *scb;
@@ -375,9 +381,6 @@ megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
adapter = (adapter_t *)scmd->device->host->hostdata;
- scmd->scsi_done = done;
-
-
/*
* Allocate and build a SCB request
* busy flag will be set if mega_build_cmd() command could not
@@ -413,8 +416,8 @@ static DEF_SCSI_QCMD(megaraid_queue)
/**
* mega_allocate_scb()
- * @adapter - pointer to our soft state
- * @cmd - scsi command from the mid-layer
+ * @adapter: pointer to our soft state
+ * @cmd: scsi command from the mid-layer
*
* Allocate a SCB structure. This is the central structure for controller
* commands.
@@ -444,9 +447,9 @@ mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
/**
* mega_get_ldrv_num()
- * @adapter - pointer to our soft state
- * @cmd - scsi mid layer command
- * @channel - channel on the controller
+ * @adapter: pointer to our soft state
+ * @cmd: scsi mid layer command
+ * @channel: channel on the controller
*
* Calculate the logical drive number based on the information in scsi command
* and the channel number.
@@ -491,9 +494,9 @@ mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
if (adapter->support_random_del && adapter->read_ldidmap )
switch (cmd->cmnd[0]) {
- case READ_6: /* fall through */
- case WRITE_6: /* fall through */
- case READ_10: /* fall through */
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
case WRITE_10:
ldrv_num += 0x80;
}
@@ -503,9 +506,9 @@ mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
/**
* mega_build_cmd()
- * @adapter - pointer to our soft state
- * @cmd - Prepare using this scsi command
- * @busy - busy flag if no resources
+ * @adapter: pointer to our soft state
+ * @cmd: Prepare using this scsi command
+ * @busy: busy flag if no resources
*
* Prepares a command and scatter gather list for the controller. This routine
 * also finds out if the command is intended for a logical drive or a
@@ -517,7 +520,6 @@ mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
static scb_t *
mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
- mega_ext_passthru *epthru;
mega_passthru *pthru;
scb_t *scb;
mbox_t *mbox;
@@ -582,7 +584,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
/* have just LUN 0 for each target on virtual channels */
if (cmd->device->lun) {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -601,7 +603,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
if(ldrv_num > max_ldrv_num ) {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -613,7 +615,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
* devices
*/
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
}
@@ -633,7 +635,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
*/
if( !adapter->has_cluster ) {
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -646,12 +648,12 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
scb->raw_mbox[3] = ldrv_num;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
return scb;
#else
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
#endif
@@ -666,7 +668,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
kunmap_atomic(buf - sg->offset);
cmd->result = (DID_OK << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -710,7 +712,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
}
- scb->dma_direction = PCI_DMA_FROMDEVICE;
+ scb->dma_direction = DMA_FROM_DEVICE;
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
@@ -840,10 +842,10 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
* If it is a read command
*/
if( (*cmd->cmnd & 0x0F) == 0x08 ) {
- scb->dma_direction = PCI_DMA_FROMDEVICE;
+ scb->dma_direction = DMA_FROM_DEVICE;
}
else {
- scb->dma_direction = PCI_DMA_TODEVICE;
+ scb->dma_direction = DMA_TO_DEVICE;
}
/* Calculate Scatter-Gather info */
@@ -853,7 +855,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE: /* Fall through */
+ case RESERVE:
case RELEASE:
/*
@@ -862,7 +864,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
if( ! adapter->has_cluster ) {
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
@@ -878,14 +880,14 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
scb->raw_mbox[3] = ldrv_num;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
return scb;
#endif
default:
cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return NULL;
}
}
@@ -905,7 +907,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
if( adapter->support_ext_cdb ) {
- epthru = mega_prepare_extpassthru(adapter, scb, cmd,
+ mega_prepare_extpassthru(adapter, scb, cmd,
channel, target);
mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
@@ -937,11 +939,11 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
/**
* mega_prepare_passthru()
- * @adapter - pointer to our soft state
- * @scb - our scsi control block
- * @cmd - scsi command from the mid-layer
- * @channel - actual channel on the controller
- * @target - actual id on the controller.
+ * @adapter: pointer to our soft state
+ * @scb: our scsi control block
+ * @cmd: scsi command from the mid-layer
+ * @channel: actual channel on the controller
+ * @target: actual id on the controller.
*
* prepare a command for the scsi physical devices.
*/
@@ -972,7 +974,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
- scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ scb->dma_direction = DMA_BIDIRECTIONAL;
/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
switch (cmd->cmnd[0]) {
@@ -988,7 +990,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
adapter->flag |= (1L << cmd->device->channel);
}
- /* Fall through */
+ fallthrough;
default:
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
@@ -1000,11 +1002,11 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
/**
* mega_prepare_extpassthru()
- * @adapter - pointer to our soft state
- * @scb - our scsi control block
- * @cmd - scsi command from the mid-layer
- * @channel - actual channel on the controller
- * @target - actual id on the controller.
+ * @adapter: pointer to our soft state
+ * @scb: our scsi control block
+ * @cmd: scsi command from the mid-layer
+ * @channel: actual channel on the controller
+ * @target: actual id on the controller.
*
 * prepare a command for the scsi physical devices. This routine prepares
* commands for devices which can take extended CDBs (>10 bytes)
@@ -1036,7 +1038,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
- scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ scb->dma_direction = DMA_BIDIRECTIONAL;
switch(cmd->cmnd[0]) {
case INQUIRY:
@@ -1051,7 +1053,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
adapter->flag |= (1L << cmd->device->channel);
}
- /* Fall through */
+ fallthrough;
default:
epthru->numsgelements = mega_build_sglist(adapter, scb,
&epthru->dataxferaddr, &epthru->dataxferlen);
@@ -1085,8 +1087,8 @@ __mega_runpendq(adapter_t *adapter)
/**
* issue_scb()
- * @adapter - pointer to our soft state
- * @scb - scsi control block
+ * @adapter: pointer to our soft state
+ * @scb: scsi control block
*
* Post a command to the card if the mailbox is available, otherwise return
* busy. We also take the scb from the pending list if the mailbox is
@@ -1166,8 +1168,8 @@ mega_busywait_mbox (adapter_t *adapter)
/**
* issue_scb_block()
- * @adapter - pointer to our soft state
- * @raw_mbox - the mailbox
+ * @adapter: pointer to our soft state
+ * @raw_mbox: the mailbox
*
* Issue a scb in synchronous and non-interrupt mode
*/
@@ -1247,8 +1249,8 @@ bug_blocked_mailbox:
/**
* megaraid_isr_iomapped()
- * @irq - irq
- * @devp - pointer to our soft state
+ * @irq: irq
+ * @devp: pointer to our soft state
*
* Interrupt service routine for io-mapped controllers.
* Find out if our device is interrupting. If yes, acknowledge the interrupt
@@ -1323,8 +1325,8 @@ megaraid_isr_iomapped(int irq, void *devp)
/**
* megaraid_isr_memmapped()
- * @irq - irq
- * @devp - pointer to our soft state
+ * @irq: irq
+ * @devp: pointer to our soft state
*
* Interrupt service routine for memory-mapped controllers.
* Find out if our device is interrupting. If yes, acknowledge the interrupt
@@ -1401,10 +1403,10 @@ megaraid_isr_memmapped(int irq, void *devp)
}
/**
* mega_cmd_done()
- * @adapter - pointer to our soft state
- * @completed - array of ids of completed commands
- * @nstatus - number of completed commands
- * @status - status of the last command completed
+ * @adapter: pointer to our soft state
+ * @completed: array of ids of completed commands
+ * @nstatus: number of completed commands
+ * @status: status of the last command completed
*
* Complete the commands and call the scsi mid-layer callback hooks.
*/
@@ -1579,9 +1581,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
memcpy(cmd->sense_buffer, pthru->reqsensearea,
14);
- cmd->result = (DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (CHECK_CONDITION << 1);
+ cmd->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
@@ -1589,14 +1589,10 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
memcpy(cmd->sense_buffer,
epthru->reqsensearea, 14);
- cmd->result = (DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (CHECK_CONDITION << 1);
- } else {
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = ABORTED_COMMAND;
- cmd->result |= (CHECK_CONDITION << 1);
- }
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+ } else
+ scsi_build_sense(cmd, 0,
+ ABORTED_COMMAND, 0, 0);
}
break;
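
The removed branch hand-built a fixed-format sense buffer (response code 0x70, sense key ABORTED_COMMAND) and or-ed a driver-shifted status into cmd->result; scsi_build_sense() does both steps, filling cmd->sense_buffer and setting cmd->result to SAM_STAT_CHECK_CONDITION. Its signature, for reference:

    /* from <scsi/scsi_cmnd.h>: desc = 0 selects fixed-format sense;
     * key/asc/ascq fill the sense key and additional sense code fields
     */
    void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
                          u8 key, u8 asc, u8 ascq);

    /* equivalent of the removed hand-rolled code: */
    scsi_build_sense(cmd, 0, ABORTED_COMMAND, 0, 0);
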
@@ -1613,7 +1609,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if( cmd->cmnd[0] == TEST_UNIT_READY ) {
cmd->result |= (DID_ERROR << 16) |
- (RESERVATION_CONFLICT << 1);
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
@@ -1625,7 +1621,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmd->cmnd[0] == RELEASE) ) {
cmd->result |= (DID_ERROR << 16) |
- (RESERVATION_CONFLICT << 1);
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
#endif
@@ -1648,16 +1644,10 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
static void
mega_rundoneq (adapter_t *adapter)
{
- struct scsi_cmnd *cmd;
- struct list_head *pos;
-
- list_for_each(pos, &adapter->completed_list) {
+ struct megaraid_cmd_priv *cmd_priv;
- struct scsi_pointer* spos = (struct scsi_pointer *)pos;
-
- cmd = list_entry(spos, struct scsi_cmnd, SCp);
- cmd->scsi_done(cmd);
- }
+ list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
+ scsi_done(megaraid_to_scsi_cmd(cmd_priv));
INIT_LIST_HEAD(&adapter->completed_list);
}
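
The old loop treated cmd->SCp as a list_head and recovered the scsi_cmnd with raw pointer casts; the rewrite embeds a real list_head in the per-command private area and walks it type-safely with list_for_each_entry(). A sketch of the supporting pieces this assumes (the driver's actual definitions live in megaraid.h; the reverse mapping works because .cmd_size private data is placed directly after the scsi_cmnd):

    #include <linux/list.h>
    #include <scsi/scsi_cmnd.h>

    struct megaraid_cmd_priv {
            struct list_head entry;    /* links the command on adapter lists */
    };

    static inline struct scsi_cmnd *
    megaraid_to_scsi_cmd(struct megaraid_cmd_priv *cmd_priv)
    {
            /* scsi_cmd_priv() returns cmd + 1, so invert the offset */
            return (struct scsi_cmnd *)cmd_priv - 1;
    }
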
@@ -1814,25 +1804,25 @@ mega_free_sgl(adapter_t *adapter)
scb = &adapter->scb_list[i];
if( scb->sgl64 ) {
- pci_free_consistent(adapter->dev,
- sizeof(mega_sgl64) * adapter->sglen,
- scb->sgl64,
- scb->sgl_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ scb->sgl64, scb->sgl_dma_addr);
scb->sgl64 = NULL;
}
if( scb->pthru ) {
- pci_free_consistent(adapter->dev, sizeof(mega_passthru),
- scb->pthru, scb->pthru_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_passthru), scb->pthru,
+ scb->pthru_dma_addr);
scb->pthru = NULL;
}
if( scb->epthru ) {
- pci_free_consistent(adapter->dev,
- sizeof(mega_ext_passthru),
- scb->epthru, scb->epthru_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_ext_passthru),
+ scb->epthru, scb->epthru_dma_addr);
scb->epthru = NULL;
}
@@ -1921,9 +1911,9 @@ megaraid_reset(struct scsi_cmnd *cmd)
/**
* megaraid_abort_and_reset()
- * @adapter - megaraid soft state
- * @cmd - scsi command to be aborted or reset
- * @aor - abort or reset flag
+ * @adapter: megaraid soft state
+ * @cmd: scsi command to be aborted or reset
+ * @aor: abort or reset flag
*
 * Try to locate the scsi command in the pending queue. If found and not yet
 * issued to the controller, abort/reset it. Otherwise return failure
@@ -2005,7 +1995,7 @@ make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
- if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
+ if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
kfree(*pdev);
return -1;
}
@@ -2021,22 +2011,24 @@ free_local_pdev(struct pci_dev *pdev)
/**
* mega_allocate_inquiry()
- * @dma_handle - handle returned for dma address
- * @pdev - handle to pci device
+ * @dma_handle: handle returned for dma address
+ * @pdev: handle to pci device
*
* allocates memory for inquiry structure
*/
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
- return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
+ return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
+ dma_handle, GFP_KERNEL);
}
static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
- pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
+ dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
+ dma_handle);
}
@@ -2045,8 +2037,8 @@ mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
/**
* proc_show_config()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display configuration information about the controller.
*/
@@ -2109,8 +2101,8 @@ proc_show_config(struct seq_file *m, void *v)
/**
* proc_show_stat()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display statistical information about the I/O activity.
*/
@@ -2143,8 +2135,8 @@ proc_show_stat(struct seq_file *m, void *v)
/**
* proc_show_mbox()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display mailbox information for the last command issued. This information
* is good for debugging.
@@ -2171,8 +2163,8 @@ proc_show_mbox(struct seq_file *m, void *v)
/**
* proc_show_rebuild_rate()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display current rebuild rate
*/
@@ -2214,8 +2206,8 @@ free_pdev:
/**
* proc_show_battery()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display information about the battery module on the controller.
*/
@@ -2317,9 +2309,9 @@ mega_print_inquiry(struct seq_file *m, char *scsi_inq)
/**
* proc_show_pdrv()
- * @m - Synthetic file construction data
- * @page - buffer to write the data in
- * @adapter - pointer to our soft state
+ * @m: Synthetic file construction data
+ * @adapter: pointer to our soft state
+ * @channel: physical channel on the controller
*
* Display information about the physical drives.
*/
@@ -2350,7 +2342,8 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
}
- scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
+ scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
+ GFP_KERNEL);
if( scsi_inq == NULL ) {
seq_puts(m, "memory not available for scsi inq.\n");
goto free_inquiry;
@@ -2423,7 +2416,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
}
free_pci:
- pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
+ dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
@@ -2433,8 +2426,8 @@ free_pdev:
/**
* proc_show_pdrv_ch0()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display information about the physical drives on physical channel 0.
*/
@@ -2447,8 +2440,8 @@ proc_show_pdrv_ch0(struct seq_file *m, void *v)
/**
* proc_show_pdrv_ch1()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display information about the physical drives on physical channel 1.
*/
@@ -2461,8 +2454,8 @@ proc_show_pdrv_ch1(struct seq_file *m, void *v)
/**
* proc_show_pdrv_ch2()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display information about the physical drives on physical channel 2.
*/
@@ -2475,8 +2468,8 @@ proc_show_pdrv_ch2(struct seq_file *m, void *v)
/**
* proc_show_pdrv_ch3()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display information about the physical drives on physical channel 3.
*/
@@ -2489,10 +2482,10 @@ proc_show_pdrv_ch3(struct seq_file *m, void *v)
/**
* proc_show_rdrv()
- * @m - Synthetic file construction data
- * @adapter - pointer to our soft state
- * @start - starting logical drive to display
- * @end - ending logical drive to display
+ * @m: Synthetic file construction data
+ * @adapter: pointer to our soft state
+ * @start: starting logical drive to display
+ * @end: ending logical drive to display
*
* We do not print the inquiry information since it's already available through
* the /proc/scsi/scsi interface
@@ -2543,8 +2536,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
raid_inq.logdrv_info.num_ldrv;
}
- disk_array = pci_alloc_consistent(pdev, array_sz,
- &disk_array_dma_handle);
+ disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
+ &disk_array_dma_handle, GFP_KERNEL);
if( disk_array == NULL ) {
seq_puts(m, "memory not available.\n");
@@ -2663,8 +2656,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
}
free_pci:
- pci_free_consistent(pdev, array_sz, disk_array,
- disk_array_dma_handle);
+ dma_free_coherent(&pdev->dev, array_sz, disk_array,
+ disk_array_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
@@ -2674,8 +2667,8 @@ free_pdev:
/**
* proc_show_rdrv_10()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display real time information about the logical drives 0 through 9.
*/
@@ -2688,8 +2681,8 @@ proc_show_rdrv_10(struct seq_file *m, void *v)
/**
* proc_show_rdrv_20()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display real time information about the logical drives 10 through 19.
*/
@@ -2702,8 +2695,8 @@ proc_show_rdrv_20(struct seq_file *m, void *v)
/**
* proc_show_rdrv_30()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display real time information about the logical drives 20 through 29.
*/
@@ -2716,8 +2709,8 @@ proc_show_rdrv_30(struct seq_file *m, void *v)
/**
* proc_show_rdrv_40()
- * @m - Synthetic file construction data
- * @v - File iterator
+ * @m: Synthetic file construction data
+ * @v: File iterator
*
* Display real time information about the logical drives 30 through 39.
*/
@@ -2729,8 +2722,8 @@ proc_show_rdrv_40(struct seq_file *m, void *v)
/**
* mega_create_proc_entry()
- * @index - index in soft state array
- * @parent - parent node for this /proc entry
+ * @index: index in soft state array
+ * @parent: parent node for this /proc entry
*
* Creates /proc entries for our controllers.
*/
@@ -2785,7 +2778,7 @@ static inline void mega_create_proc_entry(int index, struct proc_dir_entry *pare
#endif
-/**
+/*
* megaraid_biosparam()
*
* Return the disk geometry for a particular disk
@@ -2795,11 +2788,9 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
adapter_t *adapter;
- unsigned char *bh;
int heads;
int sectors;
int cylinders;
- int rval;
/* Get pointer to host config structure */
adapter = (adapter_t *)sdev->host->hostdata;
@@ -2826,15 +2817,8 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
geom[2] = cylinders;
}
else {
- bh = scsi_bios_ptable(bdev);
-
- if( bh ) {
- rval = scsi_partsize(bh, capacity,
- &geom[2], &geom[0], &geom[1]);
- kfree(bh);
- if( rval != -1 )
- return rval;
- }
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
dev_info(&adapter->dev->dev,
"invalid partition on this disk on channel %d\n",
@@ -2863,7 +2847,7 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/**
* mega_init_scb()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Allocate memory for the various pointers in the scb structures:
* scatter-gather list pointer, passthru and extended passthru structure
@@ -2891,9 +2875,9 @@ mega_init_scb(adapter_t *adapter)
scb->idx = i;
- scb->sgl64 = pci_alloc_consistent(adapter->dev,
- sizeof(mega_sgl64) * adapter->sglen,
- &scb->sgl_dma_addr);
+ scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ &scb->sgl_dma_addr, GFP_KERNEL);
scb->sgl = (mega_sglist *)scb->sgl64;
@@ -2903,9 +2887,9 @@ mega_init_scb(adapter_t *adapter)
return -1;
}
- scb->pthru = pci_alloc_consistent(adapter->dev,
- sizeof(mega_passthru),
- &scb->pthru_dma_addr);
+ scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_passthru),
+ &scb->pthru_dma_addr, GFP_KERNEL);
if( !scb->pthru ) {
dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
@@ -2913,9 +2897,9 @@ mega_init_scb(adapter_t *adapter)
return -1;
}
- scb->epthru = pci_alloc_consistent(adapter->dev,
- sizeof(mega_ext_passthru),
- &scb->epthru_dma_addr);
+ scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_ext_passthru),
+ &scb->epthru_dma_addr, GFP_KERNEL);
if( !scb->epthru ) {
dev_warn(&adapter->dev->dev,
@@ -2943,8 +2927,8 @@ mega_init_scb(adapter_t *adapter)
/**
* megadev_open()
- * @inode - unused
- * @filep - unused
+ * @inode: unused
+ * @filep: unused
*
* Routines for the character/ioctl interface to the driver. Find out if this
* is a valid open.
@@ -2963,10 +2947,9 @@ megadev_open (struct inode *inode, struct file *filep)
/**
* megadev_ioctl()
- * @inode - Our device inode
- * @filep - unused
- * @cmd - ioctl command
- * @arg - user buffer
+ * @filep: Our device file
+ * @cmd: ioctl command
+ * @arg: user buffer
*
* ioctl entry point for our private ioctl interface. We move the data in from
* the user space, prepare the command (if necessary, convert the old MIMD
@@ -2986,14 +2969,13 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
void *data = NULL; /* data to be transferred */
dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
megacmd_t mc;
- megastat_t __user *ustats;
- int num_ldrv;
+#if MEGA_HAVE_STATS
+ megastat_t __user *ustats = NULL;
+ int num_ldrv = 0;
+#endif
u32 uxferaddr = 0;
struct pci_dev *pdev;
- ustats = NULL; /* avoid compilation warnings */
- num_ldrv = 0;
-
/*
* Make sure only USCSICMD are issued through this interface.
* MIMD applications would still fire different commands.
@@ -3157,9 +3139,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
/* Passthru commands */
- pthru = pci_alloc_consistent(pdev,
- sizeof(mega_passthru),
- &pthru_dma_hndl);
+ pthru = dma_alloc_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ &pthru_dma_hndl, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
@@ -3177,9 +3159,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( copy_from_user(pthru, upthru,
sizeof(mega_passthru)) ) {
- pci_free_consistent(pdev,
- sizeof(mega_passthru), pthru,
- pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3190,15 +3172,16 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
* Is there a data transfer
*/
if( pthru->dataxferlen ) {
- data = pci_alloc_consistent(pdev,
- pthru->dataxferlen,
- &data_dma_hndl);
+ data = dma_alloc_coherent(&pdev->dev,
+ pthru->dataxferlen,
+ &data_dma_hndl,
+ GFP_KERNEL);
if( data == NULL ) {
- pci_free_consistent(pdev,
- sizeof(mega_passthru),
- pthru,
- pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ pthru,
+ pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3263,13 +3246,13 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
freemem_and_return:
if( pthru->dataxferlen ) {
- pci_free_consistent(pdev,
- pthru->dataxferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ pthru->dataxferlen, data,
+ data_dma_hndl);
}
- pci_free_consistent(pdev, sizeof(mega_passthru),
- pthru, pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3282,8 +3265,10 @@ freemem_and_return:
* Is there a data transfer
*/
if( uioc.xferlen ) {
- data = pci_alloc_consistent(pdev,
- uioc.xferlen, &data_dma_hndl);
+ data = dma_alloc_coherent(&pdev->dev,
+ uioc.xferlen,
+ &data_dma_hndl,
+ GFP_KERNEL);
if( data == NULL ) {
free_local_pdev(pdev);
@@ -3303,9 +3288,9 @@ freemem_and_return:
if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
uioc.xferlen) ) {
- pci_free_consistent(pdev,
- uioc.xferlen,
- data, data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ uioc.xferlen, data,
+ data_dma_hndl);
free_local_pdev(pdev);
@@ -3326,9 +3311,9 @@ freemem_and_return:
if( rval ) {
if( uioc.xferlen ) {
- pci_free_consistent(pdev,
- uioc.xferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ uioc.xferlen, data,
+ data_dma_hndl);
}
free_local_pdev(pdev);
@@ -3348,9 +3333,8 @@ freemem_and_return:
}
if( uioc.xferlen ) {
- pci_free_consistent(pdev,
- uioc.xferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev, uioc.xferlen,
+ data, data_dma_hndl);
}
free_local_pdev(pdev);
@@ -3379,8 +3363,8 @@ megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
/**
* mega_m_to_n()
- * @arg - user address
- * @uioc - new ioctl structure
+ * @arg: user address
+ * @uioc: new ioctl structure
*
* A thin layer to convert older mimd interface ioctl structure to NIT ioctl
* structure
@@ -3507,8 +3491,8 @@ mega_m_to_n(void __user *arg, nitioctl_t *uioc)
/*
* mega_n_to_m()
- * @arg - user address
- * @mc - mailbox command
+ * @arg: user address
+ * @mc: mailbox command
*
* Updates the status information for the application, depending on whether it
* conforms to the older MIMD ioctl interface or the newer NIT ioctl interface
@@ -3574,30 +3558,26 @@ mega_n_to_m(void __user *arg, megacmd_t *mc)
/**
* mega_is_bios_enabled()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* issue command to find out if the BIOS is enabled for this controller
*/
static int
mega_is_bios_enabled(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
- int ret;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = IS_BIOS_ENABLED;
raw_mbox[2] = GET_BIOS;
-
- ret = issue_scb_block(adapter, raw_mbox);
+ issue_scb_block(adapter, raw_mbox);
return *(char *)adapter->mega_buffer;
}
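
The mailbox rework in this and the following hunks is one idiom repeated: replace the raw_mbox byte array aliased through an mbox_t pointer with a struct mbox_out on the stack, keeping a u8 view only for the opcode bytes. Reduced to its essentials (field and constant names as used in this driver):

    struct mbox_out mbox;
    unsigned char *raw_mbox = (u8 *)&mbox;

    memset(&mbox, 0, sizeof(mbox));                 /* clear exactly one struct */
    mbox.xferaddr = (u32)adapter->buf_dma_handle;   /* DMA address of reply buf */
    raw_mbox[0] = IS_BIOS_ENABLED;                  /* opcode */
    raw_mbox[2] = GET_BIOS;                         /* sub-opcode */
    issue_scb_block(adapter, raw_mbox);

The sizeof(mbox) in the memset() is the point: the old code cleared sizeof(raw_mbox) bytes through a pointer typed as the larger mbox_t, which happened to be correct but obscured what was being zeroed.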
@@ -3605,7 +3585,7 @@ mega_is_bios_enabled(adapter_t *adapter)
/**
* mega_enum_raid_scsi()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Find out what channels are RAID/SCSI. This information is used to
* differentiate the virtual channels and physical channels and to support
@@ -3614,13 +3594,11 @@ mega_is_bios_enabled(adapter_t *adapter)
static void
mega_enum_raid_scsi(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out what channels are raid/scsi
@@ -3630,7 +3608,7 @@ mega_enum_raid_scsi(adapter_t *adapter)
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Non-ROMB firmware fail this command, so all channels
@@ -3660,7 +3638,7 @@ mega_enum_raid_scsi(adapter_t *adapter)
/**
* mega_get_boot_drv()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Find out which device is the boot device. Note, any logical drive or any
* physical device (e.g., a CDROM) can be designated as a boot device.
@@ -3669,23 +3647,21 @@ static void
mega_get_boot_drv(adapter_t *adapter)
{
struct private_bios_data *prv_bios_data;
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
u16 cksum = 0;
u8 *cksum_p;
u8 boot_pdrv;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
raw_mbox[0] = BIOS_PVT_DATA;
raw_mbox[2] = GET_BIOS_PVT_DATA;
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
adapter->boot_ldrv_enabled = 0;
adapter->boot_ldrv = 0;
@@ -3727,7 +3703,7 @@ mega_get_boot_drv(adapter_t *adapter)
/**
* mega_support_random_del()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Find out if this controller supports random deletion and addition of
* logical drives
@@ -3735,13 +3711,11 @@ mega_get_boot_drv(adapter_t *adapter)
static int
mega_support_random_del(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command
@@ -3757,20 +3731,18 @@ mega_support_random_del(adapter_t *adapter)
/**
* mega_support_ext_cdb()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Find out if this firmware supports cdblen > 10
*/
static int
mega_support_ext_cdb(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out if controller supports extended CDBs.
*/
@@ -3785,8 +3757,8 @@ mega_support_ext_cdb(adapter_t *adapter)
/**
* mega_del_logdrv()
- * @adapter - pointer to our soft state
- * @logdrv - logical drive to be deleted
+ * @adapter: pointer to our soft state
+ * @logdrv: logical drive to be deleted
*
* Delete the specified logical drive. It is the responsibility of the user
* app to let the OS know about this operation.
@@ -3871,7 +3843,7 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
/**
* mega_get_max_sgl()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Find out the maximum number of scatter-gather elements supported by this
* version of the firmware
@@ -3879,16 +3851,14 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
static void
mega_get_max_sgl(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = MAIN_MISC_OPCODE;
raw_mbox[2] = GET_MAX_SG_SUPPORT;
@@ -3902,7 +3872,7 @@ mega_get_max_sgl(adapter_t *adapter)
}
else {
adapter->sglen = *((char *)adapter->mega_buffer);
-
+
/*
* Make sure this is not more than the resources we are
* planning to allocate
@@ -3917,23 +3887,21 @@ mega_get_max_sgl(adapter_t *adapter)
/**
* mega_support_cluster()
- * @adapter - pointer to our soft state
+ * @adapter: pointer to our soft state
*
* Find out if this firmware supports cluster calls.
*/
static int
mega_support_cluster(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Try to get the initiator id. This command will succeed iff the
@@ -3959,8 +3927,8 @@ mega_support_cluster(adapter_t *adapter)
#ifdef CONFIG_PROC_FS
/**
* mega_adapinq()
- * @adapter - pointer to our soft state
- * @dma_handle - DMA address of the buffer
+ * @adapter: pointer to our soft state
+ * @dma_handle: DMA address of the buffer
*
* Issue internal commands while interrupts are available.
* We only issue direct mailbox commands from within the driver. ioctl()
@@ -3992,11 +3960,12 @@ mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
}
-/** mega_internal_dev_inquiry()
- * @adapter - pointer to our soft state
- * @ch - channel for this device
- * @tgt - ID of this device
- * @buf_dma_handle - DMA address of the buffer
+/**
+ * mega_internal_dev_inquiry()
+ * @adapter: pointer to our soft state
+ * @ch: channel for this device
+ * @tgt: ID of this device
+ * @buf_dma_handle: DMA address of the buffer
*
* Issue the scsi inquiry for the specified device.
*/
@@ -4017,8 +3986,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
*/
if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
- pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru),
- &pthru_dma_handle);
+ pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
+ &pthru_dma_handle, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
@@ -4054,8 +4023,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
rval = mega_internal_command(adapter, &mc, pthru);
- pci_free_consistent(pdev, sizeof(mega_passthru), pthru,
- pthru_dma_handle);
+ dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
+ pthru_dma_handle);
free_local_pdev(pdev);
@@ -4065,9 +4034,9 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
/**
* mega_internal_command()
- * @adapter - pointer to our soft state
- * @mc - the mailbox command
- * @pthru - Passthru structure for DCDB commands
+ * @adapter: pointer to our soft state
+ * @mc: the mailbox command
+ * @pthru: Passthru structure for DCDB commands
*
* Issue the internal commands in interrupt mode.
* The last argument is the address of the passthru structure if the command
@@ -4148,6 +4117,7 @@ static struct scsi_host_template megaraid_template = {
.eh_bus_reset_handler = megaraid_reset,
.eh_host_reset_handler = megaraid_reset,
.no_write_same = 1,
+ .cmd_size = sizeof(struct megaraid_cmd_priv),
};
static int
@@ -4280,8 +4250,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* Allocate buffer to issue internal commands.
*/
- adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
- MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
+ adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
+ MEGA_BUFFER_SIZE,
+ &adapter->buf_dma_handle,
+ GFP_KERNEL);
if (!adapter->mega_buffer) {
dev_warn(&pdev->dev, "out of RAM\n");
goto out_host_put;
@@ -4440,10 +4412,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the Mode of addressing to 64 bit if we can */
if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
adapter->has_64bit_addr = 1;
} else {
- pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
adapter->has_64bit_addr = 0;
}
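
pci_set_dma_mask(pdev, mask) was a one-line wrapper around dma_set_mask(&pdev->dev, mask), so this hunk is a direct substitution; both variants return an error code that this driver has always ignored. The usual checked form looks roughly like this (a sketch, not what the patch does):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_set_mask(struct pci_dev *pdev, bool wants_64bit)
    {
            if (wants_64bit && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
                    return 64;
            if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                    return 32;
            return -EIO;    /* no usable DMA addressing */
    }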
@@ -4482,15 +4454,15 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_mbox:
- pci_free_consistent(adapter->dev, sizeof(mbox64_t),
- adapter->una_mbox64, adapter->una_mbox64_dma);
+ dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
free_irq(adapter->host->irq, adapter);
out_free_scb_list:
kfree(adapter->scb_list);
out_free_cmd_buffer:
- pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
- adapter->mega_buffer, adapter->buf_dma_handle);
+ dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
scsi_host_put(host);
out_iounmap:
@@ -4564,11 +4536,11 @@ megaraid_remove_one(struct pci_dev *pdev)
sprintf(buf, "hba%d", adapter->host->host_no);
remove_proc_subtree(buf, mega_proc_dir_entry);
- pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
- adapter->mega_buffer, adapter->buf_dma_handle);
+ dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
kfree(adapter->scb_list);
- pci_free_consistent(adapter->dev, sizeof(mbox64_t),
- adapter->una_mbox64, adapter->una_mbox64_dma);
+ dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
scsi_host_put(host);
pci_disable_device(pdev);
@@ -4635,7 +4607,7 @@ static int __init megaraid_init(void)
* major number allocation.
*/
major = register_chrdev(0, "megadev_legacy", &megadev_fops);
- if (!major) {
+ if (major < 0) {
printk(KERN_WARNING
"megaraid: failed to register char device\n");
}
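
The `!major` test fixed here could never fire: register_chrdev(0, ...) requests dynamic major allocation and returns either the allocated (positive) major number or a negative errno, never 0 for a dynamic request. The corrected shape, as a sketch:

    #include <linux/fs.h>

    static int major;

    static int __init example_init(void)
    {
            major = register_chrdev(0, "megadev_legacy", &megadev_fops);
            if (major < 0)
                    pr_warn("megaraid: failed to register char device\n");
            /* the driver keeps loading; only the management node is lost */
            return 0;
    }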
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index cce23a086fbe..013fbfb911b9 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
#define MEGARAID_VERSION \
"v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
@@ -756,8 +757,28 @@ struct private_bios_data {
#define CACHED_IO 0
#define DIRECT_IO 1
+struct megaraid_cmd_priv {
+ struct list_head entry;
+};
+
+#define SCSI_LIST(scp) \
+ (&((struct megaraid_cmd_priv *)scsi_cmd_priv(scp))->entry)
+
+struct scsi_cmd_and_priv {
+ struct scsi_cmnd cmd;
+ struct megaraid_cmd_priv priv;
+};
+
+static inline struct scsi_cmnd *
+megaraid_to_scsi_cmd(struct megaraid_cmd_priv *cmd_priv)
+{
+ /* See also scsi_mq_setup_tags() */
+ BUILD_BUG_ON(sizeof(struct scsi_cmd_and_priv) !=
+ sizeof(struct scsi_cmnd) +
+ sizeof(struct megaraid_cmd_priv));
-#define SCSI_LIST(scp) ((struct list_head *)(&(scp)->SCp))
+ return &container_of(cmd_priv, struct scsi_cmd_and_priv, priv)->cmd;
+}
/*
* Each controller's soft state
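
Background for the SCp removal above: drivers now declare per-command private space with .cmd_size in the host template (added earlier in this patch), and the midlayer allocates that many bytes immediately after each struct scsi_cmnd; scsi_cmd_priv() returns the trailing area. A minimal sketch of the pattern, with hypothetical names:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct my_cmd_priv {
            struct list_head entry;
    };

    static struct scsi_host_template my_template = {
            /* midlayer reserves this much space after every command */
            .cmd_size = sizeof(struct my_cmd_priv),
    };

    static void my_prepare(struct scsi_cmnd *scp)
    {
            struct my_cmd_priv *priv = scsi_cmd_priv(scp);

            INIT_LIST_HEAD(&priv->entry);   /* no casts, no SCp */
    }

megaraid_to_scsi_cmd() above walks the same layout in the opposite direction, and its BUILD_BUG_ON() documents the command-followed-by-private-data assumption it relies on.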
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 01a1bfb8ea2a..f0ef8f7f82c1 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -781,5 +781,3 @@ typedef struct {
} __attribute__ ((packed)) mbox_sgl32;
#endif // _MRAID_MBOX_DEFS_H_
-
-/* vim: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 3a7596e47a88..2ad0aa2f837d 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -282,5 +282,3 @@ struct mraid_pci_blk {
};
#endif // _MEGA_COMMON_H_
-
-// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 8443f2f35be2..132de68c14e9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -121,8 +121,8 @@ static irqreturn_t megaraid_isr(int, void *);
static void megaraid_mbox_dpc(unsigned long);
-static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
-static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_mbox_app_hndl_show(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_mbox_ld_show(struct device *, struct device_attribute *attr, char *);
static int megaraid_cmm_register(adapter_t *);
static int megaraid_cmm_unregister(adapter_t *);
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(cmd_per_lun,
* This would result in non-disk devices being skipped during driver load
* time. These can be added later, though, using /proc/scsi/scsi
*/
-static unsigned int megaraid_fast_load = 0;
+static unsigned int megaraid_fast_load;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
"Faster loading of the driver, skips physical devices! (default=0)");
@@ -302,24 +302,26 @@ static struct pci_driver megaraid_pci_driver = {
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)
-DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
- NULL);
+static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl);
// Host template initializer for megaraid mbox sysfs device attributes
-static struct device_attribute *megaraid_shost_attrs[] = {
- &dev_attr_megaraid_mbox_app_hndl,
+static struct attribute *megaraid_shost_attrs[] = {
+ &dev_attr_megaraid_mbox_app_hndl.attr,
NULL,
};
+ATTRIBUTE_GROUPS(megaraid_shost);
-DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld);
// Host template initializer for megaraid mbox sysfs device attributes
-static struct device_attribute *megaraid_sdev_attrs[] = {
- &dev_attr_megaraid_mbox_ld,
+static struct attribute *megaraid_sdev_attrs[] = {
+ &dev_attr_megaraid_mbox_ld.attr,
NULL,
};
+ATTRIBUTE_GROUPS(megaraid_sdev);
+
/*
* Scsi host template for megaraid unified driver
*/
@@ -332,8 +334,8 @@ static struct scsi_host_template megaraid_template_g = {
.eh_host_reset_handler = megaraid_reset_handler,
.change_queue_depth = scsi_change_queue_depth,
.no_write_same = 1,
- .sdev_attrs = megaraid_sdev_attrs,
- .shost_attrs = megaraid_shost_attrs,
+ .sdev_groups = megaraid_sdev_groups,
+ .shost_groups = megaraid_shost_groups,
};
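
The sysfs conversion follows the tree-wide pattern: DEVICE_ATTR_ADMIN_RO(name) declares dev_attr_name with root-only (0400) read permission and binds it to a function that must be called name_show() — hence the _show renames above — and ATTRIBUTE_GROUPS(prefix) generates the prefix_groups array that the template's sdev_groups/shost_groups fields expect. Sketch with hypothetical names:

    #include <linux/device.h>

    /* DEVICE_ATTR_ADMIN_RO(foo) requires a foo_show() */
    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d\n", 42);
    }
    static DEVICE_ATTR_ADMIN_RO(foo);

    static struct attribute *example_attrs[] = {
            &dev_attr_foo.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(example);      /* emits example_groups[] */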
@@ -1165,7 +1167,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
* structure
* Since passthru and extended passthru commands are exclusive, they
* share common memory pool. Passthru structures piggyback on memory
- * allocted to extended passthru since passthru is smaller of the two
+ * allocated to extended passthru since passthru is smaller of the two
*/
raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
&adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
@@ -1427,21 +1429,19 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
/**
- * megaraid_queue_command - generic queue entry point for all LLDs
+ * megaraid_queue_command_lck - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
- * @done : callback routine to be called after the cmd has be completed
*
* Queue entry point for mailbox based controllers.
*/
-static int
-megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+static int megaraid_queue_command_lck(struct scsi_cmnd *scp)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
adapter_t *adapter;
scb_t *scb;
int if_busy;
adapter = SCP2ADAPTER(scp);
- scp->scsi_done = done;
scp->result = 0;
/*
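
Context for the queuecommand change: the midlayer no longer passes a completion callback, and drivers call scsi_done(scp) directly instead of stashing it in scp->scsi_done. The local `done` alias above keeps the body of this _lck-style function untouched; in a driver without the lock wrapper the shape is simply (hypothetical helpers marked as such):

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    static int example_queuecommand(struct Scsi_Host *shost,
                                    struct scsi_cmnd *scp)
    {
            if (device_gone(scp)) {         /* hypothetical predicate */
                    scp->result = DID_NO_CONNECT << 16;
                    scsi_done(scp);         /* complete immediately */
                    return 0;
            }
            /* hypothetical submit path; completes later via scsi_done() */
            return submit_to_hw(scp);
    }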
@@ -1574,14 +1574,12 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
}
if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = ILLEGAL_REQUEST;
- scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
- scp->result = CHECK_CONDITION << 1;
+ scsi_build_sense(scp, 0, ILLEGAL_REQUEST,
+ MEGA_INVALID_FIELD_IN_CDB, 0);
return NULL;
}
- /* Fall through */
+ fallthrough;
case READ_CAPACITY:
/*
@@ -2301,8 +2299,7 @@ megaraid_mbox_dpc(unsigned long devp)
memcpy(scp->sense_buffer, pthru->reqsensearea,
14);
- scp->result = DRIVER_SENSE << 24 |
- DID_OK << 16 | CHECK_CONDITION << 1;
+ scp->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->cmd == MBOXCMD_EXTPTHRU) {
@@ -2310,14 +2307,10 @@ megaraid_mbox_dpc(unsigned long devp)
memcpy(scp->sense_buffer,
epthru->reqsensearea, 14);
- scp->result = DRIVER_SENSE << 24 |
- DID_OK << 16 |
- CHECK_CONDITION << 1;
- } else {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = ABORTED_COMMAND;
- scp->result = CHECK_CONDITION << 1;
- }
+ scp->result = SAM_STAT_CHECK_CONDITION;
+ } else
+ scsi_build_sense(scp, 0,
+ ABORTED_COMMAND, 0, 0);
}
break;
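
scsi_build_sense(scmd, desc, key, asc, ascq) used above fills the sense buffer (fixed format when desc is 0), marks the sense valid, and sets SAM_STAT_CHECK_CONDITION in scmd->result, which is why the open-coded buffer pokes and the obsolete DRIVER_SENSE shifting both disappear. Both branches of the hunk, condensed into a sketch:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_proto.h>

    static void example_complete_with_sense(struct scsi_cmnd *scp,
                                            const u8 *fw_sense)
    {
            if (fw_sense) {
                    /* firmware supplied sense data: pass it through */
                    memcpy(scp->sense_buffer, fw_sense, 14);
                    scp->result = SAM_STAT_CHECK_CONDITION;
            } else {
                    /* synthesize fixed-format ABORTED COMMAND; this also
                     * sets SAM_STAT_CHECK_CONDITION in scp->result */
                    scsi_build_sense(scp, 0, ABORTED_COMMAND, 0, 0);
            }
    }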
@@ -2334,7 +2327,7 @@ megaraid_mbox_dpc(unsigned long devp)
*/
if (scp->cmnd[0] == TEST_UNIT_READY) {
scp->result = DID_ERROR << 16 |
- RESERVATION_CONFLICT << 1;
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
@@ -2345,7 +2338,7 @@ megaraid_mbox_dpc(unsigned long devp)
scp->cmnd[0] == RELEASE)) {
scp->result = DID_ERROR << 16 |
- RESERVATION_CONFLICT << 1;
+ SAM_STAT_RESERVATION_CONFLICT;
}
else {
scp->result = DID_BAD_TARGET << 16 | status;
@@ -2366,7 +2359,7 @@ megaraid_mbox_dpc(unsigned long devp)
megaraid_dealloc_scb(adapter, scb);
// send the scsi packet back to kernel
- scp->scsi_done(scp);
+ scsi_done(scp);
}
return;
@@ -2424,7 +2417,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
scb->sno, scb->dev_channel, scb->dev_target));
scp->result = (DID_ABORT << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
megaraid_dealloc_scb(adapter, scb);
@@ -2454,7 +2447,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
scb->dev_channel, scb->dev_target));
scp->result = (DID_ABORT << 16);
- scp->scsi_done(scp);
+ scsi_done(scp);
megaraid_dealloc_scb(adapter, scb);
@@ -2574,7 +2567,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
}
scb->scp->result = (DID_RESET << 16);
- scb->scp->scsi_done(scb->scp);
+ scsi_done(scb->scp);
megaraid_dealloc_scb(adapter, scb);
}
@@ -3240,8 +3233,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
int i;
uint32_t dword;
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = 0xFF;
@@ -3304,7 +3295,6 @@ blocked_mailbox:
* megaraid_mbox_display_scb - display SCB information, mostly debug purposes
* @adapter : controller's soft state
* @scb : SCB to be displayed
- * @level : debug level for console print
*
* Display information about the given SCB iff the current debug level is
* verbose.
@@ -3971,8 +3961,9 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/**
- * megaraid_sysfs_show_app_hndl - display application handle for this adapter
- * @cdev : class device object representation for the host
+ * megaraid_mbox_app_hndl_show - display application handle for this adapter
+ * @dev : class device object representation for the host
+ * @attr : device attribute (unused)
* @buf : buffer to send data to
*
* Display the handle used by the applications while executing management
@@ -3980,8 +3971,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
* handle, since we do not interface with applications directly.
*/
static ssize_t
-megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
- char *buf)
+megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
@@ -3989,12 +3979,12 @@ megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
- return snprintf(buf, 8, "%u\n", app_hndl);
+ return sysfs_emit(buf, "%u\n", app_hndl);
}
/**
- * megaraid_sysfs_show_ldnum - display the logical drive number for this device
+ * megaraid_mbox_ld_show - display the logical drive number for this device
* @dev : device object representation for the scsi device
* @attr : device attribute to show
* @buf : buffer to send data to
@@ -4009,7 +3999,7 @@ megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
* <int> <int> <int> <int>
*/
static ssize_t
-megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
+megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
@@ -4058,7 +4048,7 @@ megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, cha
}
}
- return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+ return sysfs_emit(buf, "%d %d %d %d\n", scsi_id, logical_drv,
ldid_map, app_hndl);
}
@@ -4068,5 +4058,3 @@ megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, cha
*/
module_init(megaraid_init);
module_exit(megaraid_exit);
-
-/* vim: set ts=8 sw=8 tw=78 ai si: */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 3e4347c6dab1..d2fe7f69cd5d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -230,5 +230,3 @@ typedef struct {
#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C)
#endif // _MEGARAID_H_
-
-// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e83163c66884..c509440bd161 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -95,7 +95,6 @@ mraid_mm_open(struct inode *inode, struct file *filep)
/**
* mraid_mm_ioctl - module entry-point for ioctls
- * @inode : inode (ignored)
* @filep : file operations pointer (ignored)
* @cmd : ioctl command
* @arg : user ioctl packet
@@ -239,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd;
uint32_t adapno;
int iterator;
-
+ bool is_found;
if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT;
@@ -255,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
adapter = NULL;
iterator = 0;
+ is_found = false;
list_for_each_entry(adapter, &adapters_list_g, list) {
- if (iterator++ == adapno) break;
+ if (iterator++ == adapno) {
+ is_found = true;
+ break;
+ }
}
- if (!adapter) {
+ if (!is_found) {
*rval = -ENODEV;
return NULL;
}
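
The new is_found flag closes a real hole: when list_for_each_entry() runs off the end of the list without a break, the iterator is not NULL — it is a container_of() computed from the list head itself — so the old `if (!adapter)` test could never detect the not-found case. The safe lookup pattern, as a generic sketch:

    #include <linux/list.h>
    #include <linux/types.h>

    struct item {
            int id;
            struct list_head list;
    };

    static struct item *find_item(struct list_head *head, int id)
    {
            struct item *it;
            bool found = false;

            list_for_each_entry(it, head, list) {
                    if (it->id == id) {
                            found = true;
                            break;
                    }
            }
            /* "it" is garbage if the loop completed; trust only the flag */
            return found ? it : NULL;
    }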
@@ -491,7 +494,7 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
}
/**
- * mraid_mm_attch_buf - Attach a free dma buffer for required size
+ * mraid_mm_attach_buf - Attach a free dma buffer for required size
* @adp : Adapter softstate
* @kioc : kioc that the buffer needs to be attached to
* @xferlen : required length for buffer
@@ -726,6 +729,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno;
int iterator;
mraid_mmadp_t* adapter;
+ bool is_found;
/*
* When the kioc returns from driver, make sure it still doesn't
@@ -748,19 +752,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0;
adapter = NULL;
adapno = kioc->adapno;
+ is_found = false;
con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n"));
list_for_each_entry(adapter, &adapters_list_g, list) {
- if (iterator++ == adapno) break;
+ if (iterator++ == adapno) {
+ is_found = true;
+ break;
+ }
}
kioc->timedout = 0;
- if (adapter) {
+ if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc );
- }
+
}
else {
wake_up(&wait_q);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 83d8c4cb1ad5..4919ea54b827 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,11 +18,13 @@
#ifndef LSI_MEGARAID_SAS_H
#define LSI_MEGARAID_SAS_H
+#include <scsi/scsi_cmnd.h>
+
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.713.01.00-rc1"
-#define MEGASAS_RELDATE "Dec 27, 2019"
+#define MEGASAS_VERSION "07.719.03.00-rc1"
+#define MEGASAS_RELDATE "Sep 29, 2021"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -511,7 +513,7 @@ union MR_PROGRESS {
*/
struct MR_PD_PROGRESS {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
@@ -537,7 +539,7 @@ struct MR_PD_PROGRESS {
};
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
@@ -2019,10 +2021,12 @@ union megasas_frame {
* struct MR_PRIV_DEVICE - sdev private hostdata
* @is_tm_capable: firmware managed tm_capable flag
* @tm_busy: TM request is in progress
+ * @sdev_priv_busy: pending command per sdev
*/
struct MR_PRIV_DEVICE {
bool is_tm_capable;
bool tm_busy;
+ atomic_t sdev_priv_busy;
atomic_t r1_ldio_hint;
u8 interface_type;
u8 task_abort_tmo;
@@ -2212,6 +2216,7 @@ struct megasas_irq_context {
struct irq_poll irqpoll;
bool irq_poll_scheduled;
bool irq_line_enable;
+ atomic_t in_used;
};
struct MR_DRV_SYSTEM_INFO {
@@ -2259,6 +2264,15 @@ enum MR_PERF_MODE {
(mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
"Unknown")
+enum MEGASAS_LD_TARGET_ID_STATUS {
+ LD_TARGET_ID_INITIAL,
+ LD_TARGET_ID_ACTIVE,
+ LD_TARGET_ID_DELETED,
+};
+
+#define MEGASAS_TARGET_ID(sdev) \
+ (((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id)
+
struct megasas_instance {
unsigned int *reply_map;
@@ -2323,6 +2337,9 @@ struct megasas_instance {
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
+ u8 ld_tgtid_status[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids_prev[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids_from_raidmap[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;
@@ -2446,6 +2463,7 @@ struct megasas_instance {
bool support_pci_lane_margining;
u8 low_latency_index_start;
int perf_mode;
+ int iopoll_q_count;
};
struct MR_LD_VF_MAP {
@@ -2542,6 +2560,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LUN_VALID(sdev) \
+ (((sdev)->lun == 0) ? 1 : 0)
+
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)
@@ -2578,6 +2599,16 @@ struct megasas_cmd {
};
};
+struct megasas_cmd_priv {
+ void *cmd_priv;
+ u8 status;
+};
+
+static inline struct megasas_cmd_priv *megasas_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#define MAX_MGMT_ADAPTERS 1024
#define MAX_IOCTL_SGE 16
@@ -2605,7 +2636,6 @@ struct megasas_aen {
u32 class_locale_word;
} __attribute__ ((packed));
-#ifdef CONFIG_COMPAT
struct compat_megasas_iocpacket {
u16 host_no;
u16 __pad1;
@@ -2621,7 +2651,6 @@ struct compat_megasas_iocpacket {
} __attribute__ ((packed));
#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
-#endif
#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
@@ -2709,4 +2738,25 @@ int megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
int ocr_context);
int megasas_irqpoll(struct irq_poll *irqpoll, int budget);
void megasas_dump_fusion_io(struct scsi_cmnd *scmd);
+u32 megasas_readl(struct megasas_instance *instance,
+ const volatile void __iomem *addr);
+struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance);
+void megasas_return_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+int megasas_issue_polled(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+void megaraid_sas_kill_hba(struct megasas_instance *instance);
+void megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+void megasas_start_timer(struct megasas_instance *instance);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial);
+int megasas_alloc_cmds(struct megasas_instance *instance);
+void megasas_free_cmds(struct megasas_instance *instance);
+
+void megasas_init_debugfs(void);
+void megasas_exit_debugfs(void);
+void megasas_setup_debugfs(struct megasas_instance *instance);
+void megasas_destroy_debugfs(struct megasas_instance *instance);
+int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index fd4b5ac6ac5b..d265a2d9d082 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -77,11 +78,11 @@ unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
-int smp_affinity_enable = 1;
+static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
-int rdpq_enable = 1;
+static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
@@ -89,7 +90,7 @@ unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
-unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
@@ -113,6 +114,19 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+ "This parameter is effective only if host_tagset_enable=1 &\n\t\t"
+ "It is not applicable for MFI_SERIES. &\n\t\t"
+ "Driver will work in latency mode. &\n\t\t"
+ "High iops queues are not allocated &\n\t\t"
+ );
+
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -127,6 +141,8 @@ static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
struct scsi_device *sdev);
+static void
+megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
/*
* PCI ID table for all supported controllers
@@ -199,13 +215,10 @@ static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;
/* define lock for aen poll */
-static spinlock_t poll_aen_lock;
+static DEFINE_SPINLOCK(poll_aen_lock);
extern struct dentry *megasas_debugfs_root;
-extern void megasas_init_debugfs(void);
-extern void megasas_exit_debugfs(void);
-extern void megasas_setup_debugfs(struct megasas_instance *instance);
-extern void megasas_destroy_debugfs(struct megasas_instance *instance);
+extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
@@ -425,16 +438,22 @@ megasas_decode_evt(struct megasas_instance *instance)
(class_locale.members.locale),
format_class(class_locale.members.class),
evt_detail->description);
+
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "evt_detail.args.ld.target_id/index %d/%d\n",
+ evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
+
}
-/**
-* The following functions are defined for xscale
-* (deviceid : 1064R, PERC5) controllers
-*/
+/*
+ * The following functions are defined for xscale
+ * (deviceid : 1064R, PERC5) controllers
+ */
/**
* megasas_enable_intr_xscale - Enables interrupts
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
@@ -450,7 +469,7 @@ megasas_enable_intr_xscale(struct megasas_instance *instance)
/**
* megasas_disable_intr_xscale -Disables interrupt
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
@@ -466,7 +485,7 @@ megasas_disable_intr_xscale(struct megasas_instance *instance)
/**
* megasas_read_fw_status_reg_xscale - returns the current FW status value
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
@@ -474,8 +493,8 @@ megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
return readl(&instance->reg_set->outbound_msg_0);
}
/**
- * megasas_clear_interrupt_xscale - Check & clear interrupt
- * @regs: MFI register set
+ * megasas_clear_intr_xscale - Check & clear interrupt
+ * @instance: Adapter soft state
*/
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
@@ -509,9 +528,10 @@ megasas_clear_intr_xscale(struct megasas_instance *instance)
/**
* megasas_fire_cmd_xscale - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
+ * @instance: Adapter soft state
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
*/
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
@@ -529,7 +549,8 @@ megasas_fire_cmd_xscale(struct megasas_instance *instance,
/**
* megasas_adp_reset_xscale - For controller reset
- * @regs: MFI register set
+ * @instance: Adapter soft state
+ * @regs: MFI register set
*/
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
@@ -570,7 +591,8 @@ megasas_adp_reset_xscale(struct megasas_instance *instance,
/**
* megasas_check_reset_xscale - For controller reset check
- * @regs: MFI register set
+ * @instance: Adapter soft state
+ * @regs: MFI register set
*/
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
@@ -599,19 +621,19 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
.issue_dcmd = megasas_issue_dcmd,
};
-/**
-* This is the end of set of functions & definitions specific
-* to xscale (deviceid : 1064R, PERC5) controllers
-*/
+/*
+ * This is the end of set of functions & definitions specific
+ * to xscale (deviceid : 1064R, PERC5) controllers
+ */
-/**
-* The following functions are defined for ppc (deviceid : 0x60)
-* controllers
-*/
+/*
+ * The following functions are defined for ppc (deviceid : 0x60)
+ * controllers
+ */
/**
* megasas_enable_intr_ppc - Enables interrupts
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
@@ -629,7 +651,7 @@ megasas_enable_intr_ppc(struct megasas_instance *instance)
/**
* megasas_disable_intr_ppc - Disable interrupt
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
@@ -645,7 +667,7 @@ megasas_disable_intr_ppc(struct megasas_instance *instance)
/**
* megasas_read_fw_status_reg_ppc - returns the current FW status value
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
@@ -654,8 +676,8 @@ megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
}
/**
- * megasas_clear_interrupt_ppc - Check & clear interrupt
- * @regs: MFI register set
+ * megasas_clear_intr_ppc - Check & clear interrupt
+ * @instance: Adapter soft state
*/
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
@@ -688,9 +710,10 @@ megasas_clear_intr_ppc(struct megasas_instance *instance)
/**
* megasas_fire_cmd_ppc - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
+ * @instance: Adapter soft state
+ * @frame_phys_addr: Physical address of cmd
+ * @frame_count: Number of frames for the command
+ * @regs: MFI register set
*/
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
@@ -708,7 +731,8 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
/**
* megasas_check_reset_ppc - For controller reset check
- * @regs: MFI register set
+ * @instance: Adapter soft state
+ * @regs: MFI register set
*/
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
@@ -738,7 +762,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
/**
* megasas_enable_intr_skinny - Enables interrupts
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
@@ -756,7 +780,7 @@ megasas_enable_intr_skinny(struct megasas_instance *instance)
/**
* megasas_disable_intr_skinny - Disables interrupt
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
@@ -772,7 +796,7 @@ megasas_disable_intr_skinny(struct megasas_instance *instance)
/**
* megasas_read_fw_status_reg_skinny - returns the current FW status value
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
@@ -781,8 +805,8 @@ megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
}
/**
- * megasas_clear_interrupt_skinny - Check & clear interrupt
- * @regs: MFI register set
+ * megasas_clear_intr_skinny - Check & clear interrupt
+ * @instance: Adapter soft state
*/
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
@@ -825,9 +849,10 @@ megasas_clear_intr_skinny(struct megasas_instance *instance)
/**
* megasas_fire_cmd_skinny - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
+ * @instance: Adapter soft state
+ * @frame_phys_addr: Physical address of cmd
+ * @frame_count: Number of frames for the command
+ * @regs: MFI register set
*/
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
@@ -847,7 +872,8 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
/**
* megasas_check_reset_skinny - For controller reset check
- * @regs: MFI register set
+ * @instance: Adapter soft state
+ * @regs: MFI register set
*/
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
@@ -876,14 +902,14 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
};
-/**
-* The following functions are defined for gen2 (deviceid : 0x78 0x79)
-* controllers
-*/
+/*
+ * The following functions are defined for gen2 (deviceid : 0x78 0x79)
+ * controllers
+ */
/**
* megasas_enable_intr_gen2 - Enables interrupts
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
@@ -902,7 +928,7 @@ megasas_enable_intr_gen2(struct megasas_instance *instance)
/**
* megasas_disable_intr_gen2 - Disables interrupt
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
@@ -918,7 +944,7 @@ megasas_disable_intr_gen2(struct megasas_instance *instance)
/**
* megasas_read_fw_status_reg_gen2 - returns the current FW status value
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
@@ -927,8 +953,8 @@ megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
}
/**
- * megasas_clear_interrupt_gen2 - Check & clear interrupt
- * @regs: MFI register set
+ * megasas_clear_intr_gen2 - Check & clear interrupt
+ * @instance: Adapter soft state
*/
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
@@ -961,11 +987,13 @@ megasas_clear_intr_gen2(struct megasas_instance *instance)
return mfiStatus;
}
+
/**
* megasas_fire_cmd_gen2 - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
+ * @instance: Adapter soft state
+ * @frame_phys_addr: Physical address of cmd
+ * @frame_count: Number of frames for the command
+ * @regs: MFI register set
*/
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
@@ -983,7 +1011,8 @@ megasas_fire_cmd_gen2(struct megasas_instance *instance,
/**
* megasas_adp_reset_gen2 - For controller reset
- * @regs: MFI register set
+ * @instance: Adapter soft state
+ * @reg_set: MFI register set
*/
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
@@ -1043,7 +1072,8 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
/**
* megasas_check_reset_gen2 - For controller reset check
- * @regs: MFI register set
+ * @instance: Adapter soft state
+ * @regs: MFI register set
*/
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
@@ -1071,10 +1101,10 @@ static struct megasas_instance_template megasas_instance_template_gen2 = {
.issue_dcmd = megasas_issue_dcmd,
};
-/**
-* This is the end of set of functions & definitions
-* specific to gen2 (deviceid : 0x78, 0x79) controllers
-*/
+/*
+ * This is the end of set of functions & definitions
+ * specific to gen2 (deviceid : 0x78, 0x79) controllers
+ */
/*
* Template added for TB (Fusion)
@@ -1421,10 +1451,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* pthru timeout to the os layer timeout value.
*/
if (scp->device->type == TYPE_TAPE) {
- if ((scp->request->timeout / HZ) > 0xFFFF)
+ if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
pthru->timeout = cpu_to_le16(0xFFFF);
else
- pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+ pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
}
/*
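
scsi_cmd_to_rq() is the accessor that replaced direct scp->request dereferences when the request back-pointer became midlayer-private. The clamp the hunk performs, as a standalone sketch (the cpu_to_le16() conversion is left to the caller here):

    #include <scsi/scsi_cmnd.h>

    /* clamp the block-layer timeout (jiffies) to a 16-bit seconds field */
    static u16 example_fw_timeout(struct scsi_cmnd *scp)
    {
            unsigned int secs = scsi_cmd_to_rq(scp)->timeout / HZ;

            return secs > 0xFFFF ? 0xFFFF : secs;
    }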
@@ -1609,7 +1639,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
/**
* megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
* and whether it's RW or non RW
- * @scmd: SCSI command
+ * @cmd: SCSI command
*
*/
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
@@ -1730,7 +1760,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
goto out_return_cmd;
cmd->scmd = scmd;
- scmd->SCp.ptr = (char *)cmd;
+ megasas_priv(scmd)->cmd_priv = cmd;
/*
* Issue the command to the FW
@@ -1749,21 +1779,22 @@ out_return_cmd:
/**
* megasas_queue_command - Queue entry point
+ * @shost: adapter SCSI host
* @scmd: SCSI command to be queued
- * @done: Callback entry point
*/
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
struct MR_PRIV_DEVICE *mr_device_priv_data;
+ u32 ld_tgt_id;
instance = (struct megasas_instance *)
scmd->device->host->hostdata;
if (instance->unload == 1) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -1778,22 +1809,26 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return SCSI_MLQUEUE_HOST_BUSY;
} else {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
}
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ mr_device_priv_data = scmd->device->hostdata;
+ if (!mr_device_priv_data ||
+ (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
- mr_device_priv_data = scmd->device->hostdata;
- if (!mr_device_priv_data) {
- scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
- return 0;
+ if (MEGASAS_IS_LOGICAL(scmd->device)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
+ if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ return 0;
+ }
}
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
@@ -1822,7 +1857,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return instance->instancet->build_and_issue_cmd(instance, scmd);
out_done:
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -1881,7 +1916,7 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
@@ -1982,9 +2017,9 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
if (is_target_prop) {
tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
- if (tgt_device_qd &&
- (tgt_device_qd <= instance->host->can_queue))
- device_qd = tgt_device_qd;
+ if (tgt_device_qd)
+ device_qd = min(instance->host->can_queue,
+ (int)tgt_device_qd);
}
if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
@@ -2073,7 +2108,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
static int megasas_slave_alloc(struct scsi_device *sdev)
{
- u16 pd_index = 0;
+ u16 pd_index = 0, ld_tgt_id;
struct megasas_instance *instance ;
struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -2091,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
+ } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return -ENXIO;
}
scan_target:
@@ -2098,6 +2136,14 @@ scan_target:
GFP_KERNEL);
if (!mr_device_priv_data)
return -ENOMEM;
+
+ if (MEGASAS_IS_LOGICAL(sdev)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
+ }
+
sdev->hostdata = mr_device_priv_data;
atomic_set(&mr_device_priv_data->r1_ldio_hint,
@@ -2107,6 +2153,23 @@ scan_target:
static void megasas_slave_destroy(struct scsi_device *sdev)
{
+ u16 ld_tgt_id;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ if (MEGASAS_IS_LOGICAL(sdev)) {
+ if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return;
+ }
+ ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ sdev_printk(KERN_INFO, sdev,
+ "LD target ID %d removed from OS stack\n", ld_tgt_id);
+ }
+
kfree(sdev->hostdata);
sdev->hostdata = NULL;
}
@@ -2727,7 +2790,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
reset_index, reset_cmd,
reset_cmd->scmd->cmnd[0]);
- reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+ scsi_done(reset_cmd->scmd);
megasas_return_cmd(instance, reset_cmd);
} else if (reset_cmd->sync_cmd) {
dev_notice(&instance->pdev->dev, "%p synch cmds"
@@ -2916,11 +2979,7 @@ megasas_dump(void *buf, int sz, int format)
/**
* megasas_dump_reg_set - This function will print hexdump of register set
- * @buf: Buffer to be dumped
- * @sz: Size in bytes
- * @format: Different formats of dumping e.g. format=n will
- * cause only 'n' 32 bit words to be dumped in a
- * single line.
+ * @reg_set: Register set to be dumped
*/
inline void
megasas_dump_reg_set(void __iomem *reg_set)
@@ -2940,11 +2999,10 @@ megasas_dump_reg_set(void __iomem *reg_set)
void
megasas_dump_fusion_io(struct scsi_cmnd *scmd)
{
- struct megasas_cmd_fusion *cmd;
+ struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct megasas_instance *instance;
- cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
instance = (struct megasas_instance *)scmd->device->host->hostdata;
scmd_printk(KERN_INFO, scmd,
@@ -2987,15 +3045,17 @@ megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
u32 __iomem *reg = (u32 __iomem *)reg_set;
for (i = 0; i < sz / sizeof(u32); i++) {
- bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
- "%08x: %08x\n", (i * 4),
- readl(&reg[i]));
+ bytes_wrote += scnprintf(loc + bytes_wrote,
+ PAGE_SIZE - bytes_wrote,
+ "%08x: %08x\n", (i * 4),
+ readl(&reg[i]));
}
return bytes_wrote;
}
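
The snprintf() to scnprintf() change above fixes two related problems: snprintf() returns the length that would have been written rather than what was written, and the old call also passed a fixed PAGE_SIZE instead of the space remaining. A sketch of the safe accumulation pattern:

#include <linux/kernel.h>
#include <linux/types.h>

/* Append formatted words to buf without ever running past buflen;
 * scnprintf() returns the bytes actually stored. */
static size_t dump_words(char *buf, size_t buflen, const u32 *vals, int n)
{
	size_t off = 0;
	int i;

	for (i = 0; i < n; i++)
		off += scnprintf(buf + off, buflen - off,
				 "%08x: %08x\n", i * 4, vals[i]);
	return off;
}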
/**
* megasas_reset_bus_host - Bus & host reset handler entry point
+ * @scmd: Mid-layer SCSI command
*/
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
{
@@ -3114,6 +3174,43 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static void megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+ int qoff = 0, offset;
+ struct blk_mq_queue_map *map;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ if (shost->nr_hw_queues == 1)
+ return;
+
+ offset = instance->low_latency_index_start;
+
+ /* Setup Default hctx */
+ map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ map->nr_queues = instance->msix_vectors - offset;
+ map->queue_offset = 0;
+ blk_mq_pci_map_queues(map, instance->pdev, offset);
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+
+ /* we never use the READ queue, so we can't cheat blk-mq */
+ shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
+
+ /* Setup Poll hctx */
+ map = &shost->tag_set.map[HCTX_TYPE_POLL];
+ map->nr_queues = instance->iopoll_q_count;
+ if (map->nr_queues) {
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq CPU mapping
+ */
+ map->queue_offset = qoff;
+ blk_mq_map_queues(map);
+ }
+}
+
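
The new map_queues callback partitions the hardware queues into blk-mq map types. A hedged sketch of the general pattern (names here are illustrative, not the driver's):

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>

/* Split n_irq interrupt-driven queues and n_poll IRQ-less poll
 * queues across the three map types; READ is intentionally empty. */
static void example_map_queues(struct blk_mq_tag_set *set,
			       struct pci_dev *pdev, int n_irq, int n_poll)
{
	struct blk_mq_queue_map *map = &set->map[HCTX_TYPE_DEFAULT];

	map->nr_queues = n_irq;
	map->queue_offset = 0;
	blk_mq_pci_map_queues(map, pdev, 0);	/* follow MSI-x affinity */

	set->map[HCTX_TYPE_READ].nr_queues = 0;

	map = &set->map[HCTX_TYPE_POLL];
	map->nr_queues = n_poll;
	map->queue_offset = n_irq;
	blk_mq_map_queues(map);		/* plain CPU spread, no IRQs */
}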
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3391,19 +3488,21 @@ static DEVICE_ATTR_RW(enable_sdev_max_qd);
static DEVICE_ATTR_RO(dump_system_regs);
static DEVICE_ATTR_RO(raid_map_id);
-static struct device_attribute *megaraid_host_attrs[] = {
- &dev_attr_fw_crash_buffer_size,
- &dev_attr_fw_crash_buffer,
- &dev_attr_fw_crash_state,
- &dev_attr_page_size,
- &dev_attr_ldio_outstanding,
- &dev_attr_fw_cmds_outstanding,
- &dev_attr_enable_sdev_max_qd,
- &dev_attr_dump_system_regs,
- &dev_attr_raid_map_id,
+static struct attribute *megaraid_host_attrs[] = {
+ &dev_attr_fw_crash_buffer_size.attr,
+ &dev_attr_fw_crash_buffer.attr,
+ &dev_attr_fw_crash_state.attr,
+ &dev_attr_page_size.attr,
+ &dev_attr_ldio_outstanding.attr,
+ &dev_attr_fw_cmds_outstanding.attr,
+ &dev_attr_enable_sdev_max_qd.attr,
+ &dev_attr_dump_system_regs.attr,
+ &dev_attr_raid_map_id.attr,
NULL,
};
+ATTRIBUTE_GROUPS(megaraid_host);
+
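
Converting shost_attrs to shost_groups follows the SCSI midlayer's move to attribute groups; ATTRIBUTE_GROUPS(megaraid_host) generates the megaraid_host_groups symbol referenced in the host template below. Roughly, the macro expands to:

#include <linux/sysfs.h>

static const struct attribute_group megaraid_host_group = {
	.attrs = megaraid_host_attrs,
};

static const struct attribute_group *megaraid_host_groups[] = {
	&megaraid_host_group,
	NULL,
};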
/*
* Scsi host template for megaraid_sas driver
*/
@@ -3420,10 +3519,13 @@ static struct scsi_host_template megasas_template = {
.eh_abort_handler = megasas_task_abort,
.eh_host_reset_handler = megasas_reset_bus_host,
.eh_timed_out = megasas_reset_timer,
- .shost_attrs = megaraid_host_attrs,
+ .shost_groups = megaraid_host_groups,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
+ .mq_poll = megasas_blk_mq_poll,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
+ .cmd_size = sizeof(struct megasas_cmd_priv),
};
/**
@@ -3467,6 +3569,22 @@ megasas_complete_abort(struct megasas_instance *instance,
}
}
+static void
+megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
+{
+ uint i;
+
+ for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
+ if (instance->ld_ids_prev[i] != 0xff &&
+ instance->ld_ids_from_raidmap[i] == 0xff) {
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "LD target ID %d removed from RAID map\n", i);
+ instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
+ }
+ }
+}
+
/**
* megasas_complete_cmd - Completes a command
* @instance: Adapter soft state
@@ -3491,7 +3609,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
cmd->retry_for_fw_reset = 0;
if (cmd->scmd)
- cmd->scmd->SCp.ptr = NULL;
+ megasas_priv(cmd->scmd)->cmd_priv = NULL;
switch (hdr->cmd) {
case MFI_CMD_INVALID:
@@ -3517,7 +3635,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
megasas_complete_int_cmd(instance, cmd);
break;
}
- /* fall through */
+ fallthrough;
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
@@ -3532,7 +3650,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
- cmd->scmd->scsi_done(cmd->scmd);
+ scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
@@ -3559,8 +3677,6 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->scmd->sense_buffer, cmd->sense,
hdr->sense_len);
-
- cmd->scmd->result |= DRIVER_SENSE << 24;
}
break;
@@ -3580,7 +3696,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
scsi_dma_unmap(cmd->scmd);
- cmd->scmd->scsi_done(cmd->scmd);
+ scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
break;
@@ -3629,9 +3745,13 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
fusion->fast_path_io = 0;
}
+ if (instance->adapter_type >= INVADER_SERIES)
+ megasas_set_ld_removed_by_fw(instance);
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
+
break;
}
if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
@@ -3776,7 +3896,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
megasas_register_aen(instance, seq_num, class_locale.word);
}
-/**
+/*
* Move the internal reset pending commands to a deferred queue.
*
* We move the commands pending at internal reset time to a
@@ -3784,7 +3904,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
* completion of the internal reset sequence. If the internal reset
* did not complete in time, the kernel reset handler would flush
* these commands.
- **/
+ */
static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
{
@@ -3828,9 +3948,9 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- atomic_read(&instance->adprecovery));
+ atomic_read(&instance->adprecovery));
return ;
}
@@ -3901,10 +4021,8 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
u32 mfiStatus;
u32 fw_state;
- if ((mfiStatus = instance->instancet->check_reset(instance,
- instance->reg_set)) == 1) {
+ if (instance->instancet->check_reset(instance, instance->reg_set) == 1)
return IRQ_HANDLED;
- }
mfiStatus = instance->instancet->clear_intr(instance);
if (mfiStatus == 0) {
@@ -3962,8 +4080,11 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
tasklet_schedule(&instance->isr_tasklet);
return IRQ_HANDLED;
}
+
/**
* megasas_isr - isr entry point
+ * @irq: IRQ number
+ * @devp: IRQ context address
*/
static irqreturn_t megasas_isr(int irq, void *devp)
{
@@ -3985,6 +4106,7 @@ static irqreturn_t megasas_isr(int irq, void *devp)
/**
* megasas_transition_to_ready - Move the FW to READY state
* @instance: Adapter soft state
+ * @ocr: Adapter reset state
*
* During initialization, the FW can potentially be in any one of
* several possible states. If the FW is in an operational or waiting-for-handshake
@@ -4350,8 +4472,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
return -ENOMEM;
}
- memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
-
for (i = 0; i < max_cmd; i++) {
instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
GFP_KERNEL);
@@ -4742,7 +4862,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
/**
* megasas_ld_list_query - Returns FW's ld_list structure
* @instance: Adapter soft state
- * @ld_list: ld_list structure
+ * @query_type: ld_list structure type
*
* Issues an internal command (DCMD) to get the FW's LD
* list structure. This information is mainly used to find out SYSTEM
@@ -4856,6 +4976,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
}
/**
+ * megasas_host_device_list_query - Returns the FW's host device list
* dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
* dcmd.mbox - reserved
* dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
@@ -5032,9 +5153,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
fusion->current_map_sz = ventura_map_sz;
fusion->max_map_sz = ventura_map_sz;
} else {
- fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- (instance->fw_supported_vd_count - 1));
+ fusion->old_map_sz =
+ struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap,
+ instance->fw_supported_vd_count);
fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
fusion->max_map_sz =
@@ -5133,7 +5254,7 @@ void megasas_get_snapdump_properties(struct megasas_instance *instance)
}
/**
- * megasas_get_controller_info - Returns FW's controller structure
+ * megasas_get_ctrl_info - Returns FW's controller structure
* @instance: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's controller structure.
@@ -5601,9 +5722,13 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ if (j < instance->low_latency_index_start)
+ irq_update_affinity_hint(
+ pci_irq_vector(pdev, j), NULL);
free_irq(pci_irq_vector(pdev, j),
&instance->irq_context[j]);
+ }
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
instance->msix_load_balance = false;
@@ -5641,6 +5766,9 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
+ if (i < instance->low_latency_index_start)
+ irq_update_affinity_hint(
+ pci_irq_vector(instance->pdev, i), NULL);
free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
}
@@ -5652,7 +5780,6 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
/**
* megasas_setup_jbod_map - setup jbod map for FP seq_number.
* @instance: Adapter soft state
- * @is_probe: Driver probe check
*
* Return 0 on success.
*/
@@ -5661,10 +5788,10 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
{
int i;
struct fusion_context *fusion = instance->ctrl_context;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq,
+ MAX_PHYSICAL_DEVICES);
instance->use_seqnum_jbod_fp =
instance->support_seqnum_jbod_fp;
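
struct_size() replaces the open-coded "sizeof(header) + (n - 1) * sizeof(element)" arithmetic: it computes the size of a structure with a trailing flexible array and saturates rather than wraps on overflow, and dropping the "- 1" reflects the array now being a proper flexible array member instead of a one-element placeholder. An illustrative reduction (types are made up):

#include <linux/overflow.h>
#include <linux/types.h>

struct seq_map {
	__le32 count;
	u64 seq[];			/* flexible array member */
};

static size_t seq_map_bytes(size_t nr)
{
	/* offsetof(struct seq_map, seq) + nr * sizeof(u64), checked */
	return struct_size((struct seq_map *)0, seq, nr);
}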
@@ -5747,10 +5874,6 @@ fallback:
static
int megasas_get_device_list(struct megasas_instance *instance)
{
- memset(instance->pd_list, 0,
- (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
- memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
if (instance->enable_fw_dev_list) {
if (megasas_host_device_list_query(instance, true))
return FAILED;
@@ -5771,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance)
}
/**
- * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
- * @instance: Adapter soft state
- * return: void
+ * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
+ * for high IOPS queues
+ * @instance: Adapter soft state
+ * return: void
*/
static inline void
-megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
{
int i;
- int local_numa_node;
+ unsigned int irq;
+ const struct cpumask *mask;
if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
- local_numa_node = dev_to_node(&instance->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
- for (i = 0; i < instance->low_latency_index_start; i++)
- irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
- cpumask_of_node(local_numa_node));
+ for (i = 0; i < instance->low_latency_index_start; i++) {
+ irq = pci_irq_vector(instance->pdev, i);
+ irq_set_affinity_and_hint(irq, mask);
+ }
}
}
@@ -5800,13 +5926,16 @@ __megasas_alloc_irq_vectors(struct megasas_instance *instance)
irq_flags = PCI_IRQ_MSIX;
if (instance->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
else
descp = NULL;
+ /* Do not allocate MSI-x vectors for poll_queues;
+ * msix_vectors always stays within the range of FW-supported
+ * reply queues.
+ */
i = pci_alloc_irq_vectors_affinity(instance->pdev,
instance->low_latency_index_start,
- instance->msix_vectors, irq_flags, descp);
+ instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp);
return i;
}
@@ -5822,10 +5951,30 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
int i;
unsigned int num_msix_req;
+ instance->iopoll_q_count = 0;
+ if ((instance->adapter_type != MFI_SERIES) &&
+ poll_queues) {
+
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ instance->low_latency_index_start = 1;
+
+ /* reserve for the default and non-managed pre-vectors. */
+ if (instance->msix_vectors > (poll_queues + 2))
+ instance->iopoll_q_count = poll_queues;
+ else
+ instance->iopoll_q_count = 0;
+
+ num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+ instance->msix_vectors = min(num_msix_req,
+ instance->msix_vectors);
+
+ }
+
i = __megasas_alloc_irq_vectors(instance);
- if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
- (i != instance->msix_vectors)) {
+ if (((instance->perf_mode == MR_BALANCED_PERF_MODE)
+ || instance->iopoll_q_count) &&
+ (i != (instance->msix_vectors - instance->iopoll_q_count))) {
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
/* Disable Balanced IOPS mode and try realloc vectors */
@@ -5836,12 +5985,15 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
instance->msix_vectors = min(num_msix_req,
instance->msix_vectors);
+ instance->iopoll_q_count = 0;
i = __megasas_alloc_irq_vectors(instance);
}
dev_info(&instance->pdev->dev,
- "requested/available msix %d/%d\n", instance->msix_vectors, i);
+ "requested/available msix %d/%d poll_queue %d\n",
+ instance->msix_vectors - instance->iopoll_q_count,
+ i, instance->iopoll_q_count);
if (i > 0)
instance->msix_vectors = i;
@@ -5849,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
instance->msix_vectors = 0;
if (instance->smp_affinity_enable)
- megasas_set_high_iops_queue_affinity_hint(instance);
+ megasas_set_high_iops_queue_affinity_and_hint(instance);
}
/**
@@ -6493,7 +6645,7 @@ dcmd_failed:
* megasas_register_aen - Registers for asynchronous event notification
* @instance: Adapter soft state
* @seq_num: The starting sequence number
- * @class_locale: Class of the event
+ * @class_locale_word: Class of the event
*
* This function subscribes for AEN for events beyond the @seq_num. It requests
* to be notified if and only if the event is of type @class_locale
@@ -6793,6 +6945,32 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+ /* Use the shared host tagset only for fusion adapters
+ * when managed interrupts are in use (SMP affinity enabled).
+ * kdump runs with a single MSI-x vector, so the shared host
+ * tagset is disabled there as well.
+ */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start + instance->iopoll_q_count;
+ if (instance->iopoll_q_count)
+ host->nr_maps = 3;
+ } else {
+ instance->iopoll_q_count = 0;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with default "
+ "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds,
+ host->nr_hw_queues - instance->iopoll_q_count,
+ instance->iopoll_q_count);
/*
* Notify the mid-layer about the new controller
*/
@@ -6967,22 +7145,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
@@ -7013,8 +7187,9 @@ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
* megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
* driver load time
*
- * @instance- Adapter soft instance
- * @return- O for SUCCESS
+ * @instance: Adapter soft instance
+ *
+ * @return: 0 for SUCCESS
*/
static inline
int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
@@ -7041,7 +7216,7 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
if (!fusion->ioc_init_request) {
dev_err(&pdev->dev,
- "Failed to allocate PD list buffer\n");
+ "Failed to allocate ioc init request\n");
return -ENOMEM;
}
@@ -7260,7 +7435,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
- megasas_dbg_lvl = 0;
instance->flag = 0;
instance->unload = 1;
instance->last_time = 0;
@@ -7423,11 +7597,16 @@ static int megasas_probe_one(struct pci_dev *pdev,
return 0;
fail_start_aen:
+ instance->unload = 1;
+ scsi_remove_host(instance->host);
fail_io_attach:
megasas_mgmt_info.count--;
megasas_mgmt_info.max_index--;
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
@@ -7435,8 +7614,16 @@ fail_io_attach:
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
+
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
+ instance->msix_vectors = 0;
+
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
+
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
fail_init_mfi:
scsi_host_put(host);
fail_alloc_instance:
@@ -7538,25 +7725,23 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
megasas_return_cmd(instance, cmd);
}
-#ifdef CONFIG_PM
/**
* megasas_suspend - driver suspend entry point
- * @pdev: PCI device structure
- * @state: PCI power state to suspend routine
+ * @dev: Device structure
*/
-static int
-megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+megasas_suspend(struct device *dev)
{
struct megasas_instance *instance;
- instance = pci_get_drvdata(pdev);
+ instance = dev_get_drvdata(dev);
if (!instance)
return 0;
instance->unload = 1;
- dev_info(&pdev->dev, "%s is called\n", __func__);
+ dev_info(dev, "%s is called\n", __func__);
/* Shutdown SR-IOV heartbeat timer */
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
@@ -7586,48 +7771,29 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
/**
* megasas_resume - driver resume entry point
- * @pdev: PCI device structure
+ * @dev: Device structure
*/
-static int
-megasas_resume(struct pci_dev *pdev)
+static int __maybe_unused
+megasas_resume(struct device *dev)
{
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
u32 status_reg;
- instance = pci_get_drvdata(pdev);
+ instance = dev_get_drvdata(dev);
if (!instance)
return 0;
host = instance->host;
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- dev_info(&pdev->dev, "%s is called\n", __func__);
- /*
- * PCI prepping: enable device set bus mastering and dma mask
- */
- rval = pci_enable_device_mem(pdev);
-
- if (rval) {
- dev_err(&pdev->dev, "Enable device failed\n");
- return rval;
- }
-
- pci_set_master(pdev);
+ dev_info(dev, "%s is called\n", __func__);
/*
* We expect the FW state to be READY
@@ -7753,14 +7919,8 @@ fail_reenable_msix:
fail_set_dma_mask:
fail_ready_state:
- pci_disable_device(pdev);
-
return -ENODEV;
}
-#else
-#define megasas_suspend NULL
-#define megasas_resume NULL
-#endif
static inline int
megasas_wait_for_adapter_operational(struct megasas_instance *instance)
@@ -7801,7 +7961,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
struct Scsi_Host *host;
struct megasas_instance *instance;
struct fusion_context *fusion;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
@@ -7873,9 +8033,9 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) *
- (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz =
+ struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0,
+ seq, MAX_PHYSICAL_DEVICES);
for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
@@ -7930,7 +8090,7 @@ skip_firing_dcmds:
/**
* megasas_shutdown - Shutdown entry point
- * @device: Generic device structure
+ * @pdev: PCI device structure
*/
static void megasas_shutdown(struct pci_dev *pdev)
{
@@ -7955,8 +8115,10 @@ skip_firing_dcmds:
pci_free_irq_vectors(instance->pdev);
}
-/**
+/*
* megasas_mgmt_open - char node "open" entry point
+ * @inode: char node inode
+ * @filep: char node file
*/
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
@@ -7969,8 +8131,11 @@ static int megasas_mgmt_open(struct inode *inode, struct file *filep)
return 0;
}
-/**
+/*
* megasas_mgmt_fasync - Async notifier registration from applications
+ * @fd: char node file descriptor number
+ * @filep: char node file
+ * @mode: notifier on/off
*
* This function adds the calling process to a driver global queue. When an
* event occurs, SIGIO will be sent to all processes in this queue.
@@ -7996,9 +8161,11 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
return rc;
}
-/**
+/*
* megasas_mgmt_poll - char node "poll" entry point
- * */
+ * @filep: char node file
+ * @wait: Events to poll for
+ */
static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
__poll_t mask;
@@ -8056,7 +8223,8 @@ static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
/**
* megasas_mgmt_fw_ioctl - Issues management ioctls to FW
* @instance: Adapter soft state
- * @argp: User's ioctl packet
+ * @user_ioc: User's ioctl packet
+ * @ioc: ioctl packet
*/
static int
megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
@@ -8071,7 +8239,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
int error = 0, i;
void *sense = NULL;
dma_addr_t sense_handle;
- unsigned long *sense_ptr;
+ void *sense_ptr;
u32 opcode = 0;
int ret = DCMD_SUCCESS;
@@ -8194,6 +8362,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
}
if (ioc->sense_len) {
+ /* make sure the pointer is part of the frame */
+ if (ioc->sense_off >
+ (sizeof(union megasas_frame) - sizeof(__le64))) {
+ error = -EINVAL;
+ goto out;
+ }
+
sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
&sense_handle, GFP_KERNEL);
if (!sense) {
@@ -8201,12 +8376,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
goto out;
}
- sense_ptr =
- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- if (instance->consistent_mask_64bit)
- *sense_ptr = cpu_to_le64(sense_handle);
- else
- *sense_ptr = cpu_to_le32(sense_handle);
+ /* always store 64 bits regardless of addressing */
+ sense_ptr = (void *)cmd->frame + ioc->sense_off;
+ put_unaligned_le64(sense_handle, sense_ptr);
}
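
Since the frame slot holding the sense-buffer DMA address sits at a caller-supplied byte offset, it may be unaligned; put_unaligned_le64() handles alignment and endianness in one call, and storing all 64 bits is harmless under a 32-bit consistent mask because the upper bytes are zero. Sketch:

#include <asm/unaligned.h>
#include <linux/types.h>

static void store_sense_handle(void *frame, u32 sense_off, u64 handle)
{
	/* little-endian 64-bit store at an arbitrary byte offset */
	put_unaligned_le64(handle, frame + sense_off);
}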
/*
@@ -8224,8 +8396,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
cmd->cmd_status_drv);
- error = -EBUSY;
- goto out;
+ error = -EBUSY;
+ goto out;
}
cmd->sync_cmd = 0;
@@ -8250,16 +8422,19 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* copy out the sense
*/
if (ioc->sense_len) {
+ void __user *uptr;
/*
* sense_ptr points to the location that has the user
* sense buffer address
*/
- sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
- ioc->sense_off);
+ sense_ptr = (void *)ioc->frame.raw + ioc->sense_off;
+ if (in_compat_syscall())
+ uptr = compat_ptr(get_unaligned((compat_uptr_t *)
+ sense_ptr));
+ else
+ uptr = get_unaligned((void __user **)sense_ptr);
- if (copy_to_user((void __user *)((unsigned long)
- get_unaligned((unsigned long *)sense_ptr)),
- sense, ioc->sense_len)) {
+ if (copy_to_user(uptr, sense, ioc->sense_len)) {
dev_err(&instance->pdev->dev, "Failed to copy out to user "
"sense data\n");
error = -EFAULT;
@@ -8302,6 +8477,38 @@ out:
return error;
}
+static struct megasas_iocpacket *
+megasas_compat_iocpacket_get_user(void __user *arg)
+{
+ struct megasas_iocpacket *ioc;
+ struct compat_megasas_iocpacket __user *cioc = arg;
+ size_t size;
+ int err = -EFAULT;
+ int i;
+
+ ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return ERR_PTR(-ENOMEM);
+ size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame);
+ if (copy_from_user(ioc, arg, size))
+ goto out;
+
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ compat_uptr_t iov_base;
+
+ if (get_user(iov_base, &cioc->sgl[i].iov_base) ||
+ get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len))
+ goto out;
+
+ ioc->sgl[i].iov_base = compat_ptr(iov_base);
+ }
+
+ return ioc;
+out:
+ kfree(ioc);
+ return ERR_PTR(err);
+}
+
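
This helper replaces the removed compat_alloc_user_space() staging: instead of rebuilding a native ioctl packet in user memory, the 32-bit layout is copied into a kernel struct and only the user pointers are widened. The key primitive:

#include <linux/compat.h>

/* compat_uptr_t is a 32-bit task's user pointer; compat_ptr()
 * widens it to a native void __user *. */
static void __user *widen_user_ptr(compat_uptr_t p32)
{
	return compat_ptr(p32);
}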
static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
{
struct megasas_iocpacket __user *user_ioc =
@@ -8310,7 +8517,11 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_instance *instance;
int error;
- ioc = memdup_user(user_ioc, sizeof(*ioc));
+ if (in_compat_syscall())
+ ioc = megasas_compat_iocpacket_get_user(user_ioc);
+ else
+ ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket));
+
if (IS_ERR(ioc))
return PTR_ERR(ioc);
@@ -8396,6 +8607,9 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
/**
* megasas_mgmt_ioctl - char node ioctl entry point
+ * @file: char device file pointer
+ * @cmd: ioctl command
+ * @arg: ioctl command arguments address
*/
static long
megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -8412,78 +8626,13 @@ megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
-static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
-{
- struct compat_megasas_iocpacket __user *cioc =
- (struct compat_megasas_iocpacket __user *)arg;
- struct megasas_iocpacket __user *ioc =
- compat_alloc_user_space(sizeof(struct megasas_iocpacket));
- int i;
- int error = 0;
- compat_uptr_t ptr;
- u32 local_sense_off;
- u32 local_sense_len;
- u32 user_sense_off;
-
- if (clear_user(ioc, sizeof(*ioc)))
- return -EFAULT;
-
- if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
- copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
- copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
- copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
- copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
- copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
- return -EFAULT;
-
- /*
- * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
- * sense_len is not null, so prepare the 64bit value under
- * the same condition.
- */
- if (get_user(local_sense_off, &ioc->sense_off) ||
- get_user(local_sense_len, &ioc->sense_len) ||
- get_user(user_sense_off, &cioc->sense_off))
- return -EFAULT;
-
- if (local_sense_off != user_sense_off)
- return -EINVAL;
-
- if (local_sense_len) {
- void __user **sense_ioc_ptr =
- (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
- compat_uptr_t *sense_cioc_ptr =
- (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
- if (get_user(ptr, sense_cioc_ptr) ||
- put_user(compat_ptr(ptr), sense_ioc_ptr))
- return -EFAULT;
- }
-
- for (i = 0; i < MAX_IOCTL_SGE; i++) {
- if (get_user(ptr, &cioc->sgl[i].iov_base) ||
- put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
- copy_in_user(&ioc->sgl[i].iov_len,
- &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
- return -EFAULT;
- }
-
- error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
-
- if (copy_in_user(&cioc->frame.hdr.cmd_status,
- &ioc->frame.hdr.cmd_status, sizeof(u8))) {
- printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
- return -EFAULT;
- }
- return error;
-}
-
static long
megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case MEGASAS_IOC_FIRMWARE32:
- return megasas_mgmt_compat_ioctl_fw(file, arg);
+ return megasas_mgmt_ioctl_fw(file, arg);
case MEGASAS_IOC_GET_AEN:
return megasas_mgmt_ioctl_aen(file, arg);
}
@@ -8507,6 +8656,8 @@ static const struct file_operations megasas_mgmt_fops = {
.llseek = noop_llseek,
};
+static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume);
+
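
With generic dev_pm_ops, the PCI core performs the pci_save_state()/pci_set_power_state()/pci_enable_device_mem() choreography that the legacy .suspend/.resume methods had to open-code, which is why those calls disappear from the handlers above; __maybe_unused replaces the CONFIG_PM #ifdef. The macro's shape, with hypothetical callbacks:

#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

/* Wires one suspend/resume pair into the suspend/freeze/poweroff
 * and resume/thaw/restore slots of a struct dev_pm_ops. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);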
/*
* PCI hotplug support registration structure
*/
@@ -8516,8 +8667,7 @@ static struct pci_driver megasas_pci_driver = {
.id_table = megasas_pci_table,
.probe = megasas_probe_one,
.remove = megasas_detach_one,
- .suspend = megasas_suspend,
- .resume = megasas_resume,
+ .driver.pm = &megasas_pm_ops,
.shutdown = megasas_shutdown,
};
@@ -8607,34 +8757,26 @@ static
int megasas_update_device_list(struct megasas_instance *instance,
int event_type)
{
- int dcmd_ret = DCMD_SUCCESS;
+ int dcmd_ret;
if (instance->enable_fw_dev_list) {
- dcmd_ret = megasas_host_device_list_query(instance, false);
- if (dcmd_ret != DCMD_SUCCESS)
- goto out;
+ return megasas_host_device_list_query(instance, false);
} else {
if (event_type & SCAN_PD_CHANNEL) {
dcmd_ret = megasas_get_pd_list(instance);
-
if (dcmd_ret != DCMD_SUCCESS)
- goto out;
+ return dcmd_ret;
}
if (event_type & SCAN_VD_CHANNEL) {
if (!instance->requestorId ||
- (instance->requestorId &&
- megasas_get_ld_vf_affiliation(instance, 0))) {
- dcmd_ret = megasas_ld_list_query(instance,
+ megasas_get_ld_vf_affiliation(instance, 0)) {
+ return megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
- if (dcmd_ret != DCMD_SUCCESS)
- goto out;
}
}
}
-
-out:
- return dcmd_ret;
+ return DCMD_SUCCESS;
}
/**
@@ -8733,8 +8875,10 @@ megasas_aen_polling(struct work_struct *work)
union megasas_evt_class_locale class_locale;
int event_type = 0;
u32 seq_num;
+ u16 ld_target_id;
int error;
u8 dcmd_ret = DCMD_SUCCESS;
+ struct scsi_device *sdev1;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -8757,12 +8901,23 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
- case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
+ ld_target_id = instance->evt_detail->args.ld.target_id;
+ sdev1 = scsi_device_lookup(instance->host,
+ MEGASAS_MAX_PD_CHANNELS +
+ (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
+ (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
+ 0);
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
+
+ event_type = SCAN_VD_CHANNEL;
+ break;
case MR_EVT_LD_CREATED:
event_type = SCAN_VD_CHANNEL;
break;
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
@@ -8841,6 +8996,7 @@ static int __init megasas_init(void)
msix_vectors = 1;
rdpq_enable = 0;
dual_qdepth_disable = 1;
+ poll_queues = 0;
}
/*
@@ -8848,8 +9004,7 @@ static int __init megasas_init(void)
*/
pr_info("megasas: %s\n", MEGASAS_VERSION);
- spin_lock_init(&poll_aen_lock);
-
+ megasas_dbg_lvl = 0;
support_poll_for_event = 2;
support_device_change = 1;
support_nvme_encapsulation = true;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 89c3685f5163..da1cad1ee123 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -80,21 +80,20 @@ u32 mega_mod64(u64 dividend, u32 divisor)
}
/**
- * @param dividend : Dividend
- * @param divisor : Divisor
+ * mega_div64_32 - Do a 64-bit division
+ * @dividend: Dividend
+ * @divisor: Divisor
*
* @return quotient
**/
-u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
- u32 remainder;
- u64 d;
+ u64 d = dividend;
if (!divisor)
printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n");
- d = dividend;
- remainder = do_div(d, divisor);
+ do_div(d, divisor);
return d;
}
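
do_div() divides its u64 lvalue in place and evaluates to the 32-bit remainder, so when only the quotient matters the remainder can simply be discarded, as the cleanup above does:

#include <asm/div64.h>
#include <linux/types.h>

static u64 div64_u32_example(u64 dividend, u32 divisor)
{
	do_div(dividend, divisor);  /* dividend now holds the quotient */
	return dividend;
}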
@@ -327,9 +326,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
else if (instance->supportmax256vd)
expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
else
- expected_size =
- (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+ expected_size = struct_size((struct MR_FW_RAID_MAP *)0,
+ ldSpanMap,
+ le16_to_cpu(pDrvRaidMap->ldCount));
if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
@@ -350,6 +349,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+ memcpy(instance->ld_ids_prev,
+ instance->ld_ids_from_raidmap,
+ sizeof(instance->ld_ids_from_raidmap));
+ memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS);
/*Convert Raid capability values to CPU arch */
for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
ld = MR_TargetIdToLdGet(i, drv_map);
@@ -360,14 +363,14 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
raid = MR_LdRaidGet(ld, drv_map);
le32_to_cpus((u32 *)&raid->capability);
-
+ instance->ld_ids_from_raidmap[i] = i;
num_lds--;
}
return 1;
}
-u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
@@ -417,7 +420,7 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* div_error - Divide error code.
*/
-u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
@@ -642,7 +645,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
}
/* This Function will return Phys arm */
-u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -785,7 +788,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
* span - Span number
* block - Absolute Block number in the physical disk
*/
-u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_DRV_RAID_MAP_ALL *map)
@@ -1342,7 +1345,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
}
}
-u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
struct LD_LOAD_BALANCE_INFO *lbInfo,
struct IO_REQUEST_INFO *io_info,
struct MR_DRV_RAID_MAP_ALL *drv_map)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b2ad96564484..6650f8c8e9b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -48,9 +48,6 @@
#include "megaraid_sas.h"
-extern void megasas_free_cmds(struct megasas_instance *instance);
-extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
- *instance);
extern void
megasas_complete_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, u8 alt_status);
@@ -58,24 +55,14 @@ int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
int seconds);
-void
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
-int megasas_alloc_cmds(struct megasas_instance *instance);
int
megasas_clear_intr_fusion(struct megasas_instance *instance);
-int
-megasas_issue_polled(struct megasas_instance *instance,
- struct megasas_cmd *cmd);
-void
-megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
-void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
int initial);
-void megasas_start_timer(struct megasas_instance *instance);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
@@ -84,15 +71,13 @@ static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
static void megasas_fusion_crash_dump(struct megasas_instance *instance);
-extern u32 megasas_readl(struct megasas_instance *instance,
- const volatile void __iomem *addr);
/**
* megasas_adp_reset_wait_for_ready - initiate chip reset and wait for
* controller to come to ready state
- * @instance - adapter's soft state
- * @do_adp_reset - If true, do a chip reset
- * @ocr_context - If called from OCR context this will
+ * @instance: adapter's soft state
+ * @do_adp_reset: If true, do a chip reset
+ * @ocr_context: If called from OCR context this will
* be set to 1, else 0
*
* This function initiates a chip reset followed by a wait for controller to
@@ -146,10 +131,10 @@ out:
/**
* megasas_check_same_4gb_region - check if allocation
* crosses same 4GB boundary or not
- * @instance - adapter's soft instance
- * start_addr - start address of DMA allocation
- * size - size of allocation in bytes
- * return - true : allocation does not cross same
+ * @instance: adapter's soft instance
+ * @start_addr: start address of DMA allocation
+ * @size: size of allocation in bytes
+ * @return: true : allocation does not cross same
* 4GB boundary
* false: allocation crosses same
* 4GB boundary
@@ -174,9 +159,9 @@ static inline bool megasas_check_same_4gb_region
/**
* megasas_enable_intr_fusion - Enables interrupts
- * @regs: MFI register set
+ * @instance: adapter's soft instance
*/
-void
+static void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
@@ -196,9 +181,9 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
/**
* megasas_disable_intr_fusion - Disables interrupt
- * @regs: MFI register set
+ * @instance: adapter's soft instance
*/
-void
+static void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
u32 mask = 0xFFFFFFFF;
@@ -235,9 +220,44 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
return 1;
}
+static inline void
+megasas_sdev_busy_inc(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ struct MR_PRIV_DEVICE *mr_device_priv_data =
+ scmd->device->hostdata;
+ atomic_inc(&mr_device_priv_data->sdev_priv_busy);
+ }
+}
+
+static inline void
+megasas_sdev_busy_dec(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ struct MR_PRIV_DEVICE *mr_device_priv_data =
+ scmd->device->hostdata;
+ atomic_dec(&mr_device_priv_data->sdev_priv_busy);
+ }
+}
+
+static inline int
+megasas_sdev_busy_read(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ struct MR_PRIV_DEVICE *mr_device_priv_data =
+ scmd->device->hostdata;
+ return atomic_read(&mr_device_priv_data->sdev_priv_busy);
+ }
+ return 0;
+}
+
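
These helpers exist because the shared host tagset added in this series makes blk-mq's per-hctx nr_active count commands for every device on the host, so the balanced-mode heuristic needs its own per-device counter. The pattern, reduced to its essentials:

#include <linux/atomic.h>

struct sdev_busy_example {
	atomic_t sdev_priv_busy;	/* outstanding cmds on this device */
};

static void on_submit(struct sdev_busy_example *p)
{
	atomic_inc(&p->sdev_priv_busy);
}

static void on_complete(struct sdev_busy_example *p)
{
	atomic_dec(&p->sdev_priv_busy);
}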
/**
* megasas_get_cmd_fusion - Get a command from the free pool
* @instance: Adapter soft state
+ * @blk_tag: Command tag
*
* Returns a blk_tag indexed mpt frame
*/
@@ -309,8 +329,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
/**
* megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
- * @instance: Adapter soft state
- * fw_boot_context: Whether this function called during probe or after OCR
+ * @instance: Adapter soft state
+ * @fw_boot_context: Whether this function is called during probe or after OCR
*
* This function is only for fusion controllers.
* Update the host can_queue if the firmware downgrades the max supported firmware commands.
@@ -371,26 +391,25 @@ megasas_get_msix_index(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd,
u8 data_arms)
{
- int sdev_busy;
-
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
-
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ (megasas_sdev_busy_read(instance, scmd) >
+ (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
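
The new nr_hw_queues > 1 branch derives the reply queue from the hardware queue the block layer chose. blk_mq_unique_tag() packs the hardware-queue index into the upper 16 bits of the tag:

#include <linux/blk-mq.h>

/* Recover the hw queue index that submitted this request; adding
 * low_latency_index_start maps it onto the matching MSI-x vector. */
static u16 hwq_of(struct request *rq)
{
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq));
}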
/**
@@ -694,6 +713,8 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ count += instance->iopoll_q_count;
+
fusion->reply_frames_desc_pool =
dma_pool_create("mr_reply", &instance->pdev->dev,
fusion->reply_alloc_sz * count, 16, 0);
@@ -788,6 +809,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
}
msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ msix_count += instance->iopoll_q_count;
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
@@ -970,9 +992,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1016,6 +1035,7 @@ fail_exit:
* wait_and_poll - Issues a polling command
* @instance: Adapter soft state
* @cmd: Command packet to be issued
+ * @seconds: Maximum poll time
*
* For polling, MFI requires the cmd_status to be set to 0xFF before posting.
*/
@@ -1115,8 +1135,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+ dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
@@ -1139,7 +1160,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
- IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+ IOCInitMessage->HostMSIxVectors = instance->msix_vectors + instance->iopoll_q_count;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
time = ktime_get_real();
@@ -1289,7 +1310,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
- pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
+ pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES);
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -1833,6 +1854,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
sizeof(union MPI2_SGE_IO_UNION))/16;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ count += instance->iopoll_q_count;
+
for (i = 0 ; i < count; i++)
fusion->last_reply_idx[i] = 0;
@@ -1845,6 +1868,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++)
+ atomic_set(&fusion->busy_mq_poll[i], 0);
+
if (megasas_alloc_ioc_init_frame(instance))
return 1;
@@ -1906,6 +1932,7 @@ fail_alloc_mfi_cmds:
/**
* megasas_fault_detect_work - Worker function of
* FW fault handling workqueue.
+ * @work: FW fault work struct
*/
static void
megasas_fault_detect_work(struct work_struct *work)
@@ -1989,11 +2016,13 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance)
/**
* map_cmd_status - Maps FW cmd status to OS cmd status
- * @cmd : Pointer to cmd
- * @status : status of cmd returned by FW
- * @ext_status : ext status of cmd returned by FW
+ * @fusion: fusion context
+ * @scmd: Pointer to cmd
+ * @status: status of cmd returned by FW
+ * @ext_status: ext status of cmd returned by FW
+ * @data_length: command data length
+ * @sense: command sense data
*/
-
static void
map_cmd_status(struct fusion_context *fusion,
struct scsi_cmnd *scmd, u8 status, u8 ext_status,
@@ -2018,11 +2047,8 @@ map_cmd_status(struct fusion_context *fusion,
scmd->result = (DID_OK << 16) | ext_status;
if (ext_status == SAM_STAT_CHECK_CONDITION) {
- memset(scmd->sense_buffer, 0,
- SCSI_SENSE_BUFFERSIZE);
memcpy(scmd->sense_buffer, sense,
SCSI_SENSE_BUFFERSIZE);
- scmd->result |= DRIVER_SENSE << 24;
}
/*
@@ -2070,7 +2096,6 @@ static bool
megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count)
{
- int i;
u32 data_length = 0;
struct scatterlist *sg_scmd;
bool build_prp = false;
@@ -2099,63 +2124,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
build_prp = true;
}
-/*
- * Below code detects gaps/holes in IO data buffers.
- * What does holes/gaps mean?
- * Any SGE except first one in a SGL starts at non NVME page size
- * aligned address OR Any SGE except last one in a SGL ends at
- * non NVME page size boundary.
- *
- * Driver has already informed block layer by setting boundary rules for
- * bio merging done at NVME page size boundary calling kernel API
- * blk_queue_virt_boundary inside slave_config.
- * Still there is possibility of IO coming with holes to driver because of
- * IO merging done by IO scheduler.
- *
- * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
- * IO scheduling so no IO merging.
- *
- * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
- * then sending IOs with holes.
- *
- * Though driver can request block layer to disable IO merging by calling-
- * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
- * user may tune sysfs parameter- nomerges again to 0 or 1.
- *
- * If in future IO scheduling is enabled with SCSI BLK MQ,
- * this algorithm to detect holes will be required in driver
- * for SCSI BLK MQ enabled case as well.
- *
- *
- */
- scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
- if ((i != 0) && (i != (sge_count - 1))) {
- if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
- mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == 0)) {
- if ((mega_mod64((sg_dma_address(sg_scmd) +
- sg_dma_len(sg_scmd)),
- mr_nvme_pg_size))) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == (sge_count - 1))) {
- if (mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
- }
-
return build_prp;
}
@@ -2292,7 +2260,7 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
* @scp: SCSI command from the mid-layer
* @sgl_ptr: SGL to be filled in
* @cmd: cmd we are working on
- * @sge_count sge count
+ * @sge_count: sge count
*
*/
static void
@@ -2401,9 +2369,12 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
/**
* megasas_set_pd_lba - Sets PD LBA
- * @cdb: CDB
+ * @io_request: IO request
* @cdb_len: cdb length
- * @start_blk: Start block of IO
+ * @io_info: IO information
+ * @scp: SCSI command
+ * @local_map_ptr: Raid map
+ * @ref_tag: Primary reference tag
*
* Used to set the PD LBA in CDB for FP IOs
*/
@@ -2661,10 +2632,12 @@ static void megasas_stream_detect(struct megasas_instance *instance,
* affinity (cpu of the controller) and raid_flags in the raid context
* based on IO type.
*
+ * @fusion: Fusion context
* @praid_context: IO RAID context
* @raid: LD raid map
* @fp_possible: Is fast path possible?
* @is_read: Is read IO?
+ * @scsi_buff_len: SCSI command buffer length
*
*/
static void
@@ -2940,7 +2913,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
get_updated_dev_handle(instance,
&fusion->load_balance_info[device_id],
&io_info, local_map_ptr);
- scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(scp)->status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
if (instance->adapter_type >= VENTURA_SERIES)
rctx_g35->span_arm = io_info.span_arm;
@@ -2948,7 +2921,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
rctx->span_arm = io_info.span_arm;
} else
- scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(scp)->status &= ~MEGASAS_LOAD_BALANCE_FLAG;
if (instance->adapter_type >= VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
@@ -2998,7 +2971,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
/**
* megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
* @instance: Adapter soft state
- * @scp: SCSI command
+ * @scmd: SCSI command
* @cmd: Command to be prepared
*
* Prepares the io_request frame for non-rw io cmds for vd.
@@ -3048,7 +3021,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
io_request->DevHandle = cpu_to_le16(device_id);
io_request->LUN[1] = scmd->device->lun;
pRAID_Context->timeout_value =
- cpu_to_le16 (scmd->request->timeout / HZ);
+ cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -3086,7 +3059,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/**
* megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
* @instance: Adapter soft state
- * @scp: SCSI command
+ * @scmd: SCSI command
* @cmd: Command to be prepared
* @fp_possible: parameter to detect fast path or firmware path io.
*
@@ -3111,7 +3084,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(scmd);
pd_index = MEGASAS_PD_INDEX(scmd);
- os_timeout_value = scmd->request->timeout / HZ;
+ os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ;
mr_device_priv_data = scmd->device->hostdata;
cmd->pd_interface = mr_device_priv_data->interface_type;
@@ -3226,7 +3199,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
- u8 cmd_type;
+ u16 pd_index = 0;
+ u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scp->device->hostdata;
@@ -3250,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- switch (cmd_type = megasas_cmd_type(scp)) {
+ switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
@@ -3261,8 +3235,12 @@ megasas_build_io_fusion(struct megasas_instance *instance,
megasas_build_syspd_fusion(instance, scp, cmd, true);
break;
case NON_READ_WRITE_SYSPDIO:
- if (instance->secure_jbod_support ||
- mr_device_priv_data->is_tm_capable)
+ pd_index = MEGASAS_PD_INDEX(scp);
+ drive_type = instance->pd_list[pd_index].driveType;
+ if ((instance->secure_jbod_support ||
+ mr_device_priv_data->is_tm_capable) ||
+ (instance->adapter_type >= VENTURA_SERIES &&
+ drive_type == TYPE_ENCLOSURE))
megasas_build_syspd_fusion(instance, scp, cmd, false);
else
megasas_build_syspd_fusion(instance, scp, cmd, true);
@@ -3312,7 +3290,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
- scp->SCp.ptr = (char *)cmd;
+ megasas_priv(scp)->cmd_priv = cmd;
return 0;
}
@@ -3400,7 +3378,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
return SCSI_MLQUEUE_HOST_BUSY;
}
- cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+ cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag);
if (!cmd) {
atomic_dec(&instance->fw_outstanding);
@@ -3441,7 +3419,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
*/
if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
r1_cmd = megasas_get_cmd_fusion(instance,
- (scmd->request->tag + instance->max_fw_cmds));
+ scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds);
megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
}
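
scsi_cmd_to_rq() is the replacement for the removed scmd->request pointer; everything the driver took from the request (here, the blk-mq tag indexing the fusion frame pool) is reached through it:

#include <scsi/scsi_cmnd.h>

static int fusion_frame_index(struct scsi_cmnd *scmd)
{
	return scsi_cmd_to_rq(scmd)->tag;	/* blk-mq tag for this cmd */
}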
@@ -3450,6 +3428,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
* Issue the command to the FW
*/
+ megasas_sdev_busy_inc(instance, scmd);
megasas_fire_cmd_fusion(instance, req_desc);
if (r1_cmd)
@@ -3463,7 +3442,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
* megasas_complete_r1_command -
 * completes R1 FP write commands which have a valid peer smid
* @instance: Adapter soft state
- * @cmd_fusion: MPT command frame
+ * @cmd: MPT command frame
*
*/
static inline void
@@ -3507,16 +3486,55 @@ megasas_complete_r1_command(struct megasas_instance *instance,
if (instance->ldio_threshold &&
megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
atomic_dec(&instance->ldio_outstanding);
- scmd_local->SCp.ptr = NULL;
+ megasas_priv(scmd_local)->cmd_priv = NULL;
megasas_return_cmd_fusion(instance, cmd);
scsi_dma_unmap(scmd_local);
- scmd_local->scsi_done(scmd_local);
+ megasas_sdev_busy_dec(instance, scmd_local);
+ scsi_done(scmd_local);
}
}
/**
+ * access_irq_context: Access to reply processing
+ * @irq_context: IRQ context
+ *
+ * Synchronize access to reply processing.
+ *
+ * Return: true if access was acquired (or no context was given), false otherwise.
+ */
+static inline
+bool access_irq_context(struct megasas_irq_context *irq_context)
+{
+ if (!irq_context)
+ return true;
+
+ if (atomic_add_unless(&irq_context->in_used, 1, 1))
+ return true;
+
+ return false;
+}
+
+/**
+ * release_irq_context: Release reply processing
+ * @irq_context: IRQ context
+ *
+ * Release access of reply processing.
+ *
+ * Return: Nothing.
+ */
+static inline
+void release_irq_context(struct megasas_irq_context *irq_context)
+{
+ if (irq_context)
+ atomic_dec(&irq_context->in_used);
+}
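
atomic_add_unless(v, 1, 1) increments v only when it does not already equal 1, so this pair behaves as a per-queue try-lock around reply processing. A minimal usage sketch:

	if (access_irq_context(irq_context)) {
		/* this context now owns reply processing for the queue */
		release_irq_context(irq_context);
	}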
+
+/**
* complete_cmd_fusion - Completes command
* @instance: Adapter soft state
+ * @MSIxIndex: MSI number
+ * @irq_context: IRQ context
+ *
 * Completes all commands in the reply descriptor queue
*/
static int
@@ -3544,6 +3562,9 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return IRQ_HANDLED;
+ if (!access_irq_context(irq_context))
+ return 0;
+
desc = fusion->reply_frames_desc[MSIxIndex] +
fusion->last_reply_idx[MSIxIndex];
@@ -3554,8 +3575,10 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
reply_descript_type = reply_desc->ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
- if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ release_irq_context(irq_context);
return IRQ_NONE;
+ }
num_completed = 0;
@@ -3587,14 +3610,15 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
if (fusion->load_balance_info &&
- (cmd_fusion->scmd->SCp.Status &
+ (megasas_priv(cmd_fusion->scmd)->status &
MEGASAS_LOAD_BALANCE_FLAG)) {
device_id = MEGASAS_DEV_INDEX(scmd_local);
lbinfo = &fusion->load_balance_info[device_id];
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
- cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(cmd_fusion->scmd)->status &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
}
- /* Fall through - and complete IO */
+ fallthrough; /* and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
atomic_dec(&instance->fw_outstanding);
if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
@@ -3604,10 +3628,11 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
if (instance->ldio_threshold &&
(megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
atomic_dec(&instance->ldio_outstanding);
- scmd_local->SCp.ptr = NULL;
+ megasas_priv(scmd_local)->cmd_priv = NULL;
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
- scmd_local->scsi_done(scmd_local);
+ megasas_sdev_busy_dec(instance, scmd_local);
+ scsi_done(scmd_local);
} else /* Optimal VD - R1 FP command completion. */
megasas_complete_r1_command(instance, cmd_fusion);
break;
@@ -3670,6 +3695,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
irq_context->irq_line_enable = true;
irq_poll_sched(&irq_context->irqpoll);
}
+ release_irq_context(irq_context);
return num_completed;
}
}
@@ -3687,11 +3713,37 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
instance->reply_post_host_index_addr[0]);
megasas_check_and_restore_queue_depth(instance);
}
+
+ release_irq_context(irq_context);
+
return num_completed;
}
+int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct megasas_instance *instance;
+ int num_entries = 0;
+ struct fusion_context *fusion;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ fusion = instance->ctrl_context;
+
+ queue_num = queue_num + instance->low_latency_index_start;
+
+ if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1))
+ return 0;
+
+ num_entries = complete_cmd_fusion(instance, queue_num, NULL);
+ atomic_dec(&fusion->busy_mq_poll[queue_num]);
+
+ return num_entries;
+}
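
megasas_blk_mq_poll() is the driver's blk-mq poll callback for polled I/O queues; elsewhere in the series it is hooked into the SCSI host template. A sketch of the wiring, assuming the template is the driver's megasas_template:

static struct scsi_host_template megasas_template = {
	/* ...existing fields... */
	.mq_poll = megasas_blk_mq_poll,
};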
+
/**
* megasas_enable_irq_poll() - enable irqpoll
+ * @instance: Adapter soft state
*/
static void megasas_enable_irq_poll(struct megasas_instance *instance)
{
@@ -3708,7 +3760,7 @@ static void megasas_enable_irq_poll(struct megasas_instance *instance)
/**
* megasas_sync_irqs - Synchronizes all IRQs owned by adapter
- * @instance: Adapter soft state
+ * @instance_addr: Adapter soft state address
*/
static void megasas_sync_irqs(unsigned long instance_addr)
{
@@ -3726,6 +3778,7 @@ static void megasas_sync_irqs(unsigned long instance_addr)
if (irq_ctx->irq_poll_scheduled) {
irq_ctx->irq_poll_scheduled = false;
enable_irq(irq_ctx->os_irq);
+ complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
}
}
}
@@ -3748,7 +3801,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
instance = irq_ctx->instance;
if (irq_ctx->irq_line_enable) {
- disable_irq(irq_ctx->os_irq);
+ disable_irq_nosync(irq_ctx->os_irq);
irq_ctx->irq_line_enable = false;
}
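
disable_irq() waits for any running handler to finish and so must not be called from the handler itself; disable_irq_nosync() only masks the line. For reference, both are declared in <linux/interrupt.h>:

void disable_irq(unsigned int irq);		/* masks and synchronizes */
void disable_irq_nosync(unsigned int irq);	/* masks without waiting */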
@@ -3757,6 +3810,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
irq_poll_complete(irqpoll);
irq_ctx->irq_poll_scheduled = false;
enable_irq(irq_ctx->os_irq);
+ complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
}
return num_entries;
@@ -3764,7 +3818,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
/**
* megasas_complete_cmd_dpc_fusion - Completes command
- * @instance: Adapter soft state
+ * @instance_addr: Adapter soft state address
*
* Tasklet to complete cmds
*/
@@ -3773,6 +3827,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
+ struct megasas_irq_context *irq_ctx = NULL;
u32 count, MSIxIndex;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
@@ -3781,12 +3836,16 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
- for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
- complete_cmd_fusion(instance, MSIxIndex, NULL);
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) {
+ irq_ctx = &instance->irq_context[MSIxIndex];
+ complete_cmd_fusion(instance, MSIxIndex, irq_ctx);
+ }
}
/**
* megasas_isr_fusion - isr entry point
+ * @irq: IRQ number
+ * @devp: IRQ context
*/
static irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
@@ -3797,10 +3856,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp)
if (instance->mask_interrupts)
return IRQ_NONE;
-#if defined(ENABLE_IRQ_POLL)
if (irq_context->irq_poll_scheduled)
return IRQ_HANDLED;
-#endif
if (!instance->msix_vectors) {
mfiStatus = instance->instancet->clear_intr(instance);
@@ -3821,7 +3878,7 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp)
/**
 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
* @instance: Adapter soft state
- * mfi_cmd: megasas_cmd pointer
+ * @mfi_cmd: megasas_cmd pointer
*
*/
static void
@@ -3938,7 +3995,7 @@ megasas_release_fusion(struct megasas_instance *instance)
/**
* megasas_read_fw_status_reg_fusion - returns the current FW status value
- * @regs: MFI register set
+ * @instance: Adapter soft state
*/
static u32
megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
@@ -3949,7 +4006,7 @@ megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
/**
* megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
* @instance: Controller's soft instance
- * return: Number of allocated host crash buffers
+ * @return: Number of allocated host crash buffers
*/
static void
megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
@@ -3976,8 +4033,7 @@ megasas_free_host_crash_buffer(struct megasas_instance *instance)
{
unsigned int i;
for (i = 0; i < instance->drv_buf_alloc; i++) {
- if (instance->crash_buf[i])
- vfree(instance->crash_buf[i]);
+ vfree(instance->crash_buf[i]);
}
instance->drv_buf_index = 0;
instance->drv_buf_alloc = 0;
@@ -3987,6 +4043,7 @@ megasas_free_host_crash_buffer(struct megasas_instance *instance)
/**
* megasas_adp_reset_fusion - For controller reset
+ * @instance: Controller's soft instance
* @regs: MFI register set
*/
static int
@@ -4064,6 +4121,7 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
/**
* megasas_check_reset_fusion - For controller reset check
+ * @instance: Controller's soft instance
* @regs: MFI register set
*/
static int
@@ -4218,6 +4276,8 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
fusion = instance->ctrl_context;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ count += instance->iopoll_q_count;
+
for (i = 0 ; i < count ; i++) {
fusion->last_reply_idx[i] = 0;
reply_desc = fusion->reply_frames_desc[i];
@@ -4230,7 +4290,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
bool return_ioctl)
{
int j;
@@ -4238,8 +4298,9 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
- bool refire_cmd = 0;
+ bool refire_cmd = false;
u8 result;
u32 opcode = 0;
@@ -4305,6 +4366,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
result = COMPLETE_CMD;
}
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
+ result = RETURN_CMD;
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4387,8 +4453,8 @@ static int megasas_track_scsiio(struct megasas_instance *instance,
/**
* megasas_tm_response_code - translation of device response code
- * @ioc: per adapter object
- * @mpi_reply: MPI reply returned by firmware
+ * @instance: Controller's soft instance
+ * @mpi_reply: MPI reply returned by firmware
*
* Return nothing.
*/
@@ -4443,9 +4509,9 @@ megasas_tm_response_code(struct megasas_instance *instance,
* @device_handle: device handle
* @channel: the channel assigned by the OS
* @id: the id assigned by the OS
- * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
* @smid_task: smid assigned to the task
- * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
+ * @mr_device_priv_data: private data
* Context: user
*
 * MegaRaid uses the MPT interface for Task Management requests.
@@ -4533,7 +4599,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);
@@ -4713,12 +4778,12 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
@@ -4783,12 +4848,12 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
@@ -4946,7 +5011,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
- scmd_local->scsi_done(scmd_local);
+ scsi_done(scmd_local);
}
}
@@ -5032,8 +5097,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (instance->adapter_type >= VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
- 0, sizeof(struct LD_STREAM_DETECT));
- fusion->stream_detect_by_ld[j]->mru_bit_map
+ 0, sizeof(struct LD_STREAM_DETECT));
+ fusion->stream_detect_by_ld[j]->mru_bit_map
= MR_STREAM_BITMAP;
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d57ecc7f88d8..49e9a9048ee7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -774,7 +774,7 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_CPU_AFFINITY_MASK {
union {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u8 hw_path:1;
u8 cpu0:1;
u8 cpu1:1;
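__BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN_BITFIELD come from <asm/byteorder.h> and are the kernel's canonical guards for bit-field layout, replacing the driver-private MFI_BIG_ENDIAN. Illustrative pattern (field names made up):

struct example_bits {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 hw_path:1;
	u8 reserved:7;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:7;
	u8 hw_path:1;
#endif
};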
@@ -866,7 +866,7 @@ struct MR_LD_RAID {
__le16 seqNum;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 ldSyncRequired:1;
u32 regTypeReqOnReadIsValid:1;
u32 isEPD:1;
@@ -889,7 +889,7 @@ struct {
/* 0x30 - 0x33, Logical block size for the LD */
u32 logical_block_length;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
u32 ld_pi_exp:4;
/* 0x34, LOGICAL BLOCKS PER PHYSICAL
@@ -942,7 +942,7 @@ struct MR_FW_RAID_MAP {
u8 reserved2[7];
struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
struct IO_REQUEST_INFO {
@@ -1053,7 +1053,7 @@ struct MR_FW_RAID_MAP_DYNAMIC {
struct MR_RAID_MAP_DESC_TABLE
raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
/* Variable Size buffer containing all data */
- u32 raid_map_desc_data[1];
+ u32 raid_map_desc_data[];
}; /* Dynamically sized RAID Map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -1148,7 +1148,7 @@ typedef struct LOG_BLOCK_SPAN_INFO {
struct MR_FW_RAID_MAP_ALL {
struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
@@ -1182,7 +1182,7 @@ struct MR_DRV_RAID_MAP {
devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
@@ -1193,7 +1193,7 @@ struct MR_DRV_RAID_MAP {
struct MR_DRV_RAID_MAP_ALL {
struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
} __packed;
@@ -1249,7 +1249,7 @@ struct MR_PD_CFG_SEQ {
struct MR_PD_CFG_SEQ_NUM_SYNC {
__le32 size;
__le32 count;
- struct MR_PD_CFG_SEQ seq[1];
+ struct MR_PD_CFG_SEQ seq[];
} __packed;
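
The one-element arrays become C99 flexible array members, which contribute no sizeof(); that is also why the _ALL wrapper structs above grow their ldSpanMap from MAX - 1 to MAX entries. Allocation sizing then goes through struct_size() from <linux/overflow.h>, e.g. (num_pds is illustrative):

	struct MR_PD_CFG_SEQ_NUM_SYNC *sync;

	/* sizeof(*sync) + num_pds * sizeof(sync->seq[0]), overflow-checked */
	sync = kzalloc(struct_size(sync, seq, num_pds), GFP_KERNEL);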
/* stream detection */
@@ -1303,6 +1303,8 @@ struct fusion_context {
u8 *sense;
dma_addr_t sense_phys_addr;
+ atomic_t busy_mq_poll[MAX_MSIX_QUEUES_FUSION];
+
dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 74fb50644678..84b541a57b7b 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -31,14 +31,14 @@
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/pgtable.h>
#include <asm/dbdma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
@@ -342,15 +342,6 @@ static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
}
-/*
- * Complete a SCSI command
- */
-static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
-{
- (*cmd->scsi_done)(cmd);
-}
-
-
/* Called with meshinterrupt disabled, initialize the chipset
* and eventually do the initial bus reset. The lock must not be
* held since we can schedule.
@@ -595,9 +586,12 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
- cmd->result = (ms->stat << 16) | cmd->SCp.Status;
+ struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
+
+ set_host_byte(cmd, ms->stat);
+ set_status_byte(cmd, mcmd->status);
if (ms->stat == DID_OK)
- cmd->result |= cmd->SCp.Message << 8;
+ scsi_msg_to_host_byte(cmd, mcmd->message);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
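
The open-coded result word gives way to the scsi_cmnd accessors; for reference, they are defined in include/scsi/scsi_cmnd.h roughly as:

static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
{
	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}

static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
{
	cmd->result = (cmd->result & 0xffffff00) | status;
}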
@@ -611,8 +605,8 @@ static void mesh_done(struct mesh_state *ms, int start_next)
}
#endif
}
- cmd->SCp.this_residual -= ms->data_ptr;
- mesh_completed(ms, cmd);
+ mcmd->this_residual -= ms->data_ptr;
+ scsi_done(cmd);
}
if (start_next) {
out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
@@ -993,9 +987,9 @@ static void handle_reset(struct mesh_state *ms)
for (tgt = 0; tgt < 8; ++tgt) {
tp = &ms->tgts[tgt];
if ((cmd = tp->current_req) != NULL) {
- cmd->result = DID_RESET << 16;
+ set_host_byte(cmd, DID_RESET);
tp->current_req = NULL;
- mesh_completed(ms, cmd);
+ scsi_done(cmd);
}
ms->tgts[tgt].sdtr_state = do_sdtr;
ms->tgts[tgt].sync_params = ASYNC_PARAMS;
@@ -1003,8 +997,8 @@ static void handle_reset(struct mesh_state *ms)
ms->current_req = NULL;
while ((cmd = ms->request_q) != NULL) {
ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
- cmd->result = DID_RESET << 16;
- mesh_completed(ms, cmd);
+ set_host_byte(cmd, DID_RESET);
+ scsi_done(cmd);
}
ms->phase = idle;
ms->msgphase = msg_none;
@@ -1045,6 +1039,8 @@ static void handle_error(struct mesh_state *ms)
while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
udelay(1);
printk("done\n");
+ if (ms->dma_started)
+ halt_dma(ms);
handle_reset(ms);
/* request_q is empty, no point in mesh_start() */
return;
@@ -1177,7 +1173,7 @@ static void handle_msgin(struct mesh_state *ms)
if (ms->n_msgin < msgin_length(ms))
goto reject;
if (cmd)
- cmd->SCp.Message = code;
+ mesh_priv(cmd)->message = code;
switch (code) {
case COMMAND_COMPLETE:
break;
@@ -1268,7 +1264,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
if (cmd) {
int nseg;
- cmd->SCp.this_residual = scsi_bufflen(cmd);
+ mesh_priv(cmd)->this_residual = scsi_bufflen(cmd);
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
@@ -1357,7 +1353,8 @@ static void halt_dma(struct mesh_state *ms)
ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
ms->tgts[ms->conn_tgt].data_goes_out);
}
- scsi_dma_unmap(cmd);
+ if (cmd)
+ scsi_dma_unmap(cmd);
ms->dma_started = 0;
}
@@ -1454,7 +1451,7 @@ static void cmd_complete(struct mesh_state *ms)
/* huh? we expected a phase mismatch */
ms->n_msgin = 0;
ms->msgphase = msg_in;
- /* fall through */
+ fallthrough;
case msg_in:
/* should have some message bytes in fifo */
@@ -1597,10 +1594,12 @@ static void cmd_complete(struct mesh_state *ms)
break;
case statusing:
if (cmd) {
- cmd->SCp.Status = mr->fifo;
+ struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
+
+ mcmd->status = mr->fifo;
if (DEBUG_TARGET(cmd))
printk(KERN_DEBUG "mesh: status is %x\n",
- cmd->SCp.Status);
+ mcmd->status);
}
ms->msgphase = msg_in;
break;
@@ -1626,11 +1625,10 @@ static void cmd_complete(struct mesh_state *ms)
* Called by midlayer with host locked to queue a new
* request
*/
-static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int mesh_queue_lck(struct scsi_cmnd *cmd)
{
struct mesh_state *ms;
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
ms = (struct mesh_state *) cmd->device->host->hostdata;
@@ -1712,6 +1710,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd)
spin_lock_irqsave(ms->host->host_lock, flags);
+ if (ms->dma_started)
+ halt_dma(ms);
+
/* Reset the controller & dbdma channel */
out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
out_8(&mr->exception, 0xff); /* clear all exception bits */
@@ -1840,6 +1841,7 @@ static struct scsi_host_template mesh_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.max_segment_size = 65535,
+ .cmd_size = sizeof(struct mesh_cmd_priv),
};
static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
@@ -1880,11 +1882,6 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
goto out_release;
}
- /* Old junk for root discovery, that will die ultimately */
-#if !defined(MODULE)
- note_scsi_host(mesh, mesh_host);
-#endif
-
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
diff --git a/drivers/scsi/mesh.h b/drivers/scsi/mesh.h
index ee53c05ace95..f70181acceac 100644
--- a/drivers/scsi/mesh.h
+++ b/drivers/scsi/mesh.h
@@ -8,6 +8,17 @@
#ifndef _MESH_H
#define _MESH_H
+struct mesh_cmd_priv {
+ int this_residual;
+ int message;
+ int status;
+};
+
+static inline struct mesh_cmd_priv *mesh_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/*
* Registers in the MESH controller.
*/
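
mesh_priv() depends on the midlayer allocating .cmd_size bytes of per-command driver data directly behind each struct scsi_cmnd (see the .cmd_size addition to mesh_template above); scsi_cmd_priv() just returns that trailing area:

static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
{
	return cmd + 1;
}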
diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig
new file mode 100644
index 000000000000..f48740cd5b95
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_MPI3MR
+ tristate "Broadcom MPI3 Storage Controller Device Driver"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ select SCSI_SAS_ATTRS
+ help
+	  MPI3-based Storage & RAID controller driver.
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
new file mode 100644
index 000000000000..ef86ca46646b
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -0,0 +1,6 @@
+# mpi3mr makefile
+obj-m += mpi3mr.o
+mpi3mr-y += mpi3mr_os.o \
+ mpi3mr_fw.o \
+ mpi3mr_app.o \
+ mpi3mr_transport.o
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
new file mode 100644
index 000000000000..0a2af48915a5
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -0,0 +1,2414 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2017-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_CNFG_H
+#define MPI30_CNFG_H 1
+#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
+#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
+#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03)
+#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
+#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
+#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
+#define MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT (0x20)
+#define MPI3_CONFIG_PAGETYPE_SAS_EXPANDER (0x21)
+#define MPI3_CONFIG_PAGETYPE_SAS_PHY (0x23)
+#define MPI3_CONFIG_PAGETYPE_SAS_PORT (0x24)
+#define MPI3_CONFIG_PAGETYPE_PCIE_IO_UNIT (0x30)
+#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
+#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
+#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI3_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI3_CONFIG_ACTION_READ_DEFAULT (0x01)
+#define MPI3_CONFIG_ACTION_READ_CURRENT (0x02)
+#define MPI3_CONFIG_ACTION_WRITE_CURRENT (0x03)
+#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
+#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
+#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00ff0000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+#define MPI3_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_PHY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000ff)
+#define MPI3_SASPORT_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI3_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+#define MPI3_SASPORT_PGAD_PORT_NUMBER_MASK (0x000000ff)
+#define MPI3_ENCLOS_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+#define MPI3_ENCLOS_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE_PORT_NUM (0x10000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00ff0000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+#define MPI3_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_LINK_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000ff)
+#define MPI3_SECURITY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000)
+#define MPI3_SECURITY_PGAD_FORM_SOT_NUM (0x10000000)
+#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
+#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
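
All PGAD (page address) words above share one layout: a FORM selector in the top nibble plus a form-specific payload in the low bits. Composing the address for a device page looked up by handle would be, illustratively:

	/* dev_handle is a caller-supplied u16 device handle */
	u32 page_address = MPI3_DEVICE_PGAD_FORM_HANDLE |
			   (dev_handle & MPI3_DEVICE_PGAD_HANDLE_MASK);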
+struct mpi3_config_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 page_version;
+ u8 page_number;
+ u8 page_type;
+ u8 action;
+ __le32 page_address;
+ __le16 page_length;
+ __le16 reserved16;
+ __le32 reserved18[2];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_config_page_header {
+ u8 page_version;
+ u8 reserved01;
+ u8 page_number;
+ u8 page_attribute;
+ __le16 page_length;
+ u8 page_type;
+ u8 reserved07;
+};
+
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
+#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI3_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI3_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI3_SAS_NEG_LINK_RATE_6_0 (0x0a)
+#define MPI3_SAS_NEG_LINK_RATE_12_0 (0x0b)
+#define MPI3_SAS_NEG_LINK_RATE_22_5 (0x0c)
+#define MPI3_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI3_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI3_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+#define MPI3_SAS_APHYINFO_REASON_MASK (0x0000000f)
+#define MPI3_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI3_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI3_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI3_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI3_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI3_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI3_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI3_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+#define MPI3_SAS_APHYINFO_REASON_EXP_REDUCED_FUNC (0x00000009)
+#define MPI3_SAS_PHYINFO_STATUS_MASK (0xc0000000)
+#define MPI3_SAS_PHYINFO_STATUS_SHIFT (30)
+#define MPI3_SAS_PHYINFO_STATUS_ACCESSIBLE (0x00000000)
+#define MPI3_SAS_PHYINFO_STATUS_NOT_EXIST (0x40000000)
+#define MPI3_SAS_PHYINFO_STATUS_VACANT (0x80000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_SHIFT (25)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_MASK (0x01000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_SHIFT (24)
+#define MPI3_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_WITHIN (0x00200000)
+#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
+#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI3_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI3_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI3_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI3_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI3_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI3_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI3_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+#define MPI3_SAS_PHYINFO_REASON_EXP_REDUCED_FUNC (0x00090000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI3_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_MASK (0x00000f00)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_SHIFT (8)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_MASK (0x000000f0)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_DIRECT (0x00000000)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SUBTRACTIVE (0x00000010)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_TABLE (0x00000020)
+#define MPI3_SAS_PRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_PRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_PRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_PRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_PRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_PRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_PRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_PRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SAS_HWRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_HWRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_HWRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_HWRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_HWRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0)
+#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1)
+#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2)
+#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
+#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
+#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
+struct mpi3_man_page0 {
+ struct mpi3_config_page_header header;
+ u8 chip_revision[8];
+ u8 chip_name[32];
+ u8 board_name[32];
+ u8 board_assembly[32];
+ u8 board_tracer_number[32];
+ __le32 board_power;
+ __le32 reserved94;
+ __le32 reserved98;
+ u8 oem;
+ u8 profile_identifier;
+ __le16 flags;
+ u8 board_mfg_day;
+ u8 board_mfg_month;
+ __le16 board_mfg_year;
+ u8 board_rework_day;
+ u8 board_rework_month;
+ __le16 board_rework_year;
+ u8 board_revision[8];
+ u8 e_pack_fru[16];
+ u8 product_name[256];
+};
+
+#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002)
+#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001)
+#define MPI3_MAN1_VPD_SIZE (512)
+struct mpi3_man_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ u8 vpd[MPI3_MAN1_VPD_SIZE];
+};
+
+#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man_page2 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 reserved0c[3];
+ u8 oem_board_tracer_number[32];
+};
+#define MPI3_MAN2_PAGEVERSION (0x00)
+#define MPI3_MAN2_FLAGS_TRACER_PRESENT (0x01)
+struct mpi3_man5_phy_entry {
+ __le64 ioc_wwid;
+ __le64 device_name;
+ __le64 sata_wwid;
+};
+
+#ifndef MPI3_MAN5_PHY_MAX
+#define MPI3_MAN5_PHY_MAX (1)
+#endif
+struct mpi3_man_page5 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man5_phy_entry phy[MPI3_MAN5_PHY_MAX];
+};
+
+#define MPI3_MAN5_PAGEVERSION (0x00)
+struct mpi3_man6_gpio_entry {
+ u8 function_code;
+ u8 function_flags;
+ __le16 flags;
+ u8 param1;
+ u8 param2;
+ __le16 reserved06;
+ __le32 param3;
+};
+
+#define MPI3_MAN6_GPIO_FUNCTION_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_FUNCTION_ALTERNATE (0x01)
+#define MPI3_MAN6_GPIO_FUNCTION_EXT_INTERRUPT (0x02)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_ACTIVITY (0x03)
+#define MPI3_MAN6_GPIO_FUNCTION_OVER_TEMPERATURE (0x04)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_GREEN (0x05)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
+#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
+#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
+#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
+#define MPI3_MAN6_GPIO_FUNCTION_PBLP_STATUS_CHANGE (0x0d)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f)
+#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
+#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
+#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
+#define MPI3_MAN6_GPIO_FUNCTION_AUXILIARY_POWER (0x14)
+#define MPI3_MAN6_GPIO_FUNCTION_RAID_DATA_CACHE_DIRTY (0x15)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_CONTROL (0x16)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_FAULT (0x17)
+#define MPI3_MAN6_GPIO_FUNCTION_POWER_BRAKE (0x18)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
+#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SLOW_EDGE (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_MASK (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_100OHM (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_66OHM (0x0040)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_50OHM (0x0080)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_33OHM (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_MASK (0x0030)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_SHIFT (4)
+#define MPI3_MAN6_GPIO_FLAGS_ACTIVE_HIGH (0x0008)
+#define MPI3_MAN6_GPIO_FLAGS_BI_DIR_ENABLED (0x0004)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_MASK (0x0003)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_INPUT (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_DRAIN_OUTPUT (0x0001)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_SOURCE_OUTPUT (0x0002)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_PUSH_PULL_OUTPUT (0x0003)
+#ifndef MPI3_MAN6_GPIO_MAX
+#define MPI3_MAN6_GPIO_MAX (1)
+#endif
+struct mpi3_man_page6 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_gpio;
+ u8 reserved0d[3];
+ struct mpi3_man6_gpio_entry gpio[MPI3_MAN6_GPIO_MAX];
+};
+
+#define MPI3_MAN6_PAGEVERSION (0x00)
+#define MPI3_MAN6_FLAGS_HEARTBEAT_LED_DISABLED (0x0001)
+struct mpi3_man7_receptacle_info {
+ __le32 name[4];
+ u8 location;
+ u8 connector_type;
+ u8 ped_clk;
+ u8 connector_id;
+ __le32 reserved14;
+};
+
+#define MPI3_MAN7_LOCATION_UNKNOWN (0x00)
+#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
+#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
+#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_LOCATION_HOST (0x04)
+#define MPI3_MAN7_CONNECTOR_TYPE_NO_INFO (0x00)
+#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
+#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
+#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
+#define MPI3_MAN7_PEDCLK_ID_MASK (0x0f)
+#ifndef MPI3_MAN7_RECEPTACLE_INFO_MAX
+#define MPI3_MAN7_RECEPTACLE_INFO_MAX (1)
+#endif
+struct mpi3_man_page7 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 num_receptacles;
+ u8 reserved0d[3];
+ __le32 enclosure_name[4];
+ struct mpi3_man7_receptacle_info receptacle_info[MPI3_MAN7_RECEPTACLE_INFO_MAX];
+};
+
+#define MPI3_MAN7_PAGEVERSION (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_MASK (0x01)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_0 (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_1 (0x01)
+struct mpi3_man8_phy_info {
+ u8 receptacle_id;
+ u8 connector_lane;
+ __le16 reserved02;
+ __le16 slotx1;
+ __le16 slotx2;
+ __le16 slotx4;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_NOT_ASSOCIATED (0xff)
+#define MPI3_MAN8_PHY_INFO_CONNECTOR_LANE_NOT_ASSOCIATED (0xff)
+#ifndef MPI3_MAN8_PHY_INFO_MAX
+#define MPI3_MAN8_PHY_INFO_MAX (1)
+#endif
+struct mpi3_man_page8 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 reserved0d[3];
+ struct mpi3_man8_phy_info phy_info[MPI3_MAN8_PHY_INFO_MAX];
+};
+
+#define MPI3_MAN8_PAGEVERSION (0x00)
+struct mpi3_man9_rsrc_entry {
+ __le32 maximum;
+ __le32 decrement;
+ __le32 minimum;
+ __le32 actual;
+};
+
+enum mpi3_man9_resources {
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_RESERVED02 = 2,
+ MPI3_MAN9_RSRC_NVME = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_RESERVED10 = 10,
+ MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12,
+ MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13,
+ MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14,
+ MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15,
+ MPI3_MAN9_RSRC_NUM_RESOURCES
+};
+
+#define MPI3_MAN9_MIN_OUTSTANDING_REQS (1)
+#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000)
+#define MPI3_MAN9_MIN_TARGET_CMDS (0)
+#define MPI3_MAN9_MAX_TARGET_CMDS (65535)
+#define MPI3_MAN9_MIN_NVME_TARGETS (0)
+#define MPI3_MAN9_MIN_INITIATORS (0)
+#define MPI3_MAN9_MIN_VDS (0)
+#define MPI3_MAN9_MIN_ENCLOSURES (1)
+#define MPI3_MAN9_MAX_ENCLOSURES (65535)
+#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
+#define MPI3_MAN9_MIN_EXPANDERS (0)
+#define MPI3_MAN9_MAX_EXPANDERS (65535)
+#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
+#define MPI3_MAN9_MIN_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_ADV_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_RAID_PD_DRIVES (0)
+#define MPI3_MAN9_DRIVER_DIAG_BUFFER (0)
+#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
+struct mpi3_man_page9 {
+ struct mpi3_config_page_header header;
+ u8 num_resources;
+ u8 reserved09;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+ struct mpi3_man9_rsrc_entry resource[MPI3_MAN9_RSRC_NUM_RESOURCES];
+};
+
+#define MPI3_MAN9_PAGEVERSION (0x00)
+struct mpi3_man10_istwi_ctrlr_entry {
+ __le16 slave_address;
+ __le16 flags;
+ u8 scl_low_override;
+ u8 scl_high_override;
+ __le16 reserved06;
+};
+
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
+#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
+#define MPI3_MAN10_ISTWI_CTRLR_MAX (1)
+#endif
+struct mpi3_man_page10 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_ctrl;
+ u8 reserved0d[3];
+ struct mpi3_man10_istwi_ctrlr_entry istwi_controller[MPI3_MAN10_ISTWI_CTRLR_MAX];
+};
+
+#define MPI3_MAN10_PAGEVERSION (0x00)
+struct mpi3_man11_mux_device_format {
+ u8 max_channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_temp_sensor_device_format {
+ u8 type;
+ u8 reserved01[3];
+ u8 temp_channel[4];
+};
+
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+struct mpi3_man11_seeprom_device_format {
+ u8 size;
+ u8 page_write_size;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_SEEPROM_SIZE_1KBITS (0x01)
+#define MPI3_MAN11_SEEPROM_SIZE_2KBITS (0x02)
+#define MPI3_MAN11_SEEPROM_SIZE_4KBITS (0x03)
+#define MPI3_MAN11_SEEPROM_SIZE_8KBITS (0x04)
+#define MPI3_MAN11_SEEPROM_SIZE_16KBITS (0x05)
+#define MPI3_MAN11_SEEPROM_SIZE_32KBITS (0x06)
+#define MPI3_MAN11_SEEPROM_SIZE_64KBITS (0x07)
+#define MPI3_MAN11_SEEPROM_SIZE_128KBITS (0x08)
+struct mpi3_man11_ddr_spd_device_format {
+ u8 channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_cable_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_CABLE_MGMT_TYPE_SFF_8636 (0x00)
+struct mpi3_man11_bkplane_spec_ubm_format {
+ __le16 flags;
+ __le16 reserved02;
+};
+
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_FORCE_POLLING (0x0100)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_MASK (0x00f0)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+struct mpi3_man11_bkplane_spec_non_ubm_format {
+ __le16 flags;
+ u8 reserved02;
+ u8 type;
+};
+
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_MASK (0x00c0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_4 (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_2 (0x0040)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_1 (0x0080)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00)
+union mpi3_man11_bkplane_spec_format {
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm;
+};
+
+struct mpi3_man11_bkplane_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ u8 reset_info;
+ u8 reserved03;
+ union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
+};
+
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0)
+struct mpi3_man11_gas_gauge_device_format {
+ u8 type;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+struct mpi3_man11_mgmt_ctrlr_device_format {
+ __le32 reserved00;
+ __le32 reserved04;
+};
+struct mpi3_man11_board_fan_device_format {
+ u8 flags;
+ u8 reserved01;
+ u8 min_fan_speed;
+ u8 max_fan_speed;
+ __le32 reserved04;
+};
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_MASK (0x07)
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_AMC6821 (0x00)
+union mpi3_man11_device_specific_format {
+ struct mpi3_man11_mux_device_format mux;
+ struct mpi3_man11_temp_sensor_device_format temp_sensor;
+ struct mpi3_man11_seeprom_device_format seeprom;
+ struct mpi3_man11_ddr_spd_device_format ddr_spd;
+ struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
+ struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
+ struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
+ struct mpi3_man11_board_fan_device_format board_fan;
+ __le32 words[2];
+};
+struct mpi3_man11_istwi_device_format {
+ u8 device_type;
+ u8 controller;
+ u8 reserved02;
+ u8 flags;
+ __le16 device_address;
+ u8 mux_channel;
+ u8 mux_index;
+ union mpi3_man11_device_specific_format device_specific;
+};
+
+#define MPI3_MAN11_ISTWI_DEVTYPE_MUX (0x00)
+#define MPI3_MAN11_ISTWI_DEVTYPE_TEMP_SENSOR (0x01)
+#define MPI3_MAN11_ISTWI_DEVTYPE_SEEPROM (0x02)
+#define MPI3_MAN11_ISTWI_DEVTYPE_DDR_SPD (0x03)
+#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
+#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BOARD_FAN (0x08)
+#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
+#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
+#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
+#endif
+struct mpi3_man_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_dev;
+ u8 reserved0d[3];
+ struct mpi3_man11_istwi_device_format istwi_device[MPI3_MAN11_ISTWI_DEVICE_MAX];
+};
+
+#define MPI3_MAN11_PAGEVERSION (0x00)
+#ifndef MPI3_MAN12_NUM_SGPIO_MAX
+#define MPI3_MAN12_NUM_SGPIO_MAX (1)
+#endif
+struct mpi3_man12_sgpio_info {
+ u8 slot_count;
+ u8 reserved01[3];
+ __le32 reserved04;
+ u8 phy_order[32];
+};
+
+struct mpi3_man_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 s_clock_freq;
+ __le32 activity_modulation;
+ u8 num_sgpio;
+ u8 reserved15[3];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le32 pattern[8];
+ struct mpi3_man12_sgpio_info sgpio_info[MPI3_MAN12_NUM_SGPIO_MAX];
+};
+
+#define MPI3_MAN12_PAGEVERSION (0x00)
+#define MPI3_MAN12_FLAGS_ERROR_PRESENCE_ENABLED (0x0400)
+#define MPI3_MAN12_FLAGS_ACTIVITY_INVERT_ENABLED (0x0200)
+#define MPI3_MAN12_FLAGS_GROUP_ID_DISABLED (0x0100)
+#define MPI3_MAN12_FLAGS_SIO_CLK_FILTER_ENABLED (0x0004)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_MASK (0x0002)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_OPEN_DRAIN (0x0002)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_MASK (0x0001)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_OPEN_DRAIN (0x0001)
+#define MPI3_MAN12_SIO_CLK_FREQ_MIN (32)
+#define MPI3_MAN12_SIO_CLK_FREQ_MAX (100000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_MASK (0x0000f000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_SHIFT (12)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_MASK (0x00000f00)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_SHIFT (8)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_MASK (0x000000f0)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_SHIFT (4)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_MASK (0x0000000f)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_SHIFT (0)
+#define MPI3_MAN12_PATTERN_RATE_MASK (0xe0000000)
+#define MPI3_MAN12_PATTERN_RATE_2_HZ (0x00000000)
+#define MPI3_MAN12_PATTERN_RATE_4_HZ (0x20000000)
+#define MPI3_MAN12_PATTERN_RATE_8_HZ (0x40000000)
+#define MPI3_MAN12_PATTERN_RATE_16_HZ (0x60000000)
+#define MPI3_MAN12_PATTERN_RATE_10_HZ (0x80000000)
+#define MPI3_MAN12_PATTERN_RATE_20_HZ (0xa0000000)
+#define MPI3_MAN12_PATTERN_RATE_40_HZ (0xc0000000)
+#define MPI3_MAN12_PATTERN_LENGTH_MASK (0x1f000000)
+#define MPI3_MAN12_PATTERN_LENGTH_SHIFT (24)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_MASK (0x00ffffff)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_SHIFT (0)
+#ifndef MPI3_MAN13_NUM_TRANSLATION_MAX
+#define MPI3_MAN13_NUM_TRANSLATION_MAX (1)
+#endif
+struct mpi3_man13_translation_info {
+ __le32 slot_status;
+ __le32 mask;
+ u8 activity;
+ u8 locate;
+ u8 error;
+ u8 reserved0b;
+};
+
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_FAULT (0x20000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_OFF (0x10000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_ACTIVITY (0x00800000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_MISSING (0x00100000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_INSERT (0x00080000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REMOVAL (0x00040000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IDENTIFY (0x00020000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_OK (0x00008000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_HOT_SPARE (0x00002000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000800)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_FAILED_ARRAY (0x00000400)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP (0x00000200)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP_ABORT (0x00000100)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_PREDICTED_FAILURE (0x00000040)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_OFF (0x00)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_ON (0x01)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_0 (0x02)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_1 (0x03)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_2 (0x04)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_3 (0x05)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_4 (0x06)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_5 (0x07)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_6 (0x08)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_7 (0x09)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY (0x0a)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY_TRAIL (0x0b)
+struct mpi3_man_page13 {
+ struct mpi3_config_page_header header;
+ u8 num_trans;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man13_translation_info translation[MPI3_MAN13_NUM_TRANSLATION_MAX];
+};
+
+#define MPI3_MAN13_PAGEVERSION (0x00)
+struct mpi3_man_page14 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_slot_groups;
+ u8 num_slots;
+ __le16 max_cert_chain_length;
+ __le32 sealed_slots;
+ __le32 populated_slots;
+ __le32 mgmt_pt_updatable_slots;
+};
+#define MPI3_MAN14_PAGEVERSION (0x00)
+#define MPI3_MAN14_NUMSLOTS_MAX (32)
+#ifndef MPI3_MAN15_VERSION_RECORD_MAX
+#define MPI3_MAN15_VERSION_RECORD_MAX 1
+#endif
+struct mpi3_man15_version_record {
+ __le16 spdm_version;
+ __le16 reserved02;
+};
+
+struct mpi3_man_page15 {
+ struct mpi3_config_page_header header;
+ u8 num_version_records;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man15_version_record version_record[MPI3_MAN15_VERSION_RECORD_MAX];
+};
+
+#define MPI3_MAN15_PAGEVERSION (0x00)
+#ifndef MPI3_MAN16_CERT_ALGO_MAX
+#define MPI3_MAN16_CERT_ALGO_MAX 1
+#endif
+struct mpi3_man16_certificate_algorithm {
+ u8 slot_group;
+ u8 reserved01[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved0c[3];
+};
+
+struct mpi3_man_page16 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_cert_algos;
+ u8 reserved0d[3];
+ struct mpi3_man16_certificate_algorithm certificate_algorithm[MPI3_MAN16_CERT_ALGO_MAX];
+};
+
+#define MPI3_MAN16_PAGEVERSION (0x00)
+#ifndef MPI3_MAN17_HASH_ALGORITHM_MAX
+#define MPI3_MAN17_HASH_ALGORITHM_MAX 1
+#endif
+struct mpi3_man17_hash_algorithm {
+ u8 meas_specification;
+ u8 reserved01[3];
+ __le32 measurement_hash_algo;
+ __le32 reserved08[2];
+};
+
+struct mpi3_man_page17 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_hash_algos;
+ u8 reserved0d[3];
+ struct mpi3_man17_hash_algorithm hash_algorithm[MPI3_MAN17_HASH_ALGORITHM_MAX];
+};
+
+#define MPI3_MAN17_PAGEVERSION (0x00)
+struct mpi3_man_page20 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 nonpremium_features;
+ u8 allowed_personalities;
+ u8 reserved11[3];
+};
+
+#define MPI3_MAN20_PAGEVERSION (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_MASK (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_ALLOWED (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_MASK (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_ALLOWED (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_MASK (0x01)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_ENABLED (0x00)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_DISABLED (0x01)
+struct mpi3_man_page21 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 flags;
+};
+
+#define MPI3_MAN21_PAGEVERSION (0x00)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001)
+#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
+#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
+#endif
+struct mpi3_man_page_product_specific {
+ struct mpi3_config_page_header header;
+ __le32 product_specific_info[MPI3_MAN_PROD_SPECIFIC_MAX];
+};
+
+struct mpi3_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le64 unique_value;
+ __le32 nvdata_version_default;
+ __le32 nvdata_version_persistent;
+};
+
+#define MPI3_IOUNIT0_PAGEVERSION (0x00)
+struct mpi3_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 dmd_io_delay;
+ u8 dmd_report_pcie;
+ u8 dmd_report_sata;
+ u8 dmd_report_sas;
+};
+
+#define MPI3_IOUNIT1_PAGEVERSION (0x00)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_MASK (0x00000030)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_DISABLE (0x00000010)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_NO_MODIFY (0x00000020)
+#define MPI3_IOUNIT1_FLAGS_ATA_SECURITY_FREEZE_LOCK (0x00000008)
+#define MPI3_IOUNIT1_FLAGS_WRITE_SAME_BUFFER (0x00000004)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK (0x00000003)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_DISABLE (0x00000001)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_UNCHANGED (0x00000002)
+#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK (0x7f)
+#define MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC (0x80)
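
[editor's note] A short decode sketch for IO unit page 1: the write-cache policy is a two-bit field inside the little-endian flags word, and the DMD bytes carry a 7-bit delay whose unit is multiplied by 16 when bit 7 is set. The one-second base unit when that bit is clear is an assumption here, and the helper names are illustrative:

static inline u32 iounit1_sata_wc_policy(const struct mpi3_io_unit_page1 *pg)
{
        /* Compare the result against the SATA_WRITE_CACHE_* values. */
        return le32_to_cpu(pg->flags) &
               MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK;
}

static inline unsigned int iounit1_dmd_delay_seconds(u8 dmd)
{
        unsigned int t = dmd & MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK;

        /* Assumed: units of 1 s unless the 16 s unit bit is set. */
        return (dmd & MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC) ? t * 16 : t;
}
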
+#ifndef MPI3_IO_UNIT2_GPIO_VAL_MAX
+#define MPI3_IO_UNIT2_GPIO_VAL_MAX (1)
+#endif
+struct mpi3_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 gpio_count;
+ u8 reserved09[3];
+ __le16 gpio_val[MPI3_IO_UNIT2_GPIO_VAL_MAX];
+};
+
+#define MPI3_IOUNIT2_PAGEVERSION (0x00)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_MASK (0xfffc)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT (2)
+#define MPI3_IOUNIT2_GPIO_SETTING_MASK (0x0001)
+#define MPI3_IOUNIT2_GPIO_SETTING_OFF (0x0000)
+#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
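
[editor's note] Each gpio_val entry packs a function number in bits 15:2 and the pin state in bit 0. A minimal decode sketch (helper names are illustrative; since the array is declared with a one-entry placeholder, the caller is assumed to have read a full-sized page covering gpio_count entries):

static inline u16 iounit2_gpio_function(__le16 gpio_val)
{
        return (le16_to_cpu(gpio_val) & MPI3_IOUNIT2_GPIO_FUNCTION_MASK) >>
               MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT;
}

static inline bool iounit2_gpio_is_on(__le16 gpio_val)
{
        return (le16_to_cpu(gpio_val) & MPI3_IOUNIT2_GPIO_SETTING_MASK) ==
               MPI3_IOUNIT2_GPIO_SETTING_ON;
}
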
+struct mpi3_io_unit3_sensor {
+ __le16 flags;
+ u8 threshold_margin;
+ u8 reserved03;
+ __le16 threshold[3];
+ __le16 reserved0a;
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+};
+
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001)
+#ifndef MPI3_IO_UNIT3_SENSOR_MAX
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 nominal_poll_interval;
+ u8 warning_poll_interval;
+ u8 reserved0f;
+ struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT3_PAGEVERSION (0x00)
+struct mpi3_io_unit4_sensor {
+ __le16 current_temperature;
+ __le16 reserved02;
+ u8 flags;
+ u8 reserved05[3];
+ __le16 istwi_index;
+ u8 channel;
+ u8 reserved0b;
+ __le32 reserved0c;
+};
+
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff)
+#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff)
+#ifndef MPI3_IO_UNIT4_SENSOR_MAX
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page4 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 reserved0d[3];
+ struct mpi3_io_unit4_sensor sensor[MPI3_IO_UNIT4_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT4_PAGEVERSION (0x00)
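
[editor's note] A sketch of walking the page 4 temperature sensors: entries are only meaningful when TEMP_VALID is set, and the location code lives in the top three flag bits. The function name is illustrative, and the degrees-Celsius reading of current_temperature is an assumption from the field name:

static void iounit4_print_temps(const struct mpi3_io_unit_page4 *pg)
{
        u8 i;

        /* num_sensors may exceed the 1-entry placeholder array, so the
         * caller is assumed to have read the full page from the IOC. */
        for (i = 0; i < pg->num_sensors; i++) {
                const struct mpi3_io_unit4_sensor *s = &pg->sensor[i];

                if (!(s->flags & MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID))
                        continue;
                pr_info("sensor %u: %u C (location %u)\n", i,
                        le16_to_cpu(s->current_temperature),
                        (s->flags & MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK) >>
                        MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT);
        }
}
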
+struct mpi3_io_unit5_spinup_group {
+ u8 max_target_spinup;
+ u8 spinup_delay;
+ u8 spinup_flags;
+ u8 reserved03;
+};
+
+#define MPI3_IOUNIT5_SPINUP_FLAGS_DISABLE (0x01)
+#ifndef MPI3_IO_UNIT5_PHY_MAX
+#define MPI3_IO_UNIT5_PHY_MAX (4)
+#endif
+struct mpi3_io_unit_page5 {
+ struct mpi3_config_page_header header;
+ struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le16 device_shutdown;
+ __le16 reserved22;
+ u8 pcie_device_wait_time;
+ u8 sata_device_wait_time;
+ u8 spinup_encl_drive_count;
+ u8 spinup_encl_delay;
+ u8 num_phys;
+ u8 pe_initial_spinup_delay;
+ u8 topology_stable_time;
+ u8 flags;
+ u8 phy[MPI3_IO_UNIT5_PHY_MAX];
+};
+
+#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_APP_CONTROLLED (0x08)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_BLOCKED (0x0c)
+#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
+#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
+#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
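
[editor's note] The device_shutdown word packs one two-bit field per device class; after masking and shifting, each field compares against the DEVICE_SHUTDOWN_NO_ACTION/DIRECT_ATTACHED/EXPANDER_ATTACHED/DIRECT_AND_EXPANDER values. A sketch for the SATA HDD field (helper name illustrative, the other classes follow the same pattern with their own mask/shift):

static inline u16 iounit5_sata_hdd_shutdown(const struct mpi3_io_unit_page5 *pg)
{
        return (le16_to_cpu(pg->device_shutdown) &
                MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK) >>
               MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT;
}
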
+struct mpi3_io_unit_page6 {
+ struct mpi3_config_page_header header;
+ __le32 board_power_requirement;
+ __le32 pci_slot_power_allocation;
+ u8 flags;
+ u8 reserved11[3];
+};
+
+#define MPI3_IOUNIT6_PAGEVERSION (0x00)
+#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01)
+#ifndef MPI3_IOUNIT8_DIGEST_MAX
+#define MPI3_IOUNIT8_DIGEST_MAX (1)
+#endif
+union mpi3_iounit8_digest {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+struct mpi3_io_unit_page8 {
+ struct mpi3_config_page_header header;
+ u8 sb_mode;
+ u8 sb_state;
+ __le16 reserved0a;
+ u8 num_slots;
+ u8 slots_available;
+ u8 current_key_encryption_algo;
+ u8 key_digest_hash_algo;
+ union mpi3_version_union current_svn;
+ __le32 reserved14;
+ __le32 current_key[128];
+ union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
+};
+
+#define MPI3_IOUNIT8_PAGEVERSION (0x00)
+#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
+#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
+#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
+#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
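
[editor's note] sb_mode and sb_state are plain bit fields, so testing them needs no byte swapping. A minimal sketch (helper names illustrative):

static inline bool iounit8_secure_boot_enabled(const struct mpi3_io_unit_page8 *pg)
{
        return pg->sb_state & MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED;
}

static inline bool iounit8_hard_secure(const struct mpi3_io_unit_page8 *pg)
{
        return pg->sb_mode & MPI3_IOUNIT8_SBMODE_HARD_SECURE;
}
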
+struct mpi3_io_unit_page9 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le16 first_device;
+ __le16 reserved0e;
+};
+
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_MASK (0x00000006)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_SHIFT (1)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_NONE (0x00000000)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_RECEPTACLE (0x00000002)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+struct mpi3_io_unit_page10 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 silicon_id;
+ u8 fw_version_minor;
+ u8 fw_version_major;
+ u8 hw_version_minor;
+ u8 hw_version_major;
+ u8 part_number[16];
+};
+#define MPI3_IOUNIT10_PAGEVERSION (0x00)
+#define MPI3_IOUNIT10_FLAGS_VALID (0x01)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_MASK (0x02)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_FIRST_REGION (0x00)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SECOND_REGION (0x02)
+#define MPI3_IOUNIT10_FLAGS_PBLP_EXPECTED (0x80)
+#ifndef MPI3_IOUNIT11_PROFILE_MAX
+#define MPI3_IOUNIT11_PROFILE_MAX (1)
+#endif
+struct mpi3_iounit11_profile {
+ u8 profile_identifier;
+ u8 reserved01[3];
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_adv_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_nvme;
+ __le16 max_outstanding_requests;
+ __le16 subsystem_id;
+ __le16 reserved12;
+ __le32 reserved14[2];
+};
+struct mpi3_io_unit_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_profiles;
+ u8 current_profile_identifier;
+ __le16 reserved0e;
+ struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX];
+};
+#define MPI3_IOUNIT11_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT12_BUCKET_MAX
+#define MPI3_IOUNIT12_BUCKET_MAX (1)
+#endif
+struct mpi3_iounit12_bucket {
+ u8 coalescing_depth;
+ u8 coalescing_timeout;
+ __le16 io_count_low_boundary;
+ __le32 reserved04;
+};
+struct mpi3_io_unit_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c[4];
+ u8 num_buckets;
+ u8 reserved1d[3];
+ struct mpi3_iounit12_bucket bucket[MPI3_IOUNIT12_BUCKET_MAX];
+};
+#define MPI3_IOUNIT12_PAGEVERSION (0x00)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT (8)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_8 (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_16 (0x00000100)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS (0x00000003)
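
[editor's note] The coalescing flags encode the pass count as a power of two (8 << field) and the pass period as an enumerated value. A decode sketch under those readings of the defines above (helper names illustrative):

static inline u32 iounit12_num_passes(const struct mpi3_io_unit_page12 *pg)
{
        u32 f = (le32_to_cpu(pg->flags) & MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK) >>
                MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT;

        return 8U << f; /* 8, 16, 32 or 64 */
}

static unsigned int iounit12_pass_period_us(const struct mpi3_io_unit_page12 *pg)
{
        switch (le32_to_cpu(pg->flags) & MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK) {
        case MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US: return 500;
        case MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS:   return 1000;
        case MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS:   return 2000;
        default:                                   return 0; /* disabled */
        }
}
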
+#ifndef MPI3_IOUNIT13_FUNC_MAX
+#define MPI3_IOUNIT13_FUNC_MAX (1)
+#endif
+struct mpi3_iounit13_allowed_function {
+ __le16 sub_function;
+ u8 function_code;
+ u8 function_flags;
+};
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_CHECK_SUBFUNCTION_ENABLED (0x01)
+struct mpi3_io_unit_page13 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_functions;
+ u8 reserved0d[3];
+ struct mpi3_iounit13_allowed_function allowed_function[MPI3_IOUNIT13_FUNC_MAX];
+};
+#define MPI3_IOUNIT13_PAGEVERSION (0x00)
+#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002)
+#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001)
+struct mpi3_ioc_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 vendor_id;
+ __le16 device_id;
+ u8 revision_id;
+ u8 reserved11[3];
+ __le32 class_code;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+};
+
+#define MPI3_IOC0_PAGEVERSION (0x00)
+struct mpi3_ioc_page1 {
+ struct mpi3_config_page_header header;
+ __le32 coalescing_timeout;
+ u8 coalescing_depth;
+ u8 obsolete;
+ __le16 reserved0e;
+};
+#define MPI3_IOC1_PAGEVERSION (0x00)
+#ifndef MPI3_IOC2_EVENTMASK_WORDS
+#define MPI3_IOC2_EVENTMASK_WORDS (4)
+#endif
+struct mpi3_ioc_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_IOC2_EVENTMASK_WORDS];
+};
+
+#define MPI3_IOC2_PAGEVERSION (0x00)
+#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
+#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
+#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
+#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002)
+#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001)
+struct mpi3_allowed_cmd_scsi {
+ __le16 service_action;
+ u8 operation_code;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_ata {
+ u8 subcommand;
+ u8 reserved01;
+ u8 command;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_nvme {
+ u8 reserved00;
+ u8 nvme_cmd_flags;
+ u8 op_code;
+ u8 command_flags;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
+union mpi3_allowed_cmd {
+ struct mpi3_allowed_cmd_scsi scsi;
+ struct mpi3_allowed_cmd_ata ata;
+ struct mpi3_allowed_cmd_nvme nvme;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01)
+#ifndef MPI3_ALLOWED_CMDS_MAX
+#define MPI3_ALLOWED_CMDS_MAX (1)
+#endif
+struct mpi3_driver_page0 {
+ struct mpi3_config_page_header header;
+ __le32 bsd_options;
+ u8 ssu_timeout;
+ u8 io_timeout;
+ u8 tur_retries;
+ u8 tur_interval;
+ u8 reserved10;
+ u8 security_key_timeout;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+};
+#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
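
[editor's note] bsd_options is a little-endian dword mixing single-bit options with a two-bit registration field. A minimal sketch of testing it (helper names illustrative):

static inline bool driver0_headless(const struct mpi3_driver_page0 *pg)
{
        return le32_to_cpu(pg->bsd_options) &
               MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE;
}

static inline u32 driver0_registration(const struct mpi3_driver_page0 *pg)
{
        /* Compare against the BSDOPTS_REGISTRATION_* values. */
        return le32_to_cpu(pg->bsd_options) &
               MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK;
}
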
+struct mpi3_driver_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c;
+ __le16 host_diag_trace_max_size;
+ __le16 host_diag_trace_min_size;
+ __le16 host_diag_trace_decrement_size;
+ __le16 reserved16;
+ __le16 host_diag_fw_max_size;
+ __le16 host_diag_fw_min_size;
+ __le16 host_diag_fw_decrement_size;
+ __le16 reserved1e;
+ __le16 host_diag_driver_max_size;
+ __le16 host_diag_driver_min_size;
+ __le16 host_diag_driver_decrement_size;
+ __le16 reserved26;
+};
+
+#define MPI3_DRIVER1_PAGEVERSION (0x00)
+#ifndef MPI3_DRIVER2_TRIGGER_MAX
+#define MPI3_DRIVER2_TRIGGER_MAX (1)
+#endif
+struct mpi3_driver2_trigger_event {
+ u8 type;
+ u8 flags;
+ u8 reserved02;
+ u8 event;
+ __le32 reserved04[3];
+};
+
+struct mpi3_driver2_trigger_scsi_sense {
+ u8 type;
+ u8 flags;
+ __le16 reserved02;
+ u8 ascq;
+ u8 asc;
+ u8 sense_key;
+ u8 reserved07;
+ __le32 reserved08[2];
+};
+
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff)
+struct mpi3_driver2_trigger_reply {
+ u8 type;
+ u8 flags;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 ioc_log_info_mask;
+ __le32 reserved0c;
+};
+
+#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff)
+union mpi3_driver2_trigger_element {
+ struct mpi3_driver2_trigger_event event;
+ struct mpi3_driver2_trigger_scsi_sense scsi_sense;
+ struct mpi3_driver2_trigger_reply reply;
+};
+
+#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00)
+#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01)
+#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
+struct mpi3_driver_page2 {
+ struct mpi3_config_page_header header;
+ __le64 master_trigger;
+ __le32 reserved10[3];
+ u8 num_triggers;
+ u8 reserved1d[3];
+ union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX];
+};
+
+#define MPI3_DRIVER2_PAGEVERSION (0x00)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
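
[editor's note] Every variant of union mpi3_driver2_trigger_element starts with the same u8 type byte, so a consumer can read it through any member and then dispatch. A sketch (function name illustrative; since the trigger array is declared with a one-entry placeholder, the caller is assumed to have read the full page):

static void driver2_dump_triggers(const struct mpi3_driver_page2 *pg)
{
        u8 i;

        for (i = 0; i < pg->num_triggers; i++) {
                const union mpi3_driver2_trigger_element *t = &pg->trigger[i];

                switch (t->event.type) { /* 'type' is byte 0 in all variants */
                case MPI3_DRIVER2_TRIGGER_TYPE_EVENT:
                        pr_info("trigger %u: event 0x%02x\n", i, t->event.event);
                        break;
                case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE:
                        pr_info("trigger %u: sense %02x/%02x/%02x\n", i,
                                t->scsi_sense.sense_key, t->scsi_sense.asc,
                                t->scsi_sense.ascq);
                        break;
                case MPI3_DRIVER2_TRIGGER_TYPE_REPLY:
                        pr_info("trigger %u: ioc_status 0x%04x\n", i,
                                le16_to_cpu(t->reply.ioc_status));
                        break;
                }
        }
}
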
+struct mpi3_driver_page10 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER10_PAGEVERSION (0x00)
+struct mpi3_driver_page20 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER20_PAGEVERSION (0x00)
+struct mpi3_driver_page30 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER30_PAGEVERSION (0x00)
+union mpi3_security_mac {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security_nonce {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security0_cert_chain {
+ __le32 dword[1024];
+ __le16 word[2048];
+ u8 byte[4096];
+};
+
+struct mpi3_security_page0 {
+ struct mpi3_config_page_header header;
+ u8 slot_num_group;
+ u8 slot_num;
+ __le16 cert_chain_length;
+ u8 cert_chain_flags;
+ u8 reserved0d[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved18[4];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ union mpi3_security0_cert_chain certificate_chain;
+};
+
+#define MPI3_SECURITY0_PAGEVERSION (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_MASK (0x0e)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_UNUSED (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SPDM (0x04)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_SEALED (0x01)
+#ifndef MPI3_SECURITY1_KEY_RECORD_MAX
+#define MPI3_SECURITY1_KEY_RECORD_MAX 1
+#endif
+#ifndef MPI3_SECURITY1_PAD_MAX
+#define MPI3_SECURITY1_PAD_MAX 1
+#endif
+union mpi3_security1_key_data {
+ __le32 dword[128];
+ __le16 word[256];
+ u8 byte[512];
+};
+
+struct mpi3_security1_key_record {
+ u8 flags;
+ u8 consumer;
+ __le16 key_data_size;
+ __le32 additional_key_data;
+ __le32 reserved08[2];
+ union mpi3_security1_key_data key_data;
+};
+
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_MASK (0x1f)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_HMAC (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_AES (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PRIVATE (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PUBLIC (0x04)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
+struct mpi3_security_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ u8 num_keys;
+ u8 reserved91[3];
+ __le32 reserved94[3];
+ struct mpi3_security1_key_record key_record[MPI3_SECURITY1_KEY_RECORD_MAX];
+ u8 pad[MPI3_SECURITY1_PAD_MAX];
+};
+
+#define MPI3_SECURITY1_PAGEVERSION (0x00)
+struct mpi3_sas_io_unit0_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 reserved06;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 discovery_status;
+ __le32 reserved10;
+};
+
+#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
+#define MPI3_SAS_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 init_status;
+ __le16 reserved0e;
+ struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
+struct mpi3_sas_io_unit1_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 max_target_port_connect_time;
+ __le32 reserved08;
+};
+
+#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
+#define MPI3_SAS_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le16 control_flags;
+ __le16 sas_narrow_max_queue_depth;
+ __le16 additional_control_flags;
+ __le16 sas_wide_max_queue_depth;
+ u8 num_phys;
+ u8 sata_max_q_depth;
+ __le16 reserved12;
+ struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT1_CONTROL_CONTROLLER_DEVICE_SELF_TEST (0x8000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI3_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI3_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI3_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_MASK (0x0001)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_DEVICE_NAME (0x0000)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SAS_ADDRESS (0x0001)
+#define MPI3_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
+#define MPI3_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI3_SASIOUNIT1_ACONTROL_SATA_ASYNCHRONOUS_NOTIFICATION (0x0040)
+#define MPI3_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI3_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI3_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI3_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI3_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI3_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+#define MPI3_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_6_0 (0xa0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_12_0 (0xb0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_22_5 (0xc0)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK (0x0f)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_6_0 (0x0a)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_12_0 (0x0b)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_22_5 (0x0c)
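
[editor's note] max_min_link_rate packs the maximum rate in the high nibble and the minimum rate in the low nibble; after shifting, the maximum uses the same 0x0a/0x0b/0x0c encoding as the MIN_RATE values. A decode sketch (helper names illustrative):

static inline u8 sasiounit1_max_rate(const struct mpi3_sas_io_unit1_phy_data *pd)
{
        return (pd->max_min_link_rate & MPI3_SASIOUNIT1_MMLR_MAX_RATE_MASK) >>
               MPI3_SASIOUNIT1_MMLR_MAX_RATE_SHIFT;
}

static inline u8 sasiounit1_min_rate(const struct mpi3_sas_io_unit1_phy_data *pd)
{
        return pd->max_min_link_rate & MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK;
}
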
+struct mpi3_sas_io_unit2_phy_pm_settings {
+ u8 control_flags;
+ u8 reserved01;
+ __le16 inactivity_timer_exponent;
+ u8 sata_partial_timeout;
+ u8 reserved05;
+ u8 sata_slumber_timeout;
+ u8 reserved07;
+ u8 sas_partial_timeout;
+ u8 reserved09;
+ u8 sas_slumber_timeout;
+ u8 reserved0b;
+};
+
+#ifndef MPI3_SAS_IO_UNIT2_PHY_MAX
+#define MPI3_SAS_IO_UNIT2_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_sas_io_unit2_phy_pm_settings sas_phy_power_management_settings[MPI3_SAS_IO_UNIT2_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_MASK (0x7000)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_SHIFT (12)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_MASK (0x0700)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_SHIFT (8)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_MASK (0x0070)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_SHIFT (4)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_MASK (0x0007)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_SHIFT (0)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_SECONDS (7)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_SECOND (6)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MILLISECONDS (5)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MILLISECONDS (4)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MILLISECOND (3)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MICROSECONDS (2)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MICROSECONDS (1)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MICROSECOND (0)
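
[editor's note] The inactivity timer word holds four 3-bit exponents, each a power of ten from 1 us (0) to 10 s (7) per the ITE_EXP_* names. A conversion sketch (helper name illustrative):

/* Convert a 3-bit inactivity timer exponent to microseconds. */
static u32 sasiounit2_ite_to_us(u8 exponent)
{
        u32 us = 1; /* MPI3_SASIOUNIT2_ITE_EXP_ONE_MICROSECOND */

        while (exponent--)
                us *= 10;
        return us;
}

/* Example: the SAS slumber timeout of one phy's PM settings 'pm':
 * sasiounit2_ite_to_us((le16_to_cpu(pm->inactivity_timer_exponent) &
 *         MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_MASK) >>
 *         MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_SHIFT); */
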
+struct mpi3_sas_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 power_management_capabilities;
+};
+
+#define MPI3_SASIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+struct mpi3_sas_expander_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 report_gen_length;
+ __le16 enclosure_handle;
+ __le32 reserved0c;
+ __le64 sas_address;
+ __le32 discovery_status;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 expander_change_count;
+ __le16 expander_route_indexes;
+ u8 num_phys;
+ u8 sas_level;
+ __le16 flags;
+ __le16 stp_bus_inactivity_time_limit;
+ __le16 stp_max_connect_time_limit;
+ __le16 stp_smp_nexus_loss_time;
+ __le16 max_num_routed_sas_addresses;
+ __le64 active_zone_manager_sas_address;
+ __le16 zone_lock_inactivity_limit;
+ __le16 reserved3a;
+ u8 time_to_reduced_func;
+ u8 initial_time_to_reduced_func;
+ u8 max_reduced_func_time;
+ u8 exp_status;
+};
+
+#define MPI3_SASEXPANDER0_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI3_SASEXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI3_SASEXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI3_SASEXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI3_SASEXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI3_SASEXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI3_SASEXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI3_SASEXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI3_SASEXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI3_SASEXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI3_SASEXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+#define MPI3_SASEXPANDER0_ES_NOT_RESPONDING (0x02)
+#define MPI3_SASEXPANDER0_ES_RESPONDING (0x03)
+#define MPI3_SASEXPANDER0_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_sas_expander_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 reserved09[3];
+ u8 num_phys;
+ u8 phy;
+ __le16 num_table_entries_programmed;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_handle;
+ __le32 phy_info;
+ __le16 attached_device_info;
+ __le16 reserved1a;
+ __le16 expander_dev_handle;
+ u8 change_count;
+ u8 negotiated_link_rate;
+ u8 phy_identifier;
+ u8 attached_phy_identifier;
+ u8 reserved22;
+ u8 discovery_info;
+ __le32 attached_phy_info;
+ u8 zone_group;
+ u8 self_config_status;
+ __le16 reserved2a;
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASEXPANDER1_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS
+#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1)
+#endif
+struct mpi3_sasexpander2_phy_element {
+ u8 link_change_count;
+ u8 reserved01;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_sas_expander_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS];
+};
+
+#define MPI3_SASEXPANDER2_PAGEVERSION (0x00)
+struct mpi3_sas_port_page0 {
+ struct mpi3_config_page_header header;
+ u8 port_number;
+ u8 reserved09;
+ u8 port_width;
+ u8 reserved0b;
+ u8 zone_group;
+ u8 reserved0d[3];
+ __le64 sas_address;
+ __le16 device_info;
+ __le16 reserved1a;
+ __le32 reserved1c;
+};
+
+#define MPI3_SASPORT0_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page0 {
+ struct mpi3_config_page_header header;
+ __le16 owner_dev_handle;
+ __le16 reserved0a;
+ __le16 attached_dev_handle;
+ u8 attached_phy_identifier;
+ u8 reserved0f;
+ __le32 attached_phy_info;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ u8 change_count;
+ u8 flags;
+ __le32 phy_info;
+ u8 negotiated_link_rate;
+ u8 reserved1d[3];
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASPHY0_PAGEVERSION (0x00)
+#define MPI3_SASPHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+struct mpi3_sas_phy_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 invalid_dword_count;
+ __le32 running_disparity_error_count;
+ __le32 loss_dword_synch_count;
+ __le32 phy_reset_problem_count;
+};
+
+#define MPI3_SASPHY1_PAGEVERSION (0x00)
+struct mpi3_sas_phy2_phy_event {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ __le32 phy_event_info;
+};
+
+#ifndef MPI3_SAS_PHY2_PHY_EVENT_MAX
+#define MPI3_SAS_PHY2_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy2_phy_event phy_event[MPI3_SAS_PHY2_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY2_PAGEVERSION (0x00)
+struct mpi3_sas_phy3_phy_event_config {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved07;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved0e;
+};
+
+#define MPI3_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI3_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI3_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI3_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI3_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI3_SASPHY3_EVENT_CODE_INV_SPL_PACKETS (0x07)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_SPL_PACKET_SYNC (0x08)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI3_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI3_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI3_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI3_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI3_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI3_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI3_SASPHY3_EVENT_CODE_CONNECTION (0x2a)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2b)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2c)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2d)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2e)
+#define MPI3_SASPHY3_EVENT_CODE_PERSIST_CONN (0x2f)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI3_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI3_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI3_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xd0)
+#define MPI3_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xd1)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP (0xd2)
+#define MPI3_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xd3)
+#define MPI3_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xd4)
+#define MPI3_SASPHY3_EVENT_CODE_LCCONN_TIME (0xd5)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xd6)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_TX_START (0xd7)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMIT (0xd8)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xd9)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xda)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xdb)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xdc)
+#define MPI3_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI3_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI3_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI3_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI3_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+#define MPI3_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI3_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+#ifndef MPI3_SAS_PHY3_PHY_EVENT_MAX
+#define MPI3_SAS_PHY3_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy3_phy_event_config phy_event_config[MPI3_SAS_PHY3_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY3_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page4 {
+ struct mpi3_config_page_header header;
+ u8 reserved08[3];
+ u8 flags;
+ u8 initial_frame[28];
+};
+
+#define MPI3_SASPHY4_PAGEVERSION (0x00)
+#define MPI3_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI3_SASPHY4_FLAGS_SATA_FRAME (0x01)
+#define MPI3_PCIE_LINK_RETIMERS_MASK (0x30)
+#define MPI3_PCIE_LINK_RETIMERS_SHIFT (4)
+#define MPI3_PCIE_NEG_LINK_RATE_MASK (0x0f)
+#define MPI3_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+#define MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_PCIE_NEG_LINK_RATE_2_5 (0x02)
+#define MPI3_PCIE_NEG_LINK_RATE_5_0 (0x03)
+#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
+#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0)
+#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1)
+#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2)
+#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3)
+#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1)
+#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3)
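
[editor's note] These masks describe a single link-rate byte (as used in the PCIe phy data and switch pages below) carrying the retimer count in bits 5:4 and the negotiated rate in bits 3:0. A decode sketch (helper names illustrative):

static inline u8 pcie_num_retimers(u8 nlr)
{
        return (nlr & MPI3_PCIE_LINK_RETIMERS_MASK) >>
               MPI3_PCIE_LINK_RETIMERS_SHIFT;
}

static const char *pcie_rate_str(u8 nlr)
{
        switch (nlr & MPI3_PCIE_NEG_LINK_RATE_MASK) {
        case MPI3_PCIE_NEG_LINK_RATE_2_5:          return "2.5 GT/s";
        case MPI3_PCIE_NEG_LINK_RATE_5_0:          return "5.0 GT/s";
        case MPI3_PCIE_NEG_LINK_RATE_8_0:          return "8.0 GT/s";
        case MPI3_PCIE_NEG_LINK_RATE_16_0:         return "16 GT/s";
        case MPI3_PCIE_NEG_LINK_RATE_32_0:         return "32 GT/s";
        case MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED: return "disabled";
        default:                                   return "unknown";
        }
}
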
+struct mpi3_pcie_io_unit0_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 enumeration_status;
+ u8 io_unit_port;
+ u8 reserved0d[3];
+};
+
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_MASK (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_IOUNIT1 (0x00)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_BKPLANE (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_ENUM_IN_PROGRESS (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCH_DEPTH_EXCEEDED (0x80000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_ENDPOINTS_EXCEEDED (0x20000000)
+#define MPI3_PCIEIOUNIT0_ES_INSUFFICIENT_RESOURCES (0x10000000)
+#ifndef MPI3_PCIE_IO_UNIT0_PHY_MAX
+#define MPI3_PCIE_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 init_status;
+ u8 aspm;
+ u8 reserved0f;
+ struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_RESOURCE_ALLOC_FAILED (0x03)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_HOST_PORT_MISMATCH (0x06)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PHYS_NOT_CONSECUTIVE (0x07)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0)
+struct mpi3_pcie_io_unit1_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le32 reserved04;
+ __le32 reserved08;
+};
+
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 control_flags;
+ __le32 reserved0c;
+ u8 num_phys;
+ u8 reserved11;
+ u8 aspm;
+ u8 reserved13;
+ struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xe0000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x00000040)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x00000030)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x00000004)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x00000005)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x00000006)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0)
+struct mpi3_pcie_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ __le16 nvme_max_q_dx1;
+ __le16 nvme_max_q_dx2;
+ u8 nvme_abort_to;
+ u8 reserved0d;
+ __le16 nvme_max_q_dx4;
+};
+
+#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0)
+#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1)
+#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4)
+#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5)
+struct mpi3_pcie_io_unit3_error {
+ __le16 threshold_count;
+ __le16 reserved02;
+};
+
+struct mpi3_pcie_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ u8 threshold_window;
+ u8 threshold_action;
+ u8 escalation_count;
+ u8 escalation_action;
+ u8 num_errors;
+ u8 reserved0d[3];
+ struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX];
+};
+
+#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03)
+struct mpi3_pcie_switch_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 switch_status;
+ u8 reserved0a[2];
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ u8 num_ports;
+ u8 pcie_level;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+};
+
+#define MPI3_PCIESWITCH0_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH0_SS_NOT_RESPONDING (0x02)
+#define MPI3_PCIESWITCH0_SS_RESPONDING (0x03)
+#define MPI3_PCIESWITCH0_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_pcie_switch_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 flags;
+ __le16 reserved0a;
+ u8 num_ports;
+ u8 port_num;
+ __le16 attached_dev_handle;
+ __le16 switch_dev_handle;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ __le16 slot;
+ __le16 slot_index;
+ __le32 reserved18;
+};
+
+#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
+#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS
+#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1)
+#endif
+struct mpi3_pcieswitch2_port_element {
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_pcie_switch_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_ports;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS];
+};
+
+#define MPI3_PCIESWITCH2_PAGEVERSION (0x00)
+struct mpi3_pcie_link_page0 {
+ struct mpi3_config_page_header header;
+ u8 link;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ __le32 receiver_error_count;
+ __le32 recovery_count;
+ __le32 corr_error_msg_count;
+ __le32 non_fatal_error_msg_count;
+ __le32 fatal_error_msg_count;
+ __le32 non_fatal_error_count;
+ __le32 fatal_error_count;
+ __le32 bad_dllp_count;
+ __le32 bad_tlp_count;
+};
+
+#define MPI3_PCIELINK0_PAGEVERSION (0x00)
+struct mpi3_enclosure_page0 {
+ struct mpi3_config_page_header header;
+ __le64 enclosure_logical_id;
+ __le16 flags;
+ __le16 enclosure_handle;
+ __le16 num_slots;
+ __le16 reserved16;
+ u8 io_unit_port;
+ u8 enclosure_level;
+ __le16 sep_dev_handle;
+ u8 chassis_slot;
+ u8 reserved1d[3];
+};
+
+#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_MASK (0xc000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
+#define MPI3_ENCLS0_FLAGS_MNG_MASK (0x000f)
+#define MPI3_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI3_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI3_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0002)
+#define MPI3_DEVICE_DEVFORM_SAS_SATA (0x00)
+#define MPI3_DEVICE_DEVFORM_PCIE (0x01)
+#define MPI3_DEVICE_DEVFORM_VD (0x02)
+struct mpi3_device0_sas_sata_format {
+ __le64 sas_address;
+ __le16 flags;
+ __le16 device_info;
+ u8 phy_num;
+ u8 attached_phy_identifier;
+ u8 max_port_connections;
+ u8 zone_group;
+};
+
+#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
+#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SW_PRESERVE (0x0040)
+#define MPI3_DEVICE0_SASSATA_FLAGS_UNSUPP_DEV (0x0020)
+#define MPI3_DEVICE0_SASSATA_FLAGS_48BIT_LBA (0x0010)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SMART_SUPP (0x0008)
+#define MPI3_DEVICE0_SASSATA_FLAGS_NCQ_SUPP (0x0004)
+#define MPI3_DEVICE0_SASSATA_FLAGS_FUA_SUPP (0x0002)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PERSIST_CAP (0x0001)
+struct mpi3_device0_pcie_format {
+ u8 supported_link_rates;
+ u8 max_port_width;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ u8 port_num;
+ u8 controller_reset_to;
+ __le16 device_info;
+ __le32 maximum_data_transfer_size;
+ __le32 capabilities;
+ __le16 noiob;
+ u8 nvme_abort_to;
+ u8 page_size;
+ __le16 shutdown_latency;
+ u8 recovery_info;
+ u8 reserved17;
+};
+
+#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_16_0_SUPP (0x08)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020)
+#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000)
+#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05)
+struct mpi3_device0_vd_format {
+ u8 vd_state;
+ u8 raid_level;
+ __le16 device_info;
+ __le16 flags;
+ __le16 io_throttle_group;
+ __le16 io_throttle_group_low;
+ __le16 io_throttle_group_high;
+ __le32 reserved0c;
+};
+#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
+#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
+#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02)
+#define MPI3_DEVICE0_VD_STATE_OPTIMAL (0x03)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_0 (0)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_1 (1)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_5 (5)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_6 (6)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_10 (10)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_50 (50)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_60 (60)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_HDD (0x0010)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SSD (0x0008)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
+union mpi3_device0_dev_spec_format {
+ struct mpi3_device0_sas_sata_format sas_sata_format;
+ struct mpi3_device0_pcie_format pcie_format;
+ struct mpi3_device0_vd_format vd_format;
+};
+
+struct mpi3_device_page0 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 slot;
+ __le16 enclosure_handle;
+ __le64 wwid;
+ __le16 persistent_id;
+ u8 io_unit_port;
+ u8 access_status;
+ __le16 flags;
+ __le16 reserved1e;
+ __le16 slot_index;
+ __le16 queue_depth;
+ u8 reserved24[3];
+ u8 device_form;
+ union mpi3_device0_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_PARENT_INVALID (0xffff)
+#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000)
+#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
+#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
+#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
+#define MPI3_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_DEVICE0_ASTATUS_CAP_UNSUPPORTED (0x02)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
+#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06)
+#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07)
+#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f)
+#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
+#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
+#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f)
+#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
+#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
+#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
+#define MPI3_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x23)
+#define MPI3_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x24)
+#define MPI3_DEVICE0_ASTATUS_SIF_PIO_SN (0x25)
+#define MPI3_DEVICE0_ASTATUS_SIF_MDMA_SN (0x26)
+#define MPI3_DEVICE0_ASTATUS_SIF_UDMA_SN (0x27)
+#define MPI3_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x28)
+#define MPI3_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x29)
+#define MPI3_DEVICE0_ASTATUS_SIF_MAX (0x2f)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNKNOWN (0x30)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f)
+#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
+#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
+#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDENTIFY_FAILED (0x43)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCONFIG_FAILED (0x44)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCREATION_FAILED (0x45)
+#define MPI3_DEVICE0_ASTATUS_NVME_EVENTCFG_FAILED (0x46)
+#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
+#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
+#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a)
+#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d)
+#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e)
+#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
+#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
+#define MPI3_DEVICE0_ASTATUS_NVME_TOO_MANY_ERRORS (0x52)
+#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
+#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
+#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
+#define MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED (0x0010)
+#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
+#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000)
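
[editor's note] device_form selects which member of union mpi3_device0_dev_spec_format is valid, so consumers dispatch on it before touching device_specific. A sketch (function name illustrative):

static void device0_print_address(const struct mpi3_device_page0 *pg)
{
        switch (pg->device_form) {
        case MPI3_DEVICE_DEVFORM_SAS_SATA:
                pr_info("SAS address 0x%016llx\n",
                        le64_to_cpu(pg->device_specific.sas_sata_format.sas_address));
                break;
        case MPI3_DEVICE_DEVFORM_PCIE:
                pr_info("PCIe device_info 0x%04x\n",
                        le16_to_cpu(pg->device_specific.pcie_format.device_info));
                break;
        case MPI3_DEVICE_DEVFORM_VD:
                pr_info("VD RAID level %u\n",
                        pg->device_specific.vd_format.raid_level);
                break;
        }
}
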
+struct mpi3_device1_sas_sata_format {
+ __le32 reserved00;
+};
+struct mpi3_device1_pcie_format {
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved08;
+ u8 revision_id;
+ u8 reserved0d;
+ __le16 pci_parameters;
+};
+
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_128B (0x0)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_256B (0x1)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_512B (0x2)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_1024B (0x3)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_2048B (0x4)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_4096B (0x5)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_MASK (0x01c0)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_SHIFT (6)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK (0x0038)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT (3)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_MASK (0x0007)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_SHIFT (0)
+struct mpi3_device1_vd_format {
+ __le32 reserved00;
+};
+
+union mpi3_device1_dev_spec_format {
+ struct mpi3_device1_sas_sata_format sas_sata_format;
+ struct mpi3_device1_pcie_format pcie_format;
+ struct mpi3_device1_vd_format vd_format;
+};
+
+struct mpi3_device_page1 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 reserved0a;
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le16 tm_count;
+ __le16 reserved12;
+ __le32 reserved14[10];
+ u8 reserved3c[3];
+ u8 device_form;
+ union mpi3_device1_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#define MPI3_DEVICE1_COUNTER_MAX (0xfffe)
+#define MPI3_DEVICE1_COUNTER_INVALID (0xffff)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
new file mode 100644
index 000000000000..64c58815988a
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2018-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_IMAGE_H
+#define MPI30_IMAGE_H 1
+struct mpi3_comp_image_version {
+ __le16 build_num;
+ __le16 customer_id;
+ u8 phase_minor;
+ u8 phase_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+struct mpi3_hash_exclusion_format {
+ __le32 offset;
+ __le32 size;
+};
+
+#define MPI3_IMAGE_HASH_EXCLUSION_NUM (4)
+struct mpi3_component_image_header {
+ __le32 signature0;
+ __le32 load_address;
+ __le32 data_size;
+ __le32 start_offset;
+ __le32 signature1;
+ __le32 flash_offset;
+ __le32 image_size;
+ __le32 version_string_offset;
+ __le32 build_date_string_offset;
+ __le32 build_time_string_offset;
+ __le32 environment_variable_offset;
+ __le32 application_specific;
+ __le32 signature2;
+ __le32 header_size;
+ __le32 crc;
+ __le32 flags;
+ __le32 secondary_flash_offset;
+ __le32 etp_offset;
+ __le32 etp_size;
+ union mpi3_version_union rmc_interface_version;
+ union mpi3_version_union etp_interface_version;
+ struct mpi3_comp_image_version component_image_version;
+ struct mpi3_hash_exclusion_format hash_exclusion[MPI3_IMAGE_HASH_EXCLUSION_NUM];
+ __le32 next_image_header_offset;
+ union mpi3_version_union security_version;
+ __le32 reserved84[31];
+};
+
+#define MPI3_IMAGE_HEADER_SIGNATURE0_MPI3 (0xeb00003e)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_INVALID (0x00000000)
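+/* signature1 values are four ASCII bytes stored little-endian, e.g. 0x20505041 is "APP " */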
+#define MPI3_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_FIRST_MUTABLE (0x20434d46)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_BSP (0x20505342)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_ROM_BIOS (0x534f4942)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_X64 (0x4d494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_ARM (0x41494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_CPLD (0x444c5043)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434d52)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
+#define MPI3_IMAGE_HEADER_FLAGS_REQUIRES_ACTIVATION (0x00000004)
+#define MPI3_IMAGE_HEADER_FLAGS_COMPRESSED (0x00000002)
+#define MPI3_IMAGE_HEADER_FLAGS_FLASH (0x00000001)
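+/* byte offsets of the header fields above, for locating them in a raw image */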
+#define MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04)
+#define MPI3_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08)
+#define MPI3_IMAGE_HEADER_START_OFFSET_OFFSET (0x0c)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10)
+#define MPI3_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14)
+#define MPI3_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18)
+#define MPI3_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1c)
+#define MPI3_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20)
+#define MPI3_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24)
+#define MPI3_IMAGE_HEADER_ENVIRONMENT_VAR_OFFSET_OFFSET (0x28)
+#define MPI3_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2c)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30)
+#define MPI3_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34)
+#define MPI3_IMAGE_HEADER_CRC_OFFSET (0x38)
+#define MPI3_IMAGE_HEADER_FLAGS_OFFSET (0x3c)
+#define MPI3_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40)
+#define MPI3_IMAGE_HEADER_ETP_OFFSET_OFFSET (0x44)
+#define MPI3_IMAGE_HEADER_ETP_SIZE_OFFSET (0x48)
+#define MPI3_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4c)
+#define MPI3_IMAGE_HEADER_ETP_INTERFACE_VER_OFFSET (0x50)
+#define MPI3_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54)
+#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
+#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
+#define MPI3_IMAGE_HEADER_SIZE (0x100)
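+/*
+ * Several structures in this header end in nominally one-element arrays;
+ * they are variable-length on the wire, and the *_MAX defaults below may be
+ * overridden before inclusion to size the arrays differently.
+ */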
+#ifndef MPI3_CI_MANIFEST_MPI_MAX
+#define MPI3_CI_MANIFEST_MPI_MAX (1)
+#endif
+struct mpi3_ci_manifest_mpi_comp_image_ref {
+ __le32 signature1;
+ __le32 reserved04[3];
+ struct mpi3_comp_image_version component_image_version;
+ __le32 component_image_version_string_offset;
+ __le32 crc;
+};
+
+struct mpi3_ci_manifest_mpi {
+ u8 manifest_type;
+ u8 reserved01[3];
+ __le32 reserved04[3];
+ u8 num_image_references;
+ u8 release_level;
+ __le16 reserved12;
+ __le16 reserved14;
+ __le16 flags;
+ __le32 reserved18[2];
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved28[2];
+ union mpi3_version_union package_security_version;
+ __le32 reserved34;
+ struct mpi3_comp_image_version package_version;
+ __le32 package_version_string_offset;
+ __le32 package_build_date_string_offset;
+ __le32 package_build_time_string_offset;
+ __le32 reserved4c;
+ __le32 diag_authorization_identifier[16];
+ struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX];
+};
+
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
+#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
+#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff)
+#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000)
+union mpi3_ci_manifest {
+ struct mpi3_ci_manifest_mpi mpi;
+ __le32 dword[1];
+};
+
+#define MPI3_CI_MANIFEST_TYPE_MPI (0x00)
+struct mpi3_extended_image_header {
+ u8 image_type;
+ u8 reserved01[3];
+ __le32 checksum;
+ __le32 image_size;
+ __le32 next_image_header_offset;
+ __le32 reserved10[4];
+ __le32 identify_string[8];
+};
+
+#define MPI3_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI3_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI3_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0c)
+#define MPI3_EXT_IMAGE_HEADER_SIZE (0x40)
+#define MPI3_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI3_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI3_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI3_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI3_EXT_IMAGE_TYPE_RDE (0x0a)
+#define MPI3_EXT_IMAGE_TYPE_AUXILIARY_PROCESSOR (0x0b)
+#define MPI3_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI3_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xff)
+struct mpi3_supported_device {
+ __le16 device_id;
+ __le16 vendor_id;
+ __le16 device_id_mask;
+ __le16 reserved06;
+ u8 low_pci_rev;
+ u8 high_pci_rev;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#ifndef MPI3_SUPPORTED_DEVICE_MAX
+#define MPI3_SUPPORTED_DEVICE_MAX (1)
+#endif
+struct mpi3_supported_devices_data {
+ u8 image_version;
+ u8 reserved01;
+ u8 num_devices;
+ u8 reserved03;
+ __le32 reserved04;
+ struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_MAX
+#define MPI3_ENCRYPTED_HASH_MAX (1)
+#endif
+struct mpi3_encrypted_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+};
+
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
+#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03)
+#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA1024 (0x03)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+#ifndef MPI3_PUBLIC_KEY_MAX
+#define MPI3_PUBLIC_KEY_MAX (1)
+#endif
+struct mpi3_encrypted_key_with_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 public_key[MPI3_PUBLIC_KEY_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
+#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
+#endif
+struct mpi3_encrypted_hash_data {
+ u8 image_version;
+ u8 num_hash;
+ __le16 reserved02;
+ __le32 reserved04;
+ struct mpi3_encrypted_hash_entry encrypted_hash_entry[MPI3_ENCRYPTED_HASH_ENTRY_MAX];
+};
+
+#ifndef MPI3_AUX_PROC_DATA_MAX
+#define MPI3_AUX_PROC_DATA_MAX (1)
+#endif
+struct mpi3_aux_processor_data {
+ u8 boot_method;
+ u8 num_load_addr;
+ u8 reserved02;
+ u8 type;
+ __le32 version;
+ __le32 load_address[8];
+ __le32 reserved28[22];
+ __le32 aux_processor_data[MPI3_AUX_PROC_DATA_MAX];
+};
+
+#define MPI3_AUX_PROC_DATA_OFFSET (0x80)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_MSG (0x00)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_DOORBELL (0x01)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_COMPONENT (0x02)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_A15 (0x00)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_M0 (0x01)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_R4 (0x02)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
new file mode 100644
index 000000000000..3c03610ecfa6
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_INIT_H
+#define MPI30_INIT_H 1
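+/*
+ * CDB formats for a SCSI IO request: a plain 32-byte CDB, an EEDP variant
+ * carrying protection-information tags, or an SGE locating a CDB larger than
+ * 32 bytes (see MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER below).
+ */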
+struct mpi3_scsi_io_cdb_eedp32 {
+ u8 cdb[20];
+ __be32 primary_reference_tag;
+ __le16 primary_application_tag;
+ __le16 primary_application_tag_mask;
+ __le32 transfer_length;
+};
+
+union mpi3_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct mpi3_scsi_io_cdb_eedp32 eedp32;
+ struct mpi3_sge_common sge;
+};
+
+struct mpi3_scsi_io_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le32 flags;
+ __le32 skip_count;
+ __le32 data_length;
+ u8 lun[8];
+ union mpi3_scsi_io_cdb_union cdb;
+ union mpi3_sge_union sgl[4];
+};
+
+#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
+#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
+#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
+#define MPI3_SCSIIO_METASGL_INDEX (3)
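+/*
+ * When MPI3_SCSIIO_MSGFLAGS_METASGL_VALID is set in msg_flags,
+ * sgl[MPI3_SCSIIO_METASGL_INDEX] describes the protection-information
+ * (metadata) buffer instead of data.
+ */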
+struct mpi3_scsi_io_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 dev_handle;
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 response_data;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 eedp_error_offset;
+ __le16 eedp_observed_app_tag;
+ __le16 eedp_observed_guard;
+ __le32 eedp_observed_ref_tag;
+ __le64 sense_data_buffer_address;
+};
+
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x02)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x04)
+#define MPI3_SCSI_STATUS_GOOD (0x00)
+#define MPI3_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI3_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI3_SCSI_STATUS_BUSY (0x08)
+#define MPI3_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI3_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI3_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI3_SCSI_STATUS_COMMAND_TERMINATED (0x22)
+#define MPI3_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
+#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
+#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
+#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
+#define MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE (0x03)
+#define MPI3_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI3_SCSI_STATE_TERMINATED (0x08)
+#define MPI3_SCSI_STATE_RESPONSE_DATA_VALID (0x10)
+#define MPI3_SCSI_RSP_RESPONSECODE_MASK (0x000000ff)
+#define MPI3_SCSI_RSP_RESPONSECODE_SHIFT (0)
+#define MPI3_SCSI_RSP_ARI2_MASK (0x0000ff00)
+#define MPI3_SCSI_RSP_ARI2_SHIFT (8)
+#define MPI3_SCSI_RSP_ARI1_MASK (0x00ff0000)
+#define MPI3_SCSI_RSP_ARI1_SHIFT (16)
+#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
+#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
+#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
new file mode 100644
index 000000000000..1c6c6730df5c
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -0,0 +1,1063 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_IOC_H
+#define MPI30_IOC_H 1
+struct mpi3_ioc_init_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ union mpi3_version_union mpi_version;
+ __le64 time_stamp;
+ u8 reserved18;
+ u8 who_init;
+ __le16 reserved1a;
+ __le16 reply_free_queue_depth;
+ __le16 reserved1e;
+ __le64 reply_free_queue_address;
+ __le32 reserved28;
+ __le16 sense_buffer_free_queue_depth;
+ __le16 sense_buffer_length;
+ __le64 sense_buffer_free_queue_address;
+ __le64 driver_information_address;
+};
+
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
+
+struct mpi3_ioc_facts_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ union mpi3_sge_union sgl;
+};
+
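+/* DMA'd by the IOC into the host buffer described by the request's sgl */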
+struct mpi3_ioc_facts_data {
+ __le16 ioc_facts_data_length;
+ __le16 reserved02;
+ union mpi3_version_union mpi_version;
+ struct mpi3_comp_image_version fw_version;
+ __le32 ioc_capabilities;
+ u8 ioc_number;
+ u8 who_init;
+ __le16 max_msix_vectors;
+ __le16 max_outstanding_requests;
+ __le16 product_id;
+ __le16 ioc_request_frame_size;
+ __le16 reply_frame_size;
+ __le16 ioc_exceptions;
+ __le16 max_persistent_id;
+ u8 sge_modifier_mask;
+ u8 sge_modifier_value;
+ u8 sge_modifier_shift;
+ u8 protocol_flags;
+ __le16 max_sas_initiators;
+ __le16 max_data_length;
+ __le16 max_sas_expanders;
+ __le16 max_enclosures;
+ __le16 min_dev_handle;
+ __le16 max_dev_handle;
+ __le16 max_pcie_switches;
+ __le16 max_nvme;
+ __le16 reserved38;
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_adv_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_posted_cmd_buffers;
+ __le32 flags;
+ __le16 max_operational_request_queues;
+ __le16 max_operational_reply_queues;
+ __le16 shutdown_timeout;
+ __le16 reserved4e;
+ __le32 diag_trace_size;
+ __le32 diag_fw_size;
+ __le32 diag_driver_size;
+ u8 max_host_pd_ns_count;
+ u8 max_adv_host_pd_ns_count;
+ u8 max_raidpd_ns_count;
+ u8 max_devices_per_throttle_group;
+ __le16 io_throttle_data_length;
+ __le16 max_io_throttle_group;
+ __le16 io_throttle_low;
+ __le16 io_throttle_high;
+};
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
+#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
+#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
+#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
+#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
+#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
+#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000)
+#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
+#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600)
+#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
+#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
+#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
+#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
+#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
+#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
+#define MPI3_IOCFACTS_PROTOCOL_SATA (0x0008)
+#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED (0x0000)
+#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
+#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
+struct mpi3_mgmt_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c[5];
+ union mpi3_sge_union command_sgl;
+ union mpi3_sge_union response_sgl;
+};
+
+struct mpi3_create_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 burst;
+ __le16 size;
+ __le16 queue_id;
+ __le16 reply_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
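+/*
+ * base_address points at contiguous queue memory, or at a list of segment
+ * addresses when the SEGMENTED flag is set; reply queues use the same
+ * convention.
+ */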
+struct mpi3_delete_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_create_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 reserved0b;
+ __le16 size;
+ __le16 queue_id;
+ __le16 msix_index;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
+struct mpi3_delete_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_port_enable_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+};
+
+#define MPI3_EVENT_LOG_DATA (0x01)
+#define MPI3_EVENT_CHANGE (0x02)
+#define MPI3_EVENT_GPIO_INTERRUPT (0x04)
+#define MPI3_EVENT_CABLE_MGMT (0x06)
+#define MPI3_EVENT_DEVICE_ADDED (0x07)
+#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08)
+#define MPI3_EVENT_PREPARE_FOR_RESET (0x09)
+#define MPI3_EVENT_COMP_IMAGE_ACT_START (0x0a)
+#define MPI3_EVENT_ENCL_DEVICE_ADDED (0x0b)
+#define MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x0c)
+#define MPI3_EVENT_DEVICE_STATUS_CHANGE (0x0d)
+#define MPI3_EVENT_ENERGY_PACK_CHANGE (0x0e)
+#define MPI3_EVENT_SAS_DISCOVERY (0x11)
+#define MPI3_EVENT_SAS_BROADCAST_PRIMITIVE (0x12)
+#define MPI3_EVENT_SAS_NOTIFY_PRIMITIVE (0x13)
+#define MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x14)
+#define MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW (0x15)
+#define MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x16)
+#define MPI3_EVENT_SAS_PHY_COUNTER (0x18)
+#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
+#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
+#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23)
+#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50)
+#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
+#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
+#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
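+/*
+ * event_masks is a 128-bit mask indexed by event code (word = event / 32,
+ * bit = event % 32); a set bit masks delivery of that event.
+ */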
+struct mpi3_event_notification_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+struct mpi3_event_notification_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 event_data_length;
+ u8 event;
+ __le16 ioc_change_count;
+ __le32 event_context;
+ __le32 event_data[1];
+};
+
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
+struct mpi3_event_data_gpio_interrupt {
+ u8 gpio_num;
+ u8 reserved01[3];
+};
+struct mpi3_event_data_cable_management {
+ __le32 active_cable_power_requirement;
+ u8 status;
+ u8 receptacle_id;
+ __le16 reserved06;
+};
+
+#define MPI3_EVENT_CABLE_MGMT_ACT_CABLE_PWR_INVALID (0xffffffff)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER (0x00)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_PRESENT (0x01)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED (0x02)
+struct mpi3_event_ack_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 event;
+ u8 reserved0d[3];
+ __le32 event_context;
+};
+
+struct mpi3_event_data_prepare_for_reset {
+ u8 reason_code;
+ u8 reserved01;
+ __le16 reserved02;
+};
+
+#define MPI3_EVENT_PREPARE_RESET_RC_START (0x01)
+#define MPI3_EVENT_PREPARE_RESET_RC_ABORT (0x02)
+struct mpi3_event_data_comp_image_activation {
+ __le32 reserved00;
+};
+
+struct mpi3_event_data_device_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 parent_dev_handle;
+ __le16 dev_handle;
+ __le64 wwid;
+ u8 lun[8];
+};
+
+#define MPI3_EVENT_DEV_STAT_RC_MOVED (0x01)
+#define MPI3_EVENT_DEV_STAT_RC_HIDDEN (0x02)
+#define MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN (0x03)
+#define MPI3_EVENT_DEV_STAT_RC_ASYNC_NOTIFICATION (0x04)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT (0x20)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP (0x21)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_STRT (0x22)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_CMP (0x23)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT (0x24)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP (0x25)
+#define MPI3_EVENT_DEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x30)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_STRT (0x40)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_CMP (0x41)
+#define MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING (0x50)
+struct mpi3_event_data_energy_pack_change {
+ __le32 reserved00;
+ __le16 shutdown_timeout;
+ __le16 reserved06;
+};
+
+struct mpi3_event_data_sas_discovery {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 discovery_status;
+};
+
+#define MPI3_EVENT_SAS_DISC_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_SAS_DISC_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+#define MPI3_SAS_DISC_STATUS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000)
+#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
+#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
+#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
+#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
+#define MPI3_SAS_DISC_STATUS_TABLE_TO_SUBTRACTIVE_LINK (0x00000200)
+#define MPI3_SAS_DISC_STATUS_UNSUPPORTED_DEVICE (0x00000100)
+#define MPI3_SAS_DISC_STATUS_TABLE_LINK (0x00000080)
+#define MPI3_SAS_DISC_STATUS_SUBTRACTIVE_LINK (0x00000040)
+#define MPI3_SAS_DISC_STATUS_SMP_CRC_ERROR (0x00000020)
+#define MPI3_SAS_DISC_STATUS_SMP_FUNCTION_FAILED (0x00000010)
+#define MPI3_SAS_DISC_STATUS_SMP_TIMEOUT (0x00000008)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_PORTS (0x00000004)
+#define MPI3_SAS_DISC_STATUS_INVALID_SAS_ADDRESS (0x00000002)
+#define MPI3_SAS_DISC_STATUS_LOOP_DETECTED (0x00000001)
+struct mpi3_event_data_sas_broadcast_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 port_width;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE (0x01)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_SES (0x02)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_EXPANDER (0x03)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED3 (0x05)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED4 (0x06)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE1_RESERVED (0x08)
+struct mpi3_event_data_sas_notify_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 reserved02;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_ENABLE_SPINUP (0x01)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04)
+#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+struct mpi3_event_sas_topo_phy_entry {
+ __le16 attached_dev_handle;
+ u8 link_rate;
+ u8 status;
+};
+
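+/* link_rate packs the current rate in the upper nibble and the previous rate in the lower nibble */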
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xf0)
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+#define MPI3_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI3_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI3_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI3_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI3_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0a)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0b)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0c)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_MASK (0xc0)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_SHIFT (6)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_ACCESSIBLE (0x00)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING (0x06)
+struct mpi3_event_data_sas_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 expander_dev_handle;
+ u8 num_phys;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 io_unit_port;
+ struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+};
+
+#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_sas_phy_counter {
+ __le64 time_stamp;
+ __le32 reserved08;
+ u8 phy_event_code;
+ u8 phy_num;
+ __le16 reserved0e;
+ __le32 phy_event_info;
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved17;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved1e;
+};
+
+struct mpi3_event_data_sas_device_disc_err {
+ __le16 dev_handle;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_FAILED (0x01)
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_TIMEOUT (0x02)
+struct mpi3_event_data_pcie_enumeration {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 enumeration_status;
+};
+
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCH_DEPTH_EXCEED (0x80000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
+#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1)
+#endif
+struct mpi3_event_pcie_topo_port_entry {
+ __le16 attached_dev_handle;
+ u8 port_status;
+ u8 reserved03;
+ u8 current_port_info;
+ u8 reserved05;
+ u8 previous_port_info;
+ u8 reserved07;
+};
+
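+/* current/previous_port_info pack the lane count (upper nibble) and link rate (lower nibble) */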
+#define MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_4 (0x30)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_32_0 (0x06)
+struct mpi3_event_data_pcie_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 switch_dev_handle;
+ u8 num_ports;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_port_num;
+ u8 switch_status;
+ u8 io_unit_port;
+ __le32 reserved0c;
+ struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+};
+
+#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_pcie_error_threshold {
+ __le64 timestamp;
+ u8 reason_code;
+ u8 port;
+ __le16 switch_dev_handle;
+ u8 error;
+ u8 action;
+ __le16 threshold_count;
+ __le16 attached_dev_handle;
+ __le16 reserved12;
+};
+
+#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
+#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01)
+struct mpi3_event_data_sas_init_dev_status_change {
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 dev_handle;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI3_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+struct mpi3_event_data_sas_init_table_overflow {
+ __le16 max_init;
+ __le16 current_init;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+struct mpi3_event_data_hard_reset_received {
+ u8 reserved00;
+ u8 io_unit_port;
+ __le16 reserved02;
+};
+
+struct mpi3_event_data_diag_buffer_status_change {
+ u8 type;
+ u8 reason_code;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
+#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
+#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
+#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)
+#define MPI3_PEL_ACTION_MARK_CLEAR (0x02)
+#define MPI3_PEL_ACTION_GET_LOG (0x03)
+#define MPI3_PEL_ACTION_GET_COUNT (0x04)
+#define MPI3_PEL_ACTION_WAIT (0x05)
+#define MPI3_PEL_ACTION_ABORT (0x06)
+#define MPI3_PEL_ACTION_GET_PRINT_STRINGS (0x07)
+#define MPI3_PEL_ACTION_ACKNOWLEDGE (0x08)
+#define MPI3_PEL_STATUS_SUCCESS (0x00)
+#define MPI3_PEL_STATUS_NOT_FOUND (0x01)
+#define MPI3_PEL_STATUS_ABORTED (0x02)
+#define MPI3_PEL_STATUS_NOT_READY (0x03)
+struct mpi3_pel_seq {
+ __le32 newest;
+ __le32 oldest;
+ __le32 clear;
+ __le32 shutdown;
+ __le32 boot;
+ __le32 last_acknowledged;
+};
+
+struct mpi3_pel_entry {
+ __le64 time_stamp;
+ __le32 sequence_number;
+ __le16 log_code;
+ __le16 arg_type;
+ __le16 locale;
+ u8 class;
+ u8 flags;
+ u8 ext_num;
+ u8 num_exts;
+ u8 arg_data_size;
+ u8 fixed_format_strings_size;
+ __le32 reserved18[2];
+ __le32 pel_info[24];
+};
+
+#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02)
+#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01)
+struct mpi3_pel_list {
+ __le32 log_count;
+ __le32 reserved04;
+ struct mpi3_pel_entry entry[1];
+};
+
+struct mpi3_pel_arg_map {
+ u8 arg_type;
+ u8 length;
+ __le16 start_location;
+};
+
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_APPEND_STRING (0x00)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_INTEGER (0x01)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_STRING (0x02)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_BIT_FIELD (0x03)
+struct mpi3_pel_print_string {
+ __le16 log_code;
+ __le16 string_length;
+ u8 num_arg_map;
+ u8 reserved05[3];
+ struct mpi3_pel_arg_map arg_map[1];
+};
+
+struct mpi3_pel_print_string_list {
+ __le32 num_print_strings;
+ __le32 residual_bytes_remain;
+ __le32 reserved08[2];
+ struct mpi3_pel_print_string print_string[1];
+};
+
+#ifndef MPI3_PEL_ACTION_SPECIFIC_MAX
+#define MPI3_PEL_ACTION_SPECIFIC_MAX (1)
+#endif
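+/*
+ * Generic persistent event log (PEL) request; the per-action structures that
+ * follow overlay action_specific with action-dependent fields.
+ */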
+struct mpi3_pel_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 action_specific[MPI3_PEL_ACTION_SPECIFIC_MAX];
+};
+
+struct mpi3_pel_req_action_get_sequence_numbers {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c[5];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_clear_log_marker {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ u8 clear_type;
+ u8 reserved0d[3];
+};
+
+struct mpi3_pel_req_action_get_log {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_get_count {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_wait {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le16 wait_time;
+ __le16 reserved16;
+ __le32 reserved18[2];
+};
+
+struct mpi3_pel_req_action_abort {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 abort_host_tag;
+ __le16 reserved12;
+ __le32 reserved14;
+};
+
+struct mpi3_pel_req_action_get_print_strings {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 start_log_code;
+ __le16 reserved12;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_acknowledge {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 sequence_number;
+ __le32 reserved10;
+};
+
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
+struct mpi3_pel_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 action;
+ u8 reserved11;
+ __le16 reserved12;
+ __le16 pe_log_status;
+ __le16 reserved16;
+ __le32 transfer_length;
+};
+
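+/*
+ * Component image download: a large image may be sent in pieces via
+ * image_offset/segment_size, with MPI3_CI_DOWNLOAD_MSGFLAGS_LAST_SEGMENT
+ * marking the final transfer.
+ */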
+struct mpi3_ci_download_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 signature1;
+ __le32 total_image_size;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_LAST_SEGMENT (0x80)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_DOWNLOAD (0x01)
+#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
+#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05)
+struct mpi3_ci_download_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 flags;
+ u8 cache_dirty;
+ u8 pending_count;
+ u8 reserved13;
+};
+
+#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_FAILURE (0x40)
+#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
+#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_OFFLINE_PENDING (0x06)
+#define MPI3_CI_DOWNLOAD_FLAGS_COMPATIBLE (0x01)
+struct mpi3_ci_upload_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 signature1;
+ __le32 reserved10;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
+#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04)
+#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05)
+#define MPI3_CTRL_OP_GET_IOC_CHANGE_COUNT (0x06)
+#define MPI3_CTRL_OP_CHANGE_PROFILE (0x07)
+#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
+#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
+#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
+#define MPI3_CTRL_OP_SEND_SAS_PRIMITIVE (0x20)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
+#define MPI3_CTRL_OP_CHANGE_PROFILE_PARAM8_PROFILE_ID_INDEX (0x00)
+#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01)
+#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
+#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTENT_ID (0x04)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM64_WWID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM16_SLOTNUM_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM64_ENCLOSURELID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM64_DEVNAME_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
+#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
+#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_GET_IOC_CHANGE_COUNT_VALUE16_CHANGECOUNT_INDEX (0)
+#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
+#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
+#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
+#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+#define MPI3_CTRL_ACTION_NOP (0x00)
+#define MPI3_CTRL_ACTION_LINK_RESET (0x01)
+#define MPI3_CTRL_ACTION_HARD_RESET (0x02)
+#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05)
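+/*
+ * Operation-specific parameters are passed in the param8/16/32/64 arrays;
+ * the *_INDEX defines above give each operation's slot in those arrays.
+ */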
+struct mpi3_iounit_control_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 operation;
+ __le32 reserved0c;
+ __le64 param64[2];
+ __le32 param32[4];
+ __le16 param16[4];
+ u8 param8[8];
+};
+
+struct mpi3_iounit_control_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le64 value64[2];
+ __le32 value32[4];
+ __le16 value16[4];
+ u8 value8[8];
+};
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
new file mode 100644
index 000000000000..b7a5df01120d
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_PCI_H
+#define MPI30_PCI_H 1
+
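+/*
+ * Flags for NVMe pass-through requests: select the admin vs I/O submission
+ * queue and control when a full error reply is forced for admin commands.
+ */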
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
+
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
new file mode 100644
index 000000000000..e587f77ccd68
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_SAS_H
+#define MPI30_SAS_H 1
+#define MPI3_SAS_DEVICE_INFO_SSP_TARGET (0x00000100)
+#define MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET (0x00000080)
+#define MPI3_SAS_DEVICE_INFO_SMP_TARGET (0x00000040)
+#define MPI3_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000020)
+#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
+#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
+struct mpi3_smp_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 io_unit_port;
+ __le32 reserved0c[3];
+ __le64 sas_address;
+ struct mpi3_sge_common request_sge;
+ struct mpi3_sge_common response_sge;
+};
+
+struct mpi3_smp_passthrough_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le16 response_data_length;
+ __le16 reserved12;
+};
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
new file mode 100644
index 000000000000..9b76b9632751
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_TRANSPORT_H
+#define MPI30_TRANSPORT_H 1
+struct mpi3_version_struct {
+ u8 dev;
+ u8 unit;
+ u8 minor;
+ u8 major;
+};
+
+union mpi3_version_union {
+ struct mpi3_version_struct mpi3_version;
+ __le32 word;
+};
+
+#define MPI3_VERSION_MAJOR (3)
+#define MPI3_VERSION_MINOR (0)
+#define MPI3_VERSION_UNIT (26)
+#define MPI3_VERSION_DEV (0)
+#define MPI3_DEVHANDLE_INVALID (0xffff)
+struct mpi3_sysif_oper_queue_indexes {
+ __le16 producer_index;
+ __le16 reserved02;
+ __le16 consumer_index;
+ __le16 reserved06;
+};
+
+struct mpi3_sysif_registers {
+ __le64 ioc_information;
+ union mpi3_version_union version;
+ __le32 reserved0c[2];
+ __le32 ioc_configuration;
+ __le32 reserved18;
+ __le32 ioc_status;
+ __le32 reserved20;
+ __le32 admin_queue_num_entries;
+ __le64 admin_request_queue_address;
+ __le64 admin_reply_queue_address;
+ __le32 reserved38[2];
+ __le32 coalesce_control;
+ __le32 reserved44[1007];
+ __le16 admin_request_queue_pi;
+ __le16 reserved1002;
+ __le16 admin_reply_queue_ci;
+ __le16 reserved1006;
+ struct mpi3_sysif_oper_queue_indexes oper_queue_indexes[383];
+ __le32 reserved1c00;
+ __le32 write_sequence;
+ __le32 host_diagnostic;
+ __le32 reserved1c0c;
+ __le32 fault;
+ __le32 fault_info[3];
+ __le32 reserved1c20[4];
+ __le64 hcb_address;
+ __le32 hcb_size;
+ __le32 reserved1c3c;
+ __le32 reply_free_host_index;
+ __le32 sense_buffer_free_host_index;
+ __le32 reserved1c48[2];
+ __le64 diag_rw_data;
+ __le64 diag_rw_address;
+ __le16 diag_rw_control;
+ __le16 diag_rw_status;
+ __le32 reserved1c64[35];
+ __le32 scratchpad[4];
+ __le32 reserved1d00[192];
+ __le32 device_assigned_registers[2048];
+};
+
+#define MPI3_SYSIF_IOC_INFO_LOW_OFFSET (0x00000000)
+#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001)
+#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
+#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
+#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
+#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
+#define MPI3_SYSIF_IOC_STATUS_FAULT (0x00000002)
+#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET (0x00000028)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_HIGH_OFFSET (0x0000002c)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET (0x00000030)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
+#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_SHIFT (0)
+#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
+#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
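+/* per-queue PI/CI registers for operational queue N (N >= 1), spaced 8 bytes apart to match oper_queue_indexes[] */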
+#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD (0xb)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH (0x2)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH (0x7)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
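+/*
+ * Writing the six key values above to write_sequence in order unlocks
+ * host_diagnostic for writes; the FLUSH value restarts the sequence.
+ */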
+#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
+#define MPI3_SYSIF_HOST_DIAG_SECURE_BOOT (0x00000040)
+#define MPI3_SYSIF_HOST_DIAG_CLEAR_INVALID_FW_IMAGE (0x00000020)
+#define MPI3_SYSIF_HOST_DIAG_INVALID_FW_IMAGE (0x00000010)
+#define MPI3_SYSIF_HOST_DIAG_HCBENABLE (0x00000008)
+#define MPI3_SYSIF_HOST_DIAG_HCBMODE (0x00000004)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_RW_ENABLE (0x00000002)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE (0x00000001)
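+/* fault and fault_info are valid only while ioc_status has MPI3_SYSIF_IOC_STATUS_FAULT set */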
+#define MPI3_SYSIF_FAULT_OFFSET (0x00001c10)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MASK (0xff000000)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
+#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
+#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
+#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
+#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
+#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
+#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
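+
+/*
+ * Illustrative sketch: on a controller fault the host reads the Fault
+ * register and extracts the fault code with the mask above; the
+ * FaultInfo registers carry additional debug data. Example only;
+ * "sysif_base" is an assumed register base pointer.
+ */
+#if 0 /* example only */
+static u16 mpi3_example_read_fault_code(void __iomem *sysif_base)
+{
+ u32 fault = readl(sysif_base + MPI3_SYSIF_FAULT_OFFSET);
+
+ return fault & MPI3_SYSIF_FAULT_CODE_MASK;
+}
+#endif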
+#define MPI3_SYSIF_HCB_ADDRESS_LOW_OFFSET (0x00001c30)
+#define MPI3_SYSIF_HCB_ADDRESS_HIGH_OFFSET (0x00001c34)
+#define MPI3_SYSIF_HCB_SIZE_OFFSET (0x00001c38)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_MASK (0xfffff000)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_SHIFT (12)
+#define MPI3_SYSIF_HCB_SIZE_HCDW_ENABLE (0x00000001)
+#define MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET (0x00001c40)
+#define MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET (0x00001c44)
+#define MPI3_SYSIF_DIAG_RW_DATA_LOW_OFFSET (0x00001c50)
+#define MPI3_SYSIF_DIAG_RW_DATA_HIGH_OFFSET (0x00001c54)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_LOW_OFFSET (0x00001c58)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
+#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_PAR_ERR (0x00000006)
+#define MPI3_SYSIF_DIAG_RW_STATUS_BUSY (0x00000001)
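+
+/*
+ * Illustrative sketch of a 4-byte diagnostic read using the registers
+ * above: program the address, start the transfer through the control
+ * register, poll the status register until BUSY clears, then fetch
+ * the data. Timeouts and error checks are omitted; the 16-bit
+ * accesses assume the control/status registers are two bytes wide,
+ * as their offsets suggest. "sysif_base" is an assumed register
+ * base pointer.
+ */
+#if 0 /* example only */
+static u32 mpi3_example_diag_read32(void __iomem *sysif_base, u64 addr)
+{
+ writel(lower_32_bits(addr),
+ sysif_base + MPI3_SYSIF_DIAG_RW_ADDRESS_LOW_OFFSET);
+ writel(upper_32_bits(addr),
+ sysif_base + MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET);
+ writew(MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES |
+ MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ |
+ MPI3_SYSIF_DIAG_RW_CONTROL_START,
+ sysif_base + MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET);
+ while (readw(sysif_base + MPI3_SYSIF_DIAG_RW_STATUS_OFFSET) &
+ MPI3_SYSIF_DIAG_RW_STATUS_BUSY)
+ cpu_relax();
+ return readl(sysif_base + MPI3_SYSIF_DIAG_RW_DATA_LOW_OFFSET);
+}
+#endif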
+#define MPI3_SYSIF_SCRATCHPAD0_OFFSET (0x00001cf0)
+#define MPI3_SYSIF_SCRATCHPAD1_OFFSET (0x00001cf4)
+#define MPI3_SYSIF_SCRATCHPAD2_OFFSET (0x00001cf8)
+#define MPI3_SYSIF_SCRATCHPAD3_OFFSET (0x00001cfc)
+#define MPI3_SYSIF_DEVICE_ASSIGNED_REGS_OFFSET (0x00002000)
+#define MPI3_SYSIF_DIAG_SAVE_TIMEOUT (60)
+struct mpi3_default_reply_descriptor {
+ __le32 descriptor_type_dependent1[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 descriptor_type_dependent2;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+#define MPI3_REPLY_DESCRIPT_REQUEST_QUEUE_ID_INVALID (0xffff)
+struct mpi3_address_reply_descriptor {
+ __le64 reply_frame_address;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 reserved0c;
+ __le16 reply_flags;
+};
+
+struct mpi3_success_reply_descriptor {
+ __le32 reserved00[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+struct mpi3_target_command_buffer_reply_descriptor {
+ __le32 reserved00;
+ __le16 initiator_dev_handle;
+ u8 phy_num;
+ u8 reserved07;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 io_index;
+ __le16 reply_flags;
+};
+
+struct mpi3_status_reply_descriptor {
+ __le16 ioc_status;
+ __le16 reserved02;
+ __le32 ioc_log_info;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL (0x8000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_NO_INFO (0x00000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_SAS (0x30000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_DATA_MASK (0x0fffffff)
+union mpi3_reply_descriptors_union {
+ struct mpi3_default_reply_descriptor default_reply;
+ struct mpi3_address_reply_descriptor address_reply;
+ struct mpi3_success_reply_descriptor success;
+ struct mpi3_target_command_buffer_reply_descriptor target_command_buffer;
+ struct mpi3_status_reply_descriptor status;
+ __le32 words[4];
+};
+
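+/*
+ * Illustrative sketch: the host detects new reply descriptors by
+ * comparing the descriptor phase bit with an expected phase value it
+ * flips on every queue wrap, so stale entries are never re-consumed.
+ * Example only; the expected-phase bookkeeping lives in the driver,
+ * not in these definitions.
+ */
+#if 0 /* example only */
+static bool mpi3_example_reply_valid(struct mpi3_default_reply_descriptor *d,
+ u8 expected_phase)
+{
+ u16 flags = le16_to_cpu(d->reply_flags);
+
+ return (flags & MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) == expected_phase;
+}
+#endif
+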
+struct mpi3_sge_common {
+ __le64 address;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_bit_bucket {
+ __le64 reserved00;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_extended_eedp {
+ u8 user_data_size;
+ u8 reserved01;
+ __le16 eedp_flags;
+ __le32 secondary_reference_tag;
+ __le16 secondary_application_tag;
+ __le16 application_tag_translation_mask;
+ __le16 reserved0c;
+ u8 extended_operation;
+ u8 flags;
+};
+
+union mpi3_sge_union {
+ struct mpi3_sge_common simple;
+ struct mpi3_sge_common chain;
+ struct mpi3_sge_common last_chain;
+ struct mpi3_sge_bit_bucket bit_bucket;
+ struct mpi3_sge_extended_eedp eedp;
+ __le32 words[4];
+};
+
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN (0x30)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED (0xf0)
+#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
+#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
+#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
+#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
+#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
+#define MPI3_SGE_EXT_OPER_EEDP (0x00)
+#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
+#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000)
+#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000)
+#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000)
+#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800)
+#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400)
+#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
+#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
+#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
+#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
+#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
+#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
+#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
+#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
+#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
+#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004)
+#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007)
+#define MPI3_EEDP_UDS_512 (0x01)
+#define MPI3_EEDP_UDS_520 (0x02)
+#define MPI3_EEDP_UDS_4080 (0x03)
+#define MPI3_EEDP_UDS_4088 (0x04)
+#define MPI3_EEDP_UDS_4096 (0x05)
+#define MPI3_EEDP_UDS_4104 (0x06)
+#define MPI3_EEDP_UDS_4160 (0x07)
+struct mpi3_request_header {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 function_dependent;
+};
+
+struct mpi3_default_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+};
+
+#define MPI3_HOST_TAG_INVALID (0xffff)
+#define MPI3_FUNCTION_IOC_FACTS (0x01)
+#define MPI3_FUNCTION_IOC_INIT (0x02)
+#define MPI3_FUNCTION_PORT_ENABLE (0x03)
+#define MPI3_FUNCTION_EVENT_NOTIFICATION (0x04)
+#define MPI3_FUNCTION_EVENT_ACK (0x05)
+#define MPI3_FUNCTION_CI_DOWNLOAD (0x06)
+#define MPI3_FUNCTION_CI_UPLOAD (0x07)
+#define MPI3_FUNCTION_IO_UNIT_CONTROL (0x08)
+#define MPI3_FUNCTION_PERSISTENT_EVENT_LOG (0x09)
+#define MPI3_FUNCTION_MGMT_PASSTHROUGH (0x0a)
+#define MPI3_FUNCTION_CONFIG (0x10)
+#define MPI3_FUNCTION_SCSI_IO (0x20)
+#define MPI3_FUNCTION_SCSI_TASK_MGMT (0x21)
+#define MPI3_FUNCTION_SMP_PASSTHROUGH (0x22)
+#define MPI3_FUNCTION_NVME_ENCAPSULATED (0x24)
+#define MPI3_FUNCTION_TARGET_ASSIST (0x30)
+#define MPI3_FUNCTION_TARGET_STATUS_SEND (0x31)
+#define MPI3_FUNCTION_TARGET_MODE_ABORT (0x32)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_BASE (0x33)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_LIST (0x34)
+#define MPI3_FUNCTION_CREATE_REQUEST_QUEUE (0x70)
+#define MPI3_FUNCTION_DELETE_REQUEST_QUEUE (0x71)
+#define MPI3_FUNCTION_CREATE_REPLY_QUEUE (0x72)
+#define MPI3_FUNCTION_DELETE_REPLY_QUEUE (0x73)
+#define MPI3_FUNCTION_TOOLBOX (0x80)
+#define MPI3_FUNCTION_DIAG_BUFFER_POST (0x81)
+#define MPI3_FUNCTION_DIAG_BUFFER_MANAGE (0x82)
+#define MPI3_FUNCTION_DIAG_BUFFER_UPLOAD (0x83)
+#define MPI3_FUNCTION_MIN_IOC_USE_ONLY (0xc0)
+#define MPI3_FUNCTION_MAX_IOC_USE_ONLY (0xef)
+#define MPI3_FUNCTION_MIN_PRODUCT_SPECIFIC (0xf0)
+#define MPI3_FUNCTION_MAX_PRODUCT_SPECIFIC (0xff)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
+#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_SUCCESS (0x0000)
+#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI3_IOCSTATUS_BUSY (0x0002)
+#define MPI3_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI3_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
+#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
+#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d)
+#define MPI3_IOCSTATUS_FAILURE (0x001f)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI3_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI3_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+#define MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI3_IOCSTATUS_SCSI_TM_NOT_SUPPORTED (0x0041)
+#define MPI3_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI3_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI3_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004a)
+#define MPI3_IOCSTATUS_SCSI_IOC_TERMINATED (0x004b)
+#define MPI3_IOCSTATUS_SCSI_EXT_TERMINATED (0x004c)
+#define MPI3_IOCSTATUS_EEDP_GUARD_ERROR (0x004d)
+#define MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004e)
+#define MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004f)
+#define MPI3_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI3_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI3_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI3_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI3_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006a)
+#define MPI3_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006d)
+#define MPI3_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006e)
+#define MPI3_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006f)
+#define MPI3_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI3_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+#define MPI3_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI3_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+#define MPI3_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00a0)
+#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
+#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
+#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4)
+#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
+#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
+#define MPI3_IOCSTATUS_INVALID_REPLY_QUEUE_ID (0x0f03)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_DELETION (0x0f04)
+#define MPI3_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
new file mode 100644
index 000000000000..def4c5e15cd8
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -0,0 +1,1400 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3MR_H_INCLUDED
+#define MPI3MR_H_INCLUDED
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "mpi/mpi30_transport.h"
+#include "mpi/mpi30_cnfg.h"
+#include "mpi/mpi30_image.h"
+#include "mpi/mpi30_init.h"
+#include "mpi/mpi30_ioc.h"
+#include "mpi/mpi30_sas.h"
+#include "mpi/mpi30_pci.h"
+#include "mpi3mr_debug.h"
+
+/* Global list and lock for storing multiple adapters managed by the driver */
+extern spinlock_t mrioc_list_lock;
+extern struct list_head mrioc_list;
+extern int prot_mask;
+extern atomic64_t event_counter;
+
+#define MPI3MR_DRIVER_VERSION "8.2.0.3.0"
+#define MPI3MR_DRIVER_RELDATE "08-September-2022"
+
+#define MPI3MR_DRIVER_NAME "mpi3mr"
+#define MPI3MR_DRIVER_LICENSE "GPL"
+#define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. <mpi3mr-linuxdrv.pdl@broadcom.com>"
+#define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver"
+
+#define MPI3MR_NAME_LENGTH 32
+#define IOCNAME "%s: "
+
+#define MPI3MR_MAX_SECTORS 2048
+
+/* Definitions for internal SGL and Chain SGL buffers */
+#define MPI3MR_PAGE_SIZE_4K 4096
+#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
+
+/* Definitions for MAX values for shost */
+#define MPI3MR_MAX_CMDS_LUN 128
+#define MPI3MR_MAX_CDB_LENGTH 32
+
+/* Admin queue management definitions */
+#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
+#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
+
+/* Operational queue management definitions */
+#define MPI3MR_OP_REQ_Q_QD 512
+#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD4K 4096
+#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
+#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
+#define MPI3MR_MAX_SEG_LIST_SIZE 4096
+
+/* Reserved Host Tag definitions */
+#define MPI3MR_HOSTTAG_INVALID 0xFFFF
+#define MPI3MR_HOSTTAG_INITCMDS 1
+#define MPI3MR_HOSTTAG_BSG_CMDS 2
+#define MPI3MR_HOSTTAG_PEL_ABORT 3
+#define MPI3MR_HOSTTAG_PEL_WAIT 4
+#define MPI3MR_HOSTTAG_BLK_TMS 5
+#define MPI3MR_HOSTTAG_CFG_CMDS 6
+#define MPI3MR_HOSTTAG_TRANSPORT_CMDS 7
+
+#define MPI3MR_NUM_DEVRMCMD 16
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TRANSPORT_CMDS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
+ MPI3MR_NUM_DEVRMCMD - 1)
+
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_NUM_EVTACKCMD 4
+#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1)
+#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \
+ MPI3MR_NUM_EVTACKCMD - 1)
+
+/* Reduced resource count definition for crash kernel */
+#define MPI3MR_HOST_IOS_KDUMP 128
+
+/* command/controller interaction timeout definitions in seconds */
+#define MPI3MR_INTADMCMD_TIMEOUT 60
+#define MPI3MR_PORTENABLE_TIMEOUT 300
+#define MPI3MR_PORTENABLE_POLL_INTERVAL 5
+#define MPI3MR_ABORTTM_TIMEOUT 60
+#define MPI3MR_RESETTM_TIMEOUT 60
+#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
+#define MPI3MR_TSUPDATE_INTERVAL 900
+#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
+#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
+#define MPI3MR_RESET_ACK_TIMEOUT 30
+
+#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */
+
+#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */
+
+#define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10
+
+#define MPI3MR_SCMD_TIMEOUT (60 * HZ)
+#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)
+
+/* Internal admin command state definitions */
+#define MPI3MR_CMD_NOTUSED 0x8000
+#define MPI3MR_CMD_COMPLETE 0x0001
+#define MPI3MR_CMD_PENDING 0x0002
+#define MPI3MR_CMD_REPLY_VALID 0x0004
+#define MPI3MR_CMD_RESET 0x0008
+
+/* Definitions for Event replies and sense buffer allocated per controller */
+#define MPI3MR_NUM_EVT_REPLIES 64
+#define MPI3MR_SENSE_BUF_SZ 256
+#define MPI3MR_SENSEBUF_FACTOR 3
+#define MPI3MR_CHAINBUF_FACTOR 3
+#define MPI3MR_CHAINBUFDIX_FACTOR 2
+
+/* Invalid target device handle */
+#define MPI3MR_INVALID_DEV_HANDLE 0xFFFF
+
+/* Controller Reset related definitions */
+#define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5
+#define MPI3MR_MAX_RESET_RETRY_COUNT 3
+
+/* ResponseCode definitions */
+#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
+#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
+ MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
+
+#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+#define MPI3MR_DEFAULT_PGSZEXP (12)
+
+/* Command retry count definitions */
+#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
+#define MPI3MR_PEL_RETRY_COUNT 3
+
+/* Default target device queue depth */
+#define MPI3MR_DEFAULT_SDEV_QD 32
+
+/* Definitions for threaded IRQ poll */
+#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
+
+/* Definitions for the controller security status */
+#define MPI3MR_CTLR_SECURITY_STATUS_MASK 0x0C
+#define MPI3MR_CTLR_SECURE_DBG_STATUS_MASK 0x02
+
+#define MPI3MR_INVALID_DEVICE 0x00
+#define MPI3MR_CONFIG_SECURE_DEVICE 0x04
+#define MPI3MR_HARD_SECURE_DEVICE 0x08
+#define MPI3MR_TAMPERED_DEVICE 0x0C
+
+/* SGE Flag definition */
+#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
+ (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
+ MPI3_SGE_FLAGS_END_OF_LIST)
+
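+/*
+ * Illustrative sketch: filling a single simple SGE that targets
+ * system memory and ends the list, using the combined flag value
+ * above. This is roughly what the driver's mpi3mr_add_sg_single()
+ * helper (declared later in this header) does for internal requests.
+ */
+#if 0 /* example only */
+static void mpi3mr_example_fill_sge(struct mpi3_sge_common *sge,
+ dma_addr_t dma_addr, u32 length)
+{
+ sge->address = cpu_to_le64(dma_addr);
+ sge->length = cpu_to_le32(length);
+ sge->flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+}
+#endif
+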
+/* MSI Index from Reply Queue Index */
+#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) ((qidx) + (offset))
+
+/*
+ * Maximum data transfer size definitions for management
+ * application commands
+ */
+#define MPI3MR_MAX_APP_XFER_SIZE (1 * 1024 * 1024)
+#define MPI3MR_MAX_APP_XFER_SEGMENTS 512
+/*
+ * 2048 sectors are for data buffers and an additional 512 sectors for
+ * other buffers
+ */
+#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
+
+/**
+ * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
+ * Encapsulated commands.
+ *
+ * @base_addr: Physical address
+ * @length: SGE length
+ * @rsvd: Reserved
+ * @rsvd1: Reserved
+ * @sgl_type: SGL type
+ */
+struct mpi3mr_nvme_pt_sge {
+ u64 base_addr;
+ u32 length;
+ u16 rsvd;
+ u8 rsvd1;
+ u8 sgl_type;
+};
+
+/**
+ * struct mpi3mr_buf_map - local structure to
+ * track kernel and user buffers associated with a BSG
+ * structure.
+ *
+ * @bsg_buf: BSG buffer virtual address
+ * @bsg_buf_len: BSG buffer length
+ * @kern_buf: Kernel buffer virtual address
+ * @kern_buf_len: Kernel buffer length
+ * @kern_buf_dma: Kernel buffer DMA address
+ * @data_dir: Data direction.
+ */
+struct mpi3mr_buf_map {
+ void *bsg_buf;
+ u32 bsg_buf_len;
+ void *kern_buf;
+ u32 kern_buf_len;
+ dma_addr_t kern_buf_dma;
+ u8 data_dir;
+};
+
+/* IOC State definitions */
+enum mpi3mr_iocstate {
+ MRIOC_STATE_READY = 1,
+ MRIOC_STATE_RESET,
+ MRIOC_STATE_FAULT,
+ MRIOC_STATE_BECOMING_READY,
+ MRIOC_STATE_RESET_REQUESTED,
+ MRIOC_STATE_UNRECOVERABLE,
+};
+
+/* Reset reason code definitions */
+enum mpi3mr_reset_reason {
+ MPI3MR_RESET_FROM_BRINGUP = 1,
+ MPI3MR_RESET_FROM_FAULT_WATCH = 2,
+ MPI3MR_RESET_FROM_APP = 3,
+ MPI3MR_RESET_FROM_EH_HOS = 4,
+ MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
+ MPI3MR_RESET_FROM_APP_TIMEOUT = 6,
+ MPI3MR_RESET_FROM_MUR_FAILURE = 7,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
+ MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,
+ MPI3MR_RESET_FROM_PE_TIMEOUT = 10,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT = 11,
+ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT = 12,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT = 13,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT = 14,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT = 15,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT = 16,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT = 17,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT = 18,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT = 19,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER = 20,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
+ MPI3MR_RESET_FROM_SYSFS = 23,
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
+ MPI3MR_RESET_FROM_FIRMWARE = 27,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
+};
+
+/* Queue type definitions */
+enum queue_type {
+ MPI3MR_DEFAULT_QUEUE = 0,
+ MPI3MR_POLL_QUEUE,
+};
+
+/**
+ * struct mpi3mr_compimg_ver - replica of component image
+ * version defined in mpi30_image.h in host endianness
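+ * @build_num: Build number
+ * @cust_id: Customer ID
+ * @ph_minor: Phase minor version
+ * @ph_major: Phase major version
+ * @gen_minor: Generation minor version
+ * @gen_major: Generation major version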
+ *
+ */
+struct mpi3mr_compimg_ver {
+ u16 build_num;
+ u16 cust_id;
+ u8 ph_minor;
+ u8 ph_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+/**
+ * struct mpi3mr_ioc_facts - replica of the IOC facts data defined
+ * in mpi30_ioc.h in host endianness
+ *
+ */
+struct mpi3mr_ioc_facts {
+ u32 ioc_capabilities;
+ struct mpi3mr_compimg_ver fw_ver;
+ u32 mpi_version;
+ u16 max_reqs;
+ u16 product_id;
+ u16 op_req_sz;
+ u16 reply_sz;
+ u16 exceptions;
+ u16 max_perids;
+ u16 max_pds;
+ u16 max_sasexpanders;
+ u16 max_sasinitiators;
+ u16 max_enclosures;
+ u16 max_pcie_switches;
+ u16 max_nvme;
+ u16 max_vds;
+ u16 max_hpds;
+ u16 max_advhpds;
+ u16 max_raid_pds;
+ u16 min_devhandle;
+ u16 max_devhandle;
+ u16 max_op_req_q;
+ u16 max_op_reply_q;
+ u16 shutdown_timeout;
+ u8 ioc_num;
+ u8 who_init;
+ u16 max_msix_vectors;
+ u8 personality;
+ u8 dma_mask;
+ u8 protocol_flags;
+ u8 sge_mod_mask;
+ u8 sge_mod_value;
+ u8 sge_mod_shift;
+ u8 max_dev_per_tg;
+ u16 max_io_throttle_group;
+ u16 io_throttle_data_length;
+ u16 io_throttle_low;
+ u16 io_throttle_high;
+};
+
+/**
+ * struct segments - memory descriptor structure to store
+ * virtual and dma addresses for operational queue segments.
+ *
+ * @segment: virtual address
+ * @segment_dma: dma address
+ */
+struct segments {
+ void *segment;
+ dma_addr_t segment_dma;
+};
+
+/**
+ * struct op_req_qinfo - Operational Request Queue Information
+ *
+ * @ci: consumer index
+ * @pi: producer index
+ * @num_requests: Maximum number of entries in the queue
+ * @qid: Queue Id starting from 1
+ * @reply_qid: Associated reply queue Id
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_lock: Concurrent queue access lock
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ */
+struct op_req_qinfo {
+ u16 ci;
+ u16 pi;
+ u16 num_requests;
+ u16 qid;
+ u16 reply_qid;
+ u16 num_segments;
+ u16 segment_qd;
+ spinlock_t q_lock;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+};
+
+/**
+ * struct op_reply_qinfo - Operational Reply Queue Information
+ *
+ * @ci: consumer index
+ * @qid: Queue Id starting from 1
+ * @num_replies: Maximum number of entries in the queue
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ * @ephase: Expected phase identifier for the reply queue
+ * @pend_ios: Number of IOs pending in HW for this queue
+ * @enable_irq_poll: Flag to indicate polling is enabled
+ * @in_use: Queue is handled by poll/ISR
+ * @qtype: Type of queue (types defined in enum queue_type)
+ */
+struct op_reply_qinfo {
+ u16 ci;
+ u16 qid;
+ u16 num_replies;
+ u16 num_segments;
+ u16 segment_qd;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+ u8 ephase;
+ atomic_t pend_ios;
+ bool enable_irq_poll;
+ atomic_t in_use;
+ enum queue_type qtype;
+};
+
+/**
+ * struct mpi3mr_intr_info - Interrupt cookie information
+ *
+ * @mrioc: Adapter instance reference
+ * @os_irq: irq number
+ * @msix_index: MSIx index
+ * @op_reply_q: Associated operational reply queue
+ * @name: Dev name for the irq claiming device
+ */
+struct mpi3mr_intr_info {
+ struct mpi3mr_ioc *mrioc;
+ int os_irq;
+ u16 msix_index;
+ struct op_reply_qinfo *op_reply_q;
+ char name[MPI3MR_NAME_LENGTH];
+};
+
+/**
+ * struct mpi3mr_throttle_group_info - Throttle group info
+ *
+ * @io_divert: Flag indicates io divert is on or off for the TG
+ * @need_qd_reduction: Flag to indicate QD reduction is needed
+ * @qd_reduction: Queue Depth reduction in units of 10%
+ * @fw_qd: QueueDepth value reported by the firmware
+ * @modified_qd: Modified QueueDepth value due to throttling
+ * @id: Throttle Group ID.
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @pend_large_data_sz: Counter to track pending large data
+ */
+struct mpi3mr_throttle_group_info {
+ u8 io_divert;
+ u8 need_qd_reduction;
+ u8 qd_reduction;
+ u16 fw_qd;
+ u16 modified_qd;
+ u16 id;
+ u32 high;
+ u32 low;
+ atomic_t pend_large_data_sz;
+};
+
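+/*
+ * Illustrative sketch: throttling decisions are hysteresis based.
+ * When the pending large-data count for a group crosses @high the
+ * driver starts diverting I/O; once it drains below @low the divert
+ * flag is cleared again. This condenses the driver's logic into an
+ * example and is not the authoritative implementation.
+ */
+#if 0 /* example only */
+static void mpi3mr_example_update_divert(struct mpi3mr_throttle_group_info *tg)
+{
+ u32 pending = atomic_read(&tg->pend_large_data_sz);
+
+ if (!tg->io_divert && pending >= tg->high)
+ tg->io_divert = 1;
+ else if (tg->io_divert && pending < tg->low)
+ tg->io_divert = 0;
+}
+#endif
+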
+/* HBA port flags */
+#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
+
+/**
+ * struct mpi3mr_hba_port - HBA's port information
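+ * @list: list head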
+ * @port_id: Port number
+ * @flags: HBA port flags
+ */
+struct mpi3mr_hba_port {
+ struct list_head list;
+ u8 port_id;
+ u8 flags;
+};
+
+/**
+ * struct mpi3mr_sas_port - Internal SAS port information
+ * @port_list: List of ports belonging to a SAS node
+ * @num_phys: Number of phys associated with port
+ * @marked_responding: used while refreshing the sas ports
+ * @lowest_phy: lowest phy ID of current sas port
+ * @phy_mask: phy_mask of current sas port
+ * @hba_port: HBA port entry
+ * @remote_identify: Attached device identification
+ * @rphy: SAS transport layer rphy object
+ * @port: SAS transport layer port object
+ * @phy_list: mpi3mr_sas_phy objects belonging to this port
+ */
+struct mpi3mr_sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ u8 marked_responding;
+ int lowest_phy;
+ u32 phy_mask;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct mpi3mr_sas_phy - Internal SAS Phy information
+ * @port_siblings: List of phys belonging to a port
+ * @identify: Phy identification
+ * @remote_identify: Attached device identification
+ * @phy: SAS transport layer Phy object
+ * @phy_id: Unique phy id within a port
+ * @handle: Firmware device handle for this phy
+ * @attached_handle: Firmware device handle for attached device
+ * @phy_belongs_to_port: Flag to indicate phy belongs to port
+ * @hba_port: HBA port entry
+ */
+struct mpi3mr_sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+ struct mpi3mr_hba_port *hba_port;
+};
+
+/**
+ * struct mpi3mr_sas_node - SAS host/expander information
+ * @list: List of sas nodes in a controller
+ * @parent_dev: Parent device class
+ * @num_phys: Number phys belonging to sas_node
+ * @sas_address: SAS address of sas_node
+ * @handle: Firmware device handle for this sas_host/expander
+ * @sas_address_parent: SAS address of parent expander or host
+ * @enclosure_handle: Firmware handle of enclosure of this node
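+ * @enclosure_logical_id: Enclosure logical identifier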
+ * @device_info: Capabilities of this sas_host/expander
+ * @non_responding: used to refresh the expander devices during reset
+ * @host_node: Flag to indicate this is a host_node
+ * @hba_port: HBA port entry
+ * @phy: A list of phys that make up this sas_host/expander
+ * @sas_port_list: List of internal ports of this node
+ * @rphy: sas_rphy object of this expander node
+ */
+struct mpi3mr_sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 non_responding;
+ u8 host_node;
+ struct mpi3mr_hba_port *hba_port;
+ struct mpi3mr_sas_phy *phy;
+ struct list_head sas_port_list;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct mpi3mr_enclosure_node - enclosure information
+ * @list: List of enclosures
+ * @pg0: Enclosure page 0
+ */
+struct mpi3mr_enclosure_node {
+ struct list_head list;
+ struct mpi3_enclosure_page0 pg0;
+};
+
+/**
+ * struct tgt_dev_sas_sata - SAS/SATA device specific
+ * information cached from firmware given data
+ *
+ * @sas_address: World wide unique SAS address
+ * @sas_address_parent: Sas address of parent expander or host
+ * @dev_info: Device information bits
+ * @phy_id: Phy identifier provided in device page 0
+ * @attached_phy_id: Attached phy identifier provided in device page 0
+ * @sas_transport_attached: Is this device exposed to transport
+ * @pend_sas_rphy_add: Flag to check device is in process of add
+ * @hba_port: HBA port entry
+ * @rphy: SAS transport layer rphy object
+ */
+struct tgt_dev_sas_sata {
+ u64 sas_address;
+ u64 sas_address_parent;
+ u16 dev_info;
+ u8 phy_id;
+ u8 attached_phy_id;
+ u8 sas_transport_attached;
+ u8 pend_sas_rphy_add;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct tgt_dev_pcie - PCIe device specific information cached
+ * from firmware given data
+ *
+ * @mdts: Maximum data transfer size
+ * @capb: Device capabilities
+ * @pgsz: Device page size
+ * @abort_to: Timeout for abort TM
+ * @reset_to: Timeout for Target/LUN reset TM
+ * @dev_info: Device information bits
+ */
+struct tgt_dev_pcie {
+ u32 mdts;
+ u16 capb;
+ u8 pgsz;
+ u8 abort_to;
+ u8 reset_to;
+ u16 dev_info;
+};
+
+/**
+ * struct tgt_dev_vd - virtual device specific information
+ * cached from firmware given data
+ *
+ * @state: State of the VD
+ * @tg_qd_reduction: Queue Depth reduction in units of 10%
+ * @tg_id: VDs throttle group ID
+ * @tg_high: High limit to turn on throttling in 512 byte blocks
+ * @tg_low: Low limit to turn off throttling in 512 byte blocks
+ * @tg: Pointer to throttle group info
+ */
+struct tgt_dev_vd {
+ u8 state;
+ u8 tg_qd_reduction;
+ u16 tg_id;
+ u32 tg_high;
+ u32 tg_low;
+ struct mpi3mr_throttle_group_info *tg;
+};
+
+
+/**
+ * union _form_spec_inf - union of device specific information
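+ * @sas_sata_inf: SAS/SATA device information
+ * @pcie_inf: PCIe device information
+ * @vd_inf: Virtual device information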
+ */
+union _form_spec_inf {
+ struct tgt_dev_sas_sata sas_sata_inf;
+ struct tgt_dev_pcie pcie_inf;
+ struct tgt_dev_vd vd_inf;
+};
+
+
+
+/**
+ * struct mpi3mr_tgt_dev - target device data structure
+ *
+ * @list: List pointer
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @parent_handle: FW parent device handle
+ * @slot: Slot number
+ * @encl_handle: FW enclosure handle
+ * @perst_id: FW assigned Persistent ID
+ * @devpg0_flag: Device Page0 flag
+ * @dev_type: SAS/SATA/PCIE device type
+ * @is_hidden: Should be exposed to upper layers or not
+ * @host_exposed: Already exposed to host or not
+ * @io_unit_port: IO Unit port ID
+ * @non_stl: Is this device not to be attached with SAS TL
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @q_depth: Device specific Queue Depth
+ * @wwid: World wide ID
+ * @enclosure_logical_id: Enclosure logical identifier
+ * @dev_spec: Device type specific information
+ * @ref_count: Reference count
+ */
+struct mpi3mr_tgt_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 parent_handle;
+ u16 slot;
+ u16 encl_handle;
+ u16 perst_id;
+ u16 devpg0_flag;
+ u8 dev_type;
+ u8 is_hidden;
+ u8 host_exposed;
+ u8 io_unit_port;
+ u8 non_stl;
+ u8 io_throttle_enabled;
+ u16 q_depth;
+ u64 wwid;
+ u64 enclosure_logical_id;
+ union _form_spec_inf dev_spec;
+ struct kref ref_count;
+};
+
+/**
+ * mpi3mr_tgtdev_get - kref incrementer
+ * @s: Target device reference
+ *
+ * Increment target device reference count.
+ */
+static inline void mpi3mr_tgtdev_get(struct mpi3mr_tgt_dev *s)
+{
+ kref_get(&s->ref_count);
+}
+
+/**
+ * mpi3mr_free_tgtdev - target device memory deallocator
+ * @r: k reference pointer of the target device
+ *
+ * Free target device memory when no reference.
+ */
+static inline void mpi3mr_free_tgtdev(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_tgt_dev, ref_count));
+}
+
+/**
+ * mpi3mr_tgtdev_put - kref decrementer
+ * @s: Target device reference
+ *
+ * Decrement target device reference count.
+ */
+static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
+{
+ kref_put(&s->ref_count, mpi3mr_free_tgtdev);
+}
+
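+/*
+ * Illustrative usage of the helpers above: lookup routines such as
+ * mpi3mr_get_tgtdev_by_handle() (declared later in this header)
+ * return a target device with an elevated reference that the caller
+ * must drop with mpi3mr_tgtdev_put() when done. Sketch only.
+ */
+#if 0 /* example only */
+static void mpi3mr_example_use_tgtdev(struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev =
+ mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+
+ if (!tgtdev)
+ return;
+ /* ... inspect tgtdev fields ... */
+ mpi3mr_tgtdev_put(tgtdev);
+}
+#endif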
+
+/**
+ * struct mpi3mr_stgt_priv_data - SCSI target private structure
+ *
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @perst_id: FW assigned Persistent ID
+ * @num_luns: Number of Logical Units
+ * @block_io: I/O blocked to the device or not
+ * @dev_removed: Device removed in the Firmware
+ * @dev_removedelay: Device is waiting to be removed in FW
+ * @dev_type: Device type
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @io_divert: Flag indicates io divert is on or off for the dev
+ * @throttle_group: Pointer to throttle group info
+ * @tgt_dev: Internal target device pointer
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
+ */
+struct mpi3mr_stgt_priv_data {
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 perst_id;
+ u32 num_luns;
+ atomic_t block_io;
+ u8 dev_removed;
+ u8 dev_removedelay;
+ u8 dev_type;
+ u8 io_throttle_enabled;
+ u8 io_divert;
+ struct mpi3mr_throttle_group_info *throttle_group;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ u32 pend_count;
+};
+
+/**
+ * struct mpi3mr_sdev_priv_data - SCSI device private structure
+ *
+ * @tgt_priv_data: Scsi_target private data pointer
+ * @lun_id: LUN ID of the device
+ * @ncq_prio_enable: NCQ priority enable for SATA device
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
+ */
+struct mpi3mr_sdev_priv_data {
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ u32 lun_id;
+ u8 ncq_prio_enable;
+ u32 pend_count;
+};
+
+/**
+ * struct mpi3mr_drv_cmd - Internal command tracker
+ *
+ * @mutex: Command mutex
+ * @done: Completion for wakeup
+ * @reply: Firmware reply for internal commands
+ * @sensebuf: Sensebuf for SCSI IO commands
+ * @iou_rc: IO Unit control reason code
+ * @state: Command State
+ * @dev_handle: Firmware handle for device specific commands
+ * @ioc_status: IOC status from the firmware
+ * @ioc_loginfo: IOC log info from the firmware
+ * @is_waiting: Is the command issued in block mode
+ * @is_sense: Is Sense data present
+ * @retry_count: Retry count for retriable commands
+ * @host_tag: Host tag used by the command
+ * @callback: Callback for non blocking commands
+ */
+struct mpi3mr_drv_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ u8 *sensebuf;
+ u8 iou_rc;
+ u16 state;
+ u16 dev_handle;
+ u16 ioc_status;
+ u32 ioc_loginfo;
+ u8 is_waiting;
+ u8 is_sense;
+ u8 retry_count;
+ u16 host_tag;
+
+ void (*callback)(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+};
+
+/**
+ * struct dma_memory_desc - memory descriptor structure to store
+ * virtual address, dma address and size for any generic dma
+ * memory allocations in the driver.
+ *
+ * @size: buffer size
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct dma_memory_desc {
+ u32 size;
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+
+/**
+ * struct chain_element - memory descriptor structure to store
+ * virtual and dma addresses for chain elements.
+ *
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct chain_element {
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct scmd_priv - SCSI command private data
+ *
+ * @host_tag: Host tag specific to operational queue
+ * @in_lld_scope: Command in LLD scope or not
+ * @meta_sg_valid: DIX command with meta data SGL or not
+ * @scmd: SCSI Command pointer
+ * @req_q_idx: Operational request queue index
+ * @chain_idx: Chain frame index
+ * @meta_chain_idx: Chain frame index of meta data SGL
+ * @mpi3mr_scsiio_req: MPI SCSI IO request
+ */
+struct scmd_priv {
+ u16 host_tag;
+ u8 in_lld_scope;
+ u8 meta_sg_valid;
+ struct scsi_cmnd *scmd;
+ u16 req_q_idx;
+ int chain_idx;
+ int meta_chain_idx;
+ u8 mpi3mr_scsiio_req[MPI3MR_ADMIN_REQ_FRAME_SZ];
+};
+
+/**
+ * struct mpi3mr_ioc - Adapter anchor structure stored in shost
+ * private data
+ *
+ * @list: List pointer
+ * @pdev: PCI device pointer
+ * @shost: Scsi_Host pointer
+ * @id: Controller ID
+ * @cpu_count: Number of online CPUs
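+ * @enable_segqueue: Flag to indicate segmented queues are enabled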
+ * @irqpoll_sleep: usleep unit used in threaded isr irqpoll
+ * @name: Controller ASCII name
+ * @driver_name: Driver ASCII name
+ * @sysif_regs: System interface registers virtual address
+ * @sysif_regs_phys: System interface registers physical address
+ * @bars: PCI BARS
+ * @dma_mask: DMA mask
+ * @msix_count: Number of MSIX vectors used
+ * @intr_enabled: Is interrupts enabled
+ * @num_admin_req: Number of admin requests
+ * @admin_req_q_sz: Admin request queue size
+ * @admin_req_pi: Admin request queue producer index
+ * @admin_req_ci: Admin request queue consumer index
+ * @admin_req_base: Admin request queue base virtual address
+ * @admin_req_dma: Admin request queue base dma address
+ * @admin_req_lock: Admin queue access lock
+ * @num_admin_replies: Number of admin replies
+ * @admin_reply_q_sz: Admin reply queue size
+ * @admin_reply_ci: Admin reply queue consumer index
+ * @admin_reply_ephase: Admin reply queue expected phase
+ * @admin_reply_base: Admin reply queue base virtual address
+ * @admin_reply_dma: Admin reply queue base dma address
+ * @ready_timeout: Controller ready timeout
+ * @intr_info: Interrupt cookie pointer
+ * @intr_info_count: Number of interrupt cookies
+ * @is_intr_info_set: Flag to indicate intr info is setup
+ * @num_queues: Number of operational queues
+ * @num_op_req_q: Number of operational request queues
+ * @req_qinfo: Operational request queue info pointer
+ * @num_op_reply_q: Number of operational reply queues
+ * @op_reply_qinfo: Operational reply queue info pointer
+ * @init_cmds: Command tracker for initialization commands
+ * @cfg_cmds: Command tracker for configuration requests
+ * @facts: Cached IOC facts data
+ * @op_reply_desc_sz: Operational reply descriptor size
+ * @num_reply_bufs: Number of reply buffers allocated
+ * @reply_buf_pool: Reply buffer pool
+ * @reply_buf: Reply buffer base virtual address
+ * @reply_buf_dma: Reply buffer DMA address
+ * @reply_buf_dma_max_address: Reply DMA address max limit
+ * @reply_free_qsz: Reply free queue size
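+ * @reply_sz: Firmware reply frame size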
+ * @reply_free_q_pool: Reply free queue pool
+ * @reply_free_q: Reply free queue base virtual address
+ * @reply_free_q_dma: Reply free queue base DMA address
+ * @reply_free_queue_lock: Reply free queue lock
+ * @reply_free_queue_host_index: Reply free queue host index
+ * @num_sense_bufs: Number of sense buffers
+ * @sense_buf_pool: Sense buffer pool
+ * @sense_buf: Sense buffer base virtual address
+ * @sense_buf_dma: Sense buffer base DMA address
+ * @sense_buf_q_sz: Sense buffer queue size
+ * @sense_buf_q_pool: Sense buffer queue pool
+ * @sense_buf_q: Sense buffer queue virtual address
+ * @sense_buf_q_dma: Sense buffer queue DMA address
+ * @sbq_lock: Sense buffer queue lock
+ * @sbq_host_index: Sense buffer queue host index
+ * @event_masks: Event mask bitmap
+ * @fwevt_worker_name: Firmware event worker thread name
+ * @fwevt_worker_thread: Firmware event worker thread
+ * @fwevt_lock: Firmware event lock
+ * @fwevt_list: Firmware event list
+ * @watchdog_work_q_name: Fault watchdog worker thread name
+ * @watchdog_work_q: Fault watchdog worker thread
+ * @watchdog_work: Fault watchdog work
+ * @watchdog_lock: Fault watchdog lock
+ * @is_driver_loading: Is driver still loading
+ * @scan_started: Async scan started
+ * @scan_failed: Async scan failed
+ * @stop_drv_processing: Stop all command processing
+ * @device_refresh_on: Don't process the events until devices are refreshed
+ * @max_host_ios: Maximum host I/O count
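+ * @tgtdev_lock: Target device list lock
+ * @tgtdev_list: List of target device structures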
+ * @chain_buf_count: Chain buffer count
+ * @chain_buf_pool: Chain buffer pool
+ * @chain_sgl_list: Chain SGL list
+ * @chain_bitmap_sz: Chain buffer allocator bitmap size
+ * @chain_bitmap: Chain buffer allocator bitmap
+ * @chain_buf_lock: Chain buffer list lock
+ * @bsg_cmds: Command tracker for BSG command
+ * @host_tm_cmds: Command tracker for task management commands
+ * @dev_rmhs_cmds: Command tracker for device removal commands
+ * @evtack_cmds: Command tracker for event ack commands
+ * @devrem_bitmap_sz: Device removal bitmap size
+ * @devrem_bitmap: Device removal bitmap
+ * @dev_handle_bitmap_sz: Device handle bitmap size
+ * @removepend_bitmap: Remove pending bitmap
+ * @delayed_rmhs_list: Delayed device removal list
+ * @evtack_cmds_bitmap_sz: Event Ack bitmap size
+ * @evtack_cmds_bitmap: Event Ack bitmap
+ * @delayed_evtack_cmds_list: Delayed event acknowledgment list
+ * @ts_update_counter: Timestamp update counter
+ * @reset_in_progress: Reset in progress flag
+ * @unrecoverable: Controller unrecoverable flag
+ * @prev_reset_result: Result of previous reset
+ * @reset_mutex: Controller reset mutex
+ * @reset_waitq: Controller reset wait queue
+ * @prepare_for_reset: Prepare for reset event received
+ * @prepare_for_reset_timeout_counter: Prepare for reset timeout
+ * @prp_list_virt: NVMe encapsulated PRP list virtual base
+ * @prp_list_dma: NVMe encapsulated PRP list DMA
+ * @prp_sz: NVME encapsulated PRP list size
+ * @diagsave_timeout: Diagnostic information save timeout
+ * @logging_level: Controller debug logging level
+ * @flush_io_count: I/O count to flush after reset
+ * @current_event: Firmware event currently in process
+ * @driver_info: Driver, Kernel, OS information to firmware
+ * @change_count: Topology change count
+ * @pel_enabled: Persistent Event Log(PEL) enabled or not
+ * @pel_abort_requested: PEL abort is requested or not
+ * @pel_class: PEL Class identifier
+ * @pel_locale: PEL Locale identifier
+ * @pel_cmds: Command tracker for PEL wait command
+ * @pel_abort_cmd: Command tracker for PEL abort command
+ * @pel_newest_seqnum: Newest PEL sequence number
+ * @pel_seqnum_virt: PEL sequence number virtual address
+ * @pel_seqnum_dma: PEL sequence number DMA address
+ * @pel_seqnum_sz: PEL sequence number size
+ * @op_reply_q_offset: Operational reply queue offset with MSIx
+ * @default_qcount: Total Default queues
+ * @active_poll_qcount: Currently active poll queue count
+ * @requested_poll_qcount: User requested poll queue count
+ * @bsg_dev: BSG device structure
+ * @bsg_queue: Request queue for BSG device
+ * @stop_bsgs: Stop BSG request flag
+ * @logdata_buf: Circular buffer to store log data entries
+ * @logdata_buf_idx: Index of entry in buffer to store
+ * @logdata_entry_sz: log data entry size
+ * @pend_large_data_sz: Counter to track pending large data
+ * @io_throttle_data_length: I/O size to track in 512b blocks
+ * @io_throttle_high: I/O size to start throttle in 512b blocks
+ * @io_throttle_low: I/O size to stop throttle in 512b blocks
+ * @num_io_throttle_group: Maximum number of throttle groups
+ * @throttle_groups: Pointer to throttle group info structures
+ * @cfg_page: Default memory for configuration pages
+ * @cfg_page_dma: Configuration page DMA address
+ * @cfg_page_sz: Default configuration page memory size
+ * @sas_transport_enabled: SAS transport enabled or not
+ * @scsi_device_channel: Channel ID for SCSI devices
+ * @transport_cmds: Command tracker for SAS transport commands
+ * @sas_hba: SAS node for the controller
+ * @sas_expander_list: SAS node list of expanders
+ * @sas_node_lock: Lock to protect SAS node list
+ * @hba_port_table_list: List of HBA Ports
+ * @enclosure_list: List of Enclosure objects
+ */
+struct mpi3mr_ioc {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ bool enable_segqueue;
+ u32 irqpoll_sleep;
+
+ char name[MPI3MR_NAME_LENGTH];
+ char driver_name[MPI3MR_NAME_LENGTH];
+
+ volatile struct mpi3_sysif_registers __iomem *sysif_regs;
+ resource_size_t sysif_regs_phys;
+ int bars;
+ u64 dma_mask;
+
+ u16 msix_count;
+ u8 intr_enabled;
+
+ u16 num_admin_req;
+ u32 admin_req_q_sz;
+ u16 admin_req_pi;
+ u16 admin_req_ci;
+ void *admin_req_base;
+ dma_addr_t admin_req_dma;
+ spinlock_t admin_req_lock;
+
+ u16 num_admin_replies;
+ u32 admin_reply_q_sz;
+ u16 admin_reply_ci;
+ u8 admin_reply_ephase;
+ void *admin_reply_base;
+ dma_addr_t admin_reply_dma;
+
+ u32 ready_timeout;
+
+ struct mpi3mr_intr_info *intr_info;
+ u16 intr_info_count;
+ bool is_intr_info_set;
+
+ u16 num_queues;
+ u16 num_op_req_q;
+ struct op_req_qinfo *req_qinfo;
+
+ u16 num_op_reply_q;
+ struct op_reply_qinfo *op_reply_qinfo;
+
+ struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_drv_cmd cfg_cmds;
+ struct mpi3mr_ioc_facts facts;
+ u16 op_reply_desc_sz;
+
+ u32 num_reply_bufs;
+ struct dma_pool *reply_buf_pool;
+ u8 *reply_buf;
+ dma_addr_t reply_buf_dma;
+ dma_addr_t reply_buf_dma_max_address;
+
+ u16 reply_free_qsz;
+ u16 reply_sz;
+ struct dma_pool *reply_free_q_pool;
+ __le64 *reply_free_q;
+ dma_addr_t reply_free_q_dma;
+ spinlock_t reply_free_queue_lock;
+ u32 reply_free_queue_host_index;
+
+ u32 num_sense_bufs;
+ struct dma_pool *sense_buf_pool;
+ u8 *sense_buf;
+ dma_addr_t sense_buf_dma;
+
+ u16 sense_buf_q_sz;
+ struct dma_pool *sense_buf_q_pool;
+ __le64 *sense_buf_q;
+ dma_addr_t sense_buf_q_dma;
+ spinlock_t sbq_lock;
+ u32 sbq_host_index;
+ u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ char fwevt_worker_name[MPI3MR_NAME_LENGTH];
+ struct workqueue_struct *fwevt_worker_thread;
+ spinlock_t fwevt_lock;
+ struct list_head fwevt_list;
+
+ char watchdog_work_q_name[20];
+ struct workqueue_struct *watchdog_work_q;
+ struct delayed_work watchdog_work;
+ spinlock_t watchdog_lock;
+
+ u8 is_driver_loading;
+ u8 scan_started;
+ u16 scan_failed;
+ u8 stop_drv_processing;
+ u8 device_refresh_on;
+
+ u16 max_host_ios;
+ spinlock_t tgtdev_lock;
+ struct list_head tgtdev_list;
+
+ u32 chain_buf_count;
+ struct dma_pool *chain_buf_pool;
+ struct chain_element *chain_sgl_list;
+ u16 chain_bitmap_sz;
+ void *chain_bitmap;
+ spinlock_t chain_buf_lock;
+
+ struct mpi3mr_drv_cmd bsg_cmds;
+ struct mpi3mr_drv_cmd host_tm_cmds;
+ struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
+ u16 devrem_bitmap_sz;
+ void *devrem_bitmap;
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+ struct list_head delayed_rmhs_list;
+ u16 evtack_cmds_bitmap_sz;
+ void *evtack_cmds_bitmap;
+ struct list_head delayed_evtack_cmds_list;
+
+ u32 ts_update_counter;
+ u8 reset_in_progress;
+ u8 unrecoverable;
+ int prev_reset_result;
+ struct mutex reset_mutex;
+ wait_queue_head_t reset_waitq;
+
+ u8 prepare_for_reset;
+ u16 prepare_for_reset_timeout_counter;
+
+ void *prp_list_virt;
+ dma_addr_t prp_list_dma;
+ u32 prp_sz;
+
+ u16 diagsave_timeout;
+ int logging_level;
+ u16 flush_io_count;
+
+ struct mpi3mr_fwevt *current_event;
+ struct mpi3_driver_info_layout driver_info;
+ u16 change_count;
+
+ u8 pel_enabled;
+ u8 pel_abort_requested;
+ u8 pel_class;
+ u16 pel_locale;
+ struct mpi3mr_drv_cmd pel_cmds;
+ struct mpi3mr_drv_cmd pel_abort_cmd;
+
+ u32 pel_newest_seqnum;
+ void *pel_seqnum_virt;
+ dma_addr_t pel_seqnum_dma;
+ u32 pel_seqnum_sz;
+
+ u16 op_reply_q_offset;
+ u16 default_qcount;
+ u16 active_poll_qcount;
+ u16 requested_poll_qcount;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
+ u8 stop_bsgs;
+ u8 *logdata_buf;
+ u16 logdata_buf_idx;
+ u16 logdata_entry_sz;
+
+ atomic_t pend_large_data_sz;
+ u32 io_throttle_data_length;
+ u32 io_throttle_high;
+ u32 io_throttle_low;
+ u16 num_io_throttle_group;
+ struct mpi3mr_throttle_group_info *throttle_groups;
+
+ void *cfg_page;
+ dma_addr_t cfg_page_dma;
+ u16 cfg_page_sz;
+
+ u8 sas_transport_enabled;
+ u8 scsi_device_channel;
+ struct mpi3mr_drv_cmd transport_cmds;
+ struct mpi3mr_sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head hba_port_table_list;
+ struct list_head enclosure_list;
+};
+
+/**
+ * struct mpi3mr_fwevt - Firmware event structure.
+ *
+ * @list: list head
+ * @work: Work structure
+ * @mrioc: Adapter instance reference
+ * @event_id: MPI3 firmware event ID
+ * @send_ack: Event acknowledgment required or not
+ * @process_evt: Bottomhalf processing required or not
+ * @evt_ctx: Event context to send in Ack
+ * @event_data_size: size of the event data in bytes
+ * @pending_at_sml: waiting for device add/remove API to complete
+ * @discard: discard this event
+ * @ref_count: kref count
+ * @event_data: Actual MPI3 event data
+ */
+struct mpi3mr_fwevt {
+ struct list_head list;
+ struct work_struct work;
+ struct mpi3mr_ioc *mrioc;
+ u16 event_id;
+ bool send_ack;
+ bool process_evt;
+ u32 evt_ctx;
+ u16 event_data_size;
+ bool pending_at_sml;
+ bool discard;
+ struct kref ref_count;
+ char event_data[] __aligned(4);
+};
+
+
+/**
+ * struct delayed_dev_rmhs_node - Delayed device removal node
+ *
+ * @list: list head
+ * @handle: Device handle
+ * @iou_rc: IO Unit Control Reason Code
+ */
+struct delayed_dev_rmhs_node {
+ struct list_head list;
+ u16 handle;
+ u8 iou_rc;
+};
+
+/**
+ * struct delayed_evt_ack_node - Delayed event ack node
+ * @list: list head
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ */
+struct delayed_evt_ack_node {
+ struct list_head list;
+ u8 event;
+ u32 event_ctx;
+};
+
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset);
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *opreqq, u8 *req);
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr);
+void mpi3mr_build_zero_len_sge(void *paddr);
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma);
+
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc);
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply);
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc,
+ u64 *reply_dma, u16 qidx);
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc);
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
+
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump);
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
+
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx);
+
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q);
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc);
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scsi_cmnd *scmd);
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+ u16 event_data_size);
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+extern const struct attribute_group *mpi3mr_host_groups[];
+extern const struct attribute_group *mpi3mr_dev_groups[];
+
+extern struct sas_function_template mpi3mr_transport_functions;
+extern struct scsi_transport_template *mpi3mr_transport_template;
+
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz);
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
+
+u8 mpi3mr_is_expander_device(u16 device_info);
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port);
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle);
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id);
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc);
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port);
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy);
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add);
+void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc);
+void mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc);
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
+#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
new file mode 100644
index 000000000000..9baac224b213
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/bsg-lib.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+
+/**
+ * mpi3mr_bsg_pel_abort - sends PEL abort request
+ * @mrioc: Adapter instance reference
+ *
+ * This function sends a PEL abort request to the firmware through
+ * the admin request queue.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_pel_req_action_abort pel_abort_req;
+ struct mpi3_pel_reply *pel_reply;
+ int retval = 0;
+ u16 pe_log_status;
+
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ return -1;
+ }
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ return -1;
+ }
+
+ memset(&pel_abort_req, 0, sizeof(pel_abort_req));
+ mutex_lock(&mrioc->pel_abort_cmd.mutex);
+ if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
+ dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+ mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+ return -1;
+ }
+ mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
+ mrioc->pel_abort_cmd.is_waiting = 1;
+ mrioc->pel_abort_cmd.callback = NULL;
+ pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
+ pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
+ pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+
+ mrioc->pel_abort_requested = 1;
+ init_completion(&mrioc->pel_abort_cmd.done);
+ retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
+ sizeof(pel_abort_req), 0);
+ if (retval) {
+ retval = -1;
+ dprint_bsg_err(mrioc, "%s: admin request post failed\n",
+ __func__);
+ mrioc->pel_abort_requested = 0;
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
+ mrioc->pel_abort_cmd.is_waiting = 0;
+ dprint_bsg_err(mrioc, "%s: command timedout\n", __func__);
+ if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, (mrioc->pel_abort_cmd.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->pel_abort_cmd.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
+ pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
+ pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+ if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "%s: command failed, pel_status(0x%04x)\n",
+ __func__, pe_log_status);
+ retval = -1;
+ }
+ }
+
+out_unlock:
+ mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+ return retval;
+}
+
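+/*
+ * Pairing note: the abort above is matched to the outstanding PEL wait
+ * purely by host tag, i.e. (repeating the two assignments from the body):
+ *
+ *	pel_abort_req.host_tag       = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
+ *	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ *
+ * MPI3MR_HOSTTAG_PEL_WAIT is the tag the PEL wait request was posted
+ * with, so the firmware can locate and cancel exactly that command.
+ */
+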
+/**
+ * mpi3mr_bsg_verify_adapter - verify adapter number is valid
+ * @ioc_number: Adapter number
+ *
+ * This function returns the adapter instance pointer for the given
+ * adapter number. If the adapter number does not match any adapter
+ * in the driver's adapter list, NULL is returned.
+ *
+ * Return: adapter instance reference
+ */
+static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+
+ spin_lock(&mrioc_list_lock);
+ list_for_each_entry(mrioc, &mrioc_list, list) {
+ if (mrioc->id == ioc_number) {
+ spin_unlock(&mrioc_list_lock);
+ return mrioc;
+ }
+ }
+ spin_unlock(&mrioc_list_lock);
+ return NULL;
+}
+
+/**
+ * mpi3mr_enable_logdata - Handler for log data enable
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function enables log data caching in the driver if not
+ * already enabled and returns the maximum number of log data
+ * entries that can be cached in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ struct mpi3mr_logdata_enable logdata_enable;
+
+ if (!mrioc->logdata_buf) {
+ mrioc->logdata_entry_sz =
+ (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
+ + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
+ mrioc->logdata_buf_idx = 0;
+ mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
+ mrioc->logdata_entry_sz, GFP_KERNEL);
+
+ if (!mrioc->logdata_buf)
+ return -ENOMEM;
+ }
+
+ memset(&logdata_enable, 0, sizeof(logdata_enable));
+ logdata_enable.max_entries =
+ MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+ if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &logdata_enable, sizeof(logdata_enable));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
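+/*
+ * Sizing sketch (the 128 below is illustrative; the real reply_sz comes
+ * from IOC facts): each cached entry holds the event data portion of an
+ * event notification reply plus the bsg entry header, e.g.
+ *
+ *	entry_sz = (128 - (sizeof(struct mpi3_event_notification_reply)
+ *		   - 4)) + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ
+ *
+ * and the ring above holds MPI3MR_BSG_LOGDATA_MAX_ENTRIES such entries.
+ */
+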
+/**
+ * mpi3mr_get_logdata - Handler for get log data
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function copies the log data entries to the user buffer
+ * when log caching is enabled in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;
+
+ if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
+ return -EINVAL;
+
+ num_entries = job->request_payload.payload_len / entry_sz;
+ if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
+ num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+ sz = num_entries * entry_sz;
+
+ if (job->request_payload.payload_len >= sz) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mrioc->logdata_buf, sz);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function handles the PEL enable driver command. It
+ * validates the class and locale given by the application and,
+ * if required, aborts the existing PEL wait request and/or
+ * issues a new PEL wait request to the firmware before returning.
+ *
+ * Return: 0 on success and proper error codes on failure.
+ */
+static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_bsg_out_pel_enable pel_enable;
+ u8 issue_pel_wait;
+ u8 tmp_class;
+ u16 tmp_locale;
+
+ if (job->request_payload.payload_len != sizeof(pel_enable)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ return rval;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &pel_enable, sizeof(pel_enable));
+
+ if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
+ dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
+ __func__, pel_enable.pel_class);
+ rval = 0;
+ goto out;
+ }
+ if (!mrioc->pel_enabled)
+ issue_pel_wait = 1;
+ else {
+ if ((mrioc->pel_class <= pel_enable.pel_class) &&
+ !((mrioc->pel_locale & pel_enable.pel_locale) ^
+ pel_enable.pel_locale)) {
+ issue_pel_wait = 0;
+ rval = 0;
+ } else {
+ pel_enable.pel_locale |= mrioc->pel_locale;
+
+ if (mrioc->pel_class < pel_enable.pel_class)
+ pel_enable.pel_class = mrioc->pel_class;
+
+ rval = mpi3mr_bsg_pel_abort(mrioc);
+ if (rval) {
+ dprint_bsg_err(mrioc,
+ "%s: pel_abort failed, status(%ld)\n",
+ __func__, rval);
+ goto out;
+ }
+ issue_pel_wait = 1;
+ }
+ }
+ if (issue_pel_wait) {
+ tmp_class = mrioc->pel_class;
+ tmp_locale = mrioc->pel_locale;
+ mrioc->pel_class = pel_enable.pel_class;
+ mrioc->pel_locale = pel_enable.pel_locale;
+ mrioc->pel_enabled = 1;
+ rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
+ if (rval) {
+ mrioc->pel_class = tmp_class;
+ mrioc->pel_locale = tmp_locale;
+ mrioc->pel_enabled = 0;
+ dprint_bsg_err(mrioc,
+ "%s: pel get sequence number failed, status(%ld)\n",
+ __func__, rval);
+ }
+ }
+
+out:
+ return rval;
+}
+
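+/*
+ * Worked example of the superset check above (values are illustrative):
+ * with a cached locale of 0x00ff, a request for locale 0x000f gives
+ *
+ *	(0x00ff & 0x000f) ^ 0x000f == 0
+ *
+ * so the new request is already covered and no new PEL wait is issued.
+ * Any requested locale bit outside the cached locale makes the XOR
+ * non-zero, which aborts the current wait and re-arms it with the
+ * widened locale/class computed above.
+ */
+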
+/**
+ * mpi3mr_get_all_tgt_info - Get all target information
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the driver managed target devices' device
+ * handle, persistent ID, bus ID and target ID to the user
+ * provided buffer for the specific controller. This function
+ * also provides the number of devices managed by the driver for
+ * the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ u16 num_devices = 0, i = 0, size;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_device_map_info *devmap_info = NULL;
+ struct mpi3mr_all_tgt_info *alltgt_info = NULL;
+ uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
+
+ if (job->request_payload.payload_len < sizeof(u32)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ return rval;
+ }
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ num_devices++;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ if ((job->request_payload.payload_len == sizeof(u32)) ||
+ list_empty(&mrioc->tgtdev_list)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &num_devices, sizeof(num_devices));
+ return 0;
+ }
+
+ kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
+ size = sizeof(*alltgt_info) + kern_entrylen;
+ alltgt_info = kzalloc(size, GFP_KERNEL);
+ if (!alltgt_info)
+ return -ENOMEM;
+
+ devmap_info = alltgt_info->dmi;
+ memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (i < num_devices) {
+ devmap_info[i].handle = tgtdev->dev_handle;
+ devmap_info[i].perst_id = tgtdev->perst_id;
+ if (tgtdev->host_exposed && tgtdev->starget) {
+ devmap_info[i].target_id = tgtdev->starget->id;
+ devmap_info[i].bus_id =
+ tgtdev->starget->channel;
+ }
+ i++;
+ }
+ }
+ num_devices = i;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
+
+ usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
+ usr_entrylen *= sizeof(*devmap_info);
+ min_entrylen = min(usr_entrylen, kern_entrylen);
+ if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
+ dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
+ __func__, __LINE__);
+ rval = -EFAULT;
+ goto out;
+ }
+
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ alltgt_info, min_t(u32, size,
+ job->request_payload.payload_len));
+ rval = 0;
+out:
+ kfree(alltgt_info);
+ return rval;
+}
+
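+/*
+ * Layout sketch, inferred from the usage above rather than quoted from
+ * the uapi header:
+ *
+ *	struct mpi3mr_all_tgt_info {
+ *		u16 num_devices;
+ *		...
+ *		struct mpi3mr_device_map_info dmi[1];
+ *	};
+ *
+ * The reply begins with the device count followed by a tail array of
+ * map entries, which is why the allocation above adds (num_devices - 1)
+ * entries beyond the one embedded in the structure, and why
+ * usr_entrylen is rounded down to a whole number of entries.
+ */
+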
+/**
+ * mpi3mr_get_change_count - Get topology change count
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the topology change count provided by the
+ * driver in events and cached in the driver to the user
+ * provided buffer for the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ struct mpi3mr_change_count chgcnt;
+
+ memset(&chgcnt, 0, sizeof(chgcnt));
+ chgcnt.change_count = mrioc->change_count;
+ if (job->request_payload.payload_len >= sizeof(chgcnt)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &chgcnt, sizeof(chgcnt));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_adp_reset - Issue controller reset
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function identifies the user provided reset type, issues
+ * the appropriate reset to the controller, waits for it to
+ * complete and reinitializes the controller before returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ u8 save_snapdump;
+ struct mpi3mr_bsg_adp_reset adpreset;
+
+ if (job->request_payload.payload_len !=
+ sizeof(adpreset)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ goto out;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &adpreset, sizeof(adpreset));
+
+ switch (adpreset.reset_type) {
+ case MPI3MR_BSG_ADPRESET_SOFT:
+ save_snapdump = 0;
+ break;
+ case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
+ save_snapdump = 1;
+ break;
+ default:
+ dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
+ __func__, adpreset.reset_type);
+ goto out;
+ }
+
+ rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
+ save_snapdump);
+
+ if (rval)
+ dprint_bsg_err(mrioc,
+ "%s: reset handler returned error(%ld) for reset type %d\n",
+ __func__, rval, adpreset.reset_type);
+out:
+ return rval;
+}
+
+/**
+ * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function provides adapter information for the given
+ * controller
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ enum mpi3mr_iocstate ioc_state;
+ struct mpi3mr_bsg_in_adpinfo adpinfo;
+
+ memset(&adpinfo, 0, sizeof(adpinfo));
+ adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
+ adpinfo.pci_dev_id = mrioc->pdev->device;
+ adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
+ adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
+ adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
+ adpinfo.pci_bus = mrioc->pdev->bus->number;
+ adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
+ adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
+ adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
+ adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+ else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+ else if (ioc_state == MRIOC_STATE_FAULT)
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+ else
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+ memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
+ sizeof(adpinfo.driver_info));
+
+ if (job->request_payload.payload_len >= sizeof(adpinfo)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &adpinfo, sizeof(adpinfo));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_process_drv_cmds - Driver Command handler
+ * @job: BSG job reference
+ *
+ * This function is the top level handler for driver commands.
+ * It does basic validation of the buffer, identifies the
+ * opcode and dispatches to the correct sub handler.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct mpi3mr_bsg_packet *bsg_req = NULL;
+ struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
+
+ bsg_req = job->request;
+ drvrcmd = &bsg_req->cmd.drvrcmd;
+
+ mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
+ if (!mrioc)
+ return -ENODEV;
+
+ if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
+ rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
+ return rval;
+ }
+
+ if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
+ return -ERESTARTSYS;
+
+ switch (drvrcmd->opcode) {
+ case MPI3MR_DRVBSG_OPCODE_ADPRESET:
+ rval = mpi3mr_bsg_adp_reset(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
+ rval = mpi3mr_get_all_tgt_info(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
+ rval = mpi3mr_get_change_count(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
+ rval = mpi3mr_enable_logdata(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
+ rval = mpi3mr_get_logdata(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_PELENABLE:
+ rval = mpi3mr_bsg_pel_enable(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
+ default:
+ pr_err("%s: unsupported driver command opcode %d\n",
+ MPI3MR_DRIVER_NAME, drvrcmd->opcode);
+ break;
+ }
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ return rval;
+}
+
+/**
+ * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
+ * @mpi_req: MPI request
+ * @sgl_offset: offset to start sgl in the MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ * @is_rmc: Does the buffer list have a management command buffer
+ * @is_rmr: Does the buffer list have a management response buffer
+ * @num_datasges: Number of data buffers in the list
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given MPI request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
+ u8 is_rmr, u8 num_datasges)
+{
+ u8 *sgl = (mpi_req + sgl_offset), count = 0;
+ struct mpi3_mgmt_passthrough_request *rmgmt_req =
+ (struct mpi3_mgmt_passthrough_request *)mpi_req;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u8 sgl_flags, sgl_flags_last;
+
+ sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
+ sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
+
+ if (is_rmc) {
+ mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
+ sgl_flags_last, drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf_dma);
+ sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
+ drv_buf_iter++;
+ count++;
+ if (is_rmr) {
+ mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
+ sgl_flags_last, drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf_dma);
+ drv_buf_iter++;
+ count++;
+ } else
+ mpi3mr_build_zero_len_sge(
+ &rmgmt_req->response_sgl);
+ }
+ if (!num_datasges) {
+ mpi3mr_build_zero_len_sge(sgl);
+ return;
+ }
+ for (; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ if (num_datasges == 1 || !is_rmc)
+ mpi3mr_add_sg_single(sgl, sgl_flags_last,
+ drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+ else
+ mpi3mr_add_sg_single(sgl, sgl_flags,
+ drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+ sgl += sizeof(struct mpi3_sge_common);
+ num_datasges--;
+ }
+}
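+
+/*
+ * Usage sketch, matching the call made later in this file: for a plain
+ * MPI request with one data-in and one data-out buffer the SGEs are
+ * appended directly after the request frame,
+ *
+ *	mpi3mr_bsg_build_sgl(mpi_req, mpi_msg_size, drv_bufs, bufcnt,
+ *			     0, 0, din_cnt + dout_cnt);
+ *
+ * Note that with is_rmc clear every data SGE is built with
+ * sgl_flags_last; the buffer validation in mpi3mr_bsg_process_mpt_cmds()
+ * permits at most one data-in and one data-out buffer in that case.
+ */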
+
+/**
+ * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ *
+ * This function returns the type of the data format specified
+ * in user provided NVMe command in NVMe encapsulated request.
+ *
+ * Return: Data format of the NVMe command (PRP/SGL etc)
+ */
+static unsigned int mpi3mr_get_nvme_data_fmt(
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request)
+{
+ u8 format = 0;
+
+ format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
+ return format;
+}
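+
+/*
+ * Illustrative decode (bit positions per the NVMe spec; the enum values
+ * are assumed to follow the PSDT encoding): PSDT is bits 15:14 of NVMe
+ * command DWORD0, so e.g.
+ *
+ *	dword0 = 0x00004000
+ *	(dword0 & 0xc000) >> 14 == 1	(an SGL format)
+ *
+ * 00b selects PRPs and 01b/10b select SGL variants, which is how
+ * mpi3mr_bsg_process_mpt_cmds() below chooses between
+ * mpi3mr_build_nvme_prp() and mpi3mr_build_nvme_sgl().
+ */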
+
+/**
+ * mpi3mr_build_nvme_sgl - SGL constructor for NVME
+ * encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given NVMe encapsulated request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+ struct mpi3mr_nvme_pt_sge *nvme_sgl;
+ u64 sgl_ptr;
+ u8 count;
+ size_t length = 0;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+ mrioc->facts.sge_mod_shift) << 32);
+ u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+ mrioc->facts.sge_mod_shift) << 32;
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any sgl.
+ */
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
+ length = drv_buf_iter->kern_buf_len;
+ break;
+ }
+ if (!length)
+ return 0;
+
+ if (sgl_ptr & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: SGL address collides with SGE modifier\n",
+ __func__);
+ return -1;
+ }
+
+ sgl_ptr &= ~sgemod_mask;
+ sgl_ptr |= sgemod_val;
+ nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
+ ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
+ memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
+ nvme_sgl->base_addr = sgl_ptr;
+ nvme_sgl->length = length;
+ return 0;
+}
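+
+/*
+ * Worked example of the SGE modifier fixup above (the mask/shift/value
+ * are illustrative; the real ones come from IOC facts):
+ *
+ *	sge_mod_mask = 0xf, sge_mod_shift = 28, sge_mod_value = 0xa
+ *	sgemod_mask  = ((u64)(0xf << 28)) << 32 = 0xf000000000000000
+ *	sgemod_val   = ((u64)(0xa << 28)) << 32 = 0xa000000000000000
+ *	sgl_ptr      = 0x0000001234567000	(no mask collision)
+ *	sgl_ptr      = (sgl_ptr & ~sgemod_mask) | sgemod_val
+ *		     = 0xa000001234567000
+ *
+ * A DMA address that already has bits inside sgemod_mask cannot be
+ * represented and is rejected with the "collides" error above.
+ */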
+
+/**
+ * mpi3mr_build_nvme_prp - PRP constructor for NVME
+ * encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in SGL
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as PRP entries in the given NVMe encapsulated
+ * request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+ int prp_size = MPI3MR_NVME_PRP_SIZE;
+ __le64 *prp_entry, *prp1_entry, *prp2_entry;
+ __le64 *prp_page;
+ dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
+ u32 offset, entry_len, dev_pgsz;
+ u32 page_mask_result, page_mask;
+ size_t length = 0;
+ u8 count;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+ mrioc->facts.sge_mod_shift) << 32);
+ u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+ mrioc->facts.sge_mod_shift) << 32;
+ u16 dev_handle = nvme_encap_request->dev_handle;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev) {
+ dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
+ __func__, dev_handle);
+ return -1;
+ }
+
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
+ dprint_bsg_err(mrioc,
+ "%s: NVMe device page size is zero for handle 0x%04x\n",
+ __func__, dev_handle);
+ mpi3mr_tgtdev_put(tgtdev);
+ return -1;
+ }
+
+ dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
+ mpi3mr_tgtdev_put(tgtdev);
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any PRP.
+ */
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ dma_addr = drv_buf_iter->kern_buf_dma;
+ length = drv_buf_iter->kern_buf_len;
+ break;
+ }
+
+ if (!length)
+ return 0;
+
+ mrioc->prp_sz = 0;
+ mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
+
+ if (!mrioc->prp_list_virt)
+ return -1;
+ mrioc->prp_sz = dev_pgsz;
+
+ /*
+ * Set pointers to PRP1 and PRP2, which are in the NVMe command.
+ * PRP1 is located at a 24 byte offset from the start of the NVMe
+ * command. Then set the current PRP entry pointer to PRP1.
+ */
+ prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+ MPI3MR_NVME_CMD_PRP1_OFFSET);
+ prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+ MPI3MR_NVME_CMD_PRP2_OFFSET);
+ prp_entry = prp1_entry;
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory.
+ */
+ prp_page = (__le64 *)mrioc->prp_list_virt;
+ prp_page_dma = mrioc->prp_list_dma;
+
+ /*
+ * Check if we are within 1 entry of a page boundary; we don't
+ * want our first entry to be a PRP List entry.
+ */
+ page_mask = dev_pgsz - 1;
+ page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
+ if (!page_mask_result) {
+ dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
+ __func__);
+ goto err_out;
+ }
+
+ /*
+ * Set PRP physical pointer, which initially points to the current PRP
+ * DMA memory page.
+ */
+ prp_entry_dma = prp_page_dma;
+
+ /* Loop while the length is not zero. */
+ while (length) {
+ page_mask_result = (prp_entry_dma + prp_size) & page_mask;
+ if (!page_mask_result && (length > dev_pgsz)) {
+ dprint_bsg_err(mrioc,
+ "%s: single PRP page is not sufficient\n",
+ __func__);
+ goto err_out;
+ }
+
+ /* Need to handle if entry will be part of a page. */
+ offset = dma_addr & page_mask;
+ entry_len = dev_pgsz - offset;
+
+ if (prp_entry == prp1_entry) {
+ /*
+ * Must fill in the first PRP pointer (PRP1) before
+ * moving on.
+ */
+ *prp1_entry = cpu_to_le64(dma_addr);
+ if (*prp1_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP1 address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp1_entry &= ~sgemod_mask;
+ *prp1_entry |= sgemod_val;
+
+ /*
+ * Now point to the second PRP entry within the
+ * command (PRP2).
+ */
+ prp_entry = prp2_entry;
+ } else if (prp_entry == prp2_entry) {
+ /*
+ * Should the PRP2 entry be a PRP List pointer or just
+ * a regular PRP pointer? If there is more than one
+ * more page of data, must use a PRP List pointer.
+ */
+ if (length > dev_pgsz) {
+ /*
+ * PRP2 will contain a PRP List pointer because
+ * more PRP's are needed with this command. The
+ * list will start at the beginning of the
+ * contiguous buffer.
+ */
+ *prp2_entry = cpu_to_le64(prp_entry_dma);
+ if (*prp2_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP list address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp2_entry &= ~sgemod_mask;
+ *prp2_entry |= sgemod_val;
+
+ /*
+ * The next PRP Entry will be the start of the
+ * first PRP List.
+ */
+ prp_entry = prp_page;
+ continue;
+ } else {
+ /*
+ * After this, the PRP Entries are complete.
+ * This command uses 2 PRP's and no PRP list.
+ */
+ *prp2_entry = cpu_to_le64(dma_addr);
+ if (*prp2_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP2 collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp2_entry &= ~sgemod_mask;
+ *prp2_entry |= sgemod_val;
+ }
+ } else {
+ /*
+ * Put entry in list and bump the addresses.
+ *
+ * After PRP1 and PRP2 are filled in, this will fill in
+ * all remaining PRP entries in a PRP List, one per
+ * each time through the loop.
+ */
+ *prp_entry = cpu_to_le64(dma_addr);
+ if (*prp_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp_entry &= ~sgemod_mask;
+ *prp_entry |= sgemod_val;
+ prp_entry++;
+ prp_entry_dma += prp_size;
+ }
+
+ /*
+ * Bump the phys address of the command's data buffer by the
+ * entry_len.
+ */
+ dma_addr += entry_len;
+
+ /* decrement length accounting for last partial page. */
+ if (entry_len > length)
+ length = 0;
+ else
+ length -= entry_len;
+ }
+ return 0;
+err_out:
+ if (mrioc->prp_list_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+ mrioc->prp_list_virt, mrioc->prp_list_dma);
+ mrioc->prp_list_virt = NULL;
+ }
+ return -1;
+}
+
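+/*
+ * PRP walkthrough under assumed values (dev_pgsz = 4K, a 12K transfer
+ * with page-aligned dma_addr = 0x1000):
+ *
+ *	pass 1: PRP1 = 0x1000			(first 4K)
+ *	pass 2: 8K remain (> 4K), so PRP2 = prp_list_dma and the walk
+ *		switches to the PRP list page
+ *	pass 3/4: list[0] = 0x2000, list[1] = 0x3000
+ *
+ * Every entry is masked/ORed with the SGE modifier exactly as in
+ * mpi3mr_build_nvme_sgl().
+ */
+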
+/**
+ * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
+ * @job: BSG job reference
+ *
+ * This function is the top level handler for the MPI pass-through
+ * command. It does basic validation of the input data buffers,
+ * identifies the given buffer types and MPI command, allocates
+ * DMAable memory for the user given buffers, constructs the SGL
+ * properly and passes the command to the firmware.
+ *
+ * Once the MPI command is completed the driver copies the data
+ * if any and reply, sense information to user provided buffers.
+ * If the command times out, a controller reset is issued
+ * prior to returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job,
+ unsigned int *reply_payload_rcv_len)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_ioc *mrioc = NULL;
+ u8 *mpi_req = NULL, *sense_buff_k = NULL;
+ u8 mpi_msg_size = 0;
+ struct mpi3mr_bsg_packet *bsg_req = NULL;
+ struct mpi3mr_bsg_mptcmd *karg;
+ struct mpi3mr_buf_entry *buf_entries = NULL;
+ struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
+ u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
+ u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
+ u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
+ struct mpi3_request_header *mpi_header = NULL;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_scsi_task_mgmt_request *tm_req;
+ u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
+ u16 dev_handle;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
+ struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
+ u32 din_size = 0, dout_size = 0;
+ u8 *din_buf = NULL, *dout_buf = NULL;
+ u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
+
+ bsg_req = job->request;
+ karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
+
+ mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
+ if (!mrioc)
+ return -ENODEV;
+
+ if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
+ karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
+
+ mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
+ if (!mpi_req)
+ return -ENOMEM;
+ mpi_header = (struct mpi3_request_header *)mpi_req;
+
+ bufcnt = karg->buf_entry_list.num_of_entries;
+ drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
+ if (!drv_bufs) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ dout_buf = kzalloc(job->request_payload.payload_len,
+ GFP_KERNEL);
+ if (!dout_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ din_buf = kzalloc(job->reply_payload.payload_len,
+ GFP_KERNEL);
+ if (!din_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ dout_buf, job->request_payload.payload_len);
+
+ buf_entries = karg->buf_entry_list.buf_entry;
+ sgl_din_iter = din_buf;
+ sgl_dout_iter = dout_buf;
+ drv_buf_iter = drv_bufs;
+
+ for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
+
+ if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+ if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ switch (buf_entries->buf_type) {
+ case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_TO_DEVICE;
+ is_rmcb = 1;
+ if (count != 0)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+ is_rmrb = 1;
+ if (count != 1 || !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_DATA_IN:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+ din_cnt++;
+ din_size += drv_buf_iter->bsg_buf_len;
+ if ((din_cnt > 1) && !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_DATA_OUT:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_TO_DEVICE;
+ dout_cnt++;
+ dout_size += drv_buf_iter->bsg_buf_len;
+ if ((dout_cnt > 1) && !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ mpirep_offset = count;
+ break;
+ case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ erb_offset = count;
+ break;
+ case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ mpi_msg_size = buf_entries->buf_len;
+ if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
+ (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
+ dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+ memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
+ break;
+ default:
+ invalid_be = 1;
+ break;
+ }
+ if (invalid_be) {
+ dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter->bsg_buf = sgl_iter;
+ drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
+
+ }
+ if (!is_rmcb && (dout_cnt || din_cnt)) {
+ sg_entries = dout_cnt + din_cnt;
+ if (((mpi_msg_size) + (sg_entries *
+ sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid message size passed\n",
+ __func__, __LINE__);
+ rval = -EINVAL;
+ goto out;
+ }
+ }
+ if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
+ __func__, __LINE__, mpi_header->function, din_size);
+ rval = -EINVAL;
+ goto out;
+ }
+ if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
+ __func__, __LINE__, mpi_header->function, dout_size);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+
+ drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
+ if (is_rmcb && !count)
+ drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
+ sizeof(struct mpi3_sge_common));
+
+ if (!drv_buf_iter->kern_buf_len)
+ continue;
+
+ drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
+ drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
+ GFP_KERNEL);
+ if (!drv_buf_iter->kern_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
+ }
+ }
+
+ if (erb_offset != 0xFF) {
+ sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
+ if (!sense_buff_k) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
+ rval = -ERESTARTSYS;
+ goto out;
+ }
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
+ rval = -EAGAIN;
+ dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->unrecoverable) {
+ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+ __func__);
+ rval = -EFAULT;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+
+ if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
+ nvme_fmt = mpi3mr_get_nvme_data_fmt(
+ (struct mpi3_nvme_encapsulated_request *)mpi_req);
+ if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
+ if (mpi3mr_build_nvme_prp(mrioc,
+ (struct mpi3_nvme_encapsulated_request *)mpi_req,
+ drv_bufs, bufcnt)) {
+ rval = -ENOMEM;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
+ nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
+ if (mpi3mr_build_nvme_sgl(mrioc,
+ (struct mpi3_nvme_encapsulated_request *)mpi_req,
+ drv_bufs, bufcnt)) {
+ rval = -EINVAL;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else {
+ dprint_bsg_err(mrioc,
+ "%s:invalid NVMe command format\n", __func__);
+ rval = -EINVAL;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else {
+ mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
+ drv_bufs, bufcnt, is_rmcb, is_rmrb,
+ (dout_cnt + din_cnt));
+ }
+
+ if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
+ tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
+ if (tm_req->task_type !=
+ MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ dev_handle = tm_req->dev_handle;
+ block_io = 1;
+ }
+ }
+ if (block_io) {
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
+ stgt_priv = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ atomic_inc(&stgt_priv->block_io);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->bsg_cmds.is_waiting = 1;
+ mrioc->bsg_cmds.callback = NULL;
+ mrioc->bsg_cmds.is_sense = 0;
+ mrioc->bsg_cmds.sensebuf = sense_buff_k;
+ memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
+ mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
+ if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
+ dprint_bsg_info(mrioc,
+ "%s: posting bsg request to the controller\n", __func__);
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ "bsg_mpi3_req");
+ if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ drv_buf_iter = &drv_bufs[0];
+ dprint_dump(drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ }
+ }
+
+ init_completion(&mrioc->bsg_cmds.done);
+ rval = mpi3mr_admin_request_post(mrioc, mpi_req,
+ MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
+
+ if (rval) {
+ mrioc->bsg_cmds.is_waiting = 0;
+ dprint_bsg_err(mrioc,
+ "%s: posting bsg request is failed\n", __func__);
+ rval = -EAGAIN;
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->bsg_cmds.done,
+ (karg->timeout * HZ));
+ if (block_io && stgt_priv)
+ atomic_dec(&stgt_priv->block_io);
+ if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mrioc->bsg_cmds.is_waiting = 0;
+ rval = -EAGAIN;
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
+ goto out_unlock;
+ dprint_bsg_err(mrioc,
+ "%s: bsg request timedout after %d seconds\n", __func__,
+ karg->timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ "bsg_mpi3_req");
+ if (mpi_header->function ==
+ MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ drv_buf_iter = &drv_bufs[0];
+ dprint_dump(drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ }
+ }
+
+ if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
+ (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
+ mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ mpi_header->function_dependent, 0,
+ MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
+ &mrioc->host_tm_cmds, &resp_code, NULL);
+ if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
+ !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
+ goto out_unlock;
+ }
+ dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
+
+ if (mrioc->prp_list_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+ mrioc->prp_list_virt, mrioc->prp_list_dma);
+ mrioc->prp_list_virt = NULL;
+ }
+
+ if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_info(mrioc,
+ "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->bsg_cmds.ioc_loginfo);
+ }
+
+ if ((mpirep_offset != 0xFF) &&
+ drv_bufs[mpirep_offset].bsg_buf_len) {
+ drv_buf_iter = &drv_bufs[mpirep_offset];
+ drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
+ mrioc->reply_sz);
+ bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
+
+ if (!bsg_reply_buf) {
+ rval = -ENOMEM;
+ goto out_unlock;
+ }
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
+ bsg_reply_buf->mpi_reply_type =
+ MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
+ memcpy(bsg_reply_buf->reply_buf,
+ mrioc->bsg_cmds.reply, mrioc->reply_sz);
+ } else {
+ bsg_reply_buf->mpi_reply_type =
+ MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
+ status_desc = (struct mpi3_status_reply_descriptor *)
+ bsg_reply_buf->reply_buf;
+ status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
+ status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
+ }
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
+ }
+
+ if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
+ mrioc->bsg_cmds.is_sense) {
+ drv_buf_iter = &drv_bufs[erb_offset];
+ tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
+ }
+
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf,
+ drv_buf_iter->kern_buf, tmplen);
+ }
+ }
+
+out_unlock:
+ if (din_buf) {
+ *reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ din_buf, job->reply_payload.payload_len);
+ }
+ mrioc->bsg_cmds.is_sense = 0;
+ mrioc->bsg_cmds.sensebuf = NULL;
+ mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+out:
+ kfree(sense_buff_k);
+ kfree(dout_buf);
+ kfree(din_buf);
+ kfree(mpi_req);
+ if (drv_bufs) {
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
+ dma_free_coherent(&mrioc->pdev->dev,
+ drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_dma);
+ }
+ kfree(drv_bufs);
+ }
+ kfree(bsg_reply_buf);
+ return rval;
+}
+
+/**
+ * mpi3mr_app_save_logdata - Save Log Data events
+ * @mrioc: Adapter instance reference
+ * @event_data: event data associated with log data event
+ * @event_data_size: event data size to copy
+ *
+ * If log data event caching is enabled by the applications, then
+ * this function saves the log data in the circular queue and sends
+ * the async signal SIGIO to indicate to the event monitoring
+ * applications that there is an async event from the firmware.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+ u16 event_data_size)
+{
+ u32 index = mrioc->logdata_buf_idx, sz;
+ struct mpi3mr_logdata_entry *entry;
+
+ if (!(mrioc->logdata_buf))
+ return;
+
+ entry = (struct mpi3mr_logdata_entry *)
+ (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
+ entry->valid_entry = 1;
+ sz = min(mrioc->logdata_entry_sz, event_data_size);
+ memcpy(entry->data, event_data, sz);
+ mrioc->logdata_buf_idx =
+ ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
+ atomic64_inc(&event_counter);
+}
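+
+/*
+ * Ring behaviour sketch: with a hypothetical maximum of 4 entries the
+ * index above advances 0 -> 1 -> 2 -> 3 -> 0 -> ..., so the oldest
+ * entry is silently overwritten once the ring is full; applications
+ * are expected to drain it via MPI3MR_DRVBSG_OPCODE_GETLOGDATA.
+ */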
+
+/**
+ * mpi3mr_bsg_request - bsg request entry point
+ * @job: BSG job reference
+ *
+ * This is the driver's entry point for BSG requests.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static int mpi3mr_bsg_request(struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ unsigned int reply_payload_rcv_len = 0;
+
+ struct mpi3mr_bsg_packet *bsg_req = job->request;
+
+ switch (bsg_req->cmd_type) {
+ case MPI3MR_DRV_CMD:
+ rval = mpi3mr_bsg_process_drv_cmds(job);
+ break;
+ case MPI3MR_MPT_CMD:
+ rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
+ break;
+ default:
+ pr_err("%s: unsupported BSG command(0x%08x)\n",
+ MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
+ break;
+ }
+
+ bsg_job_done(job, rval, reply_payload_rcv_len);
+
+ return 0;
+}
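+
+/*
+ * Hedged userspace sketch (not driver code; built on the generic bsg
+ * API from <linux/bsg.h>, with the packet contents elided): requests
+ * reach mpi3mr_bsg_request() via SG_IO on the node registered in
+ * mpi3mr_bsg_init(), e.g. /dev/bsg/mpi3mrctl0:
+ *
+ *	struct mpi3mr_bsg_packet pkt = { .cmd_type = MPI3MR_DRV_CMD };
+ *	struct sg_io_v4 io = {
+ *		.guard		= 'Q',
+ *		.protocol	= BSG_PROTOCOL_SCSI,
+ *		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
+ *		.request	= (__u64)(uintptr_t)&pkt,
+ *		.request_len	= sizeof(pkt),
+ *	};
+ *	int fd = open("/dev/bsg/mpi3mrctl0", O_RDWR);
+ *	ioctl(fd, SG_IO, &io);
+ */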
+
+/**
+ * mpi3mr_bsg_exit - de-registration from bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver unload and all
+ * bsg resources allocated during load will be freed.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
+{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+
+ if (!mrioc->bsg_queue)
+ return;
+
+ bsg_remove_queue(mrioc->bsg_queue);
+ mrioc->bsg_queue = NULL;
+
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+}
+
+/**
+ * mpi3mr_bsg_node_release - release bsg device node
+ * @dev: bsg device node
+ *
+ * Decrements the bsg device's parent reference count.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_node_release(struct device *dev)
+{
+ put_device(dev->parent);
+}
+
+/**
+ * mpi3mr_bsg_init - registration with bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver load and it will
+ * register the driver with the bsg layer.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+ struct device *parent = &mrioc->shost->shost_gendev;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = mpi3mr_bsg_node_release;
+
+ dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
+
+ if (device_add(bsg_dev)) {
+ ioc_err(mrioc, "%s: bsg device add failed\n",
+ dev_name(bsg_dev));
+ put_device(bsg_dev);
+ return;
+ }
+
+ mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
+ mpi3mr_bsg_request, NULL, 0);
+ if (IS_ERR(mrioc->bsg_queue)) {
+ ioc_err(mrioc, "%s: bsg registration failed\n",
+ dev_name(bsg_dev));
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+ return;
+ }
+
+ blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
+ blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
+}
+
+/**
+ * version_fw_show - SysFS callback for firmware version read
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware version
+ */
+static ssize_t
+version_fw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+}
+static DEVICE_ATTR_RO(version_fw);
+
+/**
+ * fw_queue_depth_show - SysFS callback for firmware max cmds
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware max commands
+ */
+static ssize_t
+fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
+}
+static DEVICE_ATTR_RO(fw_queue_depth);
+
+/**
+ * op_req_q_count_show - SysFS callback for request queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying request queue count
+ */
+static ssize_t
+op_req_q_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
+}
+static DEVICE_ATTR_RO(op_req_q_count);
+
+/**
+ * reply_queue_count_show - SysFS callback for reply queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying reply queue count
+ */
+static ssize_t
+reply_queue_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
+}
+
+static DEVICE_ATTR_RO(reply_queue_count);
+
+/**
+ * logging_level_show - Show controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * A sysfs 'read/write' shost attribute, to show the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+logging_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
+}
+
+/**
+ * logging_level_store - Change controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ * @count: size of the buffer
+ *
+ * A sysfs 'read/write' shost attribute, to change the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: strlen() return
+ */
+static ssize_t
+logging_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int val = 0;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ mrioc->logging_level = val;
+ ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(logging_level);
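+
+/*
+ * Example sysfs usage (the host number varies per system):
+ *
+ *	# cat /sys/class/scsi_host/host0/logging_level
+ *	00000000h
+ *	# echo 0x10000 > /sys/class/scsi_host/host0/logging_level
+ *
+ * 0x10000 is MPI3_DEBUG_BSG_INFO, enabling the dprint_bsg_info()
+ * messages used throughout this file; levels OR together.
+ */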
+
+/**
+ * adp_state_show() - SysFS callback for adapter state show
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying adapter state
+ */
+static ssize_t
+adp_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ enum mpi3mr_iocstate ioc_state;
+ uint8_t adp_state;
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+ adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+ else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+ adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+ else if (ioc_state == MRIOC_STATE_FAULT)
+ adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+ else
+ adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+ return sysfs_emit(buf, "%u\n", adp_state);
+}
+
+static DEVICE_ATTR_RO(adp_state);
+
+static struct attribute *mpi3mr_host_attrs[] = {
+ &dev_attr_version_fw.attr,
+ &dev_attr_fw_queue_depth.attr,
+ &dev_attr_op_req_q_count.attr,
+ &dev_attr_reply_queue_count.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_adp_state.attr,
+ NULL,
+};
+
+static const struct attribute_group mpi3mr_host_attr_group = {
+ .attrs = mpi3mr_host_attrs
+};
+
+const struct attribute_group *mpi3mr_host_groups[] = {
+ &mpi3mr_host_attr_group,
+ NULL,
+};
+
+/*
+ * SCSI Device attributes under sysfs
+ */
+
+/**
+ * sas_address_show - SysFS callback for device SAS address display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying SAS address of the
+ * specific SAS/SATA end device.
+ */
+static ssize_t
+sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
+ return 0;
+ return sysfs_emit(buf, "0x%016llx\n",
+ (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
+}
+
+static DEVICE_ATTR_RO(sas_address);
+
+/**
+ * device_handle_show - SysFS callback for device handle display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware internal
+ * device handle of the specific device.
+ */
+static ssize_t
+device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev)
+ return 0;
+ return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
+}
+
+static DEVICE_ATTR_RO(device_handle);
+
+/**
+ * persistent_id_show - SysFS callback for persistent ID display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying the persistent ID of
+ * the specific device.
+ */
+static ssize_t
+persistent_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev)
+ return 0;
+ return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
+}
+static DEVICE_ATTR_RO(persistent_id);
+
+static struct attribute *mpi3mr_dev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_device_handle.attr,
+ &dev_attr_persistent_id.attr,
+ NULL,
+};
+
+static const struct attribute_group mpi3mr_dev_attr_group = {
+ .attrs = mpi3mr_dev_attrs
+};
+
+const struct attribute_group *mpi3mr_dev_groups[] = {
+ &mpi3mr_dev_attr_group,
+ NULL,
+};
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
new file mode 100644
index 000000000000..ee6edd8322e6
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3SAS_DEBUG_H_INCLUDED
+#define MPI3SAS_DEBUG_H_INCLUDED
+
+/*
+ * debug levels
+ */
+
+#define MPI3_DEBUG_EVENT 0x00000001
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002
+#define MPI3_DEBUG_INIT 0x00000004
+#define MPI3_DEBUG_EXIT 0x00000008
+#define MPI3_DEBUG_TM 0x00000010
+#define MPI3_DEBUG_RESET 0x00000020
+#define MPI3_DEBUG_SCSI_ERROR 0x00000040
+#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_CFG_ERROR 0x00000100
+#define MPI3_DEBUG_TRANSPORT_ERROR 0x00000200
+#define MPI3_DEBUG_BSG_ERROR 0x00008000
+#define MPI3_DEBUG_BSG_INFO 0x00010000
+#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG_CFG_INFO 0x00040000
+#define MPI3_DEBUG_TRANSPORT_INFO 0x00080000
+#define MPI3_DEBUG 0x01000000
+#define MPI3_DEBUG_SG 0x02000000
+
+/*
+ * debug macros
+ */
+
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+
+#define dprint(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_th(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_bh(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_init(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_INIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_exit(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EXIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_tm(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TM) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reply(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_REPLY) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reset(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_RESET) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \
+ do { \
+ if (ioc->logging_level & LOG_LEVEL) \
+ scsi_print_command(SCMD); \
+ } while (0)
+
+
+#define dprint_bsg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_BSG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_bsg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_BSG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+#define dprint_transport_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_transport_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * dprint_dump - print contents of a memory buffer
+ * @req: Pointer to a memory buffer
+ * @sz: Memory buffer size in bytes
+ * @name_string: name string to identify the buffer in the log
+ */
+static inline void
+dprint_dump(void *req, int sz, const char *name_string)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ sz = sz / 4;
+ if (name_string)
+ pr_info("%s:\n\t", name_string);
+ else
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+
+/**
+ * dprint_dump_req - print message frame contents
+ * @req: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+dprint_dump_req(void *req, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+
+#endif /* MPI3MR_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
new file mode 100644
index 000000000000..0c4aabaefdcc
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -0,0 +1,5780 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static int
+mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data);
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ writeq(b, addr);
+}
+#else
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ __u64 data_out = b;
+
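+ /*
+ * No native 64-bit MMIO write is available here, so the 64-bit
+ * value is split into two 32-bit writes, low dword first.
+ */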
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+}
+#endif
+
+static inline bool
+mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
+{
+ u16 pi, ci, max_entries;
+ bool is_qfull = false;
+
+ pi = op_req_q->pi;
+ ci = READ_ONCE(op_req_q->ci);
+ max_entries = op_req_q->num_requests;
+
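+ /*
+ * The circular queue is full when the producer index sits one
+ * slot behind the consumer index; one slot is always left unused
+ * so that a full queue can be distinguished from an empty one.
+ */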
+ if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
+ is_qfull = true;
+
+ return is_qfull;
+}
+
+static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
+{
+ u16 i, max_vectors;
+
+ max_vectors = mrioc->intr_info_count;
+
+ for (i = 0; i < max_vectors; i++)
+ synchronize_irq(pci_irq_vector(mrioc->pdev, i));
+}
+
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 0;
+ mpi3mr_sync_irqs(mrioc);
+}
+
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 1;
+}
+
+static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ if (!mrioc->intr_info)
+ return;
+
+ for (i = 0; i < mrioc->intr_info_count; i++)
+ free_irq(pci_irq_vector(mrioc->pdev, i),
+ (mrioc->intr_info + i));
+
+ kfree(mrioc->intr_info);
+ mrioc->intr_info = NULL;
+ mrioc->intr_info_count = 0;
+ mrioc->is_intr_info_set = false;
+ pci_free_irq_vectors(mrioc->pdev);
+}
+
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr)
+{
+ struct mpi3_sge_common *sgel = paddr;
+
+ sgel->flags = flags;
+ sgel->length = cpu_to_le32(length);
+ sgel->address = cpu_to_le64(dma_addr);
+}
+
+void mpi3mr_build_zero_len_sge(void *paddr)
+{
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
+}
+
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ if ((phys_addr < mrioc->reply_buf_dma) ||
+ (phys_addr > mrioc->reply_buf_dma_max_address))
+ return NULL;
+
+ return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
+}
+
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
+}
+
+static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
+ u64 reply_dma)
+{
+ u32 old_idx = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
+ old_idx = mrioc->reply_free_queue_host_index;
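+ /*
+ * Advance the host index with wrap-around and post the freed
+ * reply buffer at the previous slot.
+ */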
+ mrioc->reply_free_queue_host_index = (
+ (mrioc->reply_free_queue_host_index ==
+ (mrioc->reply_free_qsz - 1)) ? 0 :
+ (mrioc->reply_free_queue_host_index + 1));
+ mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+ spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
+}
+
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma)
+{
+ u32 old_idx = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sbq_lock, flags);
+ old_idx = mrioc->sbq_host_index;
+ mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
+ (mrioc->sense_buf_q_sz - 1)) ? 0 :
+ (mrioc->sbq_host_index + 1));
+ mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+ spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
+}
+
+static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ event = event_reply->event;
+
+ switch (event) {
+ case MPI3_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI3_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI3_EVENT_GPIO_INTERRUPT:
+ desc = "GPIO Interrupt";
+ break;
+ case MPI3_EVENT_CABLE_MGMT:
+ desc = "Cable Management";
+ break;
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ desc = "Energy Pack Change";
+ break;
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ struct mpi3_event_data_device_status_change *event_data =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+ ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
+ event_data->dev_handle, event_data->reason_code);
+ return;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
+ {
+ struct mpi3_event_data_sas_discovery *event_data =
+ (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
+ ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
+ (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop",
+ le32_to_cpu(event_data->discovery_status));
+ return;
+ }
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
+ desc = "SAS Notify Primitive";
+ break;
+ case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "Enclosure Device Status Change";
+ break;
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ desc = "Enclosure Added";
+ break;
+ case MPI3_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI3_EVENT_SAS_PHY_COUNTER:
+ desc = "SAS PHY Counter";
+ break;
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ desc = "SAS Device Discovery Error";
+ break;
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ desc = "PCIE Topology Change List";
+ break;
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ {
+ struct mpi3_event_data_pcie_enumeration *event_data =
+ (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
+ ioc_info(mrioc, "PCIE Enumeration: (%s)",
+ (event_data->reason_code ==
+ MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
+ if (event_data->enumeration_status)
+ ioc_info(mrioc, "enumeration_status(0x%08x)\n",
+ le32_to_cpu(event_data->enumeration_status));
+ return;
+ }
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ desc = "Prepare For Reset";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ ioc_info(mrioc, "%s\n", desc);
+}
+
+static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply *def_reply)
+{
+ struct mpi3_event_notification_reply *event_reply =
+ (struct mpi3_event_notification_reply *)def_reply;
+
+ mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
+ mpi3mr_print_event_data(mrioc, event_reply);
+ mpi3mr_os_handle_events(mrioc, event_reply);
+}
+
+static struct mpi3mr_drv_cmd *
+mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
+ struct mpi3_default_reply *def_reply)
+{
+ u16 idx;
+
+ switch (host_tag) {
+ case MPI3MR_HOSTTAG_INITCMDS:
+ return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_CFG_CMDS:
+ return &mrioc->cfg_cmds;
+ case MPI3MR_HOSTTAG_BSG_CMDS:
+ return &mrioc->bsg_cmds;
+ case MPI3MR_HOSTTAG_BLK_TMS:
+ return &mrioc->host_tm_cmds;
+ case MPI3MR_HOSTTAG_PEL_ABORT:
+ return &mrioc->pel_abort_cmd;
+ case MPI3MR_HOSTTAG_PEL_WAIT:
+ return &mrioc->pel_cmds;
+ case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
+ return &mrioc->transport_cmds;
+ case MPI3MR_HOSTTAG_INVALID:
+ if (def_reply && def_reply->function ==
+ MPI3_FUNCTION_EVENT_NOTIFICATION)
+ mpi3mr_handle_events(mrioc, def_reply);
+ return NULL;
+ default:
+ break;
+ }
+ if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ return &mrioc->dev_rmhs_cmds[idx];
+ }
+
+ if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ return &mrioc->evtack_cmds[idx];
+ }
+
+ return NULL;
+}
+
+static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_address_reply_descriptor *addr_desc;
+ struct mpi3_success_reply_descriptor *success_desc;
+ struct mpi3_default_reply *def_reply = NULL;
+ struct mpi3mr_drv_cmd *cmdptr = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply;
+ u8 *sense_buf = NULL;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
+ if (!def_reply)
+ goto out;
+ host_tag = le16_to_cpu(def_reply->host_tag);
+ ioc_status = le16_to_cpu(def_reply->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
+ scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+
+ cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
+ if (cmdptr) {
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_COMPLETE;
+ cmdptr->ioc_loginfo = ioc_loginfo;
+ cmdptr->ioc_status = ioc_status;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (def_reply) {
+ cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
+ memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
+ mrioc->reply_sz);
+ }
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+ }
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
+static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+{
+ u32 exp_phase = mrioc->admin_reply_ephase;
+ u32 admin_reply_ci = mrioc->admin_reply_ci;
+ u32 num_admin_replies = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+
+ reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+
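+ /*
+ * Each reply descriptor carries a phase bit that is toggled on
+ * every pass through the circular queue; a descriptor whose
+ * phase bit does not match the expected phase has not been
+ * posted by the controller yet.
+ */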
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ return 0;
+
+ do {
+ if (mrioc->unrecoverable)
+ break;
+
+ mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
+ mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_admin_replies++;
+ if (++admin_reply_ci == mrioc->num_admin_replies) {
+ admin_reply_ci = 0;
+ exp_phase ^= 1;
+ }
+ reply_desc =
+ (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+ } while (1);
+
+ writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ mrioc->admin_reply_ci = admin_reply_ci;
+ mrioc->admin_reply_ephase = exp_phase;
+
+ return num_admin_replies;
+}
+
+/**
+ * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
+ * queue's consumer index from operational reply descriptor queue.
+ * @op_reply_q: op_reply_qinfo object
+ * @reply_ci: operational reply descriptor's queue consumer index
+ *
+ * Return: Address of the reply descriptor frame corresponding to @reply_ci.
+ */
+static inline struct mpi3_default_reply_descriptor *
+mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
+{
+ void *segment_base_addr;
+ struct segments *segments = op_reply_q->q_segments;
+ struct mpi3_default_reply_descriptor *reply_desc = NULL;
+
+ segment_base_addr =
+ segments[reply_ci / op_reply_q->segment_qd].segment;
+ reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
+ (reply_ci % op_reply_q->segment_qd);
+ return reply_desc;
+}
+
+/**
+ * mpi3mr_process_op_reply_q - Operational reply queue handler
+ * @mrioc: Adapter instance reference
+ * @op_reply_q: Operational reply queue info
+ *
+ * Checks the given operational reply queue, processes the
+ * individual reply descriptors, and drains the queue entries
+ * until the queue is empty.
+ *
+ * Return: 0 if the queue is already being processed, or the
+ * number of reply descriptors processed.
+ */
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q)
+{
+ struct op_req_qinfo *op_req_q;
+ u32 exp_phase;
+ u32 reply_ci;
+ u32 num_op_reply = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+ u16 req_q_idx = 0, reply_qidx;
+
+ reply_qidx = op_reply_q->qid - 1;
+
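+ /*
+ * in_use serializes processing of this reply queue: if another
+ * context (interrupt, IRQ poll or blk-mq poll) already owns the
+ * queue, bail out and let that context drain the entries.
+ */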
+ if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
+ return 0;
+
+ exp_phase = op_reply_q->ephase;
+ reply_ci = op_reply_q->ci;
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&op_reply_q->in_use);
+ return 0;
+ }
+
+ do {
+ if (mrioc->unrecoverable)
+ break;
+
+ req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
+ op_req_q = &mrioc->req_qinfo[req_q_idx];
+
+ WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
+ mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
+ reply_qidx);
+ atomic_dec(&op_reply_q->pend_ios);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_op_reply++;
+
+ if (++reply_ci == op_reply_q->num_replies) {
+ reply_ci = 0;
+ exp_phase ^= 1;
+ }
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+#ifndef CONFIG_PREEMPT_RT
+ /*
+ * Exit completion loop to avoid CPU lockup
+ * Ensure remaining completion happens from threaded ISR.
+ */
+ if (num_op_reply > mrioc->max_host_ios) {
+ op_reply_q->enable_irq_poll = true;
+ break;
+ }
+#endif
+ } while (1);
+
+ writel(reply_ci,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+ op_reply_q->ci = reply_ci;
+ op_reply_q->ephase = exp_phase;
+
+ atomic_dec(&op_reply_q->in_use);
+ return num_op_reply;
+}
+
+/**
+ * mpi3mr_blk_mq_poll - blk-mq poll handler for operational reply queues
+ * @shost: SCSI Host reference
+ * @queue_num: Request queue number (the hardware context number,
+ * from the OS point of view)
+ *
+ * Checks the given operational reply queue, processes the
+ * individual reply descriptors, and drains the queue entries
+ * until the queue is empty.
+ *
+ * Return: 0 if the queue is already being processed, or the
+ * number of reply descriptors processed.
+ */
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct mpi3mr_ioc *mrioc;
+
+ mrioc = (struct mpi3mr_ioc *)shost->hostdata;
+
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
+ mrioc->unrecoverable))
+ return 0;
+
+ num_entries = mpi3mr_process_op_reply_q(mrioc,
+ &mrioc->op_reply_qinfo[queue_num]);
+
+ return num_entries;
+}
+
+static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_admin_replies = 0, num_op_reply = 0;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+
+ if (!mrioc->intr_enabled)
+ return IRQ_NONE;
+
+ midx = intr_info->msix_index;
+
+ if (!midx)
+ num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
+
+ if (num_admin_replies || num_op_reply)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+#ifndef CONFIG_PREEMPT_RT
+
+static irqreturn_t mpi3mr_isr(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ int ret;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ /* Call primary ISR routine */
+ ret = mpi3mr_isr_primary(irq, privdata);
+
+ /*
+ * If more IOs are expected, schedule IRQ polling thread.
+ * Otherwise exit from ISR.
+ */
+ if (!intr_info->op_reply_q)
+ return ret;
+
+ if (!intr_info->op_reply_q->enable_irq_poll ||
+ !atomic_read(&intr_info->op_reply_q->pend_ios))
+ return ret;
+
+ disable_irq_nosync(intr_info->os_irq);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mpi3mr_isr_poll - Reply queue polling routine
+ * @irq: IRQ
+ * @privdata: Interrupt info
+ *
+ * Polls for pending I/O completions in a loop until no pending
+ * I/Os remain or until a controller queue depth worth of I/Os
+ * has been processed.
+ *
+ * Return: IRQ_NONE or IRQ_HANDLED
+ */
+static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_op_reply = 0;
+
+ if (!intr_info || !intr_info->op_reply_q)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+
+ /* Poll for pending IOs completions */
+ do {
+ if (!mrioc->intr_enabled || mrioc->unrecoverable)
+ break;
+
+ if (!midx)
+ mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply +=
+ mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
+
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
+
+ } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
+ (num_op_reply < mrioc->max_host_ios));
+
+ intr_info->op_reply_q->enable_irq_poll = false;
+ enable_irq(intr_info->os_irq);
+
+ return IRQ_HANDLED;
+}
+
+#endif
+
+/**
+ * mpi3mr_request_irq - Request IRQ and register ISR
+ * @mrioc: Adapter instance reference
+ * @index: IRQ vector index
+ *
+ * Request a threaded IRQ with mpi3mr_isr as the primary handler
+ * and mpi3mr_isr_poll as the secondary (threaded) handler; on
+ * PREEMPT_RT kernels only mpi3mr_isr_primary is registered.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
+ int retval = 0;
+
+ intr_info->mrioc = mrioc;
+ intr_info->msix_index = index;
+ intr_info->op_reply_q = NULL;
+
+ snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
+ mrioc->driver_name, mrioc->id, index);
+
+#ifndef CONFIG_PREEMPT_RT
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
+ mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+#else
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
+ NULL, IRQF_SHARED, intr_info->name, intr_info);
+#endif
+ if (retval) {
+ ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
+ intr_info->name, pci_irq_vector(pdev, index));
+ return retval;
+ }
+
+ intr_info->os_irq = pci_irq_vector(pdev, index);
+ return retval;
+}
+
+static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
+{
+ if (!mrioc->requested_poll_qcount)
+ return;
+
+ /* Reserved for Admin and Default Queue */
+ if (max_vectors > 2 &&
+ (mrioc->requested_poll_qcount < max_vectors - 2)) {
+ ioc_info(mrioc,
+ "enabled polled queues (%d) msix (%d)\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ } else {
+ ioc_info(mrioc,
+ "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ mrioc->requested_poll_qcount = 0;
+ }
+}
+
+/**
+ * mpi3mr_setup_isr - Setup ISR for the controller
+ * @mrioc: Adapter instance reference
+ * @setup_one: Request a single IRQ vector when set
+ *
+ * Allocate IRQ vectors and call mpi3mr_request_irq to set up the
+ * ISR for each vector.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
+{
+ unsigned int irq_flags = PCI_IRQ_MSIX;
+ int max_vectors, min_vec;
+ int retval;
+ int i;
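+ /*
+ * Keep the first vector (admin/default queue) and the last
+ * vector (poll queues) out of the affinity spread; post_vectors
+ * is updated below to the number of requested poll queues.
+ */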
+ struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
+
+ if (mrioc->is_intr_info_set)
+ return 0;
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (setup_one || reset_devices) {
+ max_vectors = 1;
+ retval = pci_alloc_irq_vectors(mrioc->pdev,
+ 1, max_vectors, irq_flags);
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else {
+ max_vectors =
+ min_t(int, mrioc->cpu_count + 1 +
+ mrioc->requested_poll_qcount, mrioc->msix_count);
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
+ ioc_info(mrioc,
+ "MSI-X vectors supported: %d, no of cores: %d,",
+ mrioc->msix_count, mrioc->cpu_count);
+ ioc_info(mrioc,
+ "MSI-x vectors requested: %d poll_queues %d\n",
+ max_vectors, mrioc->requested_poll_qcount);
+
+ desc.post_vectors = mrioc->requested_poll_qcount;
+ min_vec = desc.pre_vectors + desc.post_vectors;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ min_vec, max_vectors, irq_flags, &desc);
+
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+
+
+ /*
+ * If only one MSI-x is allocated, then MSI-x 0 will be shared
+ * between Admin queue and operational queue
+ */
+ if (retval == min_vec)
+ mrioc->op_reply_q_offset = 0;
+ else if (retval != (max_vectors)) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ }
+
+ max_vectors = retval;
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
+ }
+
+ mrioc->intr_info = kcalloc(max_vectors, sizeof(struct mpi3mr_intr_info),
+ GFP_KERNEL);
+ if (!mrioc->intr_info) {
+ retval = -ENOMEM;
+ pci_free_irq_vectors(mrioc->pdev);
+ goto out_failed;
+ }
+ for (i = 0; i < max_vectors; i++) {
+ retval = mpi3mr_request_irq(mrioc, i);
+ if (retval) {
+ mrioc->intr_info_count = i;
+ goto out_failed;
+ }
+ }
+ if (reset_devices || !setup_one)
+ mrioc->is_intr_info_set = true;
+ mrioc->intr_info_count = max_vectors;
+ mpi3mr_ioc_enable_intr(mrioc);
+ return 0;
+
+out_failed:
+ mpi3mr_cleanup_isr(mrioc);
+
+ return retval;
+}
+
+static const struct {
+ enum mpi3mr_iocstate value;
+ char *name;
+} mrioc_states[] = {
+ { MRIOC_STATE_READY, "ready" },
+ { MRIOC_STATE_FAULT, "fault" },
+ { MRIOC_STATE_RESET, "reset" },
+ { MRIOC_STATE_BECOMING_READY, "becoming ready" },
+ { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
+ { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
+};
+
+static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
+ if (mrioc_states[i].value == mrioc_state) {
+ name = mrioc_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset reason to name mapper structure*/
+static const struct {
+ enum mpi3mr_reset_reason value;
+ char *name;
+} mpi3mr_reset_reason_codes[] = {
+ { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
+ { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
+ { MPI3MR_RESET_FROM_APP, "application invocation" },
+ { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
+ { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
+ { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
+ { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
+ { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
+ { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
+ { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
+ { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
+ { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
+ { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
+ {
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
+ "create reply queue timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
+ "create request queue timeout"
+ },
+ { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
+ { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
+ { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
+ { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
+ {
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER,
+ "component image activation timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
+ "get package version timeout"
+ },
+ { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
+ { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+ { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
+ { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
+ { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
+};
+
+/**
+ * mpi3mr_reset_rc_name - get reset reason code name
+ * @reason_code: reset reason code value
+ *
+ * Map a reset reason code to a NULL-terminated ASCII string.
+ *
+ * Return: name corresponding to reset reason value or NULL.
+ */
+static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
+ if (mpi3mr_reset_reason_codes[i].value == reason_code) {
+ name = mpi3mr_reset_reason_codes[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset type to name mapper structure*/
+static const struct {
+ u16 reset_type;
+ char *name;
+} mpi3mr_reset_types[] = {
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
+};
+
+/**
+ * mpi3mr_reset_type_name - get reset type name
+ * @reset_type: reset type value
+ *
+ * Map a reset type to a NULL-terminated ASCII string.
+ *
+ * Return: name corresponding to reset type value or NULL.
+ */
+static const char *mpi3mr_reset_type_name(u16 reset_type)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
+ if (mpi3mr_reset_types[i].reset_type == reset_type) {
+ name = mpi3mr_reset_types[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/**
+ * mpi3mr_print_fault_info - Display fault information
+ * @mrioc: Adapter instance reference
+ *
+ * Display the controller fault information if there is a
+ * controller fault.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, code, code1, code2, code3;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ code = readl(&mrioc->sysif_regs->fault);
+ code1 = readl(&mrioc->sysif_regs->fault_info[0]);
+ code2 = readl(&mrioc->sysif_regs->fault_info[1]);
+ code3 = readl(&mrioc->sysif_regs->fault_info[2]);
+
+ ioc_info(mrioc,
+ "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
+ code, code1, code2, code3);
+ }
+}
+
+/**
+ * mpi3mr_get_iocstate - Get IOC State
+ * @mrioc: Adapter instance reference
+ *
+ * Return the proper IOC state enum based on the IOC status, the
+ * IOC configuration, and the unrecoverable state of the controller.
+ *
+ * Return: Current IOC state.
+ */
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, ioc_config;
+ u8 ready, enabled;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->unrecoverable)
+ return MRIOC_STATE_UNRECOVERABLE;
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
+ return MRIOC_STATE_FAULT;
+
+ ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
+ enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
+
+ if (ready && enabled)
+ return MRIOC_STATE_READY;
+ if ((!ready) && (!enabled))
+ return MRIOC_STATE_RESET;
+ if ((!ready) && (enabled))
+ return MRIOC_STATE_BECOMING_READY;
+
+ return MRIOC_STATE_RESET_REQUESTED;
+}
+
+/**
+ * mpi3mr_clear_reset_history - clear reset history
+ * @mrioc: Adapter instance reference
+ *
+ * Write the reset history bit in IOC status to clear the bit,
+ * if it is already set.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ writel(ioc_status, &mrioc->sysif_regs->ioc_status);
+}
+
+/**
+ * mpi3mr_issue_and_process_mur - Message unit Reset handler
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ *
+ * Issue a Message unit Reset to the controller and wait for it
+ * to complete.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason)
+{
+ u32 ioc_config, timeout, ioc_status;
+ int retval = -1;
+
+ ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
+ if (mrioc->unrecoverable) {
+ ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
+ return retval;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
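+ /* wait for the controller to acknowledge the MUR, polling every 100ms */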
+ timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
+ mpi3mr_clear_reset_history(mrioc);
+ break;
+ }
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ retval = 0;
+
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status, ioc_config);
+ return retval;
+}
+
+/**
+ * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
+ * during reset/resume
+ * @mrioc: Adapter instance reference
+ *
+ * Return: 0 if the new IOCFacts parameter values are compatible
+ * with the older values, -EPERM otherwise.
+ */
+static int
+mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+{
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+
+ if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+ ioc_err(mrioc,
+ "cannot increase reply size from %d to %d\n",
+ mrioc->reply_sz, mrioc->facts.reply_sz);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational reply queues from %d to %d\n",
+ mrioc->num_op_reply_q,
+ mrioc->facts.max_op_reply_q);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational request queues from %d to %d\n",
+ mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
+ return -EPERM;
+ }
+
+ if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
+ ioc_err(mrioc,
+ "critical error: multipath capability is enabled at the\n"
+ "\tcontroller while sas transport support is enabled at the\n"
+ "\tdriver, please reboot the system or reload the driver\n");
+
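+ /* one bit per device handle, rounded up to a whole number of bytes */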
+ dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ dev_handle_bitmap_sz++;
+ if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
+ removepend_bitmap = krealloc(mrioc->removepend_bitmap,
+ dev_handle_bitmap_sz, GFP_KERNEL);
+ if (!removepend_bitmap) {
+ ioc_err(mrioc,
+ "failed to increase removepend_bitmap sz from: %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ return -EPERM;
+ }
+ memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
+ dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
+ mrioc->removepend_bitmap = removepend_bitmap;
+ ioc_info(mrioc,
+ "increased dev_handle_bitmap_sz from %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_bring_ioc_ready - Bring controller to ready state
+ * @mrioc: Adapter instance reference
+ *
+ * Set Enable IOC bit in IOC configuration register and wait for
+ * the controller to become ready.
+ *
+ * Return: 0 on success, appropriate error on failure.
+ */
+static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, ioc_status, timeout;
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
+ ioc_status, ioc_config, base_info);
+
+ /* the timeout value is in units of 2 seconds; convert it to seconds */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "controller is in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present while waiting to reset\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "controller is in %s state after waiting to reset\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (retval)
+ ioc_err(mrioc,
+ "message unit reset failed with error %d current state %s\n",
+ retval, mpi3mr_iocstate_name(ioc_state));
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "soft reset failed with error %d\n", retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ ioc_err(mrioc,
+ "cannot bring controller to reset state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ goto out_failed;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup admin queues: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "bringing controller to ready state\n");
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc,
+ "successfully transitioned to %s state\n",
+ mpi3mr_iocstate_name(ioc_state));
+ return 0;
+ }
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present at the bringup\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+ msleep(100);
+ } while (--timeout);
+
+out_failed:
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_err(mrioc,
+ "failed to bring to ready state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+out_device_not_present:
+ return retval;
+}
+
+/**
+ * mpi3mr_soft_reset_success - Check whether the soft reset succeeded
+ * @ioc_status: IOC status register value
+ * @ioc_config: IOC config register value
+ *
+ * Check whether the soft reset succeeded based on the IOC status
+ * and IOC config register values.
+ *
+ * Return: true when the soft reset succeeded, false otherwise.
+ */
+static inline bool
+mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
+{
+ if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ return true;
+ return false;
+}
+
+/**
+ * mpi3mr_diagfault_success - Check whether a diag fault occurred
+ * @mrioc: Adapter reference
+ * @ioc_status: IOC status register value
+ *
+ * Check whether the controller hit the diag reset fault code.
+ *
+ * Return: true when there is a diag fault, false otherwise.
+ */
+static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
+ u32 ioc_status)
+{
+ u32 fault;
+
+ if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
+ return false;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * mpi3mr_set_diagsave - Set diag save bit for snapdump
+ * @mrioc: Adapter reference
+ *
+ * Set diag save bit in IOC configuration register to enable
+ * snapdump.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config;
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+}
+
+/**
+ * mpi3mr_issue_reset - Issue reset to the controller
+ * @mrioc: Adapter reference
+ * @reset_type: Reset type
+ * @reset_reason: Reset reason code
+ *
+ * Unlock the host diagnostic registers, write the specific reset
+ * type to them, and wait for reset acknowledgment from the
+ * controller. The unlock sequence is retried a predefined number
+ * of times if it does not succeed.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
+ u32 reset_reason)
+{
+ int retval = -1;
+ u8 unlock_retry_count = 0;
+ u32 host_diagnostic, ioc_status, ioc_config;
+ u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
+
+ if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
+ (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
+ return retval;
+ if (mrioc->unrecoverable)
+ return retval;
+ if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
+ retval = 0;
+ return retval;
+ }
+
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+
+ mpi3mr_clear_reset_history(mrioc);
+ do {
+ ioc_info(mrioc,
+ "Write magic sequence to unlock host diag register (retry=%d)\n",
+ ++unlock_retry_count);
+ if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
+ ioc_err(mrioc,
+ "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ host_diagnostic);
+ mrioc->unrecoverable = 1;
+ return retval;
+ }
+
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
+ &mrioc->sysif_regs->write_sequence);
+ usleep_range(1000, 1100);
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ ioc_info(mrioc,
+ "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
+ unlock_retry_count, host_diagnostic);
+ } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
+
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ writel(host_diagnostic | reset_type,
+ &mrioc->sysif_regs->host_diagnostic);
+ switch (reset_type) {
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ && mpi3mr_soft_reset_success(ioc_status, ioc_config)
+ ) {
+ mpi3mr_clear_reset_history(mrioc);
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+ break;
+ default:
+ break;
+ }
+
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_info(mrioc,
+ "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+ if (retval)
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_admin_request_post - Post request to admin queue
+ * @mrioc: Adapter reference
+ * @admin_req: MPI3 request
+ * @admin_req_sz: Request size
+ * @ignore_reset: Ignore reset in process
+ *
+ * Post the MPI3 request into the admin request queue and
+ * inform the controller; if the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset)
+{
+ u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
+ int retval = 0;
+ unsigned long flags;
+ u8 *areq_entry;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&mrioc->admin_req_lock, flags);
+ areq_pi = mrioc->admin_req_pi;
+ areq_ci = mrioc->admin_req_ci;
+ max_entries = mrioc->num_admin_req;
+ if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
+ (areq_pi == (max_entries - 1)))) {
+ ioc_err(mrioc, "AdminReqQ full condition detected\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ if (!ignore_reset && mrioc->reset_in_progress) {
+ ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ areq_entry = (u8 *)mrioc->admin_req_base +
+ (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
+
+ if (++areq_pi == max_entries)
+ areq_pi = 0;
+ mrioc->admin_req_pi = areq_pi;
+
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+
+out:
+ spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_free_op_req_q_segments - free request memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational request queue index
+ *
+ * Free memory segments allocated for operational request queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->req_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+ if (mrioc->req_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->req_qinfo[q_idx].q_segment_list,
+ mrioc->req_qinfo[q_idx].q_segment_list_dma);
+ mrioc->req_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->req_qinfo[q_idx].segment_qd *
+ mrioc->facts.op_req_sz;
+
+ for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+ kfree(mrioc->req_qinfo[q_idx].q_segments);
+ mrioc->req_qinfo[q_idx].q_segments = NULL;
+ mrioc->req_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_free_op_reply_q_segments - free reply memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational reply queue index
+ *
+ * Free memory segments allocated for operational reply queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->op_reply_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+ if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
+ mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->op_reply_qinfo[q_idx].segment_qd *
+ mrioc->op_reply_desc_sz;
+
+ for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+
+ kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
+ mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
+ mrioc->op_reply_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_delete_op_reply_q - delete operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Delete the operational reply queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_delete_reply_queue_request delq_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = op_reply_q->qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (!reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
+ goto out;
+ }
+
+ if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE)
+ mrioc->default_qcount--;
+ else
+ mrioc->active_poll_qcount--;
+
+ memset(&delq_req, 0, sizeof(delq_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
+ delq_req.queue_id = cpu_to_le16(reply_qid);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
+ 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "delete reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ mrioc->intr_info[midx].op_reply_q = NULL;
+
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
+ * @mrioc: Adapter instance reference
+ * @qidx: reply queue index
+ *
+ * Allocate segmented memory pools for operational reply
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
+ if (mrioc->enable_segqueue) {
+ op_reply_q->segment_qd =
+ MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
+
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+
+ op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_reply_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
+ } else {
+ op_reply_q->segment_qd = op_reply_q->num_replies;
+ size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
+ }
+
+ op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
+ op_reply_q->segment_qd);
+
+ op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_reply_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
+ * @mrioc: Adapter instance reference
+ * @qidx: request queue index
+ *
+ * Allocate segmented memory pools for operational request
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
+ if (mrioc->enable_segqueue) {
+ op_req_q->segment_qd =
+ MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
+
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+
+ op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_req_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
+
+ } else {
+ op_req_q->segment_qd = op_req_q->num_requests;
+ size = op_req_q->num_requests * mrioc->facts.op_req_sz;
+ }
+
+ op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
+ op_req_q->segment_qd);
+
+ op_req_q->q_segments = kcalloc(op_req_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_req_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_create_op_reply_q - create operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Create the operational reply queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_create_reply_queue_request create_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = op_reply_q->qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
+ reply_qid);
+
+ return retval;
+ }
+
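+ /* operational reply queue IDs are 1-based; a qid of 0 marks an unused queue */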
+ reply_qid = qidx + 1;
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ if (!mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ op_reply_q->ci = 0;
+ op_reply_q->ephase = 1;
+ atomic_set(&op_reply_q->pend_ios, 0);
+ atomic_set(&op_reply_q->in_use, 0);
+ op_reply_q->enable_irq_poll = false;
+
+ if (!op_reply_q->q_segments) {
+ retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
+ if (retval) {
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
+ create_req.queue_id = cpu_to_le16(reply_qid);
+
+ if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
+ op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
+ else
+ op_reply_q->qtype = MPI3MR_POLL_QUEUE;
+
+ if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
+ create_req.flags =
+ MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index =
+ cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ } else {
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
+ ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
+ reply_qid, midx);
+ if (!mrioc->active_poll_qcount)
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev,
+ mrioc->intr_info_count - 1));
+ }
+
+ if (mrioc->enable_segqueue) {
+ create_req.flags |=
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segments[0].segment_dma);
+
+ create_req.size = cpu_to_le16(op_reply_q->num_replies);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "create reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_reply_q->qid = reply_qid;
+ if (midx < mrioc->intr_info_count)
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
+ mrioc->active_poll_qcount++;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
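+
+/*
+ * Mapping sketch for the function above, assuming
+ * REPLY_QUEUE_IDX_TO_MSIX_IDX() is a simple offset add and one MSI-X
+ * vector is reserved ahead of the operational queues
+ * (op_reply_q_offset = 1): reply queue index 0 is assigned qid 1 and
+ * serviced by MSI-X index 1. Queues whose midx lands at or beyond
+ * (intr_info_count - requested_poll_qcount) are created as poll
+ * queues with interrupts disabled.
+ */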
+
+/**
+ * mpi3mr_create_op_req_q - create operational request queue
+ * @mrioc: Adapter instance reference
+ * @idx: operational request queue index
+ * @reply_qid: Reply queue ID
+ *
+ * Create an operational request queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
+ u16 reply_qid)
+{
+ struct mpi3_create_request_queue_request create_req;
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
+ int retval = 0;
+ u16 req_qid = 0;
+
+ req_qid = op_req_q->qid;
+
+ if (req_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
+ req_qid);
+
+ return retval;
+ }
+ req_qid = idx + 1;
+
+ op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
+ op_req_q->ci = 0;
+ op_req_q->pi = 0;
+ op_req_q->reply_qid = reply_qid;
+ spin_lock_init(&op_req_q->q_lock);
+
+ if (!op_req_q->q_segments) {
+ retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
+ if (retval) {
+ mpi3mr_free_op_req_q_segments(mrioc, idx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
+ create_req.queue_id = cpu_to_le16(req_qid);
+ if (mrioc->enable_segqueue) {
+ create_req.flags =
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segments[0].segment_dma);
+ create_req.reply_queue_id = cpu_to_le16(reply_qid);
+ create_req.size = cpu_to_le16(op_req_q->num_requests);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "create request queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_req_q->qid = req_qid;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_queues - create operational queue pairs
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for operational queue metadata and call the
+ * create request and reply queue functions.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u16 num_queues = 0, i = 0, msix_count_op_q = 1;
+
+ num_queues = min_t(int, mrioc->facts.max_op_reply_q,
+ mrioc->facts.max_op_req_q);
+
+ msix_count_op_q =
+ mrioc->intr_info_count - mrioc->op_reply_q_offset;
+ if (!mrioc->num_queues)
+ mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
+ /*
+ * During reset set the num_queues to the number of queues
+ * that was set before the reset.
+ */
+ num_queues = mrioc->num_op_reply_q ?
+ mrioc->num_op_reply_q : mrioc->num_queues;
+ ioc_info(mrioc, "trying to create %d operational queue pairs\n",
+ num_queues);
+
+ if (!mrioc->req_qinfo) {
+ mrioc->req_qinfo = kcalloc(num_queues,
+ sizeof(struct op_req_qinfo), GFP_KERNEL);
+ if (!mrioc->req_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->op_reply_qinfo = kcalloc(num_queues,
+ sizeof(struct op_reply_qinfo), GFP_KERNEL);
+ if (!mrioc->op_reply_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
+ if (mrioc->enable_segqueue)
+ ioc_info(mrioc,
+ "allocating operational queues through segmented queues\n");
+
+ for (i = 0; i < num_queues; i++) {
+ if (mpi3mr_create_op_reply_q(mrioc, i)) {
+ ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
+ break;
+ }
+ if (mpi3mr_create_op_req_q(mrioc, i,
+ mrioc->op_reply_qinfo[i].qid)) {
+ ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
+ mpi3mr_delete_op_reply_q(mrioc, i);
+ break;
+ }
+ }
+
+ if (i == 0) {
+ /* Not even one queue was created successfully */
+ retval = -1;
+ goto out_failed;
+ }
+ mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
+ ioc_info(mrioc,
+ "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
+ mrioc->num_op_reply_q, mrioc->default_qcount,
+ mrioc->active_poll_qcount);
+
+ return retval;
+out_failed:
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_op_request_post - Post request to operational queue
+ * @mrioc: Adapter reference
+ * @op_req_q: Operational request queue info
+ * @req: MPI3 request
+ *
+ * Post the MPI3 request into the operational request queue and
+ * inform the controller; if the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *op_req_q, u8 *req)
+{
+ u16 pi = 0, max_entries, reply_qidx = 0, midx;
+ int retval = 0;
+ unsigned long flags;
+ u8 *req_entry;
+ void *segment_base_addr;
+ u16 req_sz = mrioc->facts.op_req_sz;
+ struct segments *segments = op_req_q->q_segments;
+
+ reply_qidx = op_req_q->reply_qid - 1;
+
+ if (mrioc->unrecoverable)
+ return -EFAULT;
+
+ spin_lock_irqsave(&op_req_q->q_lock, flags);
+ pi = op_req_q->pi;
+ max_entries = op_req_q->num_requests;
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
+ reply_qidx, mrioc->op_reply_q_offset);
+ mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "OpReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
+ req_entry = (u8 *)segment_base_addr +
+ ((pi % op_req_q->segment_qd) * req_sz);
+
+ memset(req_entry, 0, req_sz);
+ memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
+
+ if (++pi == max_entries)
+ pi = 0;
+ op_req_q->pi = pi;
+
+#ifndef CONFIG_PREEMPT_RT
+ if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
+ > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
+ mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+#else
+ atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
+#endif
+
+ writel(op_req_q->pi,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
+
+out:
+ spin_unlock_irqrestore(&op_req_q->q_lock, flags);
+ return retval;
+}
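+
+/*
+ * Index arithmetic example for the post routine above, with assumed
+ * values segment_qd = 32, req_sz = 128 and max_entries = 512: a
+ * request posted at pi = 70 is copied into segment 70 / 32 = 2 at
+ * byte offset (70 % 32) * 128 = 768, and pi wraps from 511 back to 0
+ * after the increment before being written to the producer index
+ * register.
+ */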
+
+/**
+ * mpi3mr_check_rh_fault_ioc - check reset history and fault
+ * controller
+ * @mrioc: Adapter instance reference
+ * @reason_code: reason code for the fault.
+ *
+ * This routine saves a snapdump and faults the controller with
+ * the given reason code if it is not already faulted or
+ * asynchronously reset. It is used to handle initialization-time
+ * faults/resets/timeouts, since in those cases an immediate soft
+ * reset invocation is not required.
+ *
+ * Return: None.
+ */
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
+{
+ u32 ioc_status, host_diagnostic, timeout;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "controller is unrecoverable\n");
+ return;
+ }
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present\n");
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ return;
+ }
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ reason_code);
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ do {
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+}
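+
+/*
+ * Timing note for the loop above: it polls at most
+ * MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10 times with a 100 ms sleep per
+ * iteration, i.e. it waits up to MPI3_SYSIF_DIAG_SAVE_TIMEOUT seconds
+ * for the snapdump save to complete before returning.
+ */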
+
+/**
+ * mpi3mr_sync_timestamp - Issue time stamp sync request
+ * @mrioc: Adapter reference
+ *
+ * Issue an IO unit control MPI request to synchronize the
+ * firmware timestamp with the host time.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
+{
+ ktime_t current_time;
+ struct mpi3_iounit_control_request iou_ctrl;
+ int retval = 0;
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
+ current_time = ktime_get_real();
+ iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
+ sizeof(iou_ctrl), 0);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
+ mrioc->init_cmds.is_waiting = 0;
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_print_pkg_ver - display controller fw package version
+ * @mrioc: Adapter reference
+ *
+ * Retrieve firmware package version from the component image
+ * header of the controller flash and display it.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ci_upload_request ci_upload;
+ int retval = -1;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ struct mpi3_ci_manifest_mpi *manifest;
+ u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(&ci_upload, 0, sizeof(ci_upload));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "sending get package version failed due to command in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
+ ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
+ ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
+ ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
+ ci_upload.segment_size = cpu_to_le32(data_len);
+
+ mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
+ data_dma);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
+ sizeof(ci_upload), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting get package version failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "get package version timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ == MPI3_IOCSTATUS_SUCCESS) {
+ manifest = (struct mpi3_ci_manifest_mpi *) data;
+ if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
+ ioc_info(mrioc,
+ "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
+ manifest->package_version.gen_major,
+ manifest->package_version.gen_minor,
+ manifest->package_version.phase_major,
+ manifest->package_version.phase_minor,
+ manifest->package_version.customer_id,
+ manifest->package_version.build_num);
+ }
+ }
+ retval = 0;
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data,
+ data_dma);
+ return retval;
+}
+
+/**
+ * mpi3mr_watchdog_work - watchdog thread to monitor faults
+ * @work: work struct
+ *
+ * Watchdog work executed periodically (at a 1 second interval)
+ * to monitor for firmware faults and to issue a periodic
+ * timestamp sync to the firmware.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_watchdog_work(struct work_struct *work)
+{
+ struct mpi3mr_ioc *mrioc =
+ container_of(work, struct mpi3mr_ioc, watchdog_work.work);
+ unsigned long flags;
+ enum mpi3mr_iocstate ioc_state;
+ u32 fault, host_diagnostic, ioc_status;
+ u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
+ ioc_err(mrioc, "watchdog could not detect the controller\n");
+ mrioc->unrecoverable = 1;
+ }
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc,
+ "flush pending commands for unrecoverable controller\n");
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ return;
+ }
+
+ if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
+ mrioc->ts_update_counter = 0;
+ mpi3mr_sync_timestamp(mrioc);
+ }
+
+ if ((mrioc->prepare_for_reset) &&
+ ((mrioc->prepare_for_reset_timeout_counter++) >=
+ MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
+ return;
+ }
+
+ /* Check for the fault state every second and issue a soft reset */
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_FAULT)
+ goto schedule_work;
+
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "diag save in progress\n");
+ }
+ if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ }
+
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
+ switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
+ case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
+ ioc_warn(mrioc,
+ "controller requires system power cycle, marking controller as unrecoverable\n");
+ mrioc->unrecoverable = 1;
+ goto schedule_work;
+ case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+ return;
+ case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ break;
+ default:
+ break;
+ }
+ mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
+ return;
+
+schedule_work:
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ return;
+}
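+
+/*
+ * Cadence sketch for the watchdog above: the work is re-armed every
+ * MPI3MR_WATCHDOG_INTERVAL milliseconds (assumed to be 1000, matching
+ * the 1 second interval documented above), so the timestamp sync
+ * fires once every MPI3MR_TSUPDATE_INTERVAL watchdog passes, i.e.
+ * roughly every MPI3MR_TSUPDATE_INTERVAL seconds.
+ */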
+
+/**
+ * mpi3mr_start_watchdog - Start watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Create and start the watchdog thread to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ if (mrioc->watchdog_work_q)
+ return;
+
+ INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
+ snprintf(mrioc->watchdog_work_q_name,
+ sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
+ mrioc->id);
+ mrioc->watchdog_work_q =
+ create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+ if (!mrioc->watchdog_work_q) {
+ ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
+ return;
+ }
+
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+}
+
+/**
+ * mpi3mr_stop_watchdog - Stop watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Stop the watchdog thread created to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ wq = mrioc->watchdog_work_q;
+ mrioc->watchdog_work_q = NULL;
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpi3mr_setup_admin_qpair - Setup admin queue pair
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for admin queue pair if required and register
+ * the admin queue with the controller.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 num_admin_entries = 0;
+
+ mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
+ mrioc->num_admin_req = mrioc->admin_req_q_sz /
+ MPI3MR_ADMIN_REQ_FRAME_SZ;
+ mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
+ mrioc->admin_req_base = NULL;
+
+ mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
+ mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
+ MPI3MR_ADMIN_REPLY_FRAME_SZ;
+ mrioc->admin_reply_ci = 0;
+ mrioc->admin_reply_ephase = 1;
+ mrioc->admin_reply_base = NULL;
+
+ if (!mrioc->admin_req_base) {
+ mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
+
+ if (!mrioc->admin_req_base) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
+ GFP_KERNEL);
+
+ if (!mrioc->admin_reply_base) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
+ num_admin_entries = (mrioc->num_admin_replies << 16) |
+ (mrioc->num_admin_req);
+ writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
+ mpi3mr_writeq(mrioc->admin_req_dma,
+ &mrioc->sysif_regs->admin_request_queue_address);
+ mpi3mr_writeq(mrioc->admin_reply_dma,
+ &mrioc->sysif_regs->admin_reply_queue_address);
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+ writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ return retval;
+
+out_failed:
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+ return retval;
+}
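+
+/*
+ * Register packing example for the admin queue setup above: with an
+ * assumed num_admin_replies = 128 and num_admin_req = 64, the value
+ * written to admin_queue_num_entries is (128 << 16) | 64 =
+ * 0x00800040, i.e. reply depth in the upper 16 bits and request
+ * depth in the lower 16 bits.
+ */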
+
+/**
+ * mpi3mr_issue_iocfacts - Send IOC Facts
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Issue an IOC Facts MPI request through the admin queue and
+ * wait for it to complete or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ struct mpi3_ioc_facts_request iocfacts_req;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*facts_data);
+ int retval = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+
+ if (!data) {
+ retval = -1;
+ goto out;
+ }
+
+ memset(&iocfacts_req, 0, sizeof(iocfacts_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
+
+ mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
+ data_dma);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
+ sizeof(iocfacts_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "ioc_facts timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ memcpy(facts_data, (u8 *)data, data_len);
+ mpi3mr_process_factsdata(mrioc, facts_data);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_reset_dma_mask - Check and set new DMA mask
+ * @mrioc: Adapter instance reference
+ *
+ * Check whether the new DMA mask requested by the firmware
+ * through IOCFacts needs to be set; if so, set it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ int r;
+ u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
+
+ if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
+ return 0;
+
+ ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
+ mrioc->dma_mask, facts_dma_mask);
+
+ r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
+ if (r) {
+ ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
+ facts_dma_mask, r);
+ return r;
+ }
+ mrioc->dma_mask = facts_dma_mask;
+ return r;
+}
+
+/**
+ * mpi3mr_process_factsdata - Process IOC facts data
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Convert IOC facts data into CPU endianness and cache it in
+ * the driver.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ u32 ioc_config, req_sz, facts_flags;
+
+ if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
+ (sizeof(*facts_data) / 4)) {
+ ioc_warn(mrioc,
+ "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
+ sizeof(*facts_data),
+ le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
+ }
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
+ if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
+ ioc_err(mrioc,
+ "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
+ req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
+ }
+
+ memset(&mrioc->facts, 0, sizeof(mrioc->facts));
+
+ facts_flags = le32_to_cpu(facts_data->flags);
+ mrioc->facts.op_req_sz = req_sz;
+ mrioc->op_reply_desc_sz = 1 << ((ioc_config &
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
+
+ mrioc->facts.ioc_num = facts_data->ioc_number;
+ mrioc->facts.who_init = facts_data->who_init;
+ mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
+ mrioc->facts.personality = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.protocol_flags = facts_data->protocol_flags;
+ mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
+ mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
+ mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
+ mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
+ mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
+ mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
+ mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
+ mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
+ mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
+ mrioc->facts.max_pcie_switches =
+ le16_to_cpu(facts_data->max_pcie_switches);
+ mrioc->facts.max_sasexpanders =
+ le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_sasinitiators =
+ le16_to_cpu(facts_data->max_sas_initiators);
+ mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
+ mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
+ mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
+ mrioc->facts.max_op_req_q =
+ le16_to_cpu(facts_data->max_operational_request_queues);
+ mrioc->facts.max_op_reply_q =
+ le16_to_cpu(facts_data->max_operational_reply_queues);
+ mrioc->facts.ioc_capabilities =
+ le32_to_cpu(facts_data->ioc_capabilities);
+ mrioc->facts.fw_ver.build_num =
+ le16_to_cpu(facts_data->fw_version.build_num);
+ mrioc->facts.fw_ver.cust_id =
+ le16_to_cpu(facts_data->fw_version.customer_id);
+ mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
+ mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
+ mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
+ mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
+ mrioc->msix_count = min_t(int, mrioc->msix_count,
+ mrioc->facts.max_msix_vectors);
+ mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
+ mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
+ mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
+ mrioc->facts.shutdown_timeout =
+ le16_to_cpu(facts_data->shutdown_timeout);
+
+ mrioc->facts.max_dev_per_tg =
+ facts_data->max_devices_per_throttle_group;
+ mrioc->facts.io_throttle_data_length =
+ le16_to_cpu(facts_data->io_throttle_data_length);
+ mrioc->facts.max_io_throttle_group =
+ le16_to_cpu(facts_data->max_io_throttle_group);
+ mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
+ mrioc->facts.io_throttle_high =
+ le16_to_cpu(facts_data->io_throttle_high);
+
+ /* Store in 512b block count */
+ if (mrioc->facts.io_throttle_data_length)
+ mrioc->io_throttle_data_length =
+ (mrioc->facts.io_throttle_data_length * 2 * 4);
+ else
+ /* set the length to 1MB + 1K to disable throttle */
+ mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+
+ mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
+ mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
+
+ ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
+ mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
+ mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
+ ioc_info(mrioc,
+ "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
+ mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
+ mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
+ ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
+ mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
+ mrioc->facts.sge_mod_shift);
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ mrioc->facts.dma_mask, (facts_flags &
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ ioc_info(mrioc,
+ "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
+ mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
+ ioc_info(mrioc,
+ "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
+ mrioc->facts.io_throttle_data_length * 4,
+ mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
+}
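+
+/*
+ * Unit-conversion sketch for the throttle values cached above, which
+ * are stored as 512-byte block counts:
+ *
+ *   io_throttle_data_length is reported in 4 KiB units, so * 2 * 4;
+ *   e.g. 1024 units -> 8192 blocks (4 MiB).
+ *   io_throttle_high/low are reported in MiB, so * 2 * 1024;
+ *   e.g. 8 MiB -> 16384 blocks.
+ */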
+
+/**
+ * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate and initialize the reply free buffers, sense
+ * buffers, reply free queue and sense buffer queue.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+
+ if (mrioc->init_cmds.reply)
+ return retval;
+
+ mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->init_cmds.reply)
+ goto out_failed;
+
+ mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->bsg_cmds.reply)
+ goto out_failed;
+
+ mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->transport_cmds.reply)
+ goto out_failed;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->dev_rmhs_cmds[i].reply)
+ goto out_failed;
+ }
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->host_tm_cmds.reply)
+ goto out_failed;
+
+ mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->pel_cmds.reply)
+ goto out_failed;
+
+ mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->pel_abort_cmd.reply)
+ goto out_failed;
+
+ mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ mrioc->dev_handle_bitmap_sz++;
+ mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->removepend_bitmap)
+ goto out_failed;
+
+ mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
+ if (MPI3MR_NUM_DEVRMCMD % 8)
+ mrioc->devrem_bitmap_sz++;
+ mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->devrem_bitmap)
+ goto out_failed;
+
+ mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
+ if (MPI3MR_NUM_EVTACKCMD % 8)
+ mrioc->evtack_cmds_bitmap_sz++;
+ mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds_bitmap)
+ goto out_failed;
+
+ mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
+ mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
+ mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
+ mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
+
+ /* reply buffer pool, 16 byte align */
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
+ mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->reply_buf_pool) {
+ ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
+ &mrioc->reply_buf_dma);
+ if (!mrioc->reply_buf)
+ goto out_failed;
+
+ mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
+
+ /* reply free queue, 8 byte align */
+ sz = mrioc->reply_free_qsz * 8;
+ mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->reply_free_q_pool) {
+ ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
+ GFP_KERNEL, &mrioc->reply_free_q_dma);
+ if (!mrioc->reply_free_q)
+ goto out_failed;
+
+ /* sense buffer pool, 4 byte align */
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
+ mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
+ &mrioc->pdev->dev, sz, 4, 0);
+ if (!mrioc->sense_buf_pool) {
+ ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
+ &mrioc->sense_buf_dma);
+ if (!mrioc->sense_buf)
+ goto out_failed;
+
+ /* sense buffer queue, 8 byte align */
+ sz = mrioc->sense_buf_q_sz * 8;
+ mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->sense_buf_q_pool) {
+ ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
+ GFP_KERNEL, &mrioc->sense_buf_q_dma);
+ if (!mrioc->sense_buf_q)
+ goto out_failed;
+
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpimr_initialize_reply_sbuf_queues - initialize reply and
+ * sense buffer queues
+ * @mrioc: Adapter instance reference
+ *
+ * Helper function to initialize reply and sense buffers along
+ * with some debug prints.
+ *
+ * Return: None.
+ */
+static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
+{
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
+ ioc_info(mrioc,
+ "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
+ (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
+ sz = mrioc->reply_free_qsz * 8;
+ ioc_info(mrioc,
+ "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
+ (unsigned long long)mrioc->reply_free_q_dma);
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
+ ioc_info(mrioc,
+ "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
+ (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
+ sz = mrioc->sense_buf_q_sz * 8;
+ ioc_info(mrioc,
+ "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
+ (unsigned long long)mrioc->sense_buf_q_dma);
+
+ /* initialize Reply buffer Queue */
+ for (i = 0, phy_addr = mrioc->reply_buf_dma;
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
+ mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
+ mrioc->reply_free_q[i] = cpu_to_le64(0);
+
+ /* initialize Sense Buffer Queue */
+ for (i = 0, phy_addr = mrioc->sense_buf_dma;
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
+ mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
+ mrioc->sense_buf_q[i] = cpu_to_le64(0);
+}
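+
+/*
+ * Sizing note for the queues initialized above: reply_free_qsz and
+ * sense_buf_q_sz are one larger than the buffer counts and the extra
+ * (last) entry is left zeroed, the usual circular-queue convention
+ * that keeps one slot unused so producer == consumer can
+ * unambiguously mean "empty".
+ */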
+
+/**
+ * mpi3mr_issue_iocinit - Send IOC Init
+ * @mrioc: Adapter instance reference
+ *
+ * Issue an IOC Init MPI request through the admin queue and
+ * wait for it to complete or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ioc_init_request iocinit_req;
+ struct mpi3_driver_info_layout *drv_info;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*drv_info);
+ int retval = 0;
+ ktime_t current_time;
+
+ drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!drv_info) {
+ retval = -1;
+ goto out;
+ }
+ mpimr_initialize_reply_sbuf_queues(mrioc);
+
+ drv_info->information_length = cpu_to_le32(data_len);
+ strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
+ strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
+ strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
+ strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
+ strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
+ strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
+ sizeof(drv_info->driver_release_date));
+ drv_info->driver_capabilities = 0;
+ memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
+ sizeof(mrioc->driver_info));
+
+ memset(&iocinit_req, 0, sizeof(iocinit_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
+ iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
+ iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
+ iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
+ iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
+ iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
+ iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
+ iocinit_req.reply_free_queue_address =
+ cpu_to_le64(mrioc->reply_free_q_dma);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
+ iocinit_req.sense_buffer_free_queue_depth =
+ cpu_to_le16(mrioc->sense_buf_q_sz);
+ iocinit_req.sense_buffer_free_queue_address =
+ cpu_to_le64(mrioc->sense_buf_q_dma);
+ iocinit_req.driver_information_address = cpu_to_le64(data_dma);
+
+ current_time = ktime_get_real();
+ iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
+ sizeof(iocinit_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
+ ioc_err(mrioc, "ioc_init timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (drv_info)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
+ data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_unmask_events - Unmask events in event mask bitmap
+ * @mrioc: Adapter instance reference
+ * @event: MPI event ID
+ *
+ * Unmask the specific event by clearing the corresponding bit
+ * in the event_masks bitmap.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
+{
+ u32 desired_event;
+ u8 word;
+
+ if (event >= 128)
+ return;
+
+ desired_event = (1 << (event % 32));
+ word = event / 32;
+
+ mrioc->event_masks[word] &= ~desired_event;
+}
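+
+/*
+ * Bit arithmetic example for the unmask above: event 38 (0x26)
+ * clears bit 38 % 32 = 6 in event_masks[38 / 32] = event_masks[1].
+ * The event >= 128 guard corresponds to the assumed four 32-bit
+ * words of the mask (MPI3_EVENT_NOTIFY_EVENTMASK_WORDS).
+ */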
+
+/**
+ * mpi3mr_issue_event_notification - Send event notification
+ * @mrioc: Adapter instance reference
+ *
+ * Issue an event notification MPI request through the admin
+ * queue and wait for it to complete or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_event_notification_request evtnotify_req;
+ int retval = 0;
+ u8 i;
+
+ memset(&evtnotify_req, 0, sizeof(evtnotify_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ evtnotify_req.event_masks[i] =
+ cpu_to_le32(mrioc->event_masks[i]);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
+ sizeof(evtnotify_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "event notification timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_event_ack - Process event acknowledgment
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ *
+ * Send event acknowledgment through admin queue and wait for
+ * it to complete.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_chain_bufs - Allocate chain buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate chain buffers and set a bitmap to indicate free
+ * chain buffers. Chain buffers are used to pass the SGE
+ * information along with MPI3 SCSI IO requests for host I/O.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+ u16 num_chains;
+
+ if (mrioc->chain_sgl_list)
+ return retval;
+
+ num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
+
+ if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION))
+ num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
+
+ mrioc->chain_buf_count = num_chains;
+ sz = sizeof(struct chain_element) * num_chains;
+ mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->chain_sgl_list)
+ goto out_failed;
+
+ sz = MPI3MR_PAGE_SIZE_4K;
+ mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->chain_buf_pool) {
+ ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ mrioc->chain_sgl_list[i].addr =
+ dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
+ &mrioc->chain_sgl_list[i].dma_addr);
+
+ if (!mrioc->chain_sgl_list[i].addr)
+ goto out_failed;
+ }
+ mrioc->chain_bitmap_sz = num_chains / 8;
+ if (num_chains % 8)
+ mrioc->chain_bitmap_sz++;
+ mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
+ if (!mrioc->chain_bitmap)
+ goto out_failed;
+ return retval;
+out_failed:
+ retval = -1;
+ return retval;
+}
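+
+/*
+ * Bitmap sizing example matching the round-up above: for an assumed
+ * num_chains = 100, chain_bitmap_sz = 100 / 8 = 12 plus 1 for the
+ * 100 % 8 = 4 leftover bits, i.e. 13 bytes, the open-coded
+ * equivalent of DIV_ROUND_UP(num_chains, 8).
+ */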
+
+/**
+ * mpi3mr_port_enable_complete - Mark port enable complete
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Callback for the asynchronous port enable request; it sets
+ * the driver command state to indicate that the port enable
+ * request is complete.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ drv_cmd->callback = NULL;
+ mrioc->scan_started = 0;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ else
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+}
+
+/**
+ * mpi3mr_issue_port_enable - Issue Port Enable
+ * @mrioc: Adapter instance reference
+ * @async: Flag to wait for completion or not
+ *
+ * Issue a Port Enable MPI request through the admin queue; if
+ * the async flag is not set, wait for the port enable to
+ * complete or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
+{
+ struct mpi3_port_enable_request pe_req;
+ int retval = 0;
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+
+ memset(&pe_req, 0, sizeof(pe_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ if (async) {
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
+ } else {
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ init_completion(&mrioc->init_cmds.done);
+ }
+ pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
+
+ retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
+ goto out_unlock;
+ }
+ if (async) {
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "port enable timed out\n");
+ retval = -1;
+ mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
+ goto out_unlock;
+ }
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/* Protocol type to name mapper structure */
+static const struct {
+ u8 protocol;
+ char *name;
+} mpi3mr_protocols[] = {
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
+ { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
+};
+
+/* Capability to name mapper structure */
+static const struct {
+ u32 capability;
+ char *name;
+} mpi3mr_capabilities[] = {
+ { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+ { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
+};
+
+/**
+ * mpi3mr_print_ioc_info - Display controller information
+ * @mrioc: Adapter instance reference
+ *
+ * Display the controller personality, capabilities, supported
+ * protocols, etc.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
+{
+ int i = 0, bytes_written = 0;
+ char personality[16];
+ char protocol[50] = {0};
+ char capabilities[100] = {0};
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ switch (mrioc->facts.personality) {
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
+ strncpy(personality, "Enhanced HBA", sizeof(personality));
+ break;
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
+ strncpy(personality, "RAID", sizeof(personality));
+ break;
+ default:
+ strncpy(personality, "Unknown", sizeof(personality));
+ break;
+ }
+
+ ioc_info(mrioc, "Running in %s Personality", personality);
+
+ ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
+ if (mrioc->facts.protocol_flags &
+ mpi3mr_protocols[i].protocol) {
+ bytes_written += scnprintf(protocol + bytes_written,
+ sizeof(protocol) - bytes_written, "%s%s",
+ bytes_written ? "," : "",
+ mpi3mr_protocols[i].name);
+ }
+ }
+
+ bytes_written = 0;
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
+ if (mrioc->facts.protocol_flags &
+ mpi3mr_capabilities[i].capability) {
+ bytes_written += scnprintf(capabilities + bytes_written,
+ sizeof(capabilities) - bytes_written, "%s%s",
+ bytes_written ? "," : "",
+ mpi3mr_capabilities[i].name);
+ }
+ }
+
+ ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
+ protocol, capabilities);
+}
+
+/**
+ * mpi3mr_cleanup_resources - Free PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Unmap PCI device memory and disable PCI device.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (mrioc->sysif_regs) {
+ iounmap((void __iomem *)mrioc->sysif_regs);
+ mrioc->sysif_regs = NULL;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ if (mrioc->bars)
+ pci_release_selected_regions(pdev, mrioc->bars);
+ pci_disable_device(pdev);
+ }
+}
+
+/**
+ * mpi3mr_setup_resources - Enable PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Enable PCI device memory, MSI-x registers and set DMA mask.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ u32 memap_sz = 0;
+ int i, retval = 0, capb = 0;
+ u16 message_control;
+ u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
+ (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
+ (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+
+ if (pci_enable_device_mem(pdev)) {
+ ioc_err(mrioc, "pci_enable_device_mem: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!capb) {
+ ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+ mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_request_selected_regions(pdev, mrioc->bars,
+ mrioc->driver_name)) {
+ ioc_err(mrioc, "pci_request_selected_regions: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
+ memap_sz = pci_resource_len(pdev, i);
+ mrioc->sysif_regs =
+ ioremap(mrioc->sysif_regs_phys, memap_sz);
+ break;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (retval) {
+ if (dma_mask != DMA_BIT_MASK(32)) {
+ ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
+ dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_mask_and_coherent(&pdev->dev,
+ dma_mask);
+ }
+ if (retval) {
+ mrioc->dma_mask = 0;
+ ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
+ goto out_failed;
+ }
+ }
+ mrioc->dma_mask = dma_mask;
+
+ if (!mrioc->sysif_regs) {
+ ioc_err(mrioc,
+ "Unable to map adapter memory or resource not found\n");
+ retval = -EINVAL;
+ goto out_failed;
+ }
+
+ pci_read_config_word(pdev, capb + 2, &message_control);
+ mrioc->msix_count = (message_control & 0x3FF) + 1;
+
+ pci_save_state(pdev);
+
+ pci_set_drvdata(pdev, mrioc->shost);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ (unsigned long long)mrioc->sysif_regs_phys,
+ mrioc->sysif_regs, memap_sz);
+ ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
+ mrioc->msix_count);
+
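+ /*
+ * When polled I/O is requested, carve poll queues out of the MSI-X
+ * budget but keep at least two vectors for non-polled (admin and
+ * default reply queue) completion processing.
+ */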
+ if (!reset_devices && poll_queues > 0)
+ mrioc->requested_poll_qcount = min_t(int, poll_queues,
+ mrioc->msix_count - 2);
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_resources(mrioc);
+ return retval;
+}
+
+/**
+ * mpi3mr_enable_events - Enable required events
+ * @mrioc: Adapter instance reference
+ *
+ * This routine unmasks the events required by the driver by
+ * sending the appropriate event mask bitmap through an event
+ * notification request.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 i;
+
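+ /* Mask all events by default; selectively unmask the ones handled below. */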
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval)
+ ioc_err(mrioc, "failed to issue event notification %d\n",
+ retval);
+ return retval;
+}
+
+/**
+ * mpi3mr_init_ioc - Initialize the controller
+ * @mrioc: Adapter instance reference
+ *
+ * This is the controller initialization routine, executed either
+ * after a soft reset or from the PCI probe callback.
+ * It sets up the required resources, memory maps the controller
+ * registers, creates admin and operational reply queue pairs,
+ * allocates required memory for the reply pool and sense buffer
+ * pool, issues an IOC init request to the firmware, unmasks the
+ * events and issues port enable to discover SAS/SATA/NVMe
+ * devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+ u32 sz;
+
+retry_init:
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+
+ mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
+
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
+ mrioc->sas_transport_enabled = 1;
+ mrioc->scsi_device_channel = 1;
+ mrioc->shost->max_channel = 1;
+ mrioc->shost->transportt = mpi3mr_transport_template;
+ }
+
+ mrioc->reply_sz = mrioc->facts.reply_sz;
+
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ mpi3mr_print_ioc_info(mrioc);
+
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
+ goto out_failed;
+ }
+
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!mrioc->pel_seqnum_virt) {
+ dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
+ mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+ mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+ GFP_KERNEL);
+ if (!mrioc->pel_seqnum_virt) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ if (!mrioc->throttle_groups) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ retval = mpi3mr_enable_events(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to enable events %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "controller initialization completed successfully\n");
+ return retval;
+out_failed:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
+ retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller initialization failed\n");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_reinit_ioc - Re-Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @is_resume: Called from resume or reset path
+ *
+ * This is the controller re-initialization routine, executed
+ * from the soft reset handler or the resume callback. It creates
+ * operational reply queue pairs, allocates required memory for
+ * the reply pool and sense buffer pool, issues an IOC init
+ * request to the firmware, unmasks the events and issues port
+ * enable to discover SAS/SATA/NVMe devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+ u32 pe_timeout, ioc_status;
+
+retry_init:
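+ /*
+ * Port enable is polled in MPI3MR_PORTENABLE_POLL_INTERVAL second
+ * steps; compute how many iterations fit in the overall timeout.
+ */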
+ pe_timeout =
+ (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
+
+ dprint_reset(mrioc, "bringing up the controller to ready state\n");
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to bring to ready state\n");
+ goto out_failed_noretry;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up single ISR\n");
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup ISR\n");
+ goto out_failed_noretry;
+ }
+ } else
+ mpi3mr_ioc_enable_intr(mrioc);
+
+ dprint_reset(mrioc, "getting ioc_facts\n");
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "failed to get ioc_facts\n");
+ goto out_failed;
+ }
+
+ dprint_reset(mrioc, "validating ioc_facts\n");
+ retval = mpi3mr_revalidate_factsdata(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
+ goto out_failed_noretry;
+ }
+
+ mpi3mr_print_ioc_info(mrioc);
+
+ dprint_reset(mrioc, "sending ioc_init\n");
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to send ioc_init\n");
+ goto out_failed;
+ }
+
+ dprint_reset(mrioc, "getting package version\n");
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
+ goto out_failed;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up multiple ISR\n");
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "failed to re-setup ISR\n");
+ goto out_failed_noretry;
+ }
+ }
+
+ dprint_reset(mrioc, "creating operational queue pairs\n");
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to create operational queue pairs\n");
+ goto out_failed;
+ }
+
+ if (!mrioc->pel_seqnum_virt) {
+ dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
+ mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+ mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+ GFP_KERNEL);
+ if (!mrioc->pel_seqnum_virt) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot create minimum number of operational queues expected:%d created:%d\n",
+ mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+ retval = -1;
+ goto out_failed_noretry;
+ }
+
+ dprint_reset(mrioc, "enabling events\n");
+ retval = mpi3mr_enable_events(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to enable events\n");
+ goto out_failed;
+ }
+
+ mrioc->device_refresh_on = 1;
+ mpi3mr_add_event_wait_for_device_refresh(mrioc);
+
+ ioc_info(mrioc, "sending port enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "failed to issue port enable\n");
+ goto out_failed;
+ }
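+ /*
+ * Poll until the firmware completes the port enable request
+ * (init_cmds returns to NOTUSED); bail out early on device loss
+ * or an IOC fault/reset-history condition.
+ */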
+ do {
+ ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
+ if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
+ break;
+ if (!pci_device_is_present(mrioc->pdev))
+ mrioc->unrecoverable = 1;
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ }
+ } while (--pe_timeout);
+
+ if (!pe_timeout) {
+ ioc_err(mrioc, "port enable timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ } else if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable completed successfully\n");
+
+ ioc_info(mrioc, "controller %s completed successfully\n",
+ (is_resume)?"resume":"re-initialization");
+ return retval;
+out_failed:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
+ (is_resume)?"resume":"re-initialization", retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller %s is failed\n",
+ (is_resume)?"resume":"re-initialization");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational reply queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_reply_q->q_segments)
+ return;
+
+ size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational request queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_req_q->q_segments)
+ return;
+
+ size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_buffers - memset memory for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Clears all the memory allocated for a controller, typically
+ * called post reset to reuse the memory allocated during
+ * controller init.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ struct mpi3mr_throttle_group_info *tg;
+
+ mrioc->change_count = 0;
+ mrioc->active_poll_qcount = 0;
+ mrioc->default_qcount = 0;
+ if (mrioc->admin_req_base)
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ if (mrioc->admin_reply_base)
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+
+ if (mrioc->init_cmds.reply) {
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->bsg_cmds.reply, 0,
+ sizeof(*mrioc->bsg_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ memset(mrioc->pel_cmds.reply, 0,
+ sizeof(*mrioc->pel_cmds.reply));
+ memset(mrioc->pel_abort_cmd.reply, 0,
+ sizeof(*mrioc->pel_abort_cmd.reply));
+ memset(mrioc->transport_cmds.reply, 0,
+ sizeof(*mrioc->transport_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ memset(mrioc->evtack_cmds[i].reply, 0,
+ sizeof(*mrioc->evtack_cmds[i].reply));
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0,
+ mrioc->evtack_cmds_bitmap_sz);
+ }
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ mrioc->op_reply_qinfo[i].qid = 0;
+ mrioc->op_reply_qinfo[i].ci = 0;
+ mrioc->op_reply_qinfo[i].num_replies = 0;
+ mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
+ mpi3mr_memset_op_reply_q_buffers(mrioc, i);
+
+ mrioc->req_qinfo[i].ci = 0;
+ mrioc->req_qinfo[i].pi = 0;
+ mrioc->req_qinfo[i].num_requests = 0;
+ mrioc->req_qinfo[i].qid = 0;
+ mrioc->req_qinfo[i].reply_qid = 0;
+ spin_lock_init(&mrioc->req_qinfo[i].q_lock);
+ mpi3mr_memset_op_req_q_buffers(mrioc, i);
+ }
+
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+ if (mrioc->throttle_groups) {
+ tg = mrioc->throttle_groups;
+ for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
+ tg->id = 0;
+ tg->fw_qd = 0;
+ tg->modified_qd = 0;
+ tg->io_divert = 0;
+ tg->need_qd_reduction = 0;
+ tg->high = 0;
+ tg->low = 0;
+ tg->qd_reduction = 0;
+ atomic_set(&tg->pend_large_data_sz, 0);
+ }
+ }
+}
+
+/**
+ * mpi3mr_free_mem - Free memory allocated for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Free all the memory allocated for a controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ struct mpi3mr_intr_info *intr_info;
+
+ mpi3mr_free_enclosure_list(mrioc);
+
+ if (mrioc->sense_buf_pool) {
+ if (mrioc->sense_buf)
+ dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
+ mrioc->sense_buf_dma);
+ dma_pool_destroy(mrioc->sense_buf_pool);
+ mrioc->sense_buf = NULL;
+ mrioc->sense_buf_pool = NULL;
+ }
+ if (mrioc->sense_buf_q_pool) {
+ if (mrioc->sense_buf_q)
+ dma_pool_free(mrioc->sense_buf_q_pool,
+ mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
+ dma_pool_destroy(mrioc->sense_buf_q_pool);
+ mrioc->sense_buf_q = NULL;
+ mrioc->sense_buf_q_pool = NULL;
+ }
+
+ if (mrioc->reply_buf_pool) {
+ if (mrioc->reply_buf)
+ dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
+ mrioc->reply_buf_dma);
+ dma_pool_destroy(mrioc->reply_buf_pool);
+ mrioc->reply_buf = NULL;
+ mrioc->reply_buf_pool = NULL;
+ }
+ if (mrioc->reply_free_q_pool) {
+ if (mrioc->reply_free_q)
+ dma_pool_free(mrioc->reply_free_q_pool,
+ mrioc->reply_free_q, mrioc->reply_free_q_dma);
+ dma_pool_destroy(mrioc->reply_free_q_pool);
+ mrioc->reply_free_q = NULL;
+ mrioc->reply_free_q_pool = NULL;
+ }
+
+ for (i = 0; i < mrioc->num_op_req_q; i++)
+ mpi3mr_free_op_req_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ mpi3mr_free_op_reply_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->intr_info_count; i++) {
+ intr_info = mrioc->intr_info + i;
+ intr_info->op_reply_q = NULL;
+ }
+
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+ mrioc->num_op_req_q = 0;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+ mrioc->num_op_reply_q = 0;
+
+ kfree(mrioc->init_cmds.reply);
+ mrioc->init_cmds.reply = NULL;
+
+ kfree(mrioc->bsg_cmds.reply);
+ mrioc->bsg_cmds.reply = NULL;
+
+ kfree(mrioc->host_tm_cmds.reply);
+ mrioc->host_tm_cmds.reply = NULL;
+
+ kfree(mrioc->pel_cmds.reply);
+ mrioc->pel_cmds.reply = NULL;
+
+ kfree(mrioc->pel_abort_cmd.reply);
+ mrioc->pel_abort_cmd.reply = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ kfree(mrioc->evtack_cmds[i].reply);
+ mrioc->evtack_cmds[i].reply = NULL;
+ }
+
+ kfree(mrioc->removepend_bitmap);
+ mrioc->removepend_bitmap = NULL;
+
+ kfree(mrioc->devrem_bitmap);
+ mrioc->devrem_bitmap = NULL;
+
+ kfree(mrioc->evtack_cmds_bitmap);
+ mrioc->evtack_cmds_bitmap = NULL;
+
+ kfree(mrioc->chain_bitmap);
+ mrioc->chain_bitmap = NULL;
+
+ kfree(mrioc->transport_cmds.reply);
+ mrioc->transport_cmds.reply = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ kfree(mrioc->dev_rmhs_cmds[i].reply);
+ mrioc->dev_rmhs_cmds[i].reply = NULL;
+ }
+
+ if (mrioc->chain_buf_pool) {
+ for (i = 0; i < mrioc->chain_buf_count; i++) {
+ if (mrioc->chain_sgl_list[i].addr) {
+ dma_pool_free(mrioc->chain_buf_pool,
+ mrioc->chain_sgl_list[i].addr,
+ mrioc->chain_sgl_list[i].dma_addr);
+ mrioc->chain_sgl_list[i].addr = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->chain_buf_pool);
+ mrioc->chain_buf_pool = NULL;
+ }
+
+ kfree(mrioc->chain_sgl_list);
+ mrioc->chain_sgl_list = NULL;
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+
+ if (mrioc->pel_seqnum_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+ mrioc->pel_seqnum_virt = NULL;
+ }
+
+ kfree(mrioc->logdata_buf);
+ mrioc->logdata_buf = NULL;
+}
+
+/**
+ * mpi3mr_issue_ioc_shutdown - shutdown controller
+ * @mrioc: Adapter instance reference
+ *
+ * Send a shutdown notification to the controller and wait up to
+ * shutdown_timeout for it to complete.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, ioc_status;
+ u8 retval = 1;
+ u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
+
+ ioc_info(mrioc, "Issuing shutdown Notification\n");
+ if (mrioc->unrecoverable) {
+ ioc_warn(mrioc,
+ "IOC is unrecoverable shutdown is not issued\n");
+ return;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
+ ioc_info(mrioc, "shutdown already in progress\n");
+ return;
+ }
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
+
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->facts.shutdown_timeout)
+ timeout = mrioc->facts.shutdown_timeout * 10;
+
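+ /* Poll IOC status every 100ms until shutdown completes or times out. */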
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (retval) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
+ ioc_warn(mrioc,
+ "shutdown still in progress after timeout\n");
+ }
+
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+}
+
+/**
+ * mpi3mr_cleanup_ioc - Cleanup controller
+ * @mrioc: Adapter instance reference
+ *
+ * Controller cleanup handler: a message unit reset or soft reset
+ * and a shutdown notification are issued to the controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
+{
+ enum mpi3mr_iocstate ioc_state;
+
+ dprint_exit(mrioc, "cleaning up the controller\n");
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+
+ if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
+ (ioc_state == MRIOC_STATE_READY)) {
+ if (mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP))
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_MUR_FAILURE);
+ mpi3mr_issue_ioc_shutdown(mrioc);
+ }
+ dprint_exit(mrioc, "controller cleanup completed\n");
+}
+
+/**
+ * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
+ * @mrioc: Adapter instance reference
+ * @cmdptr: Internal command tracker
+ *
+ * Complete an internal driver command with state indicating it
+ * is completed due to reset.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *cmdptr)
+{
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_RESET;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+}
+
+/**
+ * mpi3mr_flush_drv_cmds - Flush internal driver commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all internal driver commands post reset.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_drv_cmd *cmdptr;
+ u8 i;
+
+ cmdptr = &mrioc->init_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->cfg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->bsg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ cmdptr = &mrioc->host_tm_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ cmdptr = &mrioc->dev_rmhs_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ cmdptr = &mrioc->evtack_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+
+ cmdptr = &mrioc->pel_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->pel_abort_cmd;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->transport_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+}
+
+/**
+ * mpi3mr_pel_wait_post - Issue PEL Wait
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue PEL Wait MPI request through admin queue and return.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_req_action_wait pel_wait;
+
+ mrioc->pel_abort_requested = false;
+
+ memset(&pel_wait, 0, sizeof(pel_wait));
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_pel_wait_complete;
+ drv_cmd->ioc_status = 0;
+ drv_cmd->ioc_loginfo = 0;
+ pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_wait.action = MPI3_PEL_ACTION_WAIT;
+ pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
+ pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
+ pel_wait.class = cpu_to_le16(mrioc->pel_class);
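+ /* Wait indefinitely; firmware completes only on a new entry or an abort. */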
+ pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
+ dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
+ mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
+
+ if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
+ dprint_bsg_err(mrioc,
+ "Issuing PELWait: Admin post failed\n");
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ mrioc->pel_enabled = false;
+ }
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue PEL get sequence number MPI request through admin queue
+ * and return.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ int retval = 0;
+
+ memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
+ mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->pel_cmds.is_waiting = 0;
+ mrioc->pel_cmds.ioc_status = 0;
+ mrioc->pel_cmds.ioc_loginfo = 0;
+ mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
+ pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
+ mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
+ mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
+
+ retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
+ sizeof(pel_getseq_req), 0);
+ if (retval) {
+ if (drv_cmd) {
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ }
+ mrioc->pel_enabled = false;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_pel_wait_complete - PELWait Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the callback handler for the PEL wait request. The
+ * firmware completes a PEL wait request when it is aborted or
+ * when a new PEL entry is available. This sends an AEN to the
+ * application and, if the PEL wait completion is not due to a
+ * PEL abort, sends a request for a new PEL sequence number.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_reply *pel_reply = NULL;
+ u16 ioc_status, pe_log_status;
+ bool do_retry = false;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto cleanup_drv_cmd;
+
+ ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ __func__, ioc_status, drv_cmd->ioc_loginfo);
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+ ioc_status, drv_cmd->ioc_loginfo);
+ do_retry = true;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+
+ if (!pel_reply) {
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed due to no reply\n");
+ goto out_failed;
+ }
+
+ pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+ if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
+ (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
+ ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
+ __func__, pe_log_status);
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed due to pel_log_status(0x%04x)\n",
+ pe_log_status);
+ do_retry = true;
+ }
+
+ if (do_retry) {
+ if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
+ drv_cmd->retry_count);
+ mpi3mr_pel_wait_post(mrioc, drv_cmd);
+ return;
+ }
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed after all retries(%d)\n",
+ drv_cmd->retry_count);
+ goto out_failed;
+ }
+ atomic64_inc(&event_counter);
+ if (!mrioc->pel_abort_requested) {
+ mrioc->pel_cmds.retry_count = 0;
+ mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
+ }
+
+ return;
+out_failed:
+ mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the callback handler for the PEL get sequence number
+ * request; on success, a new PEL wait request is issued to the
+ * firmware from here.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_reply *pel_reply = NULL;
+ struct mpi3_pel_seq *pel_seqnum_virt;
+ u16 ioc_status;
+ bool do_retry = false;
+
+ pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto cleanup_drv_cmd;
+
+ ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+ ioc_status, drv_cmd->ioc_loginfo);
+ do_retry = true;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+ if (!pel_reply) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed due to no reply\n");
+ goto out_failed;
+ }
+
+ if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
+ le16_to_cpu(pel_reply->pe_log_status));
+ do_retry = true;
+ }
+
+ if (do_retry) {
+ if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: retrying(%d)\n",
+ drv_cmd->retry_count);
+ mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
+ return;
+ }
+
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed after all retries(%d)\n",
+ drv_cmd->retry_count);
+ goto out_failed;
+ }
+ mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
+ drv_cmd->retry_count = 0;
+ mpi3mr_pel_wait_post(mrioc, drv_cmd);
+
+ return;
+out_failed:
+ mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+}
+
+/**
+ * mpi3mr_soft_reset_handler - Reset the controller
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ * @snapdump: Flag to generate snapdump in firmware or not
+ *
+ * This is the handler for recovering the controller by issuing a
+ * soft reset or a diag fault reset. This is a blocking function;
+ * while one reset is executing, any other reset attempts are
+ * blocked and all BSG requests are blocked during the reset. If
+ * the controller reset is successful then the controller is
+ * reinitialized, otherwise the controller is marked as not
+ * recoverable.
+ *
+ * If the snapdump bit is set, the controller is issued a diag
+ * fault reset so that the firmware can create a snapdump; after
+ * that the firmware raises an F000 fault and the driver issues a
+ * soft reset to recover from it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump)
+{
+ int retval = 0, i;
+ unsigned long flags;
+ u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+
+ /* Block the reset handler while a diag save is in progress */
+ dprint_reset(mrioc,
+ "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
+ mrioc->diagsave_timeout);
+ while (mrioc->diagsave_timeout)
+ ssleep(1);
+ /*
+ * Block new resets until the currently executing one is finished and
+ * return the status of the existing reset for all blocked resets
+ */
+ dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
+ if (!mutex_trylock(&mrioc->reset_mutex)) {
+ ioc_info(mrioc,
+ "controller reset triggered by %s is blocked due to another reset in progress\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ do {
+ ssleep(1);
+ } while (mrioc->reset_in_progress == 1);
+ ioc_info(mrioc,
+ "returning previous reset result(%d) for the reset triggered by %s\n",
+ mrioc->prev_reset_result,
+ mpi3mr_reset_rc_name(reset_reason));
+ return mrioc->prev_reset_result;
+ }
+ ioc_info(mrioc, "controller reset is triggered by %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+
+ mrioc->device_refresh_on = 0;
+ mrioc->reset_in_progress = 1;
+ mrioc->stop_bsgs = 1;
+ mrioc->prev_reset_result = -1;
+
+ if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
+ (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ dprint_reset(mrioc, "soft_reset_handler: masking events\n");
+ mpi3mr_issue_event_notification(mrioc);
+ }
+
+ mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ if (snapdump) {
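+ /*
+ * Ask the firmware to preserve diagnostic data, issue a diag
+ * fault reset, and wait for the snapdump save to finish before
+ * the soft reset below.
+ */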
+ mpi3mr_set_diagsave(mrioc);
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ if (!retval) {
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+ }
+ }
+
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
+ goto out;
+ }
+ if (mrioc->num_io_throttle_group !=
+ mrioc->facts.max_io_throttle_group) {
+ ioc_err(mrioc,
+ "max io throttle group doesn't match old(%d), new(%d)\n",
+ mrioc->num_io_throttle_group,
+ mrioc->facts.max_io_throttle_group);
+ retval = -EPERM;
+ goto out;
+ }
+
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
+ mpi3mr_flush_host_io(mrioc);
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+
+ if (mrioc->prepare_for_reset) {
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ mpi3mr_memset_buffers(mrioc);
+ retval = mpi3mr_reinit_ioc(mrioc, 0);
+ if (retval) {
+ pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
+ mrioc->name, reset_reason);
+ goto out;
+ }
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
+
+out:
+ if (!retval) {
+ mrioc->diagsave_timeout = 0;
+ mrioc->reset_in_progress = 0;
+ mrioc->pel_abort_requested = 0;
+ if (mrioc->pel_enabled) {
+ mrioc->pel_cmds.retry_count = 0;
+ mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
+ }
+
+ mrioc->device_refresh_on = 0;
+
+ mrioc->ts_update_counter = 0;
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ mrioc->stop_bsgs = 0;
+ if (mrioc->pel_enabled)
+ atomic64_inc(&event_counter);
+ } else {
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->device_refresh_on = 0;
+ mrioc->unrecoverable = 1;
+ mrioc->reset_in_progress = 0;
+ retval = -1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+ mrioc->prev_reset_result = retval;
+ mutex_unlock(&mrioc->reset_mutex);
+ ioc_info(mrioc, "controller reset is %s\n",
+ ((retval == 0) ? "successful" : "failed"));
+ return retval;
+}
+
+/**
+ * mpi3mr_free_config_dma_memory - free memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: memory descriptor structure
+ *
+ * Check whether the size of the buffer specified by the memory
+ * descriptor is greater than the default config page size; if
+ * so, free the memory pointed to by the descriptor.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+}
+
+/**
+ * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: Memory descriptor to hold dma memory info
+ *
+ * This function allocates new dmaable memory or provides the
+ * default config page dmaable memory based on the memory size
+ * described by the descriptor.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if (mem_desc->size > mrioc->cfg_page_sz) {
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
+ if (!mem_desc->addr)
+ return -ENOMEM;
+ } else {
+ mem_desc->addr = mrioc->cfg_page;
+ mem_desc->dma_addr = mrioc->cfg_page_dma;
+ memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
+ }
+ return 0;
+}
+
+/**
+ * mpi3mr_post_cfg_req - Issue config requests and wait
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting an MPI3 configuration request
+ * to the firmware. It blocks until the request completes, for up
+ * to timeout seconds; if the request times out, this function
+ * faults the controller with the proper reason code.
+ *
+ * On successful completion of the request, this function returns
+ * the appropriate ioc_status from the firmware back to the
+ * caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->cfg_cmds.mutex);
+ if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending config request failed due to command in use\n");
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+ goto out;
+ }
+ mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->cfg_cmds.is_waiting = 1;
+ mrioc->cfg_cmds.callback = NULL;
+ mrioc->cfg_cmds.ioc_status = 0;
+ mrioc->cfg_cmds.ioc_loginfo = 0;
+
+ cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
+ cfg_req->function = MPI3_FUNCTION_CONFIG;
+
+ init_completion(&mrioc->cfg_cmds.done);
+ dprint_cfg_info(mrioc, "posting config request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
+ "mpi3_cfg_req");
+ retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting config request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
+ if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
+ ioc_err(mrioc, "config request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_cfg_err(mrioc,
+ "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
+
+out_unlock:
+ mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_cfg_req - config page request processor
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @cfg_hdr: Configuration page header
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ * @cfg_buf: Memory pointer to copy config page or header
+ * @cfg_buf_sz: Size of the memory to get config page or header
+ *
+ * This is handler for config page read, write and config page
+ * header read operations.
+ *
+ * This function expects the cfg_req to be populated with page
+ * type, page number, action for the header read and with page
+ * address for all other operations.
+ *
+ * The cfg_hdr can be passed as NULL when reading the required
+ * header details; for read/write pages the cfg_hdr should point
+ * to a valid configuration page header.
+ *
+ * This allocates DMA-able memory based on the size of the config
+ * buffer and sets the SGE of the cfg_req.
+ *
+ * For write actions, the config page data has to be passed in
+ * the cfg_buf and size of the data has to be mentioned in the
+ * cfg_buf_sz.
+ *
+ * For read/header actions, on successful completion of the
+ * request with a successful ioc_status, the data is copied into
+ * the cfg_buf, limited to the minimum of the actual page size
+ * and cfg_buf_sz.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req,
+ struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
+ void *cfg_buf, u32 cfg_buf_sz)
+{
+ struct dma_memory_desc mem_desc;
+ int retval = -1;
+ u8 invalid_action = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
+
+ if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER) {
+ mem_desc.size = sizeof(struct mpi3_config_page_header);
+ } else {
+ if (!cfg_hdr) {
+ ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number);
+ goto out;
+ }
+ switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
+ case MPI3_CONFIG_PAGEATTR_READ_ONLY:
+ if (cfg_req->action
+ != MPI3_CONFIG_ACTION_READ_CURRENT)
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
+ if ((cfg_req->action ==
+ MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
+ (cfg_req->action ==
+ MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_PERSISTENT:
+ default:
+ break;
+ }
+ if (invalid_action) {
+ ioc_err(mrioc,
+ "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number, cfg_hdr->page_attribute);
+ goto out;
+ }
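+ /* The header's page_length field is in units of dwords (4 bytes). */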
+ mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
+ cfg_req->page_length = cfg_hdr->page_length;
+ cfg_req->page_version = cfg_hdr->page_version;
+ }
+ if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
+ goto out;
+
+ mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
+ mem_desc.dma_addr);
+
+ if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
+ (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer to be written\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+ if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
+ goto out;
+
+ retval = 0;
+ if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer read\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+out:
+ mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
+ return retval;
+}
+
+/**
+ * mpi3mr_cfg_get_dev_pg0 - Read current device page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @dev_pg0: Pointer to return device page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific
+ * device page0. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(dev_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "device page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
+ (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
+ ioc_err(mrioc, "device page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg0: Pointer to return SAS Phy page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Phy page0. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas phy page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg1: Pointer to return SAS Phy page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Phy page1. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas phy page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg0: Pointer to return SAS Expander page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Expander page0. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
+ ioc_err(mrioc, "expander page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg1: Pointer to return SAS Expander page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Expander page1. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
+ ioc_err(mrioc, "expander page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @encl_pg0: Pointer to return Enclosure page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific
+ * Enclosure page0. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(encl_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "enclosure page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
+ (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
+ ioc_err(mrioc, "enclosure page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the SAS IO Unit
+ * page0. This routine checks ioc_status to decide whether the
+ * page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page0 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page0 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the
+ * page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page write of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the
+ * page write succeeded or not. This modifies both the current
+ * and the persistent copy of the page.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write current failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
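
Paired with mpi3mr_cfg_get_sas_io_unit_pg1(), this supports the usual read-modify-write flow. A rough sketch, where SOME_CONTROL_FLAG is a hypothetical bit, control_flags is used only as an illustrative target field, and real callers size the buffer for the variable-length phy_data[] array rather than using sizeof():

	struct mpi3_sas_io_unit_page1 *pg1;
	u16 pg_sz = sizeof(*pg1);

	pg1 = kzalloc(pg_sz, GFP_KERNEL);
	if (!pg1)
		return -ENOMEM;
	if (!mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, pg1, pg_sz)) {
		/* modify the cached copy, then write it back; the set helper
		 * updates both the CURRENT and PERSISTENT copies */
		pg1->control_flags &= ~cpu_to_le16(SOME_CONTROL_FLAG);
		mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, pg1, pg_sz);
	}
	kfree(pg1);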
+
+/**
+ * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
+ * @mrioc: Adapter instance reference
+ * @driver_pg1: Pointer to return Driver page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the Driver
+ * page1. This routine checks ioc_status to decide whether the
+ * page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(driver_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "driver page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
+ ioc_err(mrioc, "driver page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
new file mode 100644
index 000000000000..f77ee4051b00
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -0,0 +1,5324 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+/* global driver scope variables */
+LIST_HEAD(mrioc_list);
+DEFINE_SPINLOCK(mrioc_list_lock);
+static int mrioc_ids;
+static int warn_non_secure_ctlr;
+atomic64_t event_counter;
+
+MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
+MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
+MODULE_VERSION(MPI3MR_DRIVER_VERSION);
+
+/* Module parameters */
+int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
+
+static int prot_guard_mask = 3;
+module_param(prot_guard_mask, int, 0);
+MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
+static int logging_level;
+module_param(logging_level, int, 0);
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+/* Forward declarations */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+
+#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+
+#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE)
+
+/**
+ * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Calculate the host tag based on block tag for a given scmd.
+ *
+ * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
+ */
+static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag;
+ u16 host_tag, hw_queue;
+
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+ hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
+ if (hw_queue >= mrioc->num_op_reply_q)
+ return MPI3MR_HOSTTAG_INVALID;
+ host_tag = blk_mq_unique_tag_to_tag(unique_tag);
+
+ if (WARN_ON(host_tag >= mrioc->max_host_ios))
+ return MPI3MR_HOSTTAG_INVALID;
+
+ priv = scsi_cmd_priv(scmd);
+ /* host_tag 0 is invalid, hence increment by 1 */
+ priv->host_tag = host_tag + 1;
+ priv->scmd = scmd;
+ priv->in_lld_scope = 1;
+ priv->req_q_idx = hw_queue;
+ priv->meta_chain_idx = -1;
+ priv->chain_idx = -1;
+ priv->meta_sg_valid = 0;
+ return priv->host_tag;
+}
+
+/**
+ * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
+ * @mrioc: Adapter instance reference
+ * @host_tag: Host tag
+ * @qidx: Operational queue index
+ *
+ * Identify the block tag from the host tag and queue index and
+ * retrieve associated scsi command using scsi_host_find_tag().
+ *
+ * Return: SCSI command reference or NULL.
+ */
+static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
+ struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
+{
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag = host_tag - 1;
+
+ if (WARN_ON(host_tag > mrioc->max_host_ios))
+ goto out;
+
+ unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
+
+ scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ scmd = NULL;
+ }
+out:
+ return scmd;
+}
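
The two helpers above are inverses: the submission path stores host_tag = block tag + 1 along with the hardware queue index, and the completion path rebuilds the block-layer unique tag from that pair. A sketch of the round trip for a command already dispatched on a hardware queue:

	struct scmd_priv *priv;
	struct scsi_cmnd *found;
	u16 host_tag, qidx;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID)
		return;
	priv = scsi_cmd_priv(scmd);
	qidx = priv->req_q_idx;

	/* (host_tag - 1) | (qidx << BLK_MQ_UNIQUE_TAG_BITS) recreates the
	 * unique tag, so the lookup yields the same command */
	found = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	WARN_ON(found != scmd);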
+
+/**
+ * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Invalidate the SCSI command private data to mark the command
+ * as no longer in LLD scope.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+
+ priv = scsi_cmd_priv(scmd);
+
+ if (WARN_ON(priv->in_lld_scope == 0))
+ return;
+ priv->host_tag = MPI3MR_HOSTTAG_INVALID;
+ priv->req_q_idx = 0xFFFF;
+ priv->scmd = NULL;
+ priv->in_lld_scope = 0;
+ priv->meta_sg_valid = 0;
+ if (priv->chain_idx >= 0) {
+ clear_bit(priv->chain_idx, mrioc->chain_bitmap);
+ priv->chain_idx = -1;
+ }
+ if (priv->meta_chain_idx >= 0) {
+ clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
+ priv->meta_chain_idx = -1;
+ }
+}
+
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
+static void mpi3mr_fwevt_worker(struct work_struct *work);
+
+/**
+ * mpi3mr_fwevt_free - firmware event memory deallocator
+ * @r: kref pointer of the firmware event
+ *
+ * Free the firmware event memory once the last reference is dropped.
+ */
+static void mpi3mr_fwevt_free(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
+}
+
+/**
+ * mpi3mr_fwevt_get - firmware event reference incrementer
+ * @fwevt: Firmware event reference
+ *
+ * Increment firmware event reference count.
+ */
+static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
+{
+ kref_get(&fwevt->ref_count);
+}
+
+/**
+ * mpi3mr_fwevt_put - firmware event reference decrementer
+ * @fwevt: Firmware event reference
+ *
+ * Decrement the firmware event reference count.
+ */
+static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
+{
+ kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
+}
+
+/**
+ * mpi3mr_alloc_fwevt - Allocate firmware event
+ * @len: length of firmware event data to allocate
+ *
+ * Allocate firmware event with required length and initialize
+ * the reference counter.
+ *
+ * Return: firmware event reference.
+ */
+static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
+{
+ struct mpi3mr_fwevt *fwevt;
+
+ fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
+ if (!fwevt)
+ return NULL;
+
+ kref_init(&fwevt->ref_count);
+ return fwevt;
+}
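
The kref discipline here is the standard one: kref_init() starts the event at a count of one, and every holder of the pointer (the event list, the queued work) takes its own reference. A sketch of the accounting, with data_len standing in for the event payload size:

	struct mpi3mr_fwevt *fwevt;

	fwevt = mpi3mr_alloc_fwevt(data_len);	/* kref_init: count == 1 */
	if (!fwevt)
		return;
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);	/* +1 list ref, +1 work ref */
	/* list removal and work completion (or cancellation) each call
	 * mpi3mr_fwevt_put(); the put that pairs with kref_init is the one
	 * that finally triggers mpi3mr_fwevt_free() */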
+
+/**
+ * mpi3mr_fwevt_add_to_list - Add firmware event to the list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Add the given firmware event to the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ if (!mrioc->fwevt_worker_thread)
+ return;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ /* get fwevt reference count while adding it to fwevt_list */
+ mpi3mr_fwevt_get(fwevt);
+ INIT_LIST_HEAD(&fwevt->list);
+ list_add_tail(&fwevt->list, &mrioc->fwevt_list);
+ INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
+ /* get fwevt reference count while enqueueing it to worker queue */
+ mpi3mr_fwevt_get(fwevt);
+ queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_fwevt_del_from_list - Delete firmware event from list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Delete the given firmware event from the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&fwevt->list)) {
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
+ * @mrioc: Adapter instance reference
+ *
+ * Dequeue a firmware event from the firmware event list.
+ *
+ * Return: firmware event.
+ */
+static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
+ struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&mrioc->fwevt_list)) {
+ fwevt = list_first_entry(&mrioc->fwevt_list,
+ struct mpi3mr_fwevt, list);
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+
+ return fwevt;
+}
+
+/**
+ * mpi3mr_cancel_work - cancel firmware event
+ * @fwevt: fwevt object which needs to be canceled
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
+{
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from mpi3mr_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+}
+
+/**
+ * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all pending firmware events from the firmware event
+ * list.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
+ !mrioc->fwevt_worker_thread)
+ return;
+
+ while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
+ mpi3mr_cancel_work(fwevt);
+
+ if (mrioc->current_event) {
+ fwevt = mrioc->current_event;
+ /*
+ * Don't call cancel_work_sync() for the fwevt
+ * work if the controller reset was invoked as
+ * part of processing that same fwevt work, or
+ * when the worker thread is waiting for device
+ * add/remove APIs to complete. Otherwise we
+ * will deadlock.
+ */
+ if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
+ fwevt->discard = 1;
+ return;
+ }
+
+ mpi3mr_cancel_work(fwevt);
+ }
+}
+
+/**
+ * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to queue a synthetically generated driver event to
+ * the event worker thread; the driver event will be used to
+ * reduce the QD of all VDs in the TG from the worker thread.
+ *
+ * Return: None.
+ */
+static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ struct mpi3mr_fwevt *fwevt;
+ u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
+
+ /*
+ * If the QD reduction event is already queued due to throttling and
+ * the QD has not been restored through a device info change event,
+ * then don't queue further reduction events.
+ */
+ if (tg->fw_qd != tg->modified_qd)
+ return;
+
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
+ return;
+ }
+ *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = sz;
+ tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
+
+ dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
+ tg->id);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
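
For example, with tg->fw_qd = 128 and tg->qd_reduction = 3, the computation above scales the queue depth to three tenths of the firmware value: max(128 * 3 / 10, 8) = 38. The floor of 8 only takes effect for small firmware queue depths (at this reduction factor, an fw_qd of 26 or less).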
+
+/**
+ * mpi3mr_invalidate_devhandles - Invalidate device handles
+ * @mrioc: Adapter instance reference
+ *
+ * Invalidate the device handles in the target device structures.
+ * Called post reset, prior to reinitializing the controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ tgt_priv->io_throttle_enabled = 0;
+ tgt_priv->io_divert = 0;
+ tgt_priv->throttle_group = NULL;
+ if (tgtdev->host_exposed)
+ atomic_set(&tgt_priv->block_io, 1);
+ }
+ }
+}
+
+/**
+ * mpi3mr_print_scmd - print individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ *
+ * Print the SCSI command details if it is in LLD scope.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_print_scmd(struct request *rq, void *data)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
+ __func__, priv->host_tag, priv->req_q_idx + 1);
+ scsi_print_command(scmd);
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_flush_scmd - Flush individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ *
+ * Return the SCSI command to the upper layers if it is in LLD
+ * scope.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_flush_scmd(struct request *rq, void *data)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ if (priv->meta_sg_valid)
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scmd->result = DID_RESET << 16;
+ scsi_print_command(scmd);
+ scsi_done(scmd);
+ mrioc->flush_io_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_count_dev_pending - Count commands pending for a lun
+ * @rq: Block request
+ * @data: SCSI device reference
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host and if the command is pending in the LLD for the
+ * specific device(lun) then device specific pending I/O counter
+ * is updated in the device structure.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
+{
+ struct scsi_device *sdev = (struct scsi_device *)data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device == sdev)
+ sdev_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_count_tgt_pending - Count commands pending for target
+ * @rq: Block request
+ * @data: SCSI target reference
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host and if the command is pending in the LLD for the
+ * specific target then target specific pending I/O counter is
+ * updated in the target structure.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
+{
+ struct scsi_target *starget = (struct scsi_target *)data;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device && (scsi_target(scmd->device) == starget))
+ stgt_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_flush_host_io - Flush host I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all of the pending I/Os by calling
+ * blk_mq_tagset_busy_iter() for each possible tag. This is
+ * executed post controller reset
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ mrioc->flush_io_count = 0;
+ ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
+ mrioc->flush_io_count);
+}
+
+/**
+ * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
+ * @mrioc: Adapter instance reference
+ *
+ * This function waits for currently running IO poll threads to
+ * exit and then flushes all host I/Os and any internal pending
+ * cmds. This is executed after controller is marked as
+ * unrecoverable.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+ int i;
+
+ if (!mrioc->unrecoverable)
+ return;
+
+ if (mrioc->op_reply_qinfo) {
+ for (i = 0; i < mrioc->num_queues; i++) {
+ while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
+ udelay(500);
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ }
+ }
+ mrioc->flush_io_count = 0;
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+}
+
+/**
+ * mpi3mr_alloc_tgtdev - target device allocator
+ *
+ * Allocate target device instance and initialize the reference
+ * count
+ *
+ * Return: target device instance.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
+ if (!tgtdev)
+ return NULL;
+ kref_init(&tgtdev->ref_count);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Add the target device to the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ mpi3mr_tgtdev_get(tgtdev);
+ INIT_LIST_HEAD(&tgtdev->list);
+ list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Remove the target device from the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve target device from the device handle.
+ * Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->dev_handle == handle)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve target device from the device handle.
+ * Lock version.
+ *
+ * Return: Target device reference.
+ */
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve target device from the Persistent ID.
+ * Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->perst_id == persist_id)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve target device from the Persistent ID.
+ * Lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
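
Both lookup flavors return the target device with an extra reference taken via mpi3mr_tgtdev_get(), so every successful lookup must be balanced with a put. A typical usage sketch:

	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev)
		return;
	/* ... use tgtdev fields under the reference ... */
	mpi3mr_tgtdev_put(tgtdev);	/* balance the accessor's get */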
+
+/**
+ * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
+ * @mrioc: Adapter instance reference
+ * @tgt_priv: Target private data
+ *
+ * Accessor to return target device from the target private
+ * data. Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
+ struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ tgtdev = tgt_priv->tgt_dev;
+ if (tgtdev)
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ * @divert_value: 1 or 0
+ *
+ * Accessor to set io_divert flag for each device associated
+ * with the given throttle group with the given value.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg, u8 divert_value)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg)
+ tgt_priv->io_divert = divert_value;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_print_device_event_notice - print notice related to post processing of
+ * device event after controller reset.
+ *
+ * @mrioc: Adapter instance reference
+ * @device_add: true for device add event and false for device removal event
+ *
+ * Return: None.
+ */
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add)
+{
+ ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
+ (device_add ? "addition" : "removal"));
+ ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
+ ioc_notice(mrioc, "are matched with attached devices for correctness\n");
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device structure
+ *
+ * Checks whether the device is exposed to the upper layers and,
+ * if it is, removes it from the upper layers by calling
+ * scsi_remove_target().
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ atomic_set(&tgt_priv->block_io, 0);
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ }
+
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ if (tgtdev->starget) {
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc,
+ false);
+ return;
+ }
+ }
+ }
+ } else
+ mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
+
+ ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
+ * @mrioc: Adapter instance reference
+ * @perst_id: Persistent ID of the device
+ *
+ * Checks whether the device can be exposed to the upper layers
+ * and, if it is not already exposed, exposes it by calling
+ * scsi_scan_target().
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
+ u16 perst_id)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (!tgtdev) {
+ retval = -1;
+ goto out;
+ }
+ if (tgtdev->is_hidden || tgtdev->host_exposed) {
+ retval = -1;
+ goto out;
+ }
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ tgtdev->host_exposed = 1;
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_scan_target(&mrioc->shost->shost_gendev,
+ mrioc->scsi_device_channel, tgtdev->perst_id,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ if (!tgtdev->starget)
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc, true);
+ goto out;
+ }
+ }
+ } else
+ mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_change_queue_depth - Change QD callback handler
+ * @sdev: SCSI device reference
+ * @q_depth: Queue depth
+ *
+ * Validate and limit QD and call scsi_change_queue_depth.
+ *
+ * Return: return value of scsi_change_queue_depth
+ */
+static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
+ int q_depth)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ int retval = 0;
+
+ if (!sdev->tagged_supported)
+ q_depth = 1;
+ if (q_depth > shost->can_queue)
+ q_depth = shost->can_queue;
+ else if (!q_depth)
+ q_depth = MPI3MR_DEFAULT_SDEV_QD;
+ retval = scsi_change_queue_depth(sdev, q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+
+ return retval;
+}
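
In effect: an untagged device is pinned at a depth of 1, a requested depth of 0 falls back to MPI3MR_DEFAULT_SDEV_QD, and anything above shost->can_queue is capped to can_queue before scsi_change_queue_depth() is called.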
+
+/**
+ * mpi3mr_update_sdev - Update SCSI device information
+ * @sdev: SCSI device reference
+ * @data: target device reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the target specific information into each
+ * SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = (struct mpi3mr_tgt_dev *)data;
+ if (!tgtdev)
+ return;
+
+ mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ /* The block layer hw sector size = 512 */
+ if ((tgtdev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+ }
+ break;
+ default:
+ break;
+ }
+}
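
As a worked example of the arithmetic above: a hypothetical NVMe device reporting an mdts of 1 MiB gets max_hw_sectors = 1048576 / 512 = 2048, and a page size exponent of 12 yields a virt boundary mask of (1 << 12) - 1 = 0xfff, i.e. 4 KiB alignment for scatter-gather elements.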
+
+/**
+ * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any
+ * missing devices during reset and remove from the upper layers
+ * or expose any newly detected device to the upper layers.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
+ tgtdev->perst_id);
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ tgtdev = NULL;
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+ !tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+}
+
+/**
+ * mpi3mr_update_tgtdev - Update cached target device information
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device internal structure
+ * @dev_pg0: New device page0
+ * @is_added: Flag to indicate the device is just added
+ *
+ * Update the information from the device page0 into the driver
+ * cached target device structure.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
+ bool is_added)
+{
+ u16 flags = 0;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ u8 prot_mask = 0;
+
+ tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->io_unit_port = dev_pg0->io_unit_port;
+ tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
+ tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
+ tgtdev->slot = le16_to_cpu(dev_pg0->slot);
+ tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
+ tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+ tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);
+
+ if (tgtdev->encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ tgtdev->encl_handle);
+ if (enclosure_dev)
+ tgtdev->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+
+ flags = tgtdev->devpg0_flag;
+
+ tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+
+ if (is_added == true)
+ tgtdev->io_throttle_enabled =
+ (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
+ scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
+ scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgtdev->io_throttle_enabled;
+ if (is_added == true)
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+
+ switch (dev_pg0->access_status) {
+ case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI3_DEVICE0_ASTATUS_PREPARE:
+ case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
+ case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
+ break;
+ default:
+ tgtdev->is_hidden = 1;
+ break;
+ }
+
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_SAS_SATA:
+ {
+ struct mpi3_device0_sas_sata_format *sasinf =
+ &dev_pg0->device_specific.sas_sata_format;
+ u16 dev_info = le16_to_cpu(sasinf->device_info);
+
+ tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
+ tgtdev->dev_spec.sas_sata_inf.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
+ tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
+ sasinf->attached_phy_identifier;
+ if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
+ tgtdev->is_hidden = 1;
+ else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
+ MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
+ tgtdev->is_hidden = 1;
+
+ if (((tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
+ && (tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
+ (tgtdev->parent_handle == 0xFFFF))
+ tgtdev->non_stl = 1;
+ if (tgtdev->dev_spec.sas_sata_inf.hba_port)
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
+ dev_pg0->io_unit_port;
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ {
+ struct mpi3_device0_pcie_format *pcieinf =
+ &dev_pg0->device_specific.pcie_format;
+ u16 dev_info = le16_to_cpu(pcieinf->device_info);
+
+ tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
+ tgtdev->dev_spec.pcie_inf.capb =
+ le32_to_cpu(pcieinf->capabilities);
+ tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
+ /* 2^12 = 4096 */
+ tgtdev->dev_spec.pcie_inf.pgsz = 12;
+ if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
+ tgtdev->dev_spec.pcie_inf.mdts =
+ le32_to_cpu(pcieinf->maximum_data_transfer_size);
+ tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
+ tgtdev->dev_spec.pcie_inf.reset_to =
+ max_t(u8, pcieinf->controller_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
+ tgtdev->dev_spec.pcie_inf.abort_to =
+ max_t(u8, pcieinf->nvme_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
+ }
+ if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
+ tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
+ if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
+ tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
+ if (!mrioc->shost)
+ break;
+ prot_mask = scsi_host_get_prot(mrioc->shost);
+ if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
+ scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
+ ioc_info(mrioc,
+ "%s : Disabling DIX0 prot capability\n", __func__);
+ ioc_info(mrioc,
+ "because HBA does not support DIX0 operation on NVME drives\n");
+ }
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_VD:
+ {
+ struct mpi3_device0_vd_format *vdinf =
+ &dev_pg0->device_specific.vd_format;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u16 vdinf_io_throttle_group =
+ le16_to_cpu(vdinf->io_throttle_group);
+
+ tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
+ if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
+ tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
+ tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
+ tgtdev->dev_spec.vd_inf.tg_high =
+ le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
+ tgtdev->dev_spec.vd_inf.tg_low =
+ le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
+ if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
+ tg = mrioc->throttle_groups + vdinf_io_throttle_group;
+ tg->id = vdinf_io_throttle_group;
+ tg->high = tgtdev->dev_spec.vd_inf.tg_high;
+ tg->low = tgtdev->dev_spec.vd_inf.tg_low;
+ tg->qd_reduction =
+ tgtdev->dev_spec.vd_inf.tg_qd_reduction;
+ if (is_added == true)
+ tg->fw_qd = tgtdev->q_depth;
+ tg->modified_qd = tgtdev->q_depth;
+ }
+ tgtdev->dev_spec.vd_inf.tg = tg;
+ if (scsi_tgt_priv_data)
+ scsi_tgt_priv_data->throttle_group = tg;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event information.
+ *
+ * Process Device status Change event and based on device's new
+ * information, either expose the device to the upper layers, or
+ * remove the device from upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ u16 dev_handle = 0;
+ u8 uhide = 0, delete = 0, cleanup = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)fwevt->event_data;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+ ioc_info(mrioc,
+ "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
+ __func__, dev_handle, evtdata->reason_code);
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
+ uhide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ cleanup = 1;
+ break;
+ default:
+ ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
+ evtdata->reason_code);
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (uhide) {
+ tgtdev->is_hidden = 0;
+ if (!tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ if (delete)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ }
+ if (cleanup) {
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: New device page0
+ *
+ * Process Device Info Change event and based on device's new
+ * information, either expose the device to the upper layers, or
+ * remove the device from upper layers or update the details of
+ * the device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 dev_handle = 0, perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ ioc_info(mrioc,
+ "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
+ __func__, dev_handle, perst_id);
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
+ if (!tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ if (tgtdev->is_hidden && tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
+ starget_for_each_device(tgtdev->starget, (void *)tgtdev,
+ mpi3mr_update_sdev);
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_free_enclosure_list - release enclosures
+ * @mrioc: Adapter instance reference
+ *
+ * Free memory allocated during enclosure add.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
+
+ list_for_each_entry_safe(enclosure_dev,
+ enclosure_dev_next, &mrioc->enclosure_list, list) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+}
+
+/**
+ * mpi3mr_enclosure_find_by_handle - enclosure search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the enclosure
+ *
+ * This searches for the enclosure device based on its handle and
+ * returns the enclosure object.
+ *
+ * Return: Enclosure object reference or NULL
+ */
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
+
+ list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
+ * @mrioc: Adapter instance reference
+ * @encl_pg0: Enclosure page 0.
+ * @is_added: Added event or not
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
+{
+ char *reason_str = NULL;
+
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ if (is_added)
+ reason_str = "enclosure added";
+ else
+ reason_str = "enclosure dev status changed";
+
+ ioc_info(mrioc,
+ "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
+ reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
+ (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
+ ioc_info(mrioc,
+ "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
+ le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
+ le16_to_cpu(encl_pg0->flags),
+ ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the Enclosure device status or
+ * Enclosure add events if logging is enabled and add or remove
+ * the enclosure from the controller's internal list of
+ * enclosures.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ struct mpi3_enclosure_page0 *encl_pg0;
+ u16 encl_handle;
+ u8 added, present;
+
+ encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
+ added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
+ mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
+
+ encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
+ present = ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
+
+ if (encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ encl_handle);
+ if (!enclosure_dev && present) {
+ enclosure_dev =
+ kzalloc(sizeof(struct mpi3mr_enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev)
+ return;
+ list_add_tail(&enclosure_dev->list,
+ &mrioc->enclosure_list);
+ }
+ if (enclosure_dev) {
+ if (!present) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ } else
+ memcpy(&enclosure_dev->pg0, encl_pg0,
+ sizeof(enclosure_dev->pg0));
+
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_debug - SASTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: SAS topology change list event data
+ *
+ * Prints information about the SAS topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_sas_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u8 reason_code, phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->exp_status) {
+ case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :sas topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->expander_dev_handle),
+ event_data->io_unit_port,
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_phy_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ phy_number = event_data->start_phy_num + i;
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ ioc_info(mrioc,
+ "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, phy_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
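
The link_rate byte decoded above packs two codes into one octet: the new rate in the upper nibble and the previous rate in the lower nibble. For example, a phy_entry link_rate of 0xba splits into a new rate code of 0xb and a previous rate code of 0xa (12.0 and 6.0 Gb/s respectively in the MPI3 link-rate encoding).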
+
+/**
+ * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the SAS topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_sas_topology_change_list *event_data =
+ (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ u64 exp_sas_address = 0, parent_sas_address = 0;
+ struct mpi3mr_hba_port *hba_port = NULL;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_sas_node *sas_expander = NULL;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate, parent_phy_number;
+
+ mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+ if (mrioc->sas_transport_enabled) {
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc,
+ event_data->io_unit_port);
+ if (le16_to_cpu(event_data->expander_dev_handle)) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
+ le16_to_cpu(event_data->expander_dev_handle));
+ if (sas_expander) {
+ exp_sas_address = sas_expander->sas_address;
+ hba_port = sas_expander->hba_port;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ parent_sas_address = exp_sas_address;
+ } else
+ parent_sas_address = mrioc->sas_hba.sas_address;
+ }
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ {
+ if (!mrioc->sas_transport_enabled || tgtdev->non_stl
+ || tgtdev->is_hidden)
+ break;
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ if (link_rate == prev_link_rate)
+ break;
+ if (!parent_sas_address)
+ break;
+ parent_phy_number = event_data->start_phy_num + i;
+ mpi3mr_update_links(mrioc, parent_sas_address, handle,
+ parent_phy_number, link_rate, hba_port);
+ break;
+ }
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ if (mrioc->sas_transport_enabled && (event_data->exp_status ==
+ MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
+ if (sas_expander)
+ mpi3mr_expander_remove(mrioc, exp_sas_address,
+ hba_port);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: PCIe topology change list event data
+ *
+ * Prints information about the PCIe topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_pcie_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->switch_status) {
+ case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->switch_dev_handle),
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_port_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ port_number = event_data->start_port_num + i;
+ reason_code = event_data->port_entry[i].port_status;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->port_entry[i].current_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->port_entry[i].previous_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ ioc_info(mrioc,
+ "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, port_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the PCIe topology change event and,
+ * for the "not responding" reason code, removes the device from
+ * the upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_pcie_topology_change_list *event_data =
+ (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+
+ mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->port_entry[i].port_status;
+
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_logdata_evt_bh - Log data event bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Extracts the event data and calls the application interfacing
+ * function to process the event further.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
+ fwevt->event_data_size);
+}
+
+/**
+ * mpi3mr_update_sdev_qd - Update SCSI device queue depth
+ * @sdev: SCSI device reference
+ * @data: Queue depth reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the QD of each SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
+{
+ u16 *q_depth = (u16 *)data;
+
+ scsi_change_queue_depth(sdev, (int)*q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+}
+
+/**
+ * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to reduce QD for each device associated with the
+ * given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg) {
+ dprint_event_bh(mrioc,
+ "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
+ tgt_priv->perst_id, tgtdev->q_depth,
+ tg->modified_qd);
+ starget_for_each_device(tgtdev->starget,
+ (void *)&tg->modified_qd,
+ mpi3mr_update_sdev_qd);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Identifies the firmware event, calls the corresponding bottom
+ * half handler and sends an event acknowledgment if required.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_device_page0 *dev_pg0 = NULL;
+ u16 perst_id, handle, dev_info;
+ struct mpi3_device0_sas_sata_format *sasinf = NULL;
+
+ mpi3mr_fwevt_del_from_list(mrioc, fwevt);
+ mrioc->current_event = fwevt;
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ if (mrioc->unrecoverable) {
+ dprint_event_bh(mrioc,
+ "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
+ fwevt->event_id);
+ goto out;
+ }
+
+ if (!fwevt->process_evt)
+ goto evt_ack;
+
+ switch (fwevt->event_id) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ handle = le16_to_cpu(dev_pg0->dev_handle);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ else if (mrioc->sas_transport_enabled &&
+ (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ sasinf = &dev_pg0->device_specific.sas_sata_format;
+ dev_info = le16_to_cpu(sasinf->device_info);
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_is_expander_device(dev_info))
+ mpi3mr_expander_add(mrioc, handle);
+ }
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
+ break;
+ }
+
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_LOG_DATA:
+ {
+ mpi3mr_logdata_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
+ {
+ struct mpi3mr_throttle_group_info *tg;
+
+ tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
+ dprint_event_bh(mrioc,
+ "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
+ tg->id, tg->need_qd_reduction);
+ if (tg->need_qd_reduction) {
+ mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
+ tg->need_qd_reduction = 0;
+ }
+ break;
+ }
+ case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
+ {
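+ /* wait for the reset-triggered device refresh to finish */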
+ while (mrioc->device_refresh_on)
+ msleep(500);
+
+ dprint_event_bh(mrioc,
+ "scan for non responding and newly added devices after soft reset started\n");
+ if (mrioc->sas_transport_enabled) {
+ mpi3mr_refresh_sas_ports(mrioc);
+ mpi3mr_refresh_expanders(mrioc);
+ }
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ ioc_info(mrioc,
+ "scan for non responding and newly added devices after soft reset completed\n");
+ break;
+ }
+ default:
+ break;
+ }
+
+evt_ack:
+ if (fwevt->send_ack)
+ mpi3mr_process_event_ack(mrioc, fwevt->event_id,
+ fwevt->evt_ctx);
+out:
+ /* Put fwevt reference count to neutralize kref_init increment */
+ mpi3mr_fwevt_put(fwevt);
+ mrioc->current_event = NULL;
+}
+
+/**
+ * mpi3mr_fwevt_worker - Firmware event worker
+ * @work: Work struct containing firmware event
+ *
+ * Extracts the firmware event and calls mpi3mr_fwevt_bh.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_worker(struct work_struct *work)
+{
+ struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
+ work);
+ mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+}
+
+/**
+ * mpi3mr_create_tgtdev - Create and add a target device
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: Device Page 0 data
+ *
+ * If the device specified by the device page 0 data is not
+ * present in the driver's internal list, allocates memory for
+ * the device, populates the data and adds it to the list;
+ * otherwise updates the device data. The lookup key is the
+ * persistent ID.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
+ return retval;
+
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (tgtdev) {
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ tgtdev = mpi3mr_alloc_tgtdev();
+ if (!tgtdev)
+ return -ENOMEM;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
+ mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush pending commands in the delayed lists due to a
+ * controller reset or driver removal as a cleanup.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
+{
+ struct delayed_dev_rmhs_node *_rmhs_node;
+ struct delayed_evt_ack_node *_evtack_node;
+
+ dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
+ while (!list_empty(&mrioc->delayed_rmhs_list)) {
+ _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ list_del(&_rmhs_node->list);
+ kfree(_rmhs_node);
+ }
+ dprint_reset(mrioc, "flushing delayed event ack commands\n");
+ while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ list_del(&_evtack_node->list);
+ kfree(_evtack_node);
+ }
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list or retries the removal handshake sequence
+ * based on the IOU control request IOC status.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo);
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
+ __func__, drv_cmd->dev_handle,
+ drv_cmd->retry_count);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
+ drv_cmd, drv_cmd->iou_rc);
+ return;
+ }
+ ioc_err(mrioc,
+ "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ } else {
+ ioc_info(mrioc,
+ "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
+ }
+
+ if (!list_empty(&mrioc->delayed_rmhs_list)) {
+ delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ drv_cmd->dev_handle = delayed_dev_rmhs->handle;
+ drv_cmd->retry_count = 0;
+ drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
+ drv_cmd->iou_rc);
+ list_del(&delayed_dev_rmhs->list);
+ kfree(delayed_dev_rmhs);
+ return;
+ }
+
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list or issues an IO unit control request as
+ * part of device removal or hidden acknowledgment handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_iounit_control_request iou_ctrl;
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ if (tm_reply)
+ pr_info(IOCNAME
+ "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count));
+
+ pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, cmd_idx);
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
+ iou_ctrl.operation = drv_cmd->iou_rc;
+ iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
+ iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
+ 1);
+ if (retval) {
+ pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
+ mrioc->name);
+ goto clear_drv_cmd;
+ }
+
+ return;
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ * @cmdparam: Internal command tracker
+ * @iou_rc: IO unit reason code
+ *
+ * Issues a target reset TM to the firmware or adds it to a pend
+ * list as part of device removal or hidden acknowledgment
+ * handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ int retval = 0;
+ u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ u8 retrycount = 5;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ if (drv_cmd)
+ goto issue_cmd;
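+ /*
+ * Reserve a free device-removal command slot; test_and_set_bit
+ * closes the race with other threads grabbing the same bit.
+ */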
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
+ MPI3MR_NUM_DEVRMCMD);
+ if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
+ if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
+ delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
+ GFP_ATOMIC);
+ if (!delayed_dev_rmhs)
+ return;
+ INIT_LIST_HEAD(&delayed_dev_rmhs->list);
+ delayed_dev_rmhs->handle = handle;
+ delayed_dev_rmhs->iou_rc = iou_rc;
+ list_add_tail(&delayed_dev_rmhs->list,
+ &mrioc->delayed_rmhs_list);
+ ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
+ __func__, handle);
+ return;
+ }
+ drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ ioc_info(mrioc,
+ "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
+ __func__, handle, cmd_idx);
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
+ drv_cmd->dev_handle = handle;
+ drv_cmd->iou_rc = iou_rc;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ set_bit(handle, mrioc->removepend_bitmap);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
+ __func__);
+ goto out_failed;
+ }
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_complete_evt_ack - event ack request completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the completion handler for non-blocking event
+ * acknowledgments sent to the firmware; it issues any pending
+ * event acknowledgment request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_event_th(mrioc,
+ "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ drv_cmd->ioc_loginfo);
+ }
+
+ if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ delayed_evtack =
+ list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
+ delayed_evtack->event_ctx);
+ list_del(&delayed_evtack->list);
+ kfree(delayed_evtack);
+ return;
+ }
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
+
+/**
+ * mpi3mr_send_event_ack - Issue event acknowledgment request
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event id
+ * @cmdparam: Internal command tracker
+ * @event_ctx: event context
+ *
+ * Issues an event acknowledgment request to the firmware if
+ * there is a free command to send the event ack; otherwise adds
+ * the event to a pend list so that it is processed on completion
+ * of a prior event acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+ u8 retrycount = 5;
+ u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd) {
+ dprint_event_th(mrioc,
+ "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ goto issue_cmd;
+ }
+ dprint_event_th(mrioc,
+ "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
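+ /* reserve a free event-ack command slot, retrying on races */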
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
+ MPI3MR_NUM_EVTACKCMD);
+ if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
+ if (!test_and_set_bit(cmd_idx,
+ mrioc->evtack_cmds_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
+ delayed_evtack = kzalloc(sizeof(*delayed_evtack),
+ GFP_ATOMIC);
+ if (!delayed_evtack)
+ return;
+ INIT_LIST_HEAD(&delayed_evtack->list);
+ delayed_evtack->event = event;
+ delayed_evtack->event_ctx = event_ctx;
+ list_add_tail(&delayed_evtack->list,
+ &mrioc->delayed_evtack_cmds_list);
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
+ event, event_ctx);
+ return;
+ }
+ drv_cmd = &mrioc->evtack_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ dprint_event_th(mrioc,
+ "sending event ack failed due to command in use\n");
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_complete_evt_ack;
+ evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ dprint_event_th(mrioc,
+ "posting event ack request is failed\n");
+ goto out_failed;
+ }
+
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
+ event, event_ctx);
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware with reason
+ * "remove", for PCIe devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_pcie_topology_change_list *topo_evt =
+ (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->port_entry[i].port_status;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware with reason
+ * "remove", for SAS/SATA devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_sas_topology_change_list *topo_evt =
+ (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware with reason
+ * remove/hide acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 dev_handle = 0;
+ u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
+ block = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ hide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ remove = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
+ ublock = 1;
+ break;
+ default:
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (hide)
+ tgtdev->is_hidden = hide;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ if (block)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ if (delete)
+ scsi_tgt_priv_data->dev_removed = 1;
+ if (ublock)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ }
+ if (remove)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ if (hide)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_HIDDEN_ACK);
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Blocks and unblocks host-level I/O based on the reason code.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_prepare_for_reset *evtdata =
+ (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
+
+ if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
+ dprint_event_th(mrioc,
+ "prepare for reset event top half with rc=start\n");
+ if (mrioc->prepare_for_reset)
+ return;
+ mrioc->prepare_for_reset = 1;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
+ dprint_event_th(mrioc,
+ "prepare for reset top half with rc=abort\n");
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
+ le32_to_cpu(event_reply->event_context));
+}
+
+/**
+ * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies the new shutdown timeout value and updates it.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_energy_pack_change *evtdata =
+ (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
+ u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
+
+ if (shutdown_timeout <= 0) {
+ ioc_warn(mrioc,
+ "%s :Invalid Shutdown Timeout received = %d\n",
+ __func__, shutdown_timeout);
+ return;
+ }
+
+ ioc_info(mrioc,
+ "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
+ __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
+ mrioc->facts.shutdown_timeout = shutdown_timeout;
+}
+
+/**
+ * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays cable management event details.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_cable_management *evtdata =
+ (struct mpi3_event_data_cable_management *)event_reply->event_data;
+
+ switch (evtdata->status) {
+ case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
+ {
+ ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
+ "Devices connected to this cable are not detected.\n"
+ "This cable requires %d mW of power.\n",
+ evtdata->receptacle_id,
+ le32_to_cpu(evtdata->active_cable_power_requirement));
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
+ {
+ ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
+ evtdata->receptacle_id);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
+ * @mrioc: Adapter instance reference
+ *
+ * Adds a driver-specific event to make sure that the driver won't process
+ * events until all the devices are refreshed during soft reset.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ fwevt = mpi3mr_alloc_fwevt(0);
+ if (!fwevt) {
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for event(0x%02x)\n",
+ MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
+ return;
+ }
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = 0;
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
+/**
+ * mpi3mr_os_handle_events - Firmware event handler
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies whether the event has to be handled and acknowledged,
+ * and either processes the event in the top half and/or schedules
+ * a bottom half through mpi3mr_fwevt_worker.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 evt_type, sz;
+ struct mpi3mr_fwevt *fwevt = NULL;
+ bool ack_req = 0, process_evt_bh = 0;
+
+ if (mrioc->stop_drv_processing)
+ return;
+
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ ack_req = 1;
+
+ evt_type = event_reply->event;
+
+ switch (evt_type) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *dev_pg0 =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
+ ioc_err(mrioc,
+ "%s :Failed to add device in the device add event\n",
+ __func__);
+ else
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ process_evt_bh = 1;
+ mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_sastopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ {
+ mpi3mr_preparereset_evt_th(mrioc, event_reply);
+ ack_req = 0;
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ case MPI3_EVENT_LOG_DATA:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ {
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ {
+ mpi3mr_energypackchg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT:
+ {
+ mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ break;
+ default:
+ ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
+ __func__, evt_type);
+ break;
+ }
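+ /*
+ * Queue a firmware event for the bottom half if it needs further
+ * processing or an acknowledgment.
+ */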
+ if (process_evt_bh || ack_req) {
+ sz = event_reply->event_data_length * 4;
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
+ __func__, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fwevt->event_data, event_reply->event_data, sz);
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = evt_type;
+ fwevt->send_ack = ack_req;
+ fwevt->process_evt = process_evt_bh;
+ fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+ }
+}
+
+/**
+ * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * Identifies the protection information flags from the SCSI
+ * command and sets the appropriate flags in the MPI3 SCSI IO
+ * request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ u16 eedp_flags = 0;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+
+ switch (prot_op) {
+ case SCSI_PROT_NORMAL:
+ return;
+ case SCSI_PROT_READ_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_READ_PASS:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
+ scsiio_req->sgl[0].eedp.application_tag_translation_mask =
+ 0xffff;
+ } else
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
+
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ default:
+ return;
+ }
+
+ if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
+
+ if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
+ eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
+
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
+ MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ scsiio_req->cdb.eedp32.primary_reference_tag =
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
+ }
+
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+
+ eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+
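+ /*
+ * Translate the protection interval (logical block size) to the
+ * matching MPI3 user data size code.
+ */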
+ switch (scsi_prot_interval(scmd)) {
+ case 512:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
+ break;
+ case 520:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
+ break;
+ case 4080:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
+ break;
+ case 4088:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
+ break;
+ case 4096:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
+ break;
+ case 4104:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
+ break;
+ case 4160:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
+ break;
+ default:
+ break;
+ }
+
+ scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
+ scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
+}
+
+/**
+ * mpi3mr_build_sense_buffer - Map sense information
+ * @desc: Sense type
+ * @buf: Sense buffer to populate
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ * Maps the given sense information into either descriptor or
+ * fixed format sense data.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
+ u8 asc, u8 ascq)
+{
+ if (desc) {
+ buf[0] = 0x72; /* descriptor, current */
+ buf[1] = key;
+ buf[2] = asc;
+ buf[3] = ascq;
+ buf[7] = 0;
+ } else {
+ buf[0] = 0x70; /* fixed, current */
+ buf[2] = key;
+ buf[7] = 0xa;
+ buf[12] = asc;
+ buf[13] = ascq;
+ }
+}
+
+/**
+ * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
+ * @scmd: SCSI command reference
+ * @ioc_status: status of MPI3 request
+ *
+ * Maps the EEDP error status of the SCSI IO request to sense
+ * data.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
+ u16 ioc_status)
+{
+ u8 ascq = 0;
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+
+ mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, ascq);
+ scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * mpi3mr_process_op_reply_desc - reply descriptor handler
+ * @mrioc: Adapter instance reference
+ * @reply_desc: Operational reply descriptor
+ * @reply_dma: placeholder for the reply DMA address
+ * @qidx: Operational queue index
+ *
+ * Processes the operational reply descriptor and identifies the
+ * descriptor type. Based on the descriptor, maps the MPI3
+ * request status to a SCSI command status and calls the
+ * scsi_done callback.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc = NULL;
+ struct mpi3_address_reply_descriptor *addr_desc = NULL;
+ struct mpi3_success_reply_descriptor *success_desc = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u8 *sense_buf = NULL;
+ u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
+ u32 xfer_count = 0, sense_count = 0, resp_data = 0;
+ u16 dev_handle = 0xFFFF;
+ struct scsi_sense_hdr sshdr;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u8 throttle_enabled_dev = 0;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
+ *reply_dma);
+ if (!scsi_reply) {
+ panic("%s: scsi_reply is NULL, this shouldn't happen\n",
+ mrioc->name);
+ goto out;
+ }
+ host_tag = le16_to_cpu(scsi_reply->host_tag);
+ ioc_status = le16_to_cpu(scsi_reply->ioc_status);
+ scsi_status = scsi_reply->scsi_status;
+ scsi_state = scsi_reply->scsi_state;
+ dev_handle = le16_to_cpu(scsi_reply->dev_handle);
+ sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
+ xfer_count = le32_to_cpu(scsi_reply->transfer_count);
+ sense_count = le32_to_cpu(scsi_reply->sense_count);
+ resp_data = le32_to_cpu(scsi_reply->response_data);
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
+ panic("%s: Ran out of sense buffers\n", mrioc->name);
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+ scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
+ if (!scmd) {
+ panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
+ mrioc->name, host_tag);
+ goto out;
+ }
+ priv = scsi_cmd_priv(scmd);
+
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (stgt_priv_data) {
+ tg = stgt_priv_data->throttle_group;
+ throttle_enabled_dev =
+ stgt_priv_data->io_throttle_enabled;
+ }
+ }
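+ /*
+ * Large-IO throttling: subtract this IO's block count from the
+ * pending counters and clear I/O diversion once the pending data
+ * drops below the low watermarks.
+ */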
+ if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
+ throttle_enabled_dev)) {
+ ioc_pend_data_len = atomic_sub_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (tg) {
+ tg_pend_data_len = atomic_sub_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (tg->io_divert && ((ioc_pend_data_len <=
+ mrioc->io_throttle_low) &&
+ (tg_pend_data_len <= tg->low))) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ } else {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+ }
+ } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
+ ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
+ if (!tg) {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+
+ } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
+ tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
+ if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ }
+ }
+
+ if (success_desc) {
+ scmd->result = DID_OK << 16;
+ goto out_success;
+ }
+
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
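+ /*
+ * A zero-transfer underrun with BUSY, RESERVATION CONFLICT or
+ * TASK SET FULL carries a valid SCSI status, so report success
+ * at the transport level and let the SCSI status through.
+ */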
+ if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
+ xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
+ scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
+ ioc_status = MPI3_IOCSTATUS_SUCCESS;
+
+ if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
+ sense_buf) {
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
+
+ memcpy(scmd->sense_buffer, sense_buf, sz);
+ }
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_BUSY:
+ case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_count == 0) || (scmd->underflow > xfer_count))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
+ break;
+ if (xfer_count < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ fallthrough;
+ case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI3_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ mpi3mr_map_eedp_error(scmd, ioc_status);
+ break;
+ case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FUNCTION:
+ case MPI3_IOCSTATUS_INVALID_SGL:
+ case MPI3_IOCSTATUS_INTERNAL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FIELD:
+ case MPI3_IOCSTATUS_INVALID_STATE:
+ case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
+
+ if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
+ (scmd->cmnd[0] != ATA_16)) {
+ ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
+ scmd->result);
+ scsi_print_command(scmd);
+ ioc_info(mrioc,
+ "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
+ __func__, dev_handle, ioc_status, ioc_loginfo,
+ priv->req_q_idx + 1);
+ ioc_info(mrioc,
+ " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
+ host_tag, scsi_state, scsi_status, xfer_count, resp_data);
+ if (sense_buf) {
+ scsi_normalize_sense(sense_buf, sense_count, &sshdr);
+ ioc_info(mrioc,
+ "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
+ __func__, sense_count, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
+ }
+ }
+out_success:
+ if (priv->meta_sg_valid) {
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ }
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scsi_done(scmd);
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
+/**
+ * mpi3mr_get_chain_idx - get free chain buffer index
+ * @mrioc: Adapter instance reference
+ *
+ * Try to get a free chain buffer index from the free pool.
+ *
+ * Return: -1 on failure or the free chain buffer index
+ */
+static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
+{
+ u8 retry_count = 5;
+ int cmd_idx = -1;
+
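+ /* retry a few times under the lock before giving up on a slot */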
+ do {
+ spin_lock(&mrioc->chain_buf_lock);
+ cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
+ mrioc->chain_buf_count);
+ if (cmd_idx < mrioc->chain_buf_count) {
+ set_bit(cmd_idx, mrioc->chain_bitmap);
+ spin_unlock(&mrioc->chain_buf_lock);
+ break;
+ }
+ spin_unlock(&mrioc->chain_buf_lock);
+ cmd_idx = -1;
+ } while (retry_count--);
+ return cmd_idx;
+}
+
+/**
+ * mpi3mr_prepare_sg_scmd - build scatter gather list
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function maps the SCSI command's data and protection SGEs
+ * to MPI request SGEs. If required, an additional 4K chain buffer
+ * is used to send the SGEs.
+ *
+ * Return: 0 on success, -ENOMEM on dma_map_sg failure
+ */
+static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_length;
+ int sges_left, chain_idx;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 last_chain_sgl_flags;
+ struct chain_element *chain_req;
+ struct scmd_priv *priv = NULL;
+ u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
+ MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
+
+ priv = scsi_cmd_priv(scmd);
+
+ simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI3_SGE_FLAGS_END_OF_LIST;
+ last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+
+ if (meta_sg)
+ sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
+ else
+ sg_local = &scsiio_req->sgl;
+
+ if (!scsiio_req->data_length && !meta_sg) {
+ mpi3mr_build_zero_len_sge(sg_local);
+ return 0;
+ }
+
+ if (meta_sg) {
+ sg_scmd = scsi_prot_sglist(scmd);
+ sges_left = dma_map_sg(&mrioc->pdev->dev,
+ scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd),
+ scmd->sc_data_direction);
+ priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
+ } else {
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ }
+
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+ if (sges_left > MPI3MR_SG_DEPTH) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map returned unsupported sge count %d!\n",
+ sges_left);
+ return -ENOMEM;
+ }
+
+ sges_in_segment = (mrioc->facts.op_req_sz -
+ offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
+
+ if (scsiio_req->sgl[0].eedp.flags ==
+ MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_in_segment--;
+ /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
+ }
+
+ if (scsiio_req->msg_flags ==
+ MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
+ sges_in_segment--;
+ /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
+ }
+
+ if (meta_sg)
+ sges_in_segment = 1;
+
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ sges_in_segment--;
+ }
+
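+ /*
+ * The remaining SGEs do not fit in the request frame; place them
+ * in a chain buffer referenced by a last-chain SGE.
+ */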
+ chain_idx = mpi3mr_get_chain_idx(mrioc);
+ if (chain_idx < 0)
+ return -1;
+ chain_req = &mrioc->chain_sgl_list[chain_idx];
+ if (meta_sg)
+ priv->meta_chain_idx = chain_idx;
+ else
+ priv->chain_idx = chain_idx;
+
+ chain = chain_req->addr;
+ chain_dma = chain_req->dma_addr;
+ sges_in_segment = sges_left;
+ chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
+
+ mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
+ chain_length, chain_dma);
+
+ sg_local = chain;
+
+fill_in_last_segment:
+ while (sges_left > 0) {
+ if (sges_left == 1)
+ mpi3mr_add_sg_single(sg_local,
+ simple_sgl_flags_last, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function calls mpi3mr_prepare_sg_scmd for constructing
+ * both data SGEs and protection information SGEs in the MPI
+ * format from the SCSI command as appropriate.
+ *
+ * Return: return value of mpi3mr_prepare_sg_scmd.
+ */
+static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ int ret;
+
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ if (ret)
+ return ret;
+
+ if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
+ /* There is a valid meta sg */
+ scsiio_req->flags |=
+ cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ }
+
+ return ret;
+}
+
+/**
+ * mpi3mr_tm_response_name - get TM response as a string
+ * @resp_code: TM response code
+ *
+ * Converts a known task management response code to a readable
+ * string.
+ *
+ * Return: response code string.
+ */
+static const char *mpi3mr_tm_response_name(u8 resp_code)
+{
+ char *desc;
+
+ switch (resp_code) {
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
+ desc = "invalid LUN";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
+ desc = "task management request denied by NVMe device";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ return desc;
+}
+
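+/**
+ * mpi3mr_poll_pend_io_completions - Process pending IOs
+ * @mrioc: Adapter instance reference
+ *
+ * Polls all operational reply queues and processes any pending
+ * completions.
+ *
+ * Return: Nothing
+ */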
+inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ int num_of_reply_queues =
+ mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
+
+ for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
+ mpi3mr_process_op_reply_q(mrioc,
+ mrioc->intr_info[i].op_reply_q);
+}
+
+/**
+ * mpi3mr_issue_tm - Issue Task Management request
+ * @mrioc: Adapter instance reference
+ * @tm_type: Task Management type
+ * @handle: Device handle
+ * @lun: lun ID
+ * @htag: Host tag of the TM request
+ * @timeout: TM timeout value
+ * @drv_cmd: Internal command tracker
+ * @resp_code: Response code placeholder
+ * @scmd: SCSI command
+ *
+ * Issues a task management request to the controller for the
+ * specified target, LUN and command, waits for its completion
+ * and checks the TM response. Recovers from a TM timeout by
+ * issuing a controller reset.
+ *
+ * Return: 0 on success, non-zero on errors
+ */
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scsi_cmnd *scmd)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct scmd_priv *cmd_priv = NULL;
+ struct scsi_device *sdev = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+
+ ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
+ __func__, tm_type, handle);
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
+ __func__);
+ goto out;
+ }
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ mutex_lock(&drv_cmd->mutex);
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 1;
+ drv_cmd->callback = NULL;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = tm_type;
+ tm_req.host_tag = cpu_to_le16(htag);
+
+ int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+
+ if (scmd) {
+ sdev = scmd->device;
+ sdev_priv_data = sdev->hostdata;
+ scsi_tgt_priv_data = ((sdev_priv_data) ?
+ sdev_priv_data->tgt_priv_data : NULL);
+ } else {
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ }
+
+ if (scsi_tgt_priv_data)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+
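+	/*
+	 * For PCIe devices, override the caller supplied timeout with
+	 * the firmware reported device specific abort/reset timeout,
+	 * when one is available.
+	 */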
+ if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
+ if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
+ timeout = tgtdev->dev_spec.pcie_inf.abort_to;
+ else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
+ timeout = tgtdev->dev_spec.pcie_inf.reset_to;
+ }
+
+ init_completion(&drv_cmd->done);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
+
+ if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
+ drv_cmd->is_waiting = 0;
+ retval = -1;
+ if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
+ dprint_tm(mrioc,
+ "task management request timed out after %ld seconds\n",
+ timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_TM)
+ dprint_dump_req(&tm_req, sizeof(tm_req)/4);
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ }
+ goto out_unlock;
+ }
+
+ if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
+ dprint_tm(mrioc, "invalid task management reply message\n");
+ retval = -1;
+ goto out_unlock;
+ }
+
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ switch (drv_cmd->ioc_status) {
+ case MPI3_IOCSTATUS_SUCCESS:
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
+ break;
+ default:
+ dprint_tm(mrioc,
+ "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ switch (*resp_code) {
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
+ if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ dprint_tm(mrioc,
+ "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
+ tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count),
+ mpi3mr_tm_response_name(*resp_code), *resp_code);
+
+ if (!retval) {
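+		/*
+		 * Drain the operational reply queues once with interrupts
+		 * disabled and once more after re-enabling them, so that
+		 * completions racing with the interrupt toggle are not
+		 * left unprocessed.
+		 */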
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ }
+ switch (tm_type) {
+ case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!scsi_tgt_priv_data)
+ break;
+ scsi_tgt_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_tgt_pending,
+ (void *)scsi_tgt_priv_data->starget);
+ break;
+ case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!sdev_priv_data)
+ break;
+ sdev_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_dev_pending, (void *)sdev);
+ break;
+ default:
+ break;
+ }
+
+out_unlock:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&drv_cmd->mutex);
+ if (scsi_tgt_priv_data)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_bios_param - BIOS param callback
+ * @sdev: SCSI device reference
+ * @bdev: Block device reference
+ * @capacity: Capacity in logical sectors
+ * @params: Parameter array
+ *
+ * Sets the BIOS parameters with heads/sectors/cylinders.
+ *
+ * Return: 0 always
+ */
+static int mpi3mr_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
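+	/*
+	 * For disks of 1GB (0x200000 512-byte sectors) or larger, use
+	 * the extended 255 heads/63 sectors-per-track translation.
+	 */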
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+ return 0;
+}
+
+/**
+ * mpi3mr_map_queues - Map queues callback handler
+ * @shost: SCSI host reference
+ *
+ * Maps the default and poll queues.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int i, qoff, offset;
+ struct blk_mq_queue_map *map = NULL;
+
+ offset = mrioc->op_reply_q_offset;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = mrioc->default_qcount;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = mrioc->active_poll_qcount;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /*
+ * The poll queue(s) doesn't have an IRQ (and hence IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
+}
+
+/**
+ * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
+ * @mrioc: Adapter instance reference
+ *
+ * Calculate the pending I/Os for the controller and return.
+ *
+ * Return: Number of pending I/Os
+ */
+static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ uint pend_ios = 0;
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
+ return pend_ios;
+}
+
+/**
+ * mpi3mr_print_pending_host_io - print pending I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Prints the number of pending I/Os and the details of each I/O
+ * prior to reset, for debugging purposes.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
+ __func__, mpi3mr_get_fw_pending_ios(mrioc));
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_print_scmd, (void *)mrioc);
+}
+
+/**
+ * mpi3mr_wait_for_host_io - block for I/Os to complete
+ * @mrioc: Adapter instance reference
+ * @timeout: Timeout in seconds
+ *
+ * Waits for pending I/Os of the given adapter to complete or
+ * to hit the timeout.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
+{
+ enum mpi3mr_iocstate iocstate;
+ int i = 0;
+
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ return;
+
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ return;
+ ioc_info(mrioc,
+ "%s :Waiting for %d seconds prior to reset for %d I/O\n",
+ __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
+
+ for (i = 0; i < timeout; i++) {
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ break;
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ break;
+ msleep(1000);
+ }
+
+ ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
+ mpi3mr_get_fw_pending_ios(mrioc));
+}
+
+/**
+ * mpi3mr_eh_host_reset - Host reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a controller reset if the scmd is for a physical
+ * device. If the scmd is for a RAID volume, waits for
+ * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checks whether any I/Os
+ * are pending prior to issuing a reset to the controller.
+ *
+ * Return: SUCCESS on successful reset, else FAILED
+ */
+static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
+ int retval = FAILED, ret;
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_type = stgt_priv_data->dev_type;
+ }
+
+ if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
+ mpi3mr_wait_for_host_io(mrioc,
+ MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
+ if (!mpi3mr_get_fw_pending_ios(mrioc)) {
+ retval = SUCCESS;
+ goto out;
+ }
+ }
+
+ mpi3mr_print_pending_host_io(mrioc);
+ ret = mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EH_HOS, 1);
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Host reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_target_reset - Target reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a target reset Task Management request, verifies that
+ * the scmd is terminated successfully, and returns status
+ * accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd, else
+ * FAILED
+ */
+static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Target Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "Target Reset is issued to handle(0x%04x)\n",
+ dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (stgt_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target has %d pending commands, target reset is failed\n",
+ mrioc->name, stgt_priv_data->pend_count);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target reset is %s for scmd(%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_dev_reset - Device reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a LUN reset Task Management request, verifies that the
+ * scmd is terminated successfully, and returns status
+ * accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd, else
+ * FAILED
+ */
+static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (sdev_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device has %d pending commands, device(LUN) reset is failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_scan_start - Scan start callback handler
+ * @shost: SCSI host reference
+ *
+ * Issue port enable request asynchronously.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_scan_start(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ mrioc->scan_started = 1;
+ ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
+ if (mpi3mr_issue_port_enable(mrioc, 1)) {
+ ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
+ mrioc->scan_started = 0;
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ }
+}
+
+/**
+ * mpi3mr_scan_finished - Scan finished callback handler
+ * @shost: SCSI host reference
+ * @time: Jiffies from the scan start
+ *
+ * Checks whether the port enable has completed, timed out, or
+ * failed, and sets the scan status accordingly after performing
+ * any required recovery.
+ *
+ * Return: 1 if the scan has finished or timed out, 0 if it is
+ * still in progress
+ */
+static int mpi3mr_scan_finished(struct Scsi_Host *shost,
+ unsigned long time)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+ u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ ioc_err(mrioc, "port enable failed due to fault or reset\n");
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ }
+
+ if (time >= (pe_timeout * HZ)) {
+ ioc_err(mrioc, "port enable failed due to time out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ }
+
+ if (mrioc->scan_started)
+ return 0;
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+	} else {
+		ioc_info(mrioc, "port enable is successfully completed\n");
+	}
+
+ mpi3mr_start_watchdog(mrioc);
+ mrioc->is_driver_loading = 0;
+ mrioc->stop_bsgs = 0;
+ return 1;
+}
+
+/**
+ * mpi3mr_slave_destroy - Slave destroy callback handler
+ * @sdev: SCSI device reference
+ *
+ * Cleans up and frees the per-device (LUN) private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
+ unsigned long flags;
+ struct scsi_target *starget;
+ struct sas_rphy *rphy = NULL;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ scsi_tgt_priv_data->num_luns--;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
+ if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
+ tgt_dev->starget = NULL;
+ if (tgt_dev)
+ mpi3mr_tgtdev_put(tgt_dev);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_target_destroy - Target destroy callback handler
+ * @starget: SCSI target reference
+ *
+ * Cleans up and frees the per-target private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+
+ if (!starget->hostdata)
+ return;
+
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
+ if (tgt_dev && (tgt_dev->starget == starget) &&
+ (tgt_dev->perst_id == starget->id))
+ tgt_dev->starget = NULL;
+ if (tgt_dev) {
+ scsi_tgt_priv_data->tgt_dev = NULL;
+ scsi_tgt_priv_data->perst_id = 0;
+ mpi3mr_tgtdev_put(tgt_dev);
+ mpi3mr_tgtdev_put(tgt_dev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_slave_configure - Slave configure callback handler
+ * @sdev: SCSI device reference
+ *
+ * Configures the queue depth, max hardware sectors and virt
+ * boundary as required.
+ *
+ * Return: 0 on success, -ENXIO if the target device is not found.
+ */
+static int mpi3mr_slave_configure(struct scsi_device *sdev)
+{
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
+ unsigned long flags;
+ int retval = 0;
+ struct sas_rphy *rphy = NULL;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (!tgt_dev)
+ return -ENXIO;
+
+ mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
+
+ sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
+
+ switch (tgt_dev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+		/* The block layer hw sector size = 512 */
+ if ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
+ if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ }
+ break;
+ default:
+ break;
+ }
+
+ mpi3mr_tgtdev_put(tgt_dev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_slave_alloc - Slave alloc callback handler
+ * @sdev: SCSI device reference
+ *
+ * Allocates the per-device (LUN) private data and initializes it.
+ *
+ * Return: 0 on success, -ENXIO if the device is not found,
+ * -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
+ struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
+ unsigned long flags;
+ struct scsi_target *starget;
+ int retval = 0;
+ struct sas_rphy *rphy = NULL;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
+ if (tgt_dev) {
+ if (tgt_dev->starget == NULL)
+ tgt_dev->starget = starget;
+ mpi3mr_tgtdev_put(tgt_dev);
+ retval = 0;
+ } else {
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return -ENXIO;
+ }
+
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
+ if (!scsi_dev_priv_data)
+ return -ENOMEM;
+
+ scsi_dev_priv_data->lun_id = sdev->lun;
+ scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
+ sdev->hostdata = scsi_dev_priv_data;
+
+ scsi_tgt_priv_data->num_luns++;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_target_alloc - Target alloc callback handler
+ * @starget: SCSI target reference
+ *
+ * Allocates the per-target private data and initializes it.
+ *
+ * Return: 0 on success, -ENXIO if the device is not found,
+ * -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ int retval = 0;
+ struct sas_rphy *rphy = NULL;
+ bool update_stgt_priv_data = false;
+
+ scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
+ if (!scsi_tgt_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = scsi_tgt_priv_data;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ if (starget->channel == mrioc->scsi_device_channel) {
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden)
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ } else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ }
+
+ if (update_stgt_priv_data) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgt_dev->io_throttle_enabled;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group =
+ tgt_dev->dev_spec.vd_inf.tg;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_return_unmap - Whether an unmap is allowed
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI Command reference
+ *
+ * The controller hardware cannot handle certain UNMAP commands
+ * for NVMe drives. This routine checks for those and, when one
+ * is found, completes the SCSI command with the proper status
+ * and sense data and returns true.
+ *
+ * Return: TRUE if the unmap is not allowed, FALSE otherwise.
+ */
+static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char *buf;
+ u16 param_len, desc_len, trunc_param_len;
+
+ trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
+
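+	/*
+	 * Controllers with a non-zero PCI revision only need the UNMAP
+	 * parameter list length truncated down to a 16-byte aligned
+	 * value; the full validation below applies to revision 0 parts.
+	 */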
+ if (mrioc->pdev->revision) {
+ if ((param_len > 24) && ((param_len - 8) & 0xF)) {
+ trunc_param_len -= (param_len - 8) & 0xF;
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ dprint_scsi_err(mrioc,
+ "truncating param_len from (%d) to (%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ }
+ return false;
+ }
+
+ if (!param_len) {
+ ioc_warn(mrioc,
+ "%s: cdb received with zero parameter length\n",
+ __func__);
+ scsi_print_command(scmd);
+ scmd->result = DID_OK << 16;
+ scsi_done(scmd);
+ return true;
+ }
+
+ if (param_len < 24) {
+ ioc_warn(mrioc,
+ "%s: cdb received with invalid param_len: %d\n",
+ __func__, param_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scsi_done(scmd);
+ return true;
+ }
+ if (param_len != scsi_bufflen(scmd)) {
+ ioc_warn(mrioc,
+ "%s: cdb received with param_len: %d bufflen: %d\n",
+ __func__, param_len, scsi_bufflen(scmd));
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scsi_done(scmd);
+ return true;
+ }
+ buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
+ if (!buf) {
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x55, 0x03);
+ scsi_done(scmd);
+ return true;
+ }
+ scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
+ desc_len = get_unaligned_be16(&buf[2]);
+
+ if (desc_len < 16) {
+ ioc_warn(mrioc,
+ "%s: Invalid descriptor length in param list: %d\n",
+ __func__, desc_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x26, 0);
+ scsi_done(scmd);
+ kfree(buf);
+ return true;
+ }
+
+ if (param_len > (desc_len + 8)) {
+ trunc_param_len = desc_len + 8;
+ scsi_print_command(scmd);
+ dprint_scsi_err(mrioc,
+ "truncating param_len(%d) to desc_len+8(%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ scsi_print_command(scmd);
+ }
+
+ kfree(buf);
+ return false;
+}
+
+/**
+ * mpi3mr_allow_scmd_to_fw - Check if a command is allowed during shutdown
+ * @scmd: SCSI Command reference
+ *
+ * Checks whether a CDB is allowed to be sent to the firmware
+ * during shutdown or not.
+ *
+ * Return: TRUE for allowed commands, FALSE otherwise.
+ */
+inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * mpi3mr_qcmd - I/O request dispatcher
+ * @shost: SCSI Host reference
+ * @scmd: SCSI Command reference
+ *
+ * Issues the SCSI Command as an MPI3 request.
+ *
+ * Return: 0 on successful queueing of the request or if the
+ * request is completed with failure.
+ * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
+ * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
+ */
+static int mpi3mr_qcmd(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *scmd_priv_data = NULL;
+ struct mpi3_scsi_io_request *scsiio_req = NULL;
+ struct op_req_qinfo *op_req_q = NULL;
+ int retval = 0;
+ u16 dev_handle;
+ u16 host_tag;
+ u32 scsiio_flags = 0, data_len_blks = 0;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ int iprio_class;
+ u8 is_pcie_dev = 0;
+ u32 tracked_io_sz = 0;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+
+ if (mrioc->unrecoverable) {
+ scmd->result = DID_ERROR << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->stop_drv_processing &&
+ !(mpi3mr_allow_scmd_to_fw(scmd))) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->reset_in_progress) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ dev_handle = stgt_priv_data->dev_handle;
+ if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ if (stgt_priv_data->dev_removed) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ is_pcie_dev = 1;
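+	/* screen out UNMAP commands the controller cannot handle for NVMe devices */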
+ if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
+ (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
+ mpi3mr_check_return_unmap(mrioc, scmd))
+ goto out;
+
+ host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
+ if (host_tag == MPI3MR_HOSTTAG_INVALID) {
+ scmd->result = DID_ERROR << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
+ else
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
+
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
+
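+	/* elevate the command priority of RT class I/O for NCQ priority enabled devices */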
+ if (sdev_priv_data->ncq_prio_enable) {
+ iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (iprio_class == IOPRIO_CLASS_RT)
+ scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
+ }
+
+ if (scmd->cmd_len > 16)
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
+
+ scmd_priv_data = scsi_cmd_priv(scmd);
+ memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
+ scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
+ scsiio_req->host_tag = cpu_to_le16(host_tag);
+
+ mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->dev_handle = cpu_to_le16(dev_handle);
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
+ int_to_scsilun(sdev_priv_data->lun_id,
+ (struct scsi_lun *)scsiio_req->lun);
+
+ if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
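+	/* transfer length in 512-byte blocks for the I/O throttling checks */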
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ if ((data_len_blks >= mrioc->io_throttle_data_length) &&
+ stgt_priv_data->io_throttle_enabled) {
+ tracked_io_sz = data_len_blks;
+ tg = stgt_priv_data->throttle_group;
+ if (tg) {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ tg_pend_data_len = atomic_add_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (!tg->io_divert && ((ioc_pend_data_len >=
+ mrioc->io_throttle_high) ||
+ (tg_pend_data_len >= tg->high))) {
+ tg->io_divert = 1;
+ tg->need_qd_reduction = 1;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
+ tg, 1);
+ mpi3mr_queue_qd_reduction_event(mrioc, tg);
+ }
+ } else {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (ioc_pend_data_len >= mrioc->io_throttle_high)
+ stgt_priv_data->io_divert = 1;
+ }
+ }
+
+ if (stgt_priv_data->io_divert) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
+ }
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
+
+ if (mpi3mr_op_request_post(mrioc, op_req_q,
+ scmd_priv_data->mpi3mr_scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ if (tracked_io_sz) {
+ atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
+ if (tg)
+ atomic_sub(tracked_io_sz,
+ &tg->pend_large_data_sz);
+ }
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+static struct scsi_host_template mpi3mr_driver_template = {
+ .module = THIS_MODULE,
+ .name = "MPI3 Storage Controller",
+ .proc_name = MPI3MR_DRIVER_NAME,
+ .queuecommand = mpi3mr_qcmd,
+ .target_alloc = mpi3mr_target_alloc,
+ .slave_alloc = mpi3mr_slave_alloc,
+ .slave_configure = mpi3mr_slave_configure,
+ .target_destroy = mpi3mr_target_destroy,
+ .slave_destroy = mpi3mr_slave_destroy,
+ .scan_finished = mpi3mr_scan_finished,
+ .scan_start = mpi3mr_scan_start,
+ .change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_device_reset_handler = mpi3mr_eh_dev_reset,
+ .eh_target_reset_handler = mpi3mr_eh_target_reset,
+ .eh_host_reset_handler = mpi3mr_eh_host_reset,
+ .bios_param = mpi3mr_bios_param,
+ .map_queues = mpi3mr_map_queues,
+ .mq_poll = mpi3mr_blk_mq_poll,
+ .no_write_same = 1,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPI3MR_SG_DEPTH,
+	/* max xfer supported is 1M (2K in 512-byte sectors) */
+ .max_sectors = 2048,
+ .cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .max_segment_size = 0xffffffff,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct scmd_priv),
+ .shost_groups = mpi3mr_host_groups,
+ .sdev_groups = mpi3mr_dev_groups,
+};
+
+/**
+ * mpi3mr_init_drv_cmd - Initialize internal command tracker
+ * @cmdptr: Internal command tracker
+ * @host_tag: Host tag used for the specific command
+ *
+ * Initialize the internal command tracker structure with
+ * specified host tag.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
+ u16 host_tag)
+{
+ mutex_init(&cmdptr->mutex);
+ cmdptr->reply = NULL;
+ cmdptr->state = MPI3MR_CMD_NOTUSED;
+ cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ cmdptr->host_tag = host_tag;
+}
+
+/**
+ * osintfc_mrioc_security_status - Check controller secure status
+ * @pdev: PCI device instance
+ *
+ * Read the Device Serial Number capability from PCI config
+ * space and decide whether the controller is secure or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+osintfc_mrioc_security_status(struct pci_dev *pdev)
+{
+ u32 cap_data;
+ int base;
+ u32 ctlr_status;
+ u32 debug_status;
+ int retval = 0;
+
+ base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (!base) {
+ dev_err(&pdev->dev,
+ "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
+ return -1;
+ }
+
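+	/*
+	 * The second dword of the Device Serial Number capability holds
+	 * the controller security status and secure debug status bits.
+	 */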
+ pci_read_config_dword(pdev, base + 4, &cap_data);
+
+ debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
+ ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
+
+ switch (ctlr_status) {
+ case MPI3MR_INVALID_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ case MPI3MR_CONFIG_SECURE_DEVICE:
+ if (!debug_status)
+ dev_info(&pdev->dev,
+ "%s: Config secure ctlr is detected\n",
+ __func__);
+ break;
+ case MPI3MR_HARD_SECURE_DEVICE:
+ break;
+ case MPI3MR_TAMPERED_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ if (!retval && debug_status) {
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_probe - PCI probe callback
+ * @pdev: PCI device instance
+ * @id: PCI device ID details
+ *
+ * Controller initialization routine. Checks the security status
+ * of the controller and, if it is invalid or tampered, returns
+ * from the probe without initializing the controller. Otherwise,
+ * allocates the per-adapter instance through shost_priv,
+ * initializes the controller specific data structures and the
+ * controller hardware, and adds the shost to the SCSI subsystem.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct Scsi_Host *shost = NULL;
+ int retval = 0, i;
+
+ if (osintfc_mrioc_security_status(pdev)) {
+ warn_non_secure_ctlr = 1;
+ return 1; /* For Invalid and Tampered device */
+ }
+
+ shost = scsi_host_alloc(&mpi3mr_driver_template,
+ sizeof(struct mpi3mr_ioc));
+ if (!shost) {
+ retval = -ENODEV;
+ goto shost_failed;
+ }
+
+ mrioc = shost_priv(shost);
+ mrioc->id = mrioc_ids++;
+ sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
+ sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
+ INIT_LIST_HEAD(&mrioc->list);
+ spin_lock(&mrioc_list_lock);
+ list_add_tail(&mrioc->list, &mrioc_list);
+ spin_unlock(&mrioc_list_lock);
+
+ spin_lock_init(&mrioc->admin_req_lock);
+ spin_lock_init(&mrioc->reply_free_queue_lock);
+ spin_lock_init(&mrioc->sbq_lock);
+ spin_lock_init(&mrioc->fwevt_lock);
+ spin_lock_init(&mrioc->tgtdev_lock);
+ spin_lock_init(&mrioc->watchdog_lock);
+ spin_lock_init(&mrioc->chain_buf_lock);
+ spin_lock_init(&mrioc->sas_node_lock);
+
+ INIT_LIST_HEAD(&mrioc->fwevt_list);
+ INIT_LIST_HEAD(&mrioc->tgtdev_list);
+ INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+ INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
+ INIT_LIST_HEAD(&mrioc->sas_expander_list);
+ INIT_LIST_HEAD(&mrioc->hba_port_table_list);
+ INIT_LIST_HEAD(&mrioc->enclosure_list);
+
+ mutex_init(&mrioc->reset_mutex);
+ mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
+ mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
+ mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
+ MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
+ MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+
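+	/* enable segmented queues only on controllers with a non-zero PCI revision */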
+ if (pdev->revision)
+ mrioc->enable_segqueue = true;
+
+ init_waitqueue_head(&mrioc->reset_waitq);
+ mrioc->logging_level = logging_level;
+ mrioc->shost = shost;
+ mrioc->pdev = pdev;
+ mrioc->stop_bsgs = 1;
+
+ /* init shost parameters */
+ shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
+ shost->max_lun = -1;
+ shost->unique_id = mrioc->id;
+
+ shost->max_channel = 0;
+ shost->max_id = 0xFFFFFFFF;
+
+ shost->host_tagset = 1;
+
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else {
+ prot_mask = SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION;
+ scsi_host_set_prot(shost, prot_mask);
+ }
+
+ ioc_info(mrioc,
+ "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
+ __func__,
+ (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+ (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+ (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+ (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+ (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+ (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+ (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+
+ if (prot_guard_mask)
+ scsi_host_set_guard(shost, (prot_guard_mask & 3));
+ else
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
+ "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
+ mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
+ mrioc->fwevt_worker_name, 0);
+ if (!mrioc->fwevt_worker_thread) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto fwevtthread_failed;
+ }
+
+ mrioc->is_driver_loading = 1;
+ mrioc->cpu_count = num_online_cpus();
+ if (mpi3mr_setup_resources(mrioc)) {
+ ioc_err(mrioc, "setup resources failed\n");
+ retval = -ENODEV;
+ goto resource_alloc_failed;
+ }
+ if (mpi3mr_init_ioc(mrioc)) {
+ ioc_err(mrioc, "initializing IOC failed\n");
+ retval = -ENODEV;
+ goto init_ioc_failed;
+ }
+
+ shost->nr_hw_queues = mrioc->num_op_reply_q;
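+	/* expose the default, read and poll queue maps when poll queues are active */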
+ if (mrioc->active_poll_qcount)
+ shost->nr_maps = 3;
+
+ shost->can_queue = mrioc->max_host_ios;
+ shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->max_id = mrioc->facts.max_perids + 1;
+
+ retval = scsi_add_host(shost, &pdev->dev);
+ if (retval) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto addhost_failed;
+ }
+
+ scsi_scan_host(shost);
+ mpi3mr_bsg_init(mrioc);
+ return retval;
+
+addhost_failed:
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+init_ioc_failed:
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+resource_alloc_failed:
+ destroy_workqueue(mrioc->fwevt_worker_thread);
+fwevtthread_failed:
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+ scsi_host_put(shost);
+shost_failed:
+ return retval;
+}
+
+/**
+ * mpi3mr_remove - PCI remove callback
+ * @pdev: PCI device instance
+ *
+ * Cleans up the IOC by issuing MUR and shutdown notification.
+ * Frees up all memory and resources associated with the
+ * controller and target devices, and unregisters the shost.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+
+ mpi3mr_bsg_exit(mrioc);
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ if (mrioc->sas_transport_enabled)
+ sas_remove_host(shost);
+ else
+ scsi_remove_host(shost);
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+
+ scsi_host_put(shost);
+}
+
+/**
+ * mpi3mr_shutdown - PCI shutdown callback
+ * @pdev: PCI device instance
+ *
+ * Frees up all memory and resources associated with the
+ * controller.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+}
+
+/**
+ * mpi3mr_suspend - PCI power management suspend callback
+ * @dev: Device struct
+ *
+ * Changes the power state to the given value and cleans up the
+ * IOC by issuing MUR and shutdown notification.
+ *
+ * Return: 0 always.
+ */
+static int __maybe_unused
+mpi3mr_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ scsi_block_requests(shost);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
+ mpi3mr_cleanup_resources(mrioc);
+
+ return 0;
+}
+
+/**
+ * mpi3mr_resume - PCI power management resume callback
+ * @dev: Device struct
+ *
+ * Restores the power state to D0, reinitializes the controller,
+ * and resumes I/O operations to the target devices.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int __maybe_unused
+mpi3mr_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ mrioc->pdev = pdev;
+ mrioc->cpu_count = num_online_cpus();
+ r = mpi3mr_setup_resources(mrioc);
+ if (r) {
+ ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
+ __func__, r);
+ return r;
+ }
+
+ mrioc->stop_drv_processing = 0;
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+ mpi3mr_memset_buffers(mrioc);
+ r = mpi3mr_reinit_ioc(mrioc, 1);
+ if (r) {
+ ioc_err(mrioc, "resuming controller failed[%d]\n", r);
+ return r;
+ }
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
+ scsi_unblock_requests(shost);
+ mrioc->device_refresh_on = 0;
+ mpi3mr_start_watchdog(mrioc);
+
+ return 0;
+}
+
+static const struct pci_device_id mpi3mr_pci_id_table[] = {
+ {
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+
+static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
+
+static struct pci_driver mpi3mr_pci_driver = {
+ .name = MPI3MR_DRIVER_NAME,
+ .id_table = mpi3mr_pci_id_table,
+ .probe = mpi3mr_probe,
+ .remove = mpi3mr_remove,
+ .shutdown = mpi3mr_shutdown,
+ .driver.pm = &mpi3mr_pm_ops,
+};
+
+static ssize_t event_counter_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
+}
+static DRIVER_ATTR_RO(event_counter);
+
+static int __init mpi3mr_init(void)
+{
+ int ret_val;
+
+ pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ mpi3mr_transport_template =
+ sas_attach_transport(&mpi3mr_transport_functions);
+ if (!mpi3mr_transport_template) {
+ pr_err("%s failed to load due to sas transport attach failure\n",
+ MPI3MR_DRIVER_NAME);
+ return -ENODEV;
+ }
+
+ ret_val = pci_register_driver(&mpi3mr_pci_driver);
+ if (ret_val) {
+ pr_err("%s failed to load due to pci register driver failure\n",
+ MPI3MR_DRIVER_NAME);
+ goto err_pci_reg_fail;
+ }
+
+ ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
+ &driver_attr_event_counter);
+ if (ret_val)
+ goto err_event_counter;
+
+ return ret_val;
+
+err_event_counter:
+ pci_unregister_driver(&mpi3mr_pci_driver);
+
+err_pci_reg_fail:
+ sas_release_transport(mpi3mr_transport_template);
+ return ret_val;
+}
+
+static void __exit mpi3mr_exit(void)
+{
+ if (warn_non_secure_ctlr)
+ pr_warn(
+ "Unloading %s version %s while managing a non secure controller\n",
+ MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
+ else
+ pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ driver_remove_file(&mpi3mr_pci_driver.driver,
+ &driver_attr_event_counter);
+ pci_unregister_driver(&mpi3mr_pci_driver);
+ sas_release_transport(mpi3mr_transport_template);
+}
+
+module_init(mpi3mr_init);
+module_exit(mpi3mr_exit);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
new file mode 100644
index 000000000000..3fc897336b5e
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -0,0 +1,3291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
+
+/**
+ * mpi3mr_post_transport_req - Issue transport requests and wait
+ * @mrioc: Adapter instance reference
+ * @request: Properly populated MPI3 request
+ * @request_sz: Size of the MPI3 request
+ * @reply: Pointer to return MPI3 reply
+ * @reply_sz: Size of the MPI3 reply buffer
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting MPI3 requests from the SAS
+ * transport layer using the transport command infrastructure.
+ * This blocks for the completion of the request for timeout
+ * seconds and, if the request times out, faults the controller
+ * with a proper reason code.
+ *
+ * On successful completion of the request, this function returns
+ * the appropriate ioc status from the firmware back to the
+ * caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_transport_req(struct mpi3mr_ioc *mrioc, void *request,
+ u16 request_sz, void *reply, u16 reply_sz, int timeout,
+ u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->transport_cmds.mutex);
+ if (mrioc->transport_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending transport request failed due to command in use\n");
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+ goto out;
+ }
+ mrioc->transport_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->transport_cmds.is_waiting = 1;
+ mrioc->transport_cmds.callback = NULL;
+ mrioc->transport_cmds.ioc_status = 0;
+ mrioc->transport_cmds.ioc_loginfo = 0;
+
+ init_completion(&mrioc->transport_cmds.done);
+ dprint_cfg_info(mrioc, "posting transport request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)
+ dprint_dump(request, request_sz, "transport_req");
+ retval = mpi3mr_admin_request_post(mrioc, request, request_sz, 1);
+ if (retval) {
+ ioc_err(mrioc, "posting transport request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->transport_cmds.done,
+ (timeout * HZ));
+ if (!(mrioc->transport_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT);
+ ioc_err(mrioc, "transport request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->transport_cmds.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_transport_err(mrioc,
+ "transport request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->transport_cmds.ioc_loginfo);
+
+ if ((reply) && (mrioc->transport_cmds.state & MPI3MR_CMD_REPLY_VALID))
+ memcpy((u8 *)reply, mrioc->transport_cmds.reply, reply_sz);
+
+out_unlock:
+ mrioc->transport_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mpi3mr_report_manufacture - obtain SMP report_manufacture
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the expander device
+ * @edev: SAS transport layer sas_expander_device object
+ * @port_id: ID of the HBA port
+ *
+ * Fills in the sas_expander_device with manufacturing info.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc = 0;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u8 *tmp;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
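+	/*
+	 * A single coherent buffer holds both the SMP request and the
+	 * reply; the reply area begins immediately after the request.
+	 */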
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev,
+ data_out_sz + data_in_sz, &data_out_dma, GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ manufacture_reply = data_out + data_out_sz;
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) port_id;
+ mpi_request.sas_address = cpu_to_le64(sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending report manufacturer SMP request to sas_address(0x%016llx), port(%d)\n",
+ (unsigned long long)sas_address, port_id);
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "report manufacturer SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "report manufacturer - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct rep_manu_reply)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strscpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strscpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strscpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
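+		/* component_id is carried big-endian in the SMP reply */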
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ return rc;
+}
+
+/**
+ * __mpi3mr_expander_find_by_handle - expander search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the expander
+ *
+ * Context: The caller should acquire sas_node_lock
+ *
+ * This searches for an expander device based on its handle and
+ * returns the sas_node object.
+ *
+ * Return: Expander sas_node object reference or NULL
+ */
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpi3mr_is_expander_device - check if the device is an expander
+ * @device_info: Bitfield providing information about the device
+ *
+ * Return: 1 if the device is an expander device, else 0.
+ */
+u8 mpi3mr_is_expander_device(u16 device_info)
+{
+ if ((device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) ==
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * mpi3mr_get_sas_address - retrieve sas_address for handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @sas_address: Address to hold sas address
+ *
+ * This function issues a device page0 read for the given device
+ * handle, retrieves the SAS address and returns it through
+ * @sas_address.
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int mpi3mr_get_sas_address(struct mpi3mr_ioc *mrioc, u16 handle,
+ u64 *sas_address)
+{
+ struct mpi3_device_page0 dev_pg0;
+ u16 ioc_status;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ *sas_address = 0;
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (le16_to_cpu(dev_pg0.flags) &
+ MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE)
+ *sas_address = mrioc->sas_hba.sas_address;
+ else if (dev_pg0.device_form == MPI3_DEVICE_DEVFORM_SAS_SATA) {
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ *sas_address = le64_to_cpu(sasinf->sas_address);
+ } else {
+ ioc_err(mrioc, "%s: device_form(%d) is not SAS_SATA\n",
+ __func__, dev_pg0.device_form);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for a target device matching the given SAS
+ * address and hba_port pointer and returns the mpi3mr_tgt_dev
+ * object with an incremented reference count.
+ *
+ * Return: Valid tgt_dev reference or NULL
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.hba_port == hba_port))
+ goto found_device;
+ return NULL;
+found_device:
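+ /* hold a reference for the caller; dropped via mpi3mr_tgtdev_put() */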
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for a target device matching the given SAS
+ * address and hba_port pointer and returns the mpi3mr_tgt_dev
+ * object with an incremented reference count.
+ *
+ * Context: This function acquires tgtdev_lock and releases it
+ * before returning the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev reference or NULL
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+
+ if (!hba_port)
+ goto out;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+out:
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_device_by_sas_address - remove the device
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device using the sas address and
+ * hba_port pointer and then removes it from the OS.
+ *
+ * Return: None
+ */
+static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ u8 was_on_tgtdev_list = 0;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc,
+ sas_address, hba_port);
+ if (tgtdev) {
+ if (!list_empty(&tgtdev->list)) {
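+ /* drop the list's reference; the lookup reference is dropped below */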
+ list_del_init(&tgtdev->list);
+ was_on_tgtdev_list = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (was_on_tgtdev_list) {
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr_and_rphy - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @rphy: SAS transport layer rphy object
+ *
+ * This searches for a target device matching the given SAS
+ * address and rphy pointer and returns the mpi3mr_tgt_dev object
+ * with an incremented reference count.
+ *
+ * Return: Valid tgt_dev reference or NULL
+ */
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.rphy == rphy))
+ goto found_device;
+ return NULL;
+found_device:
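+ /* hold a reference for the caller; dropped via mpi3mr_tgtdev_put() */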
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_expander_find_by_sas_address - sas expander search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander
+ * @hba_port: HBA port entry
+ *
+ * Return: A valid SAS expander node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *mpi3mr_expander_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander, *r = NULL;
+
+ if (!hba_port)
+ goto out;
+
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if ((sas_expander->sas_address != sas_address) ||
+ (sas_expander->hba_port != hba_port))
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * __mpi3mr_sas_node_find_by_sas_address - sas node search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander or sas host
+ * @hba_port: HBA port entry
+ * Context: Caller should acquire mrioc->sas_node_lock.
+ *
+ * If the SAS address matches the controller's own SAS address,
+ * i.e. the device is directly attached, the SAS node associated
+ * with the controller is returned; otherwise the SAS address and
+ * hba port are used to identify the exact expander and the
+ * associated sas_node object is returned. If there is no match,
+ * NULL is returned.
+ *
+ * Return: A valid SAS node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *__mpi3mr_sas_node_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+
+ if (mrioc->sas_hba.sas_address == sas_address)
+ return &mrioc->sas_hba;
+ return mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+}
+
+/**
+ * mpi3mr_parent_present - Is parent present for a phy
+ * @mrioc: Adapter instance reference
+ * @phy: SAS transport layer phy object
+ *
+ * Return: 0 if parent is present else non-zero
+ */
+static int mpi3mr_parent_present(struct mpi3mr_ioc *mrioc, struct sas_phy *phy)
+{
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ if (__mpi3mr_sas_node_find_by_sas_address(mrioc,
+ phy->identify.sas_address,
+ hba_port) == NULL) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return 0;
+}
+
+/**
+ * mpi3mr_convert_phy_link_rate - convert MPI link rate
+ * @link_rate: link rate as defined in the MPI header
+ *
+ * Convert link_rate from mpi format into sas_transport layer
+ * form.
+ *
+ * Return: A valid SAS transport layer defined link rate
+ */
+static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI3_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ default:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * mpi3mr_delete_sas_phy - Remove a single phy from port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mr_sas_phy->phy_id);
+
+ list_del(&mr_sas_phy->port_siblings);
+ mr_sas_port->num_phys--;
+ mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
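+ /* if the deleted phy was the lowest, recompute from the mask */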
+ if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * mpi3mr_add_sas_phy - Adding a single phy to a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "add: sas_address(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mr_sas_phy->phy_id);
+
+ list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
+ if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * mpi3mr_add_phy_to_an_existing_port - add phy to existing port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ * @sas_address: SAS address of the device/expander where the phy
+ * needs to be added
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_port *mr_sas_port;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ if (!hba_port)
+ return;
+
+ list_for_each_entry(mr_sas_port, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy == mr_sas_phy)
+ return;
+ }
+ mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy);
+ return;
+ }
+}
+
+/**
+ * mpi3mr_delete_sas_port - helper function to remove a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ struct mpi3mr_hba_port *hba_port = mr_sas_port->hba_port;
+ enum sas_device_type device_type =
+ mr_sas_port->remote_identify.device_type;
+
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ if (device_type == SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc, sas_address,
+ hba_port);
+
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc, sas_address, hba_port);
+}
+
+/**
+ * mpi3mr_del_phy_from_an_existing_port - del phy from a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy != mr_sas_phy)
+ continue;
+ if ((mr_sas_port->num_phys == 1) &&
+ !mrioc->reset_in_progress)
+ mpi3mr_delete_sas_port(mrioc, mr_sas_port);
+ else
+ mpi3mr_delete_sas_phy(mrioc, mr_sas_port,
+ mr_sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * mpi3mr_sas_port_sanity_check - sanity check while adding port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @sas_address: SAS address of device/expander
+ * @hba_port: HBA port entry
+ *
+ * Verifies whether the phys attached to a device with the given
+ * SAS address already belong to an existing sas port; if so,
+ * those phys are removed from that sas port.
+ *
+ * Return: None.
+ */
+static void mpi3mr_sas_port_sanity_check(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ sas_address) || (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ if (mr_sas_node->phy[i].phy_belongs_to_port == 1)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ mr_sas_node, &mr_sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpi3mr_set_identify - set identify for phys and end devices
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @identify: SAS transport layer's identify info
+ *
+ * Populates sas identify info for a specific device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct sas_identify *identify)
+{
+
+ struct mpi3_device_page0 device_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ u16 device_info;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0,
+ sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ sasinf = &device_pg0.device_specific.sas_sata_format;
+ device_info = le16_to_cpu(sasinf->device_info);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sasinf->sas_address);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sasinf->phy_num;
+
+ /* device_type */
+ switch (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) {
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ /* MPI 3.0 doesn't have a define for a SATA initiator, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+
+ /* target_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ /* MPI 3.0 doesn't have a define for an STP target, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET)
+ identify->target_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_host_phy - report sas_host phy to SAS transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @phy_pg0: SAS phy page 0
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_host_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy, struct mpi3_sas_phy_page0 phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle = le16_to_cpu(phy_pg0.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_expander_phy - report expander phy to transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @expander_pg1: SAS Expander page 1
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_expander_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy,
+ struct mpi3_sas_expander_page1 expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_hba_port - alloc hba port object
+ * @mrioc: Adapter instance reference
+ * @port_id: Port number
+ *
+ * Allocates memory for an hba port object.
+ *
+ * Return: Allocated mpi3mr_hba_port reference or NULL on failure.
+ */
+static struct mpi3mr_hba_port *
+mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
+{
+ struct mpi3mr_hba_port *hba_port;
+
+ hba_port = kzalloc(sizeof(struct mpi3mr_hba_port),
+ GFP_KERNEL);
+ if (!hba_port)
+ return NULL;
+ hba_port->port_id = port_id;
+ ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_add_tail(&hba_port->list, &mrioc->hba_port_table_list);
+ return hba_port;
+}
+
+/**
+ * mpi3mr_get_hba_port_by_id - find hba port by id
+ * @mrioc: Adapter instance reference
+ * @port_id: Port ID to search
+ *
+ * Return: mpi3mr_hba_port reference for the matched port or NULL
+ */
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id)
+{
+ struct mpi3mr_hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
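+ /* skip ports marked dirty while a reset is being processed */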
+ if (port->flags & MPI3MR_HBA_PORT_FLAG_DIRTY)
+ continue;
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * mpi3mr_update_links - refreshing SAS phy link changes
+ * @mrioc: Adapter instance reference
+ * @sas_address_parent: SAS address of parent expander or host
+ * @handle: Firmware device handle of attached device
+ * @phy_number: Phy number
+ * @link_rate: New link rate
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port)
+{
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct mpi3mr_sas_phy *mr_sas_phy;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ mr_sas_phy = &mr_sas_node->phy[phy_number];
+ mr_sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
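+ /* a valid handle with an active link rate implies an attached device */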
+ if (handle && (link_rate >= MPI3_SAS_NEG_LINK_RATE_1_5)) {
+ mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_phy->remote_identify);
+ mpi3mr_add_phy_to_an_existing_port(mrioc, mr_sas_node,
+ mr_sas_phy, mr_sas_phy->remote_identify.sas_address,
+ hba_port);
+ } else
+ memset(&mr_sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mr_sas_phy->phy)
+ mr_sas_phy->phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(link_rate);
+
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_phy->phy->dev,
+ "refresh: parent sas_address(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ (unsigned long long)sas_address_parent,
+ link_rate, phy_number, handle, (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+}
+
+/**
+ * mpi3mr_sas_host_refresh - refreshing sas host object contents
+ * @mrioc: Adapter instance reference
+ *
+ * This function refreshes the controller's phy information and
+ * updates the SAS transport layer accordingly. It is executed
+ * for each device addition or device info change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u8 link_rate;
+ u16 sz, port_id, attached_handle;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+
+ dprint_transport_info(mrioc,
+ "updating handles for sas_host(0x%016llx)\n",
+ (unsigned long long)mrioc->sas_hba.sas_address);
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
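+ /* host and virtual phys are not exposed to the transport layer */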
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ link_rate =
+ sas_io_unit_pg0->phy_data[i].negotiated_link_rate >> 4;
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
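+ /* report at least 1.5 Gbps when a device is attached */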
+ if (attached_handle && link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_update_links(mrioc, mrioc->sas_hba.sas_address,
+ attached_handle, i, link_rate,
+ mrioc->sas_hba.phy[i].hba_port);
+ }
+ out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_host_add - create sas host object
+ * @mrioc: Adapter instance reference
+ *
+ * This function creates the controller's phy information,
+ * updates the SAS transport layer accordingly, and is executed
+ * on the first device addition or device info change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u16 sz, num_phys = 1, port_id, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_enclosure_page0 encl_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
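+ /* first read a single-phy sized page just to learn num_phys */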
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ num_phys = sas_io_unit_pg0->num_phys;
+ kfree(sas_io_unit_pg0);
+
+ mrioc->sas_hba.host_node = 1;
+ INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list);
+ mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev;
+ mrioc->sas_hba.phy = kcalloc(num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!mrioc->sas_hba.phy)
+ return;
+
+ mrioc->sas_hba.num_phys = num_phys;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, i)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ mrioc->sas_hba.phy[i].phy_id = i;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_add_host_phy(mrioc, &mrioc->sas_hba.phy[i],
+ phy_pg0, mrioc->sas_hba.parent_dev);
+ }
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ mrioc->sas_hba.handle, ioc_status, __FILE__, __LINE__,
+ __func__);
+ goto out;
+ }
+ mrioc->sas_hba.enclosure_handle =
+ le16_to_cpu(dev_pg0.enclosure_handle);
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ mrioc->sas_hba.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ ioc_info(mrioc,
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ mrioc->sas_hba.handle,
+ (unsigned long long) mrioc->sas_hba.sas_address,
+ mrioc->sas_hba.num_phys);
+
+ if (mrioc->sas_hba.enclosure_handle) {
+ if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+ &encl_pg0, sizeof(encl_pg0),
+ MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.enclosure_handle)) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ mrioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(encl_pg0.enclosure_logical_id);
+ }
+
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_port_add - Expose the SAS device to the SAS TL
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the attached device
+ * @sas_address_parent: sas address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * This function creates a new sas port object for the given end
+ * device matching the sas address and hba_port, adds it to the
+ * sas_node's sas_port_list and exposes the attached sas device
+ * to the SAS transport layer through sas_rphy_add.
+ *
+ * Return: A valid mpi3mr_sas_port reference or NULL.
+ */
+static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ u16 handle, u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy, *next;
+ struct mpi3mr_sas_port *mr_sas_port;
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct sas_rphy *rphy;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ int i;
+ struct sas_port *port;
+
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ mr_sas_port = kzalloc(sizeof(struct mpi3mr_sas_port), GFP_KERNEL);
+ if (!mr_sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&mr_sas_port->port_list);
+ INIT_LIST_HEAD(&mr_sas_port->phy_list);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!mr_sas_node) {
+ ioc_err(mrioc, "%s:could not find parent sas_address(0x%016llx)!\n",
+ __func__, (unsigned long long)sas_address_parent);
+ goto out_fail;
+ }
+
+ if ((mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_port->remote_identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mr_sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->hba_port = hba_port;
+ mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ mr_sas_port->remote_identify.sas_address, hba_port);
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ mr_sas_port->remote_identify.sas_address) ||
+ (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << i);
+ }
+
+ if (!mr_sas_port->num_phys) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
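+ /* ffs() is 1-based; subtract one for the zero-based lowest phy */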
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+
+ if (!tgtdev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 1;
+ }
+
+ if (!mr_sas_node->parent_dev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(mr_sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mr_sas_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&port->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ sas_port_add_phy(port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+ mr_sas_phy->hba_port = hba_port;
+ }
+
+ mr_sas_port->port = port;
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ rphy = sas_end_device_alloc(port);
+ tgtdev->dev_spec.sas_sata_inf.rphy = rphy;
+ } else {
+ rphy = sas_expander_alloc(port,
+ mr_sas_port->remote_identify.device_type);
+ }
+ rphy->identify = mr_sas_port->remote_identify;
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ }
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 0;
+ tgtdev->dev_spec.sas_sata_inf.sas_transport_attached = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ dev_info(&rphy->dev,
+ "%s: added: handle(0x%04x), sas_address(0x%016llx)\n",
+ __func__, handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address);
+
+ mr_sas_port->rphy = rphy;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&mr_sas_port->port_list, &mr_sas_node->sas_port_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, true);
+ }
+
+ /* fill in report manufacture */
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_report_manufacture(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy), hba_port->port_id);
+
+ return mr_sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mr_sas_phy, next, &mr_sas_port->phy_list,
+ port_siblings)
+ list_del(&mr_sas_phy->port_siblings);
+ kfree(mr_sas_port);
+ return NULL;
+}
+
+/**
+ * mpi3mr_sas_port_remove - remove port from the list
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of attached device
+ * @sas_address_parent: SAS address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * Removes the object from the sas_port_list and frees the
+ * associated memory.
+ *
+ * Return: None
+ */
+static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+ unsigned long flags;
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_node *mr_sas_node;
+ u8 found = 0;
+ struct mpi3mr_sas_phy *mr_sas_phy, *next_phy;
+ struct mpi3mr_hba_port *srch_port, *hba_port_next = NULL;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ found = 1;
+ list_del(&mr_sas_port->port_list);
+ goto out;
+ }
+
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ if (mr_sas_node->host_node) {
+ list_for_each_entry_safe(srch_port, hba_port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (srch_port != hba_port)
+ continue;
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ srch_port, srch_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ break;
+ }
+ }
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if (mr_sas_node->phy[i].remote_identify.sas_address ==
+ sas_address)
+ memset(&mr_sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ list_for_each_entry_safe(mr_sas_phy, next_phy,
+ &mr_sas_port->phy_list, port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ mr_sas_phy->phy_belongs_to_port = 0;
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete_phy(mr_sas_port->port,
+ mr_sas_phy->phy);
+ list_del(&mr_sas_phy->port_siblings);
+ }
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete(mr_sas_port->port);
+ ioc_info(mrioc, "%s: removed sas_address(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, false);
+ }
+
+ kfree(mr_sas_port);
+}
+
+/**
+ * struct host_port - host port details
+ * @sas_address: SAS Address of the attached device
+ * @phy_mask: phy mask of host port
+ * @handle: Device Handle of attached device
+ * @iounit_port_id: port ID
+ * @used: host port is already matched with sas port from sas_port_list
+ * @lowest_phy: lowest phy ID of host port
+ */
+struct host_port {
+ u64 sas_address;
+ u32 phy_mask;
+ u16 handle;
+ u8 iounit_port_id;
+ u8 used;
+ u8 lowest_phy;
+};
+
+/**
+ * mpi3mr_update_mr_sas_port - update sas port objects during reset
+ * @mrioc: Adapter instance reference
+ * @h_port: host_port object
+ * @mr_sas_port: sas_port objects which needs to be updated
+ *
+ * Update the port ID of the sas port object. Also add any phys
+ * newly added to the current sas port and remove any phys that
+ * have moved out of it.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy;
+ u32 phy_mask_xor;
+ u64 phys_to_be_added, phys_to_be_removed;
+ int i;
+
+ h_port->used = 1;
+ mr_sas_port->marked_responding = 1;
+
+ dev_info(&mr_sas_port->port->dev,
+ "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n",
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask,
+ h_port->iounit_port_id, h_port->phy_mask);
+
+ mr_sas_port->hba_port->port_id = h_port->iounit_port_id;
+ mr_sas_port->hba_port->flags &= ~MPI3MR_HBA_PORT_FLAG_DIRTY;
+
+ /* Get the newly added phys bit map & removed phys bit map */
+ phy_mask_xor = mr_sas_port->phy_mask ^ h_port->phy_mask;
+ phys_to_be_added = h_port->phy_mask & phy_mask_xor;
+ phys_to_be_removed = mr_sas_port->phy_mask & phy_mask_xor;
+
+ /*
+ * Register these new phys with the current mr_sas_port's port.
+ * If any of these phys were previously registered with another
+ * port, delete them from that port first.
+ */
+ for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ mpi3mr_add_phy_to_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ /* Delete the phys which are not part of current mr_sas_port's port. */
+ for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ }
+}
+
+/**
+ * mpi3mr_refresh_sas_ports - update host's sas ports during reset
+ * @mrioc: Adapter instance reference
+ *
+ * Update the host's sas ports during reset by checking whether
+ * the sas ports are still intact. Add or remove phys if any hba
+ * phys have moved into or out of a sas port, and update the
+ * io_unit_port if it changed during the reset.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
+{
+ struct host_port h_port[32];
+ int i, j, found, host_port_count = 0, port_idx;
+ u16 sz, attached_handle, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ struct mpi3mr_sas_port *mr_sas_port;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* Create a new expander port table */
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (!attached_handle)
+ continue;
+ found = 0;
+ for (j = 0; j < host_port_count; j++) {
+ if (h_port[j].handle == attached_handle) {
+ h_port[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ continue;
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ attached_handle))) {
+ dprint_reset(mrioc,
+ "failed to read dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ attached_handle, __FILE__, __LINE__, __func__);
+ continue;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ ioc_status, attached_handle,
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+
+ port_idx = host_port_count;
+ h_port[port_idx].sas_address = le64_to_cpu(sasinf->sas_address);
+ h_port[port_idx].handle = attached_handle;
+ h_port[port_idx].phy_mask = (1 << i);
+ h_port[port_idx].iounit_port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ h_port[port_idx].lowest_phy = sasinf->phy_num;
+ h_port[port_idx].used = 0;
+ host_port_count++;
+ }
+
+ if (!host_port_count)
+ goto out;
+
+ if (mrioc->logging_level & MPI3_DEBUG_RESET) {
+ ioc_info(mrioc, "Host port details before reset\n");
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ mr_sas_port->hba_port->port_id,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->phy_mask, mr_sas_port->lowest_phy);
+ }
+ mr_sas_port = NULL;
+ ioc_info(mrioc, "Host port details after reset\n");
+ for (i = 0; i < host_port_count; i++) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ h_port[i].iounit_port_id, h_port[i].sas_address,
+ h_port[i].phy_mask, h_port[i].lowest_phy);
+ }
+ }
+
+ /* mark all host sas port entries as dirty */
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ mr_sas_port->marked_responding = 0;
+ mr_sas_port->hba_port->flags |= MPI3MR_HBA_PORT_FLAG_DIRTY;
+ }
+
+ /* First check for matching lowest phy */
+ for (i = 0; i < host_port_count; i++) {
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].lowest_phy == mr_sas_port->lowest_phy) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+ /* In case the lowest phy got enabled or disabled during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].phy_mask & mr_sas_port->phy_mask) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+ /* In case the expander cable was removed and reconnected to another HBA port during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_refresh_expanders - Refresh expander device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any
+ * expander devices that went missing during the reset and remove
+ * them from the upper layers, and to expose any newly detected
+ * expander devices to the upper layers.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ u16 ioc_status, handle;
+ u64 sas_address;
+ int i;
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ sas_expander->non_responding = 1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ sas_expander = NULL;
+
+ handle = 0xffff;
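+ /* start the GET_NEXT_HANDLE walk from an invalid handle (0xffff) */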
+
+ /* Search for responding expander devices and add any that are newly discovered */
+ while (true) {
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(struct mpi3_sas_expander_page0),
+ MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ dprint_reset(mrioc,
+ "failed to read exp pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading exp pg0 for handle:(0x%04x), %s:%d/%s()!\n",
+ ioc_status, handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ handle = le16_to_cpu(expander_pg0.dev_handle);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, expander_pg0.io_unit_port);
+
+ if (!hba_port) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!sas_expander) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ sas_expander->non_responding = 0;
+ if (sas_expander->handle == handle)
+ continue;
+
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ }
+
+ /*
+ * Delete non-responding expander devices and the corresponding
+ * hba_port if the non-responding expander's parent device is a
+ * host node.
+ */
+ sas_expander = NULL;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ if (sas_expander->non_responding) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_node_add - insert an expander to the list.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander sas node
+ * Context: This function will acquire sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_add(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &mrioc->sas_expander_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_add - Create expander object
+ * @mrioc: Adapter instance reference
+ * @handle: Expander firmware device handle
+ *
+ * This function creates an expander object, stores it in the
+ * sas_expander_list and exposes it to the SAS transport layer.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_enclosure_node *enclosure_dev;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ u16 ioc_status, parent_handle, temp_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ u8 port_id, link_rate;
+ struct mpi3mr_sas_port *mr_sas_port = NULL;
+ struct mpi3mr_hba_port *hba_port;
+ u32 phynum_handle;
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(expander_pg0), MPI3_SAS_EXPAND_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ parent_handle = le16_to_cpu(expander_pg0.parent_dev_handle);
+ if (mpi3mr_get_sas_address(mrioc, parent_handle, &sas_address_parent)
+ != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ port_id = expander_pg0.io_unit_port;
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (sas_address_parent != mrioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = mpi3mr_expander_add(mrioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ } else {
+ /*
+ * When a parent expander is present, update its phys to which
+ * the child expander is connected with the link speed, attached
+ * dev handle and sas address.
+ */
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle =
+ (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ parent_handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc,
+ &ioc_status, &expander_pg1,
+ sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ temp_handle = le16_to_cpu(
+ expander_pg1.attached_dev_handle);
+ if (temp_handle != handle)
+ continue;
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ mpi3mr_update_links(mrioc, sas_address_parent,
+ handle, i, link_rate, hba_port);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
+ GFP_KERNEL);
+ if (!sas_expander)
+ return -1;
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.num_phys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+ sas_expander->hba_port = hba_port;
+
+ ioc_info(mrioc,
+ "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys) {
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mr_sas_port = mpi3mr_sas_port_add(mrioc, handle, sas_address_parent,
+ sas_expander->hba_port);
+ if (!mr_sas_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mr_sas_port->rphy->dev;
+ sas_expander->rphy = mr_sas_port->rphy;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle = (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].hba_port = hba_port;
+
+ if ((mpi3mr_add_expander_phy(mrioc, &sas_expander->phy[i],
+ expander_pg1, sas_expander->parent_dev))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ enclosure_dev =
+ mpi3mr_enclosure_find_by_handle(mrioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
+ sas_expander->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+ }
+
+ mpi3mr_expander_node_add(mrioc, sas_expander);
+ return 0;
+
+out_fail:
+
+ if (mr_sas_port)
+ mpi3mr_sas_port_remove(mrioc,
+ sas_expander->sas_address,
+ sas_address_parent, sas_expander->hba_port);
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpi3mr_expander_node_remove - recursive removal of expander.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander device object
+ *
+ * Removes the expander object from the sas_expander_list, frees
+ * the associated memory, and removes the expander from the SAS TL.
+ * If one of the attached devices is itself an expander, that
+ * expander is removed recursively as well.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ unsigned long flags;
+ u8 port_id;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mr_sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (mrioc->reset_in_progress)
+ return;
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ else if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ port_id = sas_expander->hba_port->port_id;
+ mpi3mr_sas_port_remove(mrioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent, sas_expander->hba_port);
+
+ ioc_info(mrioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address, port_id);
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * mpi3mr_expander_remove - Remove expander object
+ * @mrioc: Adapter instance reference
+ * @sas_address: Remove expander sas_address
+ * @hba_port: HBA port reference
+ *
+ * This function removes the expander object stored in
+ * mrioc->sas_expander_list and removes it from the SAS TL by
+ * calling mpi3mr_expander_node_remove().
+ *
+ * Return: None
+ */
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ unsigned long flags;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (sas_expander)
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+}
+
+/**
+ * mpi3mr_get_sas_negotiated_logical_linkrate - get negotiated logical link rate
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function identifies whether the target device is
+ * attached directly or through an expander, reads SAS phy
+ * page0 or expander phy page1 accordingly, and extracts the
+ * negotiated logical link rate. If reading the pages fails,
+ * a link rate of 1.5 Gb/s is returned.
+ *
+ * Return: logical link rate.
+ */
+static u8 mpi3mr_get_sas_negotiated_logical_linkrate(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u8 link_rate = MPI3_SAS_NEG_LINK_RATE_1_5, phy_number;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u32 phynum_handle;
+ u16 ioc_status;
+
+ phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) {
+ phynum_handle = ((phy_number<<MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT)
+ | tgtdev->parent_handle);
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy_number)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+out:
+ return link_rate;
+}
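Since this mask-and-shift extraction recurs throughout the file, here is a minimal standalone sketch of it, assuming the MPI3 header values (the logical rate occupies the upper nibble: mask 0xf0, shift 4); the sample byte is invented:

```c
/*
 * Standalone sketch of the logical link-rate extraction used above.
 * The 0xf0 mask and shift of 4 mirror MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK
 * and MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT; the sample byte is made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned char negotiated_link_rate = 0xa9;	/* hypothetical pg1 byte */
	unsigned char logical_rate;

	/* logical rate lives in the upper nibble, physical in the lower */
	logical_rate = (negotiated_link_rate & 0xf0) >> 4;

	/* prints 0xa, which is 6 Gb/s in the standard SAS rate encoding */
	printf("logical link rate code: 0x%x\n", logical_rate);
	return 0;
}
```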
+
+/**
+ * mpi3mr_report_tgtdev_to_sas_transport - expose dev to SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function exposes the target device to the SAS transport
+ * layer after preparing the host phy and setting up the link rate.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ int retval = 0;
+ u8 link_rate, parent_phy_number;
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+ u8 port_id;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return -1;
+
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_get_sas_address(mrioc, tgtdev->parent_handle,
+ &sas_address_parent) != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.sas_address_parent = sas_address_parent;
+
+ parent_phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ port_id = tgtdev->io_unit_port;
+
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.hba_port = hba_port;
+
+ link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+
+ mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
+ parent_phy_number, link_rate, hba_port);
+
+ tgtdev->host_exposed = 1;
+ if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ sas_address_parent, hba_port)) {
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ } else if ((!tgtdev->starget)) {
+ if (!mrioc->is_driver_loading)
+ mpi3mr_sas_port_remove(mrioc, sas_address,
+ sas_address_parent, hba_port);
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_sas_transport - remove from SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function removes the target device from the SAS transport layer.
+ *
+ * Return: None.
+ */
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return;
+
+ hba_port = tgtdev->dev_spec.sas_sata_inf.hba_port;
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ sas_address_parent = tgtdev->dev_spec.sas_sata_inf.sas_address_parent;
+ mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ hba_port);
+ tgtdev->host_exposed = 0;
+}
+
+/**
+ * mpi3mr_get_port_id_by_sas_phy - Get port ID of the given phy
+ * @phy: SAS transport layer phy object
+ *
+ * Return: Port number for a valid hba_port, else 0xFF
+ */
+static inline u8 mpi3mr_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ if (hba_port)
+ port_id = hba_port->port_id;
+
+ return port_id;
+}
+
+/**
+ * mpi3mr_get_port_id_by_rphy - Get port number from SAS rphy
+ * @mrioc: Adapter instance reference
+ * @rphy: SAS transport layer remote phy object
+ *
+ * Retrieves the HBA port number to which the device pointed to
+ * by the rphy object is attached.
+ *
+ * Return: Valid port number on success, else 0xFF.
+ */
+static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *rphy)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list,
+ list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->hba_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ port_id =
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ }
+ return port_id;
+}
+
+static inline struct mpi3mr_ioc *phy_to_mrioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+
+ return shost_priv(shost);
+}
+
+static inline struct mpi3mr_ioc *rphy_to_mrioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
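As a side note, the driver's later length check against sizeof(struct phy_error_log_reply) only works if these layouts carry no padding; a standalone compile-time sketch of the expected 12/28-byte sizes, with local struct copies suffixed _chk and typedefs standing in for the kernel types:

```c
#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t __be16;
typedef uint32_t __be32;

/* Local copies of the frames above, purely for size checking;
 * all members here are naturally aligned, so no padding appears. */
struct phy_error_log_request_chk {
	u8 smp_frame_type;
	u8 function;
	u8 allocated_response_length;
	u8 request_length;
	u8 reserved_1[5];
	u8 phy_identifier;
	u8 reserved_2[2];
};

struct phy_error_log_reply_chk {
	u8 smp_frame_type;
	u8 function;
	u8 function_result;
	u8 response_length;
	__be16 expander_change_count;
	u8 reserved_1[3];
	u8 phy_identifier;
	u8 reserved_2[2];
	__be32 invalid_dword;
	__be32 running_disparity_error;
	__be32 loss_of_dword_sync;
	__be32 phy_reset_problem;
};

/* 12-byte request; 28-byte reply, the value the driver compares
 * response_data_length against after the passthrough completes. */
_Static_assert(sizeof(struct phy_error_log_request_chk) == 12,
	       "SMP REPORT PHY ERROR LOG request should be 12 bytes");
_Static_assert(sizeof(struct phy_error_log_reply_chk) == 28,
	       "SMP REPORT PHY ERROR LOG reply should be 28 bytes");

int main(void)
{
	return 0;
}
```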
+
+/**
+ * mpi3mr_get_expander_phy_error_log - return expander phy error counters
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma, data_in_dma;
+ u32 data_out_sz, data_in_sz, sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_error_log_request);
+ data_in_sz = sizeof(struct phy_error_log_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_error_log_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy error log SMP request to sas_address(0x%016llx), phy_id(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy error log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log - function_result(%d)\n",
+ phy_error_log_reply->function_result);
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_linkerrors - return phy error counters
+ * @phy: The SAS transport layer phy object
+ *
+ * This function retrieves the phy error log information of the
+ * HBA or expander to which the given phy belongs.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_phy_page1 phy_pg1;
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_get_expander_phy_error_log(mrioc, phy);
+
+ memset(&phy_pg1, 0, sizeof(struct mpi3_sas_phy_page1));
+ /* get hba phy error logs */
+ if ((mpi3mr_cfg_get_sas_phy_pg1(mrioc, &ioc_status, &phy_pg1,
+ sizeof(struct mpi3_sas_phy_page1),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.invalid_dword_count);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.running_disparity_error_count);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.loss_dword_synch_count);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.phy_reset_problem_count);
+ return 0;
+}
+
+/**
+ * mpi3mr_transport_get_enclosure_identifier - Get Enclosure ID
+ * @rphy: The SAS transport layer remote phy object
+ * @identifier: Enclosure identifier to be returned
+ *
+ * Returns the enclosure id for the device pointed by the remote
+ * phy object.
+ *
+ * Return: 0 on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_enclosure_identifier(struct sas_rphy *rphy,
+ u64 *identifier)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ *identifier =
+ tgtdev->enclosure_logical_id;
+ rc = 0;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_bay_identifier - Get bay ID
+ * @rphy: The SAS transport layer remote phy object
+ *
+ * Returns the slot id for the device pointed by the remote phy
+ * object.
+ *
+ * Return: Valid slot ID on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ rc = tgtdev->slot;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
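For orientation, the phy_reset and phy_enable callbacks further below reduce to a simple opcode choice over these constants; a condensed standalone sketch (the helper names are hypothetical, the driver open-codes the ternaries):

```c
#define SMP_PHY_CONTROL_LINK_RESET	(0x01)
#define SMP_PHY_CONTROL_HARD_RESET	(0x02)
#define SMP_PHY_CONTROL_DISABLE	(0x03)

/* phy_reset() path: the hard_reset flag picks the opcode */
static unsigned char reset_opcode(int hard_reset)
{
	return hard_reset ? SMP_PHY_CONTROL_HARD_RESET :
			    SMP_PHY_CONTROL_LINK_RESET;
}

/* phy_enable() path: enabling a phy is done via a link reset */
static unsigned char enable_opcode(int enable)
{
	return enable ? SMP_PHY_CONTROL_LINK_RESET :
			SMP_PHY_CONTROL_DISABLE;
}
```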
+
+/**
+ * mpi3mr_expander_phy_control - expander phy control
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ * @phy_operation: The phy operation to be executed
+ *
+ * Issues SMP passthru phy control request to execute a specific
+ * phy operation for a given expander device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u16 sz;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_control_request);
+ data_in_sz = sizeof(struct phy_control_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_control_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy control SMP request to sas_address(0x%016llx), phy_id(%d) opcode(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ phy_operation);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy control SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+ dprint_transport_info(mrioc,
+ "phy control - function_result(%d)\n",
+ phy_control_reply->function_result);
+ rc = 0;
+ }
+ out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_reset - Reset a given phy
+ * @phy: The SAS transport layer phy object
+ * @hard_reset: Flag to indicate the type of reset
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_iounit_control_request mpi_request;
+ struct mpi3_iounit_control_reply mpi_reply;
+ u16 request_sz = sizeof(struct mpi3_iounit_control_request);
+ u16 reply_sz = sizeof(struct mpi3_iounit_control_reply);
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, request_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request.operation = MPI3_CTRL_OP_SAS_PHY_CONTROL;
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX] =
+ (hard_reset ? MPI3_CTRL_ACTION_HARD_RESET :
+ MPI3_CTRL_ACTION_LINK_RESET);
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX] =
+ phy->number;
+
+ dprint_transport_info(mrioc,
+ "sending phy reset request to sas_address(0x%016llx), phy_id(%d) hard_reset(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ hard_reset);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "phy reset request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+out:
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_enable - enable/disable phys
+ * @phy: The SAS transport layer phy object
+ * @enable: flag to enable/disable, enable phy when true
+ *
+ * This function enables/disables a given phy by executing the
+ * required configuration page changes or an expander phy control
+ * command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ u16 sz;
+ int rc = 0;
+ int i, discovery_active;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < mrioc->sas_hba.num_phys ; i++) {
+ if (sas_io_unit_pg0->phy_data[i].port_flags &
+ MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS) {
+ ioc_err(mrioc,
+ "discovery is active on port = %d, phy = %d\n"
+ "\tunable to enable/disable phys, try again later!\n",
+ sas_io_unit_pg0->phy_data[i].io_unit_port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ((sas_io_unit_pg0->phy_data[phy->number].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (enable)
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ &= ~MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ |= MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_io_unit_pg1);
+ kfree(sas_io_unit_pg0);
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_speed - set phy min/max speed
+ * @phy: The SAS transport layer phy object
+ * @rates: Rates defined as in sas_phy_linkrates
+ *
+ * This function sets the link rates given in the rates
+ * argument to the given phy by executing required configuration
+ * page changes or expander phy control command
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u16 sz, ioc_status;
+ int rc = 0;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ sas_io_unit_pg1->phy_data[phy->number].max_min_link_rate =
+ (rates->minimum_linkrate + (rates->maximum_linkrate << 4));
+
+ if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS)) {
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate &
+ MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK)
+ >> MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ }
+
+out:
+ kfree(sas_io_unit_pg1);
+ return rc;
+}
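The max_min_link_rate write above packs two 4-bit rate codes into one byte, minimum in the low nibble and maximum in the high nibble; a small standalone sketch using the standard SAS rate encodings (0x8 = 1.5 Gb/s, 0xa = 6 Gb/s) and a hypothetical helper name:

```c
#include <stdio.h>

/* Mirrors: min_rate + (max_rate << 4) as written to the page above */
static unsigned char pack_max_min_link_rate(unsigned char min_rate,
					    unsigned char max_rate)
{
	return (unsigned char)(min_rate | (max_rate << 4));
}

int main(void)
{
	/* min 1.5 Gb/s (0x8), max 6 Gb/s (0xa) -> 0xa8 */
	printf("max_min_link_rate = 0x%02x\n",
	       pack_max_min_link_rate(0x8, 0xa));
	return 0;
}
```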
+
+/**
+ * mpi3mr_map_smp_buffer - map BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address holder
+ * @dma_len: Mapped DMA buffer length.
+ * @p: Virtual address holder
+ *
+ * This function maps the DMA-able BSG buffer. A request that is
+ * split across multiple scatter-gather segments is bounced through
+ * a single coherent allocation; a single-segment request is DMA
+ * mapped in place.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+mpi3mr_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+ /* Check if the request is split across multiple segments */
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+
+ return 0;
+}
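A user-space analogue of the branch above may make the bounce-buffer decision clearer; this is only a sketch, with malloc() standing in for dma_alloc_coherent()/dma_map_sg() and an invented fake_buf type standing in for the BSG buffer:

```c
#include <stdlib.h>

struct fake_buf {
	int sg_cnt;		/* number of scatter-gather segments */
	void *seg0;		/* the only segment when sg_cnt == 1 */
	size_t payload_len;
};

/* Returns the address "DMA" would target; *bounce is non-NULL only
 * when a copy-through buffer had to be allocated (caller frees it). */
static void *map_like_smp_buffer(struct fake_buf *buf, void **bounce)
{
	if (buf->sg_cnt > 1) {
		*bounce = malloc(buf->payload_len);
		return *bounce;
	}
	*bounce = NULL;
	return buf->seg0;	/* single segment: use it in place */
}

int main(void)
{
	char seg[16];
	struct fake_buf one_seg = { 1, seg, sizeof(seg) };
	void *bounce;
	void *dma_target = map_like_smp_buffer(&one_seg, &bounce);

	/* single segment: mapped in place, no bounce allocated */
	free(bounce);		/* NULL here; free(NULL) is a no-op */
	(void)dma_target;
	return 0;
}
```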
+
+/**
+ * mpi3mr_unmap_smp_buffer - unmap BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address to be unmapped
+ * @p: Virtual address
+ *
+ * This function unmaps the DMA-able buffer previously mapped by
+ * mpi3mr_map_smp_buffer()
+ */
+static void
+mpi3mr_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t dma_addr, void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * mpi3mr_transport_smp_handler - handler for smp passthru
+ * @job: BSG job reference
+ * @shost: SCSI host object reference
+ * @rphy: SAS transport rphy object pointing the expander
+ *
+ * This is used primarily by smp_utils for sending SMP
+ * passthrough commands to the expanders attached to the controller.
+ */
+static void
+mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ int rc;
+ void *psge;
+ dma_addr_t dma_addr_in;
+ dma_addr_t dma_addr_out;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t dma_len_in;
+ size_t dma_len_out;
+ unsigned int reslen = 0;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ &dma_addr_out, &dma_len_out, &addr_out);
+ if (rc)
+ goto out;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ &dma_addr_in, &dma_len_in, &addr_in);
+ if (rc)
+ goto unmap_out;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_rphy(mrioc, rphy);
+ mpi_request.sas_address = ((rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(mrioc->sas_hba.sas_address));
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_out - 4, dma_addr_out);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_in - 4, dma_addr_in);
+
+ dprint_transport_info(mrioc, "sending SMP request\n");
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto unmap_in;
+
+ dprint_transport_info(mrioc,
+ "SMP request completed with ioc_status(0x%04x)\n", ioc_status);
+
+ dprint_transport_info(mrioc,
+ "SMP request - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ memcpy(job->reply, &mpi_reply, reply_sz);
+ job->reply_len = reply_sz;
+ reslen = le16_to_cpu(mpi_reply.response_data_length);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+
+ rc = 0;
+unmap_in:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ dma_addr_in, addr_in);
+unmap_out:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ dma_addr_out, addr_out);
+out:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template mpi3mr_transport_functions = {
+ .get_linkerrors = mpi3mr_transport_get_linkerrors,
+ .get_enclosure_identifier = mpi3mr_transport_get_enclosure_identifier,
+ .get_bay_identifier = mpi3mr_transport_get_bay_identifier,
+ .phy_reset = mpi3mr_transport_phy_reset,
+ .phy_enable = mpi3mr_transport_phy_enable,
+ .set_phy_speed = mpi3mr_transport_phy_speed,
+ .smp_handler = mpi3mr_transport_smp_handler,
+};
+
+struct scsi_transport_template *mpi3mr_transport_template;
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index a072187875df..c299f7e078fb 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -46,7 +46,7 @@ config SCSI_MPT3SAS
select SCSI_SAS_ATTRS
select RAID_ATTRS
select IRQ_POLL
- ---help---
+ help
This driver supports PCI-Express SAS 12Gb/s Host Adapters.
config SCSI_MPT2SAS_MAX_SGE
@@ -54,7 +54,7 @@ config SCSI_MPT2SAS_MAX_SGE
depends on PCI && SCSI && SCSI_MPT3SAS
default "128"
range 16 256
- ---help---
+ help
This option allows you to specify the maximum number of scatter-
gather entries per I/O. The driver default is 128, which matches
MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
@@ -66,7 +66,7 @@ config SCSI_MPT3SAS_MAX_SGE
depends on PCI && SCSI && SCSI_MPT3SAS
default "128"
range 16 256
- ---help---
+ help
This option allows you to specify the maximum number of scatter-
gather entries per I/O. The driver default is 128, which matches
MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
@@ -78,6 +78,6 @@ config SCSI_MPT2SAS
default n
select SCSI_MPT3SAS
depends on PCI && SCSI
- ---help---
- Dummy config option for backwards compatiblity: configure the MPT3SAS
+ help
+ Dummy config option for backwards compatibility: configure the MPT3SAS
driver instead.
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 84fb3fbdb0ca..e76d994dbed3 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -7,4 +7,5 @@ mpt3sas-y += mpt3sas_base.o \
mpt3sas_transport.o \
mpt3sas_ctl.o \
mpt3sas_trigger_diag.o \
- mpt3sas_warpdrive.o
+ mpt3sas_warpdrive.o \
+ mpt3sas_debugfs.o \
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 43a3bf8ff428..4d0be5ab98c1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -534,6 +534,7 @@ typedef struct _MPI2_CONFIG_REPLY {
****************************************************************************/
#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+#define MPI2_MFGPAGE_VENDORID_ATTO (0x117C)
/*MPI v2.0 SAS products */
#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
@@ -992,7 +993,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
*one and check the value returned for GPIOCount at runtime.
*/
#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
-#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (36)
#endif
typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index e83c7c529dc9..2c57115172cf 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -537,7 +537,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
U16 Event; /*0x14 */
U16 Reserved4; /*0x16 */
U32 EventContext; /*0x18 */
- U32 EventData[1]; /*0x1C */
+ U32 EventData[]; /*0x1C */
} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
Mpi2EventNotificationReply_t,
*pMpi2EventNotificationReply_t;
@@ -639,7 +639,7 @@ typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
U8 Reserved1; /*0x01 */
U16 Reserved2; /*0x02 */
U32 Reserved3; /*0x04 */
- U32 HostData[1]; /*0x08 */
+ U32 HostData[]; /*0x08 */
} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
@@ -1397,7 +1397,7 @@ typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
U32 Reserved8; /*0x18 */
U32 Reserved9; /*0x1C */
U32 Reserved10; /*0x20 */
- U32 HostData[1]; /*0x24 */
+ U32 HostData[]; /*0x24 */
} MPI2_SEND_HOST_MESSAGE_REQUEST,
*PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
Mpi2SendHostMessageRequest_t,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 663782bb790d..4e981ccaac41 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode,
"\t\tdefault - default perf_mode is 'balanced'"
);
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+	"This parameter is effective only if host_tagset_enable=1;\n\t\t"
+	"when poll_queues are enabled, perf_mode is set to latency mode.\n\t\t"
+	);
+
enum mpt3sas_perf_mode {
MPT_PERF_MODE_DEFAULT = -1,
MPT_PERF_MODE_BALANCED = 0,
@@ -129,8 +137,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
-static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
@@ -143,7 +149,7 @@ _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
* @mpi_request:mf request pointer.
* @sz: size of buffer.
*
- * @Returns - 1/0 Reset to be done or Not
+ * Return: 1/0 Reset to be done or Not
*/
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
@@ -190,7 +196,7 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
/**
* _base_readl_aero - retry readl for max three times.
- * @addr - MPT Fusion system interface register address
+ * @addr: MPT Fusion system interface register address
*
* Retry the readl() for max three times if it gets zero value
* while reading the system interface register.
@@ -413,7 +419,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2SGESimple32_t *sgel, *sgel_next;
u32 sgl_flags, sge_chain_count = 0;
- bool is_write = 0;
+ bool is_write = false;
u16 i = 0;
void __iomem *buffer_iomem;
phys_addr_t buffer_iomem_phys;
@@ -442,7 +448,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
return;
/* From smid we can get scsi_cmd, once we have sg_scmd,
- * we just need to get sg_virt and sg_next to get virual
+ * we just need to get sg_virt and sg_next to get virtual
* address associated with sgel->Address.
*/
@@ -482,7 +488,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
- is_write = 1;
+ is_write = true;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
@@ -599,6 +605,71 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
}
/**
+ * _base_sync_drv_fw_timestamp - Sync driver-firmware timestamp.
+ * @ioc: Per Adapter Object
+ *
+ * Return: nothing.
+ */
+static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+ ktime_t current_time;
+ u64 TimeStamp = 0;
+ u8 issue_reset = 0;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
+ mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
+ current_time = ktime_get_real();
+ TimeStamp = ktime_to_ms(current_time);
+ mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
+ mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ dinitprintk(ioc, ioc_info(ioc,
+ "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
+ TimeStamp));
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
+ goto issue_host_reset;
+ }
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ dinitprintk(ioc, ioc_info(ioc,
+ "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ }
+issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+out:
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
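The Reserved7/IOCParameterValue writes above split one 64-bit millisecond timestamp across two 32-bit request fields; a standalone sketch of the split with an arbitrary sample value:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ts_ms = 1700000000123ULL;	/* hypothetical ktime in ms */
	uint32_t hi = (uint32_t)(ts_ms >> 32);	/* -> Reserved7 */
	uint32_t lo = (uint32_t)(ts_ms & 0xFFFFFFFF); /* -> IOCParameterValue */

	printf("Reserved7=0x%08x IOCParameterValue=0x%08x\n", hi, lo);
	/* reassembly check: ((uint64_t)hi << 32) | lo == ts_ms */
	return 0;
}
```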
+
+/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
*
@@ -641,11 +712,12 @@ _base_fault_reset_work(struct work_struct *work)
/*
* Call _scsih_flush_pending_cmds callback so that we flush all
- * pending commands back to OS. This call is required to aovid
+ * pending commands back to OS. This call is required to avoid
* deadlock at block layer. Dead IOC will fail to do diag reset,
* and this call is safe since dead ioc will never return any
* command back from HW.
*/
+ mpt3sas_base_pause_mq_polling(ioc);
ioc->schedule_dead_ioc_flush_running_cmds(ioc);
/*
* Set remove_host flag early since kernel thread will
@@ -680,7 +752,8 @@ _base_fault_reset_work(struct work_struct *work)
ioc->shost_recovery = 1;
spin_unlock_irqrestore(
&ioc->ioc_reset_in_progress_lock, flags);
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_base_clear_outstanding_commands(ioc);
}
@@ -722,7 +795,11 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
ioc->ioc_coredump_loop = 0;
-
+ if (ioc->time_sync_interval &&
+ ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
+ ioc->timestamp_update_count = 0;
+ _base_sync_drv_fw_timestamp(ioc);
+ }
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
if (ioc->fault_reset_work_q)
@@ -746,6 +823,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
if (ioc->fault_reset_work_q)
return;
+ ioc->timestamp_update_count = 0;
/* initialize fault polling */
INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
@@ -795,7 +873,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* @fault_code: fault code
*/
void
-mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
@@ -805,7 +883,7 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
* @ioc: per adapter object
* @fault_code: fault code
*
- * Return nothing.
+ * Return: nothing.
*/
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
@@ -817,8 +895,9 @@ mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
* mpt3sas_base_wait_for_coredump_completion - Wait until coredump
* completes or times out
* @ioc: per adapter object
+ * @caller: caller function name
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
@@ -906,6 +985,20 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return;
+ /*
+ * Older Firmware version doesn't support driver trigger pages.
+ * So, skip displaying 'config invalid type' type
+ * of error message.
+ */
+ if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
+ Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
+
+ if ((rqst->ExtPageType ==
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
+ !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
+ return;
+ }
+ }
switch (ioc_status) {
@@ -964,7 +1057,7 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
desc = "config no defaults";
break;
case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
- desc = "config cant commit";
+ desc = "config can't commit";
break;
/****************************************************************************
@@ -1228,7 +1321,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* @log_info: log info
*/
static void
-_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
union loginfo_type {
u32 loginfo;
@@ -1276,11 +1369,11 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
}
/**
- * _base_display_reply_info -
+ * _base_display_reply_info - handle reply descriptors depending on IOC Status
* @ioc: per adapter object
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
- * @reply: reply message frame(lower 32bit addr)
+ * @reply: reply message frame (lower 32bit addr)
*/
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1300,7 +1393,7 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
(ioc->logging_level & MPT_DEBUG_REPLY)) {
- _base_sas_ioc_info(ioc , mpi_reply,
+ _base_sas_ioc_info(ioc, mpi_reply,
mpt3sas_base_get_msg_frame(ioc, smid));
}
@@ -1465,13 +1558,62 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
- * _base_mask_interrupts - disable interrupts
+ * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
+ * when the driver is flushing out the IOs.
+ * @ioc: per adapter object
+ *
+ * Pause polling on the mq poll (io_uring) queues when the driver is
+ * flushing out the IOs. Otherwise we may hit a race where the same IO
+ * is completed from two paths.
+ *
+ * Return: nothing.
+ */
+void
+mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+ int iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
+
+ /*
+ * wait for current poll to complete.
+ */
+ for (qid = 0; qid < iopoll_q_count; qid++) {
+ while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
+ cpu_relax();
+ udelay(500);
+ }
+ }
+}
+
+/**
+ * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+void
+mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+ int iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
+}
+
+/**
+ * mpt3sas_base_mask_interrupts - disable interrupts
* @ioc: per adapter object
*
* Disabling ResetIRQ, Reply and Doorbell Interrupts
*/
-static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
@@ -1483,13 +1625,13 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_unmask_interrupts - enable interrupts
+ * mpt3sas_base_unmask_interrupts - enable interrupts
* @ioc: per adapter object
*
* Enabling only Reply Interrupts
*/
-static void
-_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
@@ -1627,7 +1769,7 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
* So that FW can find enough entries to post the Reply
* Descriptors in the reply descriptor post queue.
*/
- if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
+ if (completed_cmds >= ioc->thresh_hold) {
if (ioc->combined_reply_queue) {
writel(reply_q->reply_post_host_index |
((msix_index & 7) <<
@@ -1639,7 +1781,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
}
- if (!reply_q->irq_poll_scheduled) {
+ if (!reply_q->is_iouring_poll_q &&
+ !reply_q->irq_poll_scheduled) {
reply_q->irq_poll_scheduled = true;
irq_poll_sched(&reply_q->irqpoll);
}
@@ -1696,6 +1839,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
}
/**
+ * mpt3sas_blk_mq_poll - poll the blk mq poll queue
+ * @shost: Scsi_Host object
+ * @queue_num: hw ctx queue number
+ *
+ * Return: number of entries processed from the poll queue.
+ */
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ (struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct adapter_reply_queue *reply_q;
+ int num_entries = 0;
+ int qid = queue_num - ioc->iopoll_q_start_index;
+
+ if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
+ !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
+ return 0;
+
+ reply_q = ioc->io_uring_poll_queues[qid].reply_q;
+
+ num_entries = _base_process_reply_queue(reply_q);
+ atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
+
+ return num_entries;
+}
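The pause/busy pairing between mpt3sas_base_pause_mq_polling() and mpt3sas_blk_mq_poll() above is a small handshake: the flush path raises pause and waits for busy to drain, while the poller treats busy as a 0/1 try-lock. A user-space analogue in C11 atomics (a sketch only; the kernel uses atomic_t helpers and udelay() in the wait loop):

```c
#include <stdatomic.h>
#include <stdbool.h>

struct poll_queue {
	atomic_int pause;	/* set by the flush path */
	atomic_int busy;	/* 0/1 try-lock taken by the poller */
};

/* Poller side: bail out if paused, and take the busy slot only if
 * nobody else holds it (mirrors atomic_add_unless(busy, 1, 1)). */
static bool poll_queue_enter(struct poll_queue *q)
{
	int expected = 0;

	if (atomic_load(&q->pause))
		return false;
	return atomic_compare_exchange_strong(&q->busy, &expected, 1);
}

static void poll_queue_exit(struct poll_queue *q)
{
	atomic_store(&q->busy, 0);
}

/* Flush side: raise pause, then wait for any in-flight poll to drain. */
static void poll_queue_pause(struct poll_queue *q)
{
	atomic_store(&q->pause, 1);
	while (atomic_load(&q->busy))
		;	/* the driver relaxes with cpu_relax()/udelay() here */
}

int main(void)
{
	struct poll_queue q = { 0, 0 };

	if (poll_queue_enter(&q))	/* succeeds: not paused, not busy */
		poll_queue_exit(&q);
	poll_queue_pause(&q);		/* further enters now fail */
	return 0;
}
```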
+
+/**
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
@@ -1718,10 +1888,10 @@ _base_interrupt(int irq, void *bus_id)
/**
* _base_irqpoll - IRQ poll callback handler
- * @irqpoll - irq_poll object
- * @budget - irq poll weight
+ * @irqpoll: irq_poll object
+ * @budget: irq poll weight
*
- * returns number of reply descriptors processed
+ * Return: number of reply descriptors processed
*/
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
@@ -1732,7 +1902,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
reply_q = container_of(irqpoll, struct adapter_reply_queue,
irqpoll);
if (reply_q->irq_line_enable) {
- disable_irq(reply_q->os_irq);
+ disable_irq_nosync(reply_q->os_irq);
reply_q->irq_line_enable = false;
}
num_entries = _base_process_reply_queue(reply_q);
@@ -1741,6 +1911,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
reply_q->irq_poll_scheduled = false;
reply_q->irq_line_enable = true;
enable_irq(reply_q->os_irq);
+ /*
+ * Go for one more round of processing the
+ * reply descriptor post queue in case the HBA
+ * Firmware has posted some reply descriptors
+ * while reenabling the IRQ.
+ */
+ _base_process_reply_queue(reply_q);
}
return num_entries;
@@ -1750,7 +1927,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
* _base_init_irqpolls - initliaze IRQ polls
* @ioc: per adapter object
*
- * returns nothing
+ * Return: nothing
*/
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
@@ -1761,6 +1938,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
return;
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ if (reply_q->is_iouring_poll_q)
+ continue;
irq_poll_init(&reply_q->irqpoll,
ioc->hba_queue_depth/4, _base_irqpoll);
reply_q->irq_poll_scheduled = false;
@@ -1786,12 +1965,14 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
* @ioc: per adapter object
- * Context: non ISR conext
+ * @poll: poll over reply descriptor pools in case the interrupt for a
+ * timed-out SCSI command got delayed
+ * Context: non-ISR context
*
* Called when a Task Management request has completed.
*/
void
-mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
{
struct adapter_reply_queue *reply_q;
@@ -1808,18 +1989,31 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
+
+ if (reply_q->is_iouring_poll_q) {
+ _base_process_reply_queue(reply_q);
+ continue;
+ }
+
+ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
if (reply_q->irq_poll_scheduled) {
/* Calling irq_poll_disable will wait for any pending
* callbacks to have completed.
*/
irq_poll_disable(&reply_q->irqpoll);
irq_poll_enable(&reply_q->irqpoll);
- reply_q->irq_poll_scheduled = false;
- reply_q->irq_line_enable = true;
- enable_irq(reply_q->os_irq);
- continue;
+ /* check how the scheduled poll has ended,
+ * clean up only if necessary
+ */
+ if (reply_q->irq_poll_scheduled) {
+ reply_q->irq_poll_scheduled = false;
+ reply_q->irq_line_enable = true;
+ enable_irq(reply_q->os_irq);
+ }
}
- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
+
+ if (poll)
+ _base_process_reply_queue(reply_q);
}
}
@@ -2006,7 +2200,16 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
/**
* _base_build_nvme_prp - This function is called for NVMe end devices to build
- * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
+ * a native SGL (NVMe PRP).
+ * @ioc: per adapter object
+ * @smid: system request message index for getting associated SGL
+ * @nvme_encap_request: the NVMe request msg frame pointer
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * The native SGL is built starting in the first PRP
* entry of the NVMe message (PRP1). If the data buffer is small enough to be
* described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
* used to describe a larger data buffer. If the data buffer is too large to
@@ -2035,7 +2238,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* Each 64-bit PRP entry comprises an address and an offset field. The address
* always points at the beginning of a 4KB physical memory page, and the offset
* describes where within that 4KB page the memory segment begins. Only the
- * first element in a PRP list may contain a non-zero offest, implying that all
+ * first element in a PRP list may contain a non-zero offset, implying that all
* memory segments following the first begin at the start of a 4KB page.
*
* Each PRP element normally describes 4KB of physical memory, with exceptions
@@ -2049,14 +2252,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* Since PRP entries lack any indication of size, the overall data buffer length
* is used to determine where the end of the data memory buffer is located, and
* how many PRP entries are required to describe it.
- *
- * @ioc: per adapter object
- * @smid: system request message index for getting asscociated SGL
- * @nvme_encap_request: the NVMe request msg frame pointer
- * @data_out_dma: physical address for WRITES
- * @data_out_sz: data xfer size for WRITES
- * @data_in_dma: physical address for READS
- * @data_in_sz: data xfer size for READS
*/
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
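The relocated comment above fixes the PRP sizing rule: the buffer needs one PRP entry per 4KB page it touches, and only the first entry may carry a non-zero in-page offset. A small user-space worked example (the helper name is hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

#define NVME_PRP_PAGE_SIZE 4096u

/* One PRP entry per 4KB page touched; only the first entry may have
 * a non-zero in-page offset, so the count is driven by where the
 * buffer starts within its first page. */
static unsigned int prp_entries_needed(uint64_t dma_addr, uint32_t len)
{
	uint32_t first_offset = dma_addr & (NVME_PRP_PAGE_SIZE - 1);

	return (first_offset + len + NVME_PRP_PAGE_SIZE - 1) /
		NVME_PRP_PAGE_SIZE;
}

int main(void)
{
	/* A 9000-byte transfer starting 512 bytes into a page covers
	 * in-page ranges [512..4095], [4096..8191], [8192..9511],
	 * i.e. three pages, hence three PRP entries. */
	printf("%u\n", prp_entries_needed(0x1000200, 9000));
	return 0;
}
```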
@@ -2213,8 +2408,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * base_make_prp_nvme -
- * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
+ * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
+ * SGLs specific to NVMe drives only
*
* @ioc: per adapter object
* @scmd: SCSI command from the mid-layer
@@ -2399,16 +2594,12 @@ _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
/* Get the SG list pointer and info. */
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return 1;
- }
/* Check if we need to build a native SG list. */
- if (base_is_prp_possible(ioc, pcie_device,
- scmd, sges_left) == 0) {
+ if (!base_is_prp_possible(ioc, pcie_device,
+ scmd, sges_left)) {
/* We built a native SG list, just return. */
goto out;
}
@@ -2511,12 +2702,8 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return -ENOMEM;
- }
sg_local = &mpi_request->SGL;
sges_in_segment = ioc->max_sges_in_main_message;
@@ -2659,12 +2846,8 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return -ENOMEM;
- }
sg_local = &mpi_request->SGL;
sges_in_segment = (ioc->request_sz -
@@ -2806,58 +2989,44 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
- u64 required_mask, coherent_mask;
struct sysinfo s;
- /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
-
- if (ioc->is_mcpu_endpoint)
- goto try_32bit;
-
- required_mask = dma_get_required_mask(&pdev->dev);
- if (sizeof(dma_addr_t) == 4 || required_mask == 32)
- goto try_32bit;
+ u64 coherent_dma_mask, dma_mask;
- if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(dma_mask);
- else
- coherent_mask = DMA_BIT_MASK(32);
-
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, coherent_mask))
- goto try_32bit;
+ if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
+ ioc->dma_mask = 32;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
+ ioc->dma_mask = 63;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
+ } else {
+ ioc->dma_mask = 64;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ }
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = dma_mask;
- goto out;
+ if (ioc->use_32bit_dma)
+ coherent_dma_mask = DMA_BIT_MASK(32);
- try_32bit:
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pdev->dev, dma_mask) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
return -ENODEV;
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- out:
+ if (ioc->dma_mask > 32) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ } else {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ }
+
si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->dma_mask, convert_to_kb(s.totalram));
+ ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
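/*
 * Editor's note (illustration): DMA_BIT_MASK(n) expands to
 * ((1ULL << n) - 1), with n == 64 special-cased to ~0ULL, so the three
 * cases above resolve to
 *
 *	DMA_BIT_MASK(32) = 0x00000000ffffffff
 *	DMA_BIT_MASK(63) = 0x7fffffffffffffff
 *	DMA_BIT_MASK(64) = 0xffffffffffffffff
 *
 * and use_32bit_dma narrows only the coherent mask, leaving the
 * streaming mask at the width selected above.
 */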
-static int
-_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
- struct pci_dev *pdev)
-{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- return -ENODEV;
- }
- return 0;
-}
-
/**
* _base_check_enable_msix - checks MSIX capabable.
* @ioc: per adapter object
@@ -2905,14 +3074,15 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_free_irq - free irq
+ * mpt3sas_base_free_irq - free irq
* @ioc: per adapter object
*
* Freeing respective reply_queue from the list.
*/
-static void
-_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
+ unsigned int irq;
struct adapter_reply_queue *reply_q, *next;
if (list_empty(&ioc->reply_queue_list))
@@ -2920,9 +3090,15 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
- if (ioc->smp_affinity_enable)
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- reply_q->msix_index), NULL);
+ if (reply_q->is_iouring_poll_q) {
+ kfree(reply_q);
+ continue;
+ }
+
+ if (ioc->smp_affinity_enable) {
+ irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+ irq_update_affinity_hint(irq, NULL);
+ }
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
@@ -2941,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
struct pci_dev *pdev = ioc->pdev;
struct adapter_reply_queue *reply_q;
- int r;
+ int r, qid;
reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
if (!reply_q) {
@@ -2953,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
reply_q->msix_index = index;
atomic_set(&reply_q->busy, 0);
+
+ if (index >= ioc->iopoll_q_start_index) {
+ qid = index - ioc->iopoll_q_start_index;
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
+ ioc->driver_name, ioc->id, qid);
+ reply_q->is_iouring_poll_q = 1;
+ ioc->io_uring_poll_queues[qid].reply_q = reply_q;
+ goto out;
+ }
+
if (ioc->msix_enable)
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
ioc->driver_name, ioc->id, index);
@@ -2967,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
kfree(reply_q);
return -EBUSY;
}
-
+out:
INIT_LIST_HEAD(&reply_q->list);
list_add_tail(&reply_q->list, &ioc->reply_queue_list);
return 0;
@@ -2978,16 +3165,15 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
* @ioc: per adapter object
*
* The enduser would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
*/
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
- unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
struct adapter_reply_queue *reply_q;
- int local_numa_node;
+ int iopoll_q_count = ioc->reply_queue_count -
+ ioc->iopoll_q_start_index;
+ const struct cpumask *mask;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3010,18 +3196,19 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
* corresponding to high iops queues.
*/
if (ioc->high_iops_queues) {
- local_numa_node = dev_to_node(&ioc->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
for (index = 0; index < ioc->high_iops_queues;
index++) {
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- index), cpumask_of_node(local_numa_node));
+ irq = pci_irq_vector(ioc->pdev, index);
+ irq_set_affinity_and_hint(irq, mask);
}
}
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
const cpumask_t *mask;
- if (reply_q->msix_index < ioc->high_iops_queues)
+ if (reply_q->msix_index < ioc->high_iops_queues ||
+ reply_q->msix_index >= ioc->iopoll_q_start_index)
continue;
mask = pci_irq_get_affinity(ioc->pdev,
@@ -3043,13 +3230,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
fall_back:
cpu = cpumask_first(cpu_online_mask);
- nr_msix -= ioc->high_iops_queues;
+ nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
index = 0;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
unsigned int i, group = nr_cpus / nr_msix;
- if (reply_q->msix_index < ioc->high_iops_queues)
+ if (reply_q->msix_index < ioc->high_iops_queues ||
+ reply_q->msix_index >= ioc->iopoll_q_start_index)
continue;
if (cpu >= nr_cpus)
@@ -3068,8 +3256,8 @@ fall_back:
/**
* _base_check_and_enable_high_iops_queues - enable high iops mode
- * @ ioc - per adapter object
- * @ hba_msix_vector_count - msix vectors supported by HBA
+ * @ioc: per adapter object
+ * @hba_msix_vector_count: msix vectors supported by HBA
*
* Enable high iops queues only if
* - HBA is a SEA/AERO controller and
@@ -3078,7 +3266,7 @@ fall_back:
* - loaded driver with default max_msix_vectors module parameter and
* - system booted in non kdump mode
*
- * returns nothing.
+ * Return: nothing.
*/
static void
_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
@@ -3086,8 +3274,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
{
u16 lnksta, speed;
+ /*
+ * Disable high iops queues if io uring poll queues are enabled.
+ */
if (perf_mode == MPT_PERF_MODE_IOPS ||
- perf_mode == MPT_PERF_MODE_LATENCY) {
+ perf_mode == MPT_PERF_MODE_LATENCY ||
+ ioc->io_uring_poll_queues) {
ioc->high_iops_queues = 0;
return;
}
@@ -3113,17 +3305,18 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _base_disable_msix - disables msix
+ * mpt3sas_base_disable_msix - disables msix
* @ioc: per adapter object
*
*/
-static void
-_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
if (!ioc->msix_enable)
return;
pci_free_irq_vectors(ioc->pdev);
ioc->msix_enable = 0;
+ kfree(ioc->io_uring_poll_queues);
}
/**
@@ -3137,18 +3330,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
int i, irq_flags = PCI_IRQ_MSIX;
struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
struct irq_affinity *descp = &desc;
+ /*
+ * Don't allocate msix vectors for poll_queues.
+	 * msix_vectors is always within the range of FW-supported reply queues.
+ */
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+
if (ioc->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
else
descp = NULL;
- ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->reply_queue_count);
+	ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d, nr_msix_vectors: %d\n",
+	    ioc->high_iops_queues, ioc->reply_queue_count, nr_msix_vectors);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->reply_queue_count, irq_flags, descp);
+ nr_msix_vectors, irq_flags, descp);
return i;
}
@@ -3164,6 +3363,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
int r;
int i, local_max_msix_vectors;
u8 try_msix = 0;
+ int iopoll_q_count = 0;
ioc->msix_load_balance = false;
@@ -3179,22 +3379,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
ioc->cpu_count, max_msix_vectors);
- if (ioc->is_aero_ioc)
- _base_check_and_enable_high_iops_queues(ioc,
- ioc->msix_vector_count);
+
ioc->reply_queue_count =
- min_t(int, ioc->cpu_count + ioc->high_iops_queues,
- ioc->msix_vector_count);
+ min_t(int, ioc->cpu_count, ioc->msix_vector_count);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
else
local_max_msix_vectors = max_msix_vectors;
- if (local_max_msix_vectors > 0)
- ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
- ioc->reply_queue_count);
- else if (local_max_msix_vectors == 0)
+ if (local_max_msix_vectors == 0)
goto try_ioapic;
/*
@@ -3215,19 +3409,82 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
if (ioc->msix_load_balance)
ioc->smp_affinity_enable = 0;
+ if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
+ ioc->shost->host_tagset = 0;
+
+ /*
+ * Enable io uring poll queues only if host_tagset is enabled.
+ */
+ if (ioc->shost->host_tagset)
+ iopoll_q_count = poll_queues;
+
+ if (iopoll_q_count) {
+ ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
+ sizeof(struct io_uring_poll_queue), GFP_KERNEL);
+ if (!ioc->io_uring_poll_queues)
+ iopoll_q_count = 0;
+ }
+
+ if (ioc->is_aero_ioc)
+ _base_check_and_enable_high_iops_queues(ioc,
+ ioc->msix_vector_count);
+
+ /*
+ * Add high iops queues count to reply queue count if high iops queues
+ * are enabled.
+ */
+ ioc->reply_queue_count = min_t(int,
+ ioc->reply_queue_count + ioc->high_iops_queues,
+ ioc->msix_vector_count);
+
+ /*
+	 * Adjust the reply queue count in case the reply queue count
+ * exceeds the user provided MSIx vectors count.
+ */
+ if (local_max_msix_vectors > 0)
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
+ ioc->reply_queue_count);
+ /*
+ * Add io uring poll queues count to reply queues count
+ * if io uring is enabled in driver.
+ */
+ if (iopoll_q_count) {
+ if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
+ iopoll_q_count = 0;
+ ioc->reply_queue_count = min_t(int,
+ ioc->reply_queue_count + iopoll_q_count,
+ ioc->msix_vector_count);
+ }
+
+ /*
+ * Starting index of io uring poll queues in reply queue list.
+ */
+ ioc->iopoll_q_start_index =
+ ioc->reply_queue_count - iopoll_q_count;
+
r = _base_alloc_irq_vectors(ioc);
if (r < 0) {
ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
goto try_ioapic;
}
+ /*
+ * Adjust the reply queue count if the allocated
+	 * MSIx vectors are fewer than the requested number
+ * of MSIx vectors.
+ */
+ if (r < ioc->iopoll_q_start_index) {
+ ioc->reply_queue_count = r + iopoll_q_count;
+ ioc->iopoll_q_start_index =
+ ioc->reply_queue_count - iopoll_q_count;
+ }
+
ioc->msix_enable = 1;
- ioc->reply_queue_count = r;
for (i = 0; i < ioc->reply_queue_count; i++) {
r = _base_request_irq(ioc, i);
if (r) {
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
goto try_ioapic;
}
}
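/*
 * Editor's worked example (all values assumed): 32 CPUs, poll_queues = 4,
 * high_iops_queues = 8, FW msix_vector_count = 128, host_tagset enabled:
 *
 *	reply_queue_count = min(32, 128)	= 32
 *	+ high iops	  = min(32 + 8, 128)	= 40
 *	+ poll queues	  = min(40 + 4, 128)	= 44
 *	iopoll_q_start_index = 44 - 4		= 40
 *
 * MSI-X vectors are therefore allocated only for indexes 0..39; reply
 * queues 40..43 are polled from io_uring and never get an IRQ line.
 */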
@@ -3242,6 +3499,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->high_iops_queues = 0;
ioc_info(ioc, "High IOPs queues : disabled\n");
ioc->reply_queue_count = 1;
+ ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
dfailprintk(ioc,
@@ -3264,8 +3522,8 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
@@ -3287,14 +3545,14 @@ static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
/**
- * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
+ * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
* and if it is in fault state then issue diag reset.
* @ioc: per adapter object
*
- * Returns: 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
-static int
-_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
+int
+mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
int rc = -EFAULT;
@@ -3308,12 +3566,14 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_mask_interrupts(ioc);
rc = _base_diag_reset(ioc);
} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_COREDUMP) {
mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ mpt3sas_base_mask_interrupts(ioc);
rc = _base_diag_reset(ioc);
}
@@ -3336,6 +3596,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
u64 pio_chip = 0;
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
+ int iopoll_q_count = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -3391,11 +3652,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
r = _base_get_ioc_facts(ioc);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_ioc_facts(ioc)))
goto out_fail;
}
@@ -3409,6 +3670,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_fail;
+ iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
+ for (i = 0; i < iopoll_q_count; i++) {
+ atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
+ atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
+ }
+
if (!ioc->is_driver_loading)
_base_init_irqpolls(ioc);
/* Use the Combined reply queue feature only for SAS3 C0 & higher
@@ -3432,10 +3699,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
for (i = 0; i < ioc->combined_reply_index_count; i++) {
- ioc->replyPostRegisterIndex[i] = (resource_size_t *)
- ((u8 __force *)&ioc->chip->Doorbell +
- MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
- (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
+ ioc->replyPostRegisterIndex[i] =
+ (resource_size_t __iomem *)
+ ((u8 __force *)&ioc->chip->Doorbell +
+ MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
+ (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
}
@@ -3450,11 +3718,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* 4)));
}
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
+ pr_info("%s: enabled: index: %d\n",
+ reply_q->name, reply_q->msix_index);
+ continue;
+ }
+
pr_info("%s: %s enabled: IRQ %d\n",
reply_q->name,
ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ }
ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
&chip_phys, ioc->chip, memap_sz);
@@ -3556,7 +3831,7 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
* @ioc: per adapter object
* @scmd: scsi_cmnd object
*
- * returns msix index of general reply queues,
+ * Return: msix index of general reply queues,
* i.e. reply queue on which IO request's reply
* should be posted by the HBA firmware.
*/
@@ -3570,32 +3845,23 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
base_mod64(atomic64_add_return(1,
&ioc->total_io_cnt), ioc->reply_queue_count) : 0;
- return ioc->cpu_msix_table[raw_smp_processor_id()];
-}
+ if (scmd && ioc->shost->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
-/**
- * _base_sdev_nr_inflight_request -get number of inflight requests
- * of a request queue.
- * @q: request_queue object
- *
- * returns number of inflight request of a request queue.
- */
-inline unsigned long
-_base_sdev_nr_inflight_request(struct request_queue *q)
-{
- struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
+ return blk_mq_unique_tag_to_hwq(tag) +
+ ioc->high_iops_queues;
+ }
- return atomic_read(&hctx->nr_active);
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
}
-
/**
* _base_get_high_iops_msix_index - get the msix index of
* high iops queues
* @ioc: per adapter object
* @scmd: scsi_cmnd object
*
- * Returns: msix index of high iops reply queues.
+ * Return: msix index of high iops reply queues.
* i.e. high iops reply queue on which IO request's
* reply should be posted by the HBA firmware.
*/
@@ -3608,8 +3874,8 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
* reply queues in terms of batch count 16 when outstanding
* IOs on the target device is >=8.
*/
- if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
- MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+
+ if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
return base_mod64((
atomic64_add_return(1, &ioc->high_iops_outstanding) /
MPT3SAS_HIGH_IOPS_BATCH_COUNT),
@@ -3661,8 +3927,23 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
struct scsiio_tracker *request = scsi_cmd_priv(scmd);
- unsigned int tag = scmd->request->tag;
u16 smid;
+ u32 tag, unique_tag;
+
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ tag = blk_mq_unique_tag_to_tag(unique_tag);
+
+ /*
+ * Store hw queue number corresponding to the tag.
+ * This hw queue number is used later to determine
+ * the unique_tag using the logic below. This unique_tag
+ * is used to retrieve the scmd pointer corresponding
+ * to tag using scsi_host_find_tag() API.
+ *
+ * tag = smid - 1;
+ * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
+ */
+ ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
smid = tag + 1;
request->cb_idx = cb_idx;
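/*
 * Editor's sketch (illustration only) of the reverse lookup described in
 * the comment above; scsi_host_find_tag() and BLK_MQ_UNIQUE_TAG_BITS are
 * the stock block/SCSI midlayer helpers.
 */
static struct scsi_cmnd *
example_scmd_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u32 tag = smid - 1;	/* smid is 1-based, block tags are 0-based */
	u32 unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS |
	    tag;

	return scsi_host_find_tag(ioc->shost, unique_tag);
}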
@@ -3753,6 +4034,7 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
mpt3sas_base_clear_st(ioc, st);
_base_recovery_check(ioc);
+ ioc->io_queue_num[smid - 1] = 0;
return;
}
@@ -3826,7 +4108,7 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
* @ioc: per adapter object
* @smid: system request message index
*
- * returns msix index.
+ * Return: msix index.
*/
static u8
_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3921,7 +4203,7 @@ _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
*/
static void
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4025,7 +4307,7 @@ _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @smid: system request message index
* @handle: device handle, unused in this function, for function type match
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4047,7 +4329,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle, unused in this function, for function type match
- * Return nothing
+ * Return: nothing
*/
static void
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4068,9 +4350,9 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* firmware using Atomic Request Descriptor
* @ioc: per adapter object
* @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4087,12 +4369,12 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * _base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default_atomic - Default, primarily used for config pages
* use Atomic Request Descriptor
* @ioc: per adapter object
* @smid: system request message index
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4350,6 +4632,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
default:
break;
}
@@ -4369,7 +4652,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
- int r = 0;
+ int r = 0, issue_diag_reset = 0;
u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
@@ -4419,7 +4702,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
- r = -ETIME;
+ issue_diag_reset = 1;
} else {
memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
@@ -4459,18 +4742,25 @@ out:
if (fwpkg_data)
dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
fwpkg_data_dma);
+ if (issue_diag_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
+ return -EFAULT;
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
return r;
}
/**
- * _base_display_ioc_capabilities - Disply IOC's capabilities.
+ * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*/
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
- char desc[16];
+ char desc[17] = {0};
u32 iounit_pg1_flags;
u32 bios_version;
@@ -4666,15 +4956,19 @@ out:
* according to performance mode.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: zero on success; otherwise return EAGAIN error code asking the
+ * caller to retry.
*/
-static void
+static int
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2IOCPage1_t ioc_pg1;
Mpi2ConfigReply_t mpi_reply;
+ int rc;
- mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ if (rc)
+ return rc;
memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
switch (perf_mode) {
@@ -4696,11 +4990,13 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
*/
ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
ioc_info(ioc, "performance mode: balanced\n");
- return;
+ return 0;
}
- /* Fall through */
+ fallthrough;
case MPT_PERF_MODE_LATENCY:
/*
* Enable interrupt coalescing on all reply queues
@@ -4709,7 +5005,9 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
ioc_pg1.ProductSpecific = 0;
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
ioc_info(ioc, "performance mode: latency\n");
break;
case MPT_PERF_MODE_IOPS:
@@ -4721,32 +5019,601 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
le32_to_cpu(ioc_pg1.CoalescingTimeout));
ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
ioc_pg1.ProductSpecific = 0;
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
break;
}
+ return 0;
+}
+
+/**
+ * _base_get_event_diag_triggers - get event diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage2_t trigger_pg2;
+ struct SL_WH_EVENT_TRIGGER_T *event_tg;
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
+ Mpi2ConfigReply_t mpi_reply;
+ int r = 0, i = 0;
+ u16 count = 0;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
+ &trigger_pg2);
+ if (r)
+ return r;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return 0;
+ }
+
+ if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
+ count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
+ count = min_t(u16, NUM_VALID_ENTRIES, count);
+ ioc->diag_trigger_event.ValidEntries = count;
+
+ event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
+ mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
+ for (i = 0; i < count; i++) {
+ event_tg->EventValue = le16_to_cpu(
+ mpi_event_tg->MPIEventCode);
+ event_tg->LogEntryQualifier = le16_to_cpu(
+ mpi_event_tg->MPIEventCodeSpecific);
+ event_tg++;
+ mpi_event_tg++;
+ }
+ }
+ return 0;
+}
+
+/**
+ * _base_get_scsi_diag_triggers - get scsi diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage3_t trigger_pg3;
+ struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
+ Mpi2ConfigReply_t mpi_reply;
+ int r = 0, i = 0;
+ u16 count = 0;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
+ &trigger_pg3);
+ if (r)
+ return r;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return 0;
+ }
+
+ if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
+ count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
+ count = min_t(u16, NUM_VALID_ENTRIES, count);
+ ioc->diag_trigger_scsi.ValidEntries = count;
+
+ scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
+ mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
+ for (i = 0; i < count; i++) {
+ scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
+ scsi_tg->ASC = mpi_scsi_tg->ASC;
+ scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
+
+ scsi_tg++;
+ mpi_scsi_tg++;
+ }
+ }
+ return 0;
+}
+
+/**
+ * _base_get_mpi_diag_triggers - get mpi diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage4_t trigger_pg4;
+ struct SL_WH_MPI_TRIGGER_T *status_tg;
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
+ Mpi2ConfigReply_t mpi_reply;
+ int r = 0, i = 0;
+ u16 count = 0;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
+ &trigger_pg4);
+ if (r)
+ return r;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return 0;
+ }
+
+ if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
+ count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
+ count = min_t(u16, NUM_VALID_ENTRIES, count);
+ ioc->diag_trigger_mpi.ValidEntries = count;
+
+ status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
+ mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
+
+ for (i = 0; i < count; i++) {
+ status_tg->IOCStatus = le16_to_cpu(
+ mpi_status_tg->IOCStatus);
+ status_tg->IocLogInfo = le32_to_cpu(
+ mpi_status_tg->LogInfo);
+
+ status_tg++;
+ mpi_status_tg++;
+ }
+ }
+ return 0;
+}
+
+/**
+ * _base_get_master_diag_triggers - get master diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage1_t trigger_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
+ &trigger_pg1);
+ if (r)
+ return r;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return 0;
+ }
+
+ if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
+ ioc->diag_trigger_master.MasterData |=
+ le32_to_cpu(
+ trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
+ return 0;
+}
+
+/**
+ * _base_check_for_trigger_pages_support - checks whether HBA FW supports
+ * driver trigger pages or not
+ * @ioc : per adapter object
+ * @trigger_flags : address where trigger page0's TriggerFlags value is copied
+ *
+ * Return: 0 if HBA FW supports driver trigger pages, with the trigger
+ * flags mask copied to @trigger_flags; -EFAULT if driver trigger pages
+ * are not supported by the FW; or -EAGAIN if a diag reset occurred due
+ * to a FW fault, asking the caller to retry the command.
+ *
+ */
+static int
+_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
+{
+ Mpi26DriverTriggerPage0_t trigger_pg0;
+ int r = 0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
+ &trigger_pg0);
+ if (r)
+ return r;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return -EFAULT;
+
+ *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
+ return 0;
+}
+
+/**
+ * _base_get_diag_triggers - Retrieve diag trigger values from
+ * persistent pages.
+ * @ioc : per adapter object
+ *
+ * Return: zero on success; otherwise return EAGAIN error codes
+ * asking the caller to retry.
+ */
+static int
+_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ int trigger_flags;
+ int r;
+
+ /*
+ * Default setting of master trigger.
+ */
+ ioc->diag_trigger_master.MasterData =
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+
+ r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
+ if (r) {
+ if (r == -EAGAIN)
+ return r;
+ /*
+ * Don't go for error handling when FW doesn't support
+ * driver trigger pages.
+ */
+ return 0;
+ }
+
+ ioc->supports_trigger_pages = 1;
+
+ /*
+ * Retrieve master diag trigger values from driver trigger pg1
+ * if master trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
+ r = _base_get_master_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
+
+ /*
+ * Retrieve event diag trigger values from driver trigger pg2
+ * if event trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
+ r = _base_get_event_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
+
+ /*
+ * Retrieve scsi diag trigger values from driver trigger pg3
+ * if scsi trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
+ r = _base_get_scsi_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
+ /*
+ * Retrieve mpi error diag trigger values from driver trigger pg4
+ * if loginfo trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
+ r = _base_get_mpi_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * _base_update_diag_trigger_pages - Update the driver trigger pages after
+ * online FW update, in case updated FW supports driver
+ * trigger pages.
+ * @ioc : per adapter object
+ *
+ * Return: nothing.
+ */
+static void
+_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
+{
+
+ if (ioc->diag_trigger_master.MasterData)
+ mpt3sas_config_update_driver_trigger_pg1(ioc,
+ &ioc->diag_trigger_master, 1);
+
+ if (ioc->diag_trigger_event.ValidEntries)
+ mpt3sas_config_update_driver_trigger_pg2(ioc,
+ &ioc->diag_trigger_event, 1);
+
+ if (ioc->diag_trigger_scsi.ValidEntries)
+ mpt3sas_config_update_driver_trigger_pg3(ioc,
+ &ioc->diag_trigger_scsi, 1);
+
+ if (ioc->diag_trigger_mpi.ValidEntries)
+ mpt3sas_config_update_driver_trigger_pg4(ioc,
+ &ioc->diag_trigger_mpi, 1);
+}
+
+/**
+ * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices.
+ * - On failure set default QD values.
+ * @ioc : per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+ u16 depth;
+ int sz;
+ int rc = 0;
+
+ ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+ ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
+ ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
+ if (!ioc->is_gen35_ioc)
+ goto out;
+ /* sas iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
+ sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return rc;
+ }
+ rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz);
+ if (rc) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+ ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+ ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = sas_iounit_pg1->SATAMaxQDepth;
+ ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
+
+ /* pcie iounit page 1 */
+ rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
+ &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
+ if (rc) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
+ (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
+ MPT3SAS_NVME_QUEUE_DEPTH;
+out:
+ dinitprintk(ioc, pr_err(
+ "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
+ ioc->max_wideport_qd, ioc->max_narrowport_qd,
+ ioc->max_sata_qd, ioc->max_nvme_qd));
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+/**
+ * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
+ *
+ * @ioc : per adapter object
+ * @n : ptr to the ATTO nvram structure
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
+ struct ATTO_SAS_NVRAM *n)
+{
+ int r = -EINVAL;
+ union ATTO_SAS_ADDRESS *s1;
+ u32 len;
+ u8 *pb;
+ u8 ckSum;
+
+ /* validate nvram checksum */
+ pb = (u8 *) n;
+ ckSum = ATTO_SASNVR_CKSUM_SEED;
+ len = sizeof(struct ATTO_SAS_NVRAM);
+
+ while (len--)
+ ckSum = ckSum + pb[len];
+
+ if (ckSum) {
+ ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
+ return r;
+ }
+
+ s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
+
+ if (n->Signature[0] != 'E'
+ || n->Signature[1] != 'S'
+ || n->Signature[2] != 'A'
+ || n->Signature[3] != 'S')
+ ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
+ else if (n->Version > ATTO_SASNVR_VERSION)
+ ioc_info(ioc, "Invalid ATTO NVRAM version");
+ else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
+ || s1->b[0] != 0x50
+ || s1->b[1] != 0x01
+ || s1->b[2] != 0x08
+ || (s1->b[3] & 0xF0) != 0x60
+ || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
+ ioc_err(ioc, "Invalid ATTO SAS address\n");
+ } else
+ r = 0;
+ return r;
+}
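/*
 * Editor's note (illustration): this is a plain additive checksum -- the
 * NVRAM image is generated so that
 *
 *	(ATTO_SASNVR_CKSUM_SEED + sum of all NVRAM bytes) % 256 == 0
 *
 * which is why any non-zero ckSum after the byte walk above means the
 * image is corrupt.
 */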
+
+/**
+ * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
+ *
+ * @ioc : per adapter object
+ * @sas_addr : return sas address
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
+{
+ Mpi2ManufacturingPage1_t mfg_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ struct ATTO_SAS_NVRAM *nvram;
+ int r;
+ __be64 addr;
+
+ r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
+ if (r) {
+ ioc_err(ioc, "Failed to read manufacturing page 1\n");
+ return r;
+ }
+
+ /* validate nvram */
+ nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
+ r = mpt3sas_atto_validate_nvram(ioc, nvram);
+ if (r)
+ return r;
+
+ addr = *((__be64 *) nvram->SasAddr);
+ sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
+ return r;
+}
+
+/**
+ * mpt3sas_atto_init - perform initialization for ATTO branded
+ *	adapter.
+ * @ioc : per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
+{
+ int sz = 0;
+ Mpi2BiosPage4_t *bios_pg4 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ int ix;
+ union ATTO_SAS_ADDRESS sas_addr;
+ union ATTO_SAS_ADDRESS temp;
+ union ATTO_SAS_ADDRESS bias;
+
+ r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
+ if (r)
+ return r;
+
+ /* get header first to get size */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
+ return r;
+ }
+
+ sz = mpi_reply.Header.PageLength * sizeof(u32);
+ bios_pg4 = kzalloc(sz, GFP_KERNEL);
+ if (!bios_pg4) {
+ ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
+ return -ENOMEM;
+ }
+
+ /* read bios page 4 */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4\n");
+ goto out;
+ }
+
+ /* Update bios page 4 with the ATTO WWID */
+ bias.q = sas_addr.q;
+ bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;
+
+ for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
+ temp.q = sas_addr.q;
+ temp.b[7] += ix;
+ bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
+ bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
+ }
+ r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+
+out:
+ kfree(bios_pg4);
+ return r;
}
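/*
 * Editor's worked example (addresses assumed): with an NVRAM base SAS
 * address whose low byte is 0x00, phy 0 is assigned WWID ...00, phy 1
 * WWID ...01, and so on, while every phy shares the same device name
 * derived by adding ATTO_SAS_ADDR_DEVNAME_BIAS into that same byte.
 */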
/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
-static void
+static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
-
+ int tg_flags = 0;
+ int rc;
ioc->nvme_abort_timeout = 30;
- mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
- if (ioc->ir_firmware)
- mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+
+ rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
+ &ioc->manu_pg0);
+ if (rc)
+ return rc;
+ if (ioc->ir_firmware) {
+ rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
&ioc->manu_pg10);
+ if (rc)
+ return rc;
+ }
+
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ rc = mpt3sas_atto_init(ioc);
+ if (rc)
+ return rc;
+ }
/*
* Ensure correct T10 PI operation if vendor left EEDPTagMode
* flag unset in NVDATA.
*/
- mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ if (rc)
+ return rc;
if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
ioc->name);
@@ -4767,13 +5634,54 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
else
ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
}
+ ioc->time_sync_interval =
+ ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
+ if (ioc->time_sync_interval) {
+ if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
+ ioc->time_sync_interval =
+ ioc->time_sync_interval * SECONDS_PER_HOUR;
+ else
+ ioc->time_sync_interval =
+ ioc->time_sync_interval * SECONDS_PER_MIN;
+ dinitprintk(ioc, ioc_info(ioc,
+ "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
+ ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
+ MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
+ } else {
+ if (ioc->is_gen35_ioc)
+ ioc_warn(ioc,
+ "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
+ }
+ rc = _base_assign_fw_reported_qd(ioc);
+ if (rc)
+ return rc;
- mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
- mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
- mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
- mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ /*
+	 * ATTO doesn't use bios pages 2 and 3 for bios settings.
+ */
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
+ ioc->bios_pg3.BiosVersion = 0;
+ else {
+ rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ if (rc)
+ return rc;
+ }
+
+ rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ if (rc)
+ return rc;
_base_display_ioc_capabilities(ioc);
/*
@@ -4789,19 +5697,50 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
iounit_pg1_flags |=
MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
- mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (rc)
+ return rc;
if (ioc->iounit_pg8.NumSensors)
ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
- if (ioc->is_aero_ioc)
- _base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (ioc->is_aero_ioc) {
+ rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (rc)
+ return rc;
+ }
+ if (ioc->is_gen35_ioc) {
+ if (ioc->is_driver_loading) {
+ rc = _base_get_diag_triggers(ioc);
+ if (rc)
+ return rc;
+ } else {
+ /*
+ * In case of online HBA FW update operation,
+ * check whether updated FW supports the driver trigger
+ * pages or not.
+ * - If previous FW has not supported driver trigger
+ * pages and newer FW supports them then update these
+ * pages with current diag trigger values.
+ * - If previous FW has supported driver trigger pages
+ * and new FW doesn't support them then disable
+ * support_trigger_pages flag.
+ */
+			rc = _base_check_for_trigger_pages_support(ioc,
+			    &tg_flags);
+			if (!ioc->supports_trigger_pages && rc != -EFAULT)
+				_base_update_diag_trigger_pages(ioc);
+			else if (ioc->supports_trigger_pages && rc == -EFAULT)
+				ioc->supports_trigger_pages = 0;
+ }
+ }
+ return 0;
}
/**
* mpt3sas_free_enclosure_list - release memory
* @ioc: per adapter object
*
- * Free memory allocated during encloure add.
+ * Free memory allocated during enclosure add.
*/
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -4827,8 +5766,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
int j = 0;
+ int dma_alloc_count = 0;
struct chain_tracker *ct;
- struct reply_post_struct *rps;
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -4870,29 +5810,34 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->reply_post) {
- do {
- rps = &ioc->reply_post[i];
- if (rps->reply_post_free) {
- dma_pool_free(
- ioc->reply_post_free_dma_pool,
- rps->reply_post_free,
- rps->reply_post_free_dma);
- dexitprintk(ioc,
- ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
- rps->reply_post_free));
- rps->reply_post_free = NULL;
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ for (i = 0; i < count; i++) {
+ if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
+ && dma_alloc_count) {
+ if (ioc->reply_post[i].reply_post_free) {
+ dma_pool_free(
+ ioc->reply_post_free_dma_pool,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post[i].reply_post_free_dma);
+ dexitprintk(ioc, ioc_info(ioc,
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->reply_post[i].reply_post_free));
+ ioc->reply_post[i].reply_post_free =
+ NULL;
+ }
+ --dma_alloc_count;
}
- } while (ioc->rdpq_array_enable &&
- (++i < ioc->reply_queue_count));
+ }
+ dma_pool_destroy(ioc->reply_post_free_dma_pool);
if (ioc->reply_post_free_array &&
ioc->rdpq_array_enable) {
dma_pool_free(ioc->reply_post_free_array_dma_pool,
- ioc->reply_post_free_array,
- ioc->reply_post_free_array_dma);
+ ioc->reply_post_free_array,
+ ioc->reply_post_free_array_dma);
ioc->reply_post_free_array = NULL;
}
dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
- dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post);
}
@@ -4901,11 +5846,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_free(ioc->pcie_sgl_dma_pool,
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
}
- if (ioc->pcie_sgl_dma_pool)
- dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
-
if (ioc->config_page) {
dexitprintk(ioc,
ioc_info(ioc, "config_page(0x%p): free\n",
@@ -4915,7 +5859,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
kfree(ioc->hpr_lookup);
+ ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup);
+ ioc->internal_lookup = NULL;
if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
@@ -4932,33 +5878,378 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->chain_lookup);
ioc->chain_lookup = NULL;
}
+
+ kfree(ioc->io_queue_num);
+ ioc->io_queue_num = NULL;
}
/**
- * is_MSB_are_same - checks whether all reply queues in a set are
+ * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
* having same upper 32bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
* Return: 1 if reply queues in a set have a same upper 32bits in their base
* memory address, else 0.
*/
-
static int
-is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
+mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
- long reply_pool_end_address;
+ dma_addr_t end_address;
- reply_pool_end_address = reply_pool_start_address + pool_sz;
+ end_address = start_address + pool_sz - 1;
- if (upper_32_bits(reply_pool_start_address) ==
- upper_32_bits(reply_pool_end_address))
+ if (upper_32_bits(start_address) == upper_32_bits(end_address))
return 1;
else
return 0;
}
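/*
 * Editor's worked example: a pool with sz = 0x2000 based at DMA address
 * 0xFFFFF000 ends at 0x1_00000FFF, so upper_32_bits() differs (0x0 vs
 * 0x1) and the check returns 0; the same pool based at 0xFFFFC000 ends
 * at 0xFFFFDFFF and passes.
 */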
/**
+ * _base_reduce_hba_queue_depth - Retry with reduced queue depth
+ * @ioc: Adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ **/
+static inline int
+_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
+{
+ int reduce_sz = 64;
+
+ if ((ioc->hba_queue_depth - reduce_sz) >
+ (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+ ioc->hba_queue_depth -= reduce_sz;
+ return 0;
+ } else
+ return -ENOMEM;
+}
+
+/**
+ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
+ * for pcie sgl pools.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+
+static int
+_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ int i = 0, j = 0;
+ struct chain_tracker *ct;
+
+ ioc->pcie_sgl_dma_pool =
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
+ ioc->page_size, 0);
+ if (!ioc->pcie_sgl_dma_pool) {
+ ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+ return -ENOMEM;
+ }
+
+ ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+ ioc->chains_per_prp_buffer =
+ min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ ioc->pcie_sg_lookup[i].pcie_sgl =
+ dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+ ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+ return -EAGAIN;
+ }
+
+ if (!mpt3sas_check_same_4gb_region(
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
+ ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ (unsigned long long)
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+
+ for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+ ct = &ioc->chain_lookup[i].chains_per_smid[j];
+ ct->chain_buffer =
+ ioc->pcie_sg_lookup[i].pcie_sgl +
+ (j * ioc->chain_segment_sz);
+ ct->chain_buffer_dma =
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+ (j * ioc->chain_segment_sz);
+ }
+ }
+ dinitprintk(ioc, ioc_info(ioc,
+ "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+ dinitprintk(ioc, ioc_info(ioc,
+ "Number of chains can fit in a PRP page(%d)\n",
+ ioc->chains_per_prp_buffer));
+ return 0;
+}
+
+/**
+ * _base_allocate_chain_dma_pool - Allocating DMA'able memory
+ * for chain dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ int i = 0, j = 0;
+ struct chain_tracker *ctr;
+
+ ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
+ ioc->chain_segment_sz, 16, 0);
+ if (!ioc->chain_dma_pool)
+ return -ENOMEM;
+
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ for (j = ioc->chains_per_prp_buffer;
+ j < ioc->chains_needed_per_io; j++) {
+ ctr = &ioc->chain_lookup[i].chains_per_smid[j];
+ ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
+ GFP_KERNEL, &ctr->chain_buffer_dma);
+ if (!ctr->chain_buffer)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region(
+ ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
+ ioc_err(ioc,
+ "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
+ ctr->chain_buffer,
+ (unsigned long long)ctr->chain_buffer_dma);
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ }
+ }
+ dinitprintk(ioc, ioc_info(ioc,
+ "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
+ (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
+ ioc->chain_segment_sz))/1024));
+ return 0;
+}
+
+/**
+ * _base_allocate_sense_dma_pool - Allocating DMA'able memory
+ * for sense dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ ioc->sense_dma_pool =
+ dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
+ if (!ioc->sense_dma_pool)
+ return -ENOMEM;
+ ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
+ GFP_KERNEL, &ioc->sense_dma);
+ if (!ioc->sense)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
+ dinitprintk(ioc, pr_err(
+ "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
+ ioc->sense, (unsigned long long) ioc->sense_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ ioc_info(ioc,
+ "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
+ ioc->sense, (unsigned long long)ioc->sense_dma,
+ ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
+ return 0;
+}
+
+/**
+ * _base_allocate_reply_pool - Allocating DMA'able memory
+ * for reply pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ /* reply pool, 4 byte align */
+ ioc->reply_dma_pool = dma_pool_create("reply pool",
+ &ioc->pdev->dev, sz, 4, 0);
+ if (!ioc->reply_dma_pool)
+ return -ENOMEM;
+ ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
+ dinitprintk(ioc, pr_err(
+ "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
+ ioc->reply, (unsigned long long) ioc->reply_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ ioc_info(ioc,
+ "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->reply, (unsigned long long)ioc->reply_dma,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
+ return 0;
+}
+
+/**
+ * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
+ * for reply free dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ /* reply free queue, 16 byte align */
+ ioc->reply_free_dma_pool = dma_pool_create(
+ "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool)
+ return -ENOMEM;
+ ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
+ GFP_KERNEL, &ioc->reply_free_dma);
+ if (!ioc->reply_free)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
+ dinitprintk(ioc,
+ pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
+ ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, ioc_info(ioc,
+ "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, ioc_info(ioc,
+ "reply_free_dma (0x%llx)\n",
+ (unsigned long long)ioc->reply_free_dma));
+ return 0;
+}
+
+/**
+ * _base_allocate_reply_post_free_array - Allocating DMA'able memory
+ * for reply post free array.
+ * @ioc: Adapter object
+ * @reply_post_free_array_sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+
+static int
+_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
+ u32 reply_post_free_array_sz)
+{
+ ioc->reply_post_free_array_dma_pool =
+ dma_pool_create("reply_post_free_array pool",
+ &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+ if (!ioc->reply_post_free_array_dma_pool)
+ return -ENOMEM;
+ ioc->reply_post_free_array =
+ dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+ GFP_KERNEL, &ioc->reply_post_free_array_dma);
+ if (!ioc->reply_post_free_array)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
+ reply_post_free_array_sz)) {
+ dinitprintk(ioc, pr_err(
+ "Bad Reply Post Free Array! reply_post_free_array (0x%p) reply_post_free_array_dma = (0x%llx)\n",
+ ioc->reply_post_free_array,
+ (unsigned long long) ioc->reply_post_free_array_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+/**
+ * base_alloc_rdpq_dma_pool - Allocate DMA'able memory
+ * for the reply descriptor post queues.
+ * @ioc: per adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
+{
+ int i = 0;
+ u32 dma_alloc_count = 0;
+ int reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
+
+ ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
+ GFP_KERNEL);
+ if (!ioc->reply_post)
+ return -ENOMEM;
+ /*
+ * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ...) and
+ * for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ...)
+ * must lie within a 4GB boundary, i.e. the reply queues in a set must
+ * share the same upper 32 bits of their memory address. The driver
+ * therefore allocates the DMA'able memory for the reply queues
+ * accordingly, and applies the VENTURA_SERIES limitation to
+ * INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ ioc->reply_post_free_dma_pool =
+ dma_pool_create("reply_post_free pool",
+ &ioc->pdev->dev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
+ ioc->reply_post[i].reply_post_free =
+ dma_pool_zalloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free)
+ return -ENOMEM;
+ /*
+ * Each RDPQ pool chunk must satisfy the 4GB boundary
+ * restriction:
+ * 1) Check whether the chunk's allocation lies within a
+ * single 4GB range.
+ * 2) If #1 is true, continue with 64 bit DMA.
+ * 3) If #1 is false, return -EAGAIN, signalling the caller
+ * to free all resources, set the DMA mask to 32 bit, and
+ * reallocate.
+ */
+ if (!mpt3sas_check_same_4gb_region(
+ ioc->reply_post[i].reply_post_free_dma, sz)) {
+ dinitprintk(ioc,
+ ioc_err(ioc, "bad Replypost free pool(0x%p)"
+ "reply_post_free_dma = (0x%llx)\n",
+ ioc->reply_post[i].reply_post_free,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ return -EAGAIN;
+ }
+ dma_alloc_count--;
+
+ } else {
+ ioc->reply_post[i].reply_post_free =
+ (Mpi2ReplyDescriptorsUnion_t *)
+ ((long)ioc->reply_post[i-1].reply_post_free
+ + reply_post_free_sz);
+ ioc->reply_post[i].reply_post_free_dma =
+ (dma_addr_t)
+ (ioc->reply_post[i-1].reply_post_free_dma +
+ reply_post_free_sz);
+ }
+ }
+ return 0;
+}
+
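A worked example of the chunking arithmetic in base_alloc_rdpq_dma_pool():
with RDPQ_MAX_INDEX_IN_ONE_CHUNK = 16 and a hypothetical count of 31 reply
queues, dma_alloc_count = DIV_ROUND_UP(31, 16) = 2, so only queues 0 and 16
receive fresh dma_pool_zalloc() chunks; every other queue is carved out of
the previous queue's memory at a reply_post_free_sz offset. The sketch below
replays the same index walk as self-contained userspace C (values are
hypothetical, not taken from any controller):

	/* Illustrative sketch only -- not part of this patch. */
	#include <stdio.h>

	#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int count = 31;	/* hypothetical reply queue count */
		int chunks = DIV_ROUND_UP(count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
		int i;

		printf("%d queues -> %d chunk allocations\n", count, chunks);
		for (i = 0; i < count; i++)
			printf("queue %2d: %s\n", i,
			       (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) ?
			       "dma_pool_zalloc()" : "offset from previous");
		return 0;
	}
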
+/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
@@ -4972,11 +6263,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
+ u32 rdpq_sz = 0, sense_sz = 0;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
- int i, j;
- struct chain_tracker *ct;
+ int i;
+ int ret = 0, rc = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5129,54 +6421,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
-
- sz = reply_post_free_sz;
- if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
- sz *= ioc->reply_queue_count;
-
- ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
- (ioc->reply_queue_count):1,
- sizeof(struct reply_post_struct), GFP_KERNEL);
-
- if (!ioc->reply_post) {
- ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
- goto out;
- }
- ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
- &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
- goto out;
- }
- i = 0;
- do {
- ioc->reply_post[i].reply_post_free =
- dma_pool_zalloc(ioc->reply_post_free_dma_pool,
- GFP_KERNEL,
- &ioc->reply_post[i].reply_post_free_dma);
- if (!ioc->reply_post[i].reply_post_free) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
- goto out;
- }
- dinitprintk(ioc,
- ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth,
- 8, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
- (u64)ioc->reply_post[i].reply_post_free_dma));
- total_sz += sz;
- } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
-
- if (ioc->dma_mask > 32) {
- if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
- pci_name(ioc->pdev));
- goto out;
+ rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
+ if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+ || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
+ rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
+ ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
+ if (ret == -EAGAIN) {
+ /*
+ * Free allocated bad RDPQ memory pools.
+ * Change dma coherent mask to 32 bit and reallocate RDPQ
+ */
+ _base_release_memory_pools(ioc);
+ ioc->use_32bit_dma = true;
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ ioc_err(ioc,
+ "32 DMA mask failed %s\n", pci_name(ioc->pdev));
+ return -ENODEV;
}
- }
-
+ if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
+ return -ENOMEM;
+ } else if (ret == -ENOMEM)
+ return -ENOMEM;
+ total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
+ DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth;
@@ -5188,7 +6455,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
ioc->shost->can_queue));
-
/* contiguous pool for request and chains, 16 byte align, one extra "
* "frame for smid=0
*/
@@ -5215,7 +6481,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
_base_release_memory_pools(ioc);
goto retry_allocation;
}
- memset(ioc->request, 0, sz);
if (retry_sz)
ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
@@ -5289,6 +6554,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
ioc->internal,
ioc->internal_depth, ioc->internal_smid));
+
+ ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
+ sizeof(u16), GFP_KERNEL);
+ if (!ioc->io_queue_num)
+ goto out;
/*
* The number of NVMe page sized blocks needed is:
* (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -5302,6 +6572,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* be required for NVMe PRP's, only each set of NVMe blocks will be
* contiguous, so a new set is allocated for each possible I/O.
*/
+
ioc->chains_per_prp_buffer = 0;
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
nvme_blocks_needed =
@@ -5316,190 +6587,67 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
- ioc->pcie_sgl_dma_pool =
- dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->pcie_sgl_dma_pool) {
- ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
- goto out;
- }
-
- ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
- ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
- ioc->chains_needed_per_io);
-
- for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
- ioc->pcie_sgl_dma_pool, GFP_KERNEL,
- &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
- if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
- ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
- goto out;
- }
- for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
- ct = &ioc->chain_lookup[i].chains_per_smid[j];
- ct->chain_buffer =
- ioc->pcie_sg_lookup[i].pcie_sgl +
- (j * ioc->chain_segment_sz);
- ct->chain_buffer_dma =
- ioc->pcie_sg_lookup[i].pcie_sgl_dma +
- (j * ioc->chain_segment_sz);
- }
- }
-
- dinitprintk(ioc,
- ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->scsiio_depth, sz,
- (sz * ioc->scsiio_depth) / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
- ioc->chains_per_prp_buffer));
+ rc = _base_allocate_pcie_sgl_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
total_sz += sz * ioc->scsiio_depth;
}
- ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
- ioc->chain_segment_sz, 16, 0);
- if (!ioc->chain_dma_pool) {
- ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
- goto out;
- }
- for (i = 0; i < ioc->scsiio_depth; i++) {
- for (j = ioc->chains_per_prp_buffer;
- j < ioc->chains_needed_per_io; j++) {
- ct = &ioc->chain_lookup[i].chains_per_smid[j];
- ct->chain_buffer = dma_pool_alloc(
- ioc->chain_dma_pool, GFP_KERNEL,
- &ct->chain_buffer_dma);
- if (!ct->chain_buffer) {
- ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
- goto out;
- }
- }
- total_sz += ioc->chain_segment_sz;
- }
-
+ rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
+ total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
+ ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
dinitprintk(ioc,
- ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->chain_depth, ioc->chain_segment_sz,
- (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
-
+ ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->chain_depth, ioc->chain_segment_sz,
+ (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
/* sense buffers, 4 byte align */
- sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
- ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
- 4, 0);
- if (!ioc->sense_dma_pool) {
- ioc_err(ioc, "sense pool: dma_pool_create failed\n");
- goto out;
- }
- ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
- &ioc->sense_dma);
- if (!ioc->sense) {
- ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
- goto out;
- }
- /* sense buffer requires to be in same 4 gb region.
- * Below function will check the same.
- * In case of failure, new pci pool will be created with updated
- * alignment. Older allocation and pool will be destroyed.
- * Alignment will be used such a way that next allocation if
- * success, will always meet same 4gb region requirement.
- * Actual requirement is not alignment, but we need start and end of
- * DMA address must have same upper 32 bit address.
- */
- if (!is_MSB_are_same((long)ioc->sense, sz)) {
- //Release Sense pool & Reallocate
- dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
- dma_pool_destroy(ioc->sense_dma_pool);
- ioc->sense = NULL;
-
- ioc->sense_dma_pool =
- dma_pool_create("sense pool", &ioc->pdev->dev, sz,
- roundup_pow_of_two(sz), 0);
- if (!ioc->sense_dma_pool) {
- ioc_err(ioc, "sense pool: pci_pool_create failed\n");
- goto out;
- }
- ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
- &ioc->sense_dma);
- if (!ioc->sense) {
- ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
- goto out;
- }
- }
+ sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
+ total_sz += sense_sz;
ioc_info(ioc,
"sense pool(0x%p)- dma(0x%llx): depth(%d),"
"element_size(%d), pool_size(%d kB)\n",
ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz / 1024);
+ SCSI_SENSE_BUFFERSIZE, sense_sz / 1024);
-
- total_sz += sz;
-
/* reply pool, 4 byte align */
sz = ioc->reply_free_queue_depth * ioc->reply_sz;
- ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
- 4, 0);
- if (!ioc->reply_dma_pool) {
- ioc_err(ioc, "reply pool: dma_pool_create failed\n");
- goto out;
- }
- ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
- &ioc->reply_dma);
- if (!ioc->reply) {
- ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
- goto out;
- }
- ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
- ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
- dinitprintk(ioc,
- ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->reply, ioc->reply_free_queue_depth,
- ioc->reply_sz, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "reply_dma(0x%llx)\n",
- (unsigned long long)ioc->reply_dma));
+ rc = _base_allocate_reply_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
total_sz += sz;
/* reply free queue, 16 byte align */
sz = ioc->reply_free_queue_depth * 4;
- ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
- &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_free_dma_pool) {
- ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
- goto out;
- }
- ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
- &ioc->reply_free_dma);
- if (!ioc->reply_free) {
- ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
- goto out;
- }
- dinitprintk(ioc,
- ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->reply_free, ioc->reply_free_queue_depth,
- 4, sz / 1024));
+ rc = _base_allocate_reply_free_dma_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
dinitprintk(ioc,
ioc_info(ioc, "reply_free_dma (0x%llx)\n",
(unsigned long long)ioc->reply_free_dma));
total_sz += sz;
-
if (ioc->rdpq_array_enable) {
reply_post_free_array_sz = ioc->reply_queue_count *
sizeof(Mpi2IOCInitRDPQArrayEntry);
- ioc->reply_post_free_array_dma_pool =
- dma_pool_create("reply_post_free_array pool",
- &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
- if (!ioc->reply_post_free_array_dma_pool) {
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
- goto out;
- }
- ioc->reply_post_free_array =
- dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
- GFP_KERNEL, &ioc->reply_post_free_array_dma);
- if (!ioc->reply_post_free_array) {
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
- goto out;
- }
+ rc = _base_allocate_reply_post_free_array(ioc,
+ reply_post_free_array_sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
}
ioc->config_page_sz = 512;
ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
@@ -5522,6 +6670,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->shost->sg_tablesize);
return 0;
+try_32bit_dma:
+ _base_release_memory_pools(ioc);
+ if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
+ /* Change dma coherent mask to 32 bit and reallocate */
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
+ pci_name(ioc->pdev));
+ return -ENODEV;
+ }
+ } else if (_base_reduce_hba_queue_depth(ioc) != 0)
+ return -ENOMEM;
+ goto retry_allocation;
+
out:
return -ENOMEM;
}
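
The try_32bit_dma label above is the generic shape of a coherent-DMA
fallback: allocate under the 64 bit mask, and if any pool straddles a 4GB
boundary, release everything, drop the device to a 32 bit coherent mask
(under which every allocation trivially shares upper address bits of zero),
and retry once. A condensed sketch of that control flow follows;
alloc_all_pools() and free_all_pools() are placeholders standing in for the
driver's allocation and release routines, not real functions:

	/* Illustrative sketch only -- placeholder helpers, not driver code. */
	static int alloc_pools_with_fallback(struct device *dev)
	{
		int rc = alloc_all_pools(dev);

		if (rc == -EAGAIN) {	/* a pool crossed a 4GB boundary */
			free_all_pools(dev);
			if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
				return -ENODEV;
			rc = alloc_all_pools(dev); /* retry under 32 bit mask */
		}
		return rc;
	}
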
@@ -5577,9 +6738,27 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
}
/**
+ * _base_dump_reg_set - print a hexdump of the system register set.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static inline void
+_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned int i, sz = 256;
+ u32 __iomem *reg = (u32 __iomem *)ioc->chip;
+
+ ioc_info(ioc, "System Register set:\n");
+ for (i = 0; i < (sz / sizeof(u32)); i++)
+ pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
* _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
* a write to the doorbell)
* @ioc: per adapter object
+ * @timeout: timeout in seconds
*
* Return: 0 for success, non-zero for failure.
*
@@ -5792,11 +6971,11 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
/**
* mpt3sas_wait_for_ioc - IOC's operational state is checked here.
* @ioc: per adapter object
- * @wait_count: timeout in seconds
+ * @timeout: timeout in seconds
*
* Return: Waits up to timeout seconds for the IOC to
* become operational. Returns 0 if IOC is present
- * and operational; otherwise returns -EFAULT.
+ * and operational; otherwise returns %-EFAULT.
*/
int
@@ -5809,6 +6988,17 @@ mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
break;
+
+ /*
+ * The watchdog thread is started only after IOC initialization
+ * completes, so there is no need to wait here for the IOC state
+ * to become operational while initialization is in progress.
+ * Instead, return -ETIME so that the calling function can issue
+ * a diag reset and retry the command.
+ */
+ if (ioc->is_driver_loading)
+ return -ETIME;
+
ssleep(1);
ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
__func__, ++wait_state_count);
@@ -6387,7 +7577,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = _base_handshake_req_reply_wait(ioc,
sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
if (r != 0) {
ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
@@ -6401,6 +7591,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = -EIO;
}
+ /* Reset TimeSync Counter */
+ ioc->timestamp_update_count = 0;
return r;
}
@@ -6439,7 +7631,8 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
ioc->port_enable_failed = 1;
- if (ioc->is_driver_loading) {
+ if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
mpt3sas_port_enable_complete(ioc);
return 1;
@@ -6540,8 +7733,9 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
-
+ ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->port_enable_cmds.smid = smid;
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
@@ -6638,7 +7832,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
Mpi2EventNotificationRequest_t *mpi_request;
u16 smid;
int r = 0;
- int i;
+ int i, issue_diag_reset = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -6672,10 +7866,19 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
if (ioc->base_cmds.status & MPT3_CMD_RESET)
r = -EFAULT;
else
- r = -ETIME;
+ issue_diag_reset = 1;
+
} else
dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (issue_diag_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
+ return -EFAULT;
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
return r;
}
@@ -6732,6 +7935,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "sending diag reset !!\n");
+ pci_cfg_access_lock(ioc->pdev);
+
drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
count = 0;
@@ -6754,6 +7959,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (count++ > 20) {
ioc_info(ioc,
"Stop writing magic sequence after 20 retries\n");
+ _base_dump_reg_set(ioc);
goto out;
}
@@ -6782,6 +7988,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (host_diagnostic == 0xFFFFFFFF) {
ioc_info(ioc,
"Invalid host diagnostic register value\n");
+ _base_dump_reg_set(ioc);
goto out;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
@@ -6816,26 +8023,29 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (ioc_state) {
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
+ _base_dump_reg_set(ioc);
goto out;
}
+ pci_cfg_access_unlock(ioc->pdev);
ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
out:
+ pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
return -EFAULT;
}
/**
- * _base_make_ioc_ready - put controller in READY state
+ * mpt3sas_base_make_ioc_ready - put controller in READY state
* @ioc: per adapter object
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
* Return: 0 for success, non-zero for failure.
*/
-static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
u32 ioc_state;
int rc;
@@ -7032,7 +8242,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_driver_loading)
return r;
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_send_ioc_init(ioc)))
return r;
}
@@ -7058,7 +8268,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
skip_init_reply_post_host_index:
- _base_unmask_interrupts(ioc);
+ mpt3sas_base_unmask_interrupts(ioc);
if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
r = _base_display_fwpkg_version(ioc);
@@ -7066,12 +8276,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
return r;
}
- _base_static_config_pages(ioc);
+ r = _base_static_config_pages(ioc);
+ if (r)
+ return r;
+
r = _base_event_notification(ioc);
if (r)
return r;
- if (ioc->is_driver_loading) {
+ if (!ioc->shost_recovery) {
if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
== 0x80) {
@@ -7107,9 +8320,9 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
/* synchronizing freeing resource with pci_access_mutex lock */
mutex_lock(&ioc->pci_access_mutex);
if (ioc->chip_phys && ioc->chip) {
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, SOFT_RESET);
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
}
@@ -7158,7 +8371,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->smp_affinity_enable = smp_affinity_enable;
ioc->rdpq_array_enable_assigned = 0;
- ioc->dma_mask = 0;
+ ioc->use_32bit_dma = false;
+ ioc->dma_mask = 64;
if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero;
else
@@ -7170,7 +8384,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_ioc_facts(ioc)))
goto out_free_resources;
}
@@ -7187,7 +8401,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
/*
* In SAS3.0,
* SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
- * Target Status - all require the IEEE formated scatter gather
+ * Target Status - all require the IEEE formatted scatter gather
* elements.
*/
ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
@@ -7228,7 +8442,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg_mpi = &_base_build_sg;
ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
- r = _base_make_ioc_ready(ioc, SOFT_RESET);
+ r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
if (r)
goto out_free_resources;
@@ -7242,7 +8456,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_port_facts(ioc, i)))
goto out_free_resources;
}
@@ -7283,14 +8497,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz++;
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
- if (!ioc->pend_os_device_add)
+ if (!ioc->pend_os_device_add) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
ioc->device_remove_in_progress =
kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
- if (!ioc->device_remove_in_progress)
+ if (!ioc->device_remove_in_progress) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
ioc->fwfault_debug = mpt3sas_fwfault_debug;
@@ -7364,8 +8582,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
}
}
r = _base_make_ioc_operational(ioc);
- if (r)
- goto out_free_resources;
+ if (r == -EAGAIN) {
+ r = _base_make_ioc_operational(ioc);
+ if (r)
+ goto out_free_resources;
+ }
/*
* Copy current copy of IOCFacts in prev_fw_facts
@@ -7483,8 +8704,6 @@ _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
ioc->start_scan_failed =
MPI2_IOCSTATUS_INTERNAL_ERROR;
ioc->start_scan = 0;
- ioc->port_enable_cmds.status =
- MPT3_CMD_NOT_USED;
} else {
complete(&ioc->port_enable_cmds.done);
}
@@ -7668,13 +8887,17 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
(ioc_state & MPI2_IOC_STATE_MASK) ==
- MPI2_IOC_STATE_COREDUMP)
+ MPI2_IOC_STATE_COREDUMP) {
is_fault = 1;
+ ioc->htb_rel.trigger_info_dwords[1] =
+ (ioc_state & MPI2_DOORBELL_DATA_MASK);
+ }
}
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
- _base_mask_interrupts(ioc);
- r = _base_make_ioc_ready(ioc, type);
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
+ r = mpt3sas_base_make_ioc_ready(ioc, type);
if (r)
goto out;
_base_clear_outstanding_commands(ioc);
@@ -7715,6 +8938,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_count++;
mutex_unlock(&ioc->reset_in_progress_mutex);
+ mpt3sas_base_resume_mq_polling(ioc);
out_unlocked:
if ((r == 0) && is_trigger) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e7197150721f..05364aa15ecd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -71,13 +71,14 @@
#include "mpt3sas_debug.h"
#include "mpt3sas_trigger_diag.h"
+#include "mpt3sas_trigger_pages.h"
/* driver versioning info */
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "33.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 33
+#define MPT3SAS_DRIVER_VERSION "43.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 43
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -93,6 +94,14 @@
/* CoreDump: Default timeout */
#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/
#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF)
+#define MPT3SAS_TIMESYNC_TIMEOUT_SECONDS (10) /* 10 seconds */
+#define MPT3SAS_TIMESYNC_UPDATE_INTERVAL (900) /* 15 minutes */
+#define MPT3SAS_TIMESYNC_UNIT_MASK (0x80) /* bit 7 */
+#define MPT3SAS_TIMESYNC_MASK (0x7F) /* bits 0-6 */
+#define SECONDS_PER_MIN (60)
+#define SECONDS_PER_HOUR (3600)
+#define MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP (0x81)
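
For reference, the TimeSync macros above encode Manufacturing Page 11's
TimeSyncInterval byte: bit 7 (MPT3SAS_TIMESYNC_UNIT_MASK) selects hours
versus minutes and bits 0-6 (MPT3SAS_TIMESYNC_MASK) carry the count. A
hedged sketch of the decode into seconds (the helper name is illustrative,
not a function added by this patch):

	/* Illustrative sketch only -- not part of this patch. */
	static u32 timesync_interval_to_seconds(u8 time_sync_interval)
	{
		u32 count = time_sync_interval & MPT3SAS_TIMESYNC_MASK;

		if (!count)
			return 0;	/* time sync disabled */
		return (time_sync_interval & MPT3SAS_TIMESYNC_UNIT_MASK) ?
		       count * SECONDS_PER_HOUR : count * SECONDS_PER_MIN;
	}
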
/*
* Set MPT3SAS_SG_DEPTH value based on user input.
@@ -133,6 +142,8 @@
#define MPT_MAX_CALLBACKS 32
+#define MPT_MAX_HBA_NUM_PHYS 32
+
#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
/* reserved for issuing internally framed scsi io cmds */
#define INTERNAL_SCSIIO_CMDS_COUNT 3
@@ -345,6 +356,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_MIN_IRQS 1
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -367,6 +379,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
/* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t {
@@ -404,7 +417,7 @@ struct Mpi2ManufacturingPage11_t {
u16 HostTraceBufferMaxSizeKB; /* 50h */
u16 HostTraceBufferMinSizeKB; /* 52h */
u8 CoreDumpTOSec; /* 54h */
- u8 Reserved8; /* 55h */
+ u8 TimeSyncInterval; /* 55h */
u16 Reserved9; /* 56h */
__le32 Reserved10; /* 58h */
};
@@ -419,6 +432,7 @@ struct Mpi2ManufacturingPage11_t {
* @flags: MPT_TARGET_FLAGS_XXX flags
* @deleted: target flaged for deletion
* @tm_busy: target is busy with TM request.
+ * @port: hba port entry containing target's port number info
* @sas_dev: The sas_device associated with this target
* @pcie_dev: The pcie device associated with this target
*/
@@ -431,6 +445,7 @@ struct MPT3SAS_TARGET {
u32 flags;
u8 deleted;
u8 tm_busy;
+ struct hba_port *port;
struct _sas_device *sas_dev;
struct _pcie_device *pcie_dev;
};
@@ -488,6 +503,7 @@ struct MPT3SAS_DEVICE {
#define MPT3_CMD_PENDING 0x0002 /* pending */
#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+#define MPT3_CMD_COMPLETE_ASYNC 0x0010 /* tells whether cmd completes in same thread or not */
/**
* struct _internal_cmd - internal commands struct
@@ -533,6 +549,9 @@ struct _internal_cmd {
* addition routine.
* @chassis_slot: chassis slot
* @is_chassis_slot_valid: chassis slot valid or not
+ * @port: hba port entry containing device's port number info
+ * @rphy: device's sas_rphy address used to identify this device structure in
+ * target_alloc callback function
*/
struct _sas_device {
struct list_head list;
@@ -559,6 +578,9 @@ struct _sas_device {
u8 is_chassis_slot_valid;
u8 connector_name[5];
struct kref refcount;
+ u8 port_type;
+ struct hba_port *port;
+ struct sas_rphy *rphy;
};
static inline void sas_device_get(struct _sas_device *s)
@@ -729,6 +751,7 @@ struct _boot_device {
* @remote_identify: attached device identification
* @rphy: sas transport rphy object
* @port: sas transport wide/narrow port object
+ * @hba_port: hba port entry containing port's port number info
* @phy_list: _sas_phy list objects belonging to this port
*/
struct _sas_port {
@@ -737,6 +760,7 @@ struct _sas_port {
struct sas_identify remote_identify;
struct sas_rphy *rphy;
struct sas_port *port;
+ struct hba_port *hba_port;
struct list_head phy_list;
};
@@ -750,6 +774,7 @@ struct _sas_port {
* @handle: device handle for this phy
* @attached_handle: device handle for attached device
* @phy_belongs_to_port: port has been created for this phy
+ * @port: hba port entry containing port number info
*/
struct _sas_phy {
struct list_head port_siblings;
@@ -760,6 +785,8 @@ struct _sas_phy {
u16 handle;
u16 attached_handle;
u8 phy_belongs_to_port;
+ u8 hba_vphy;
+ struct hba_port *port;
};
/**
@@ -773,8 +800,11 @@ struct _sas_phy {
* @enclosure_handle: handle for this a member of an enclosure
* @device_info: bitwise defining capabilities of this sas_host/expander
* @responding: used in _scsih_expander_device_mark_responding
+ * @nr_phys_allocated: number of phys for which memory is allocated
* @phy: a list of phys that make up this sas_host/expander
* @sas_port_list: list of ports attached to this sas_host/expander
+ * @port: hba port entry containing node's port number info
+ * @rphy: sas_rphy object of this expander
*/
struct _sas_node {
struct list_head list;
@@ -786,11 +816,13 @@ struct _sas_node {
u16 enclosure_handle;
u64 enclosure_logical_id;
u8 responding;
+ u8 nr_phys_allocated;
+ struct hba_port *port;
struct _sas_phy *phy;
struct list_head sas_port_list;
+ struct sas_rphy *rphy;
};
-
/**
* struct _enclosure_node - enclosure information
* @list: list of enclosures
@@ -910,6 +942,8 @@ struct _event_ack_list {
* @os_irq: irq number
* @irqpoll: irq_poll object
* @irq_poll_scheduled: Tells whether irq poll is scheduled or not
+ * @is_iouring_poll_q: Tells whether this reply queue is assigned
+ * to an io uring poll queue or not
* @list: this list
*/
struct adapter_reply_queue {
@@ -923,9 +957,22 @@ struct adapter_reply_queue {
struct irq_poll irqpoll;
bool irq_poll_scheduled;
bool irq_line_enable;
+ bool is_iouring_poll_q;
struct list_head list;
};
+/**
+ * struct io_uring_poll_queue - the io uring poll queue structure
+ * @busy: Tells whether io uring poll queue is busy or not
+ * @pause: Tells whether IOs are paused on io uring poll queue or not
+ * @reply_q: reply queue mapped for io uring poll queue
+ */
+struct io_uring_poll_queue {
+ atomic_t busy;
+ atomic_t pause;
+ struct adapter_reply_queue *reply_q;
+};
+
typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
/* SAS3.0 support */
@@ -1008,6 +1055,90 @@ struct reply_post_struct {
dma_addr_t reply_post_free_dma;
};
+/**
+ * struct virtual_phy - vSES phy structure
+ * @list: list entry in the owning hba_port's vphys_list
+ * @sas_address: SAS address of the vSES device
+ * @phy_mask: vSES device's phy number mask
+ * @flags: flags used to manage this structure
+ */
+struct virtual_phy {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 flags;
+};
+
+#define MPT_VPHY_FLAG_DIRTY_PHY 0x01
+
+/**
+ * struct hba_port - Saves each HBA's Wide/Narrow port info
+ * @sas_address: sas address of this wide/narrow port's attached device
+ * @phy_mask: HBA PHYs belonging to this port
+ * @port_id: port number
+ * @flags: hba port flags
+ * @vphys_mask: mask of vSES device phy numbers
+ * @vphys_list: list containing vSES device structures
+ */
+struct hba_port {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 port_id;
+ u8 flags;
+ u32 vphys_mask;
+ struct list_head vphys_list;
+};
+
+/* hba port flags */
+#define HBA_PORT_FLAG_DIRTY_PORT 0x01
+#define HBA_PORT_FLAG_NEW_PORT 0x02
+
+#define MULTIPATH_DISABLED_PORT_ID 0xFF
+
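The phy_mask fields above (in both struct virtual_phy and struct hba_port)
are plain bitmaps indexed by phy number. A small sketch of the typical
set/test operations (hypothetical helpers, not part of this patch):

	/* Illustrative sketch only -- not part of this patch. */
	static inline void port_mark_phy(struct hba_port *port, u32 phy)
	{
		port->phy_mask |= (1U << phy);
	}

	static inline bool port_has_phy(const struct hba_port *port, u32 phy)
	{
		return !!(port->phy_mask & (1U << phy));
	}
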
+/**
+ * struct htb_rel_query - diagnostic buffer release reason
+ * @buffer_rel_condition: release condition (ioctl/sysfs/reset)
+ * @reserved: reserved
+ * @trigger_type: Master/Event/scsi/MPI
+ * @trigger_info_dwords: data corresponding to the trigger type
+ */
+struct htb_rel_query {
+ u16 buffer_rel_condition;
+ u16 reserved;
+ u32 trigger_type;
+ u32 trigger_info_dwords[2];
+};
+
+/* Buffer_rel_condition bit fields */
+
+/* Bit 0 - Diag Buffer not Released */
+#define MPT3_DIAG_BUFFER_NOT_RELEASED (0x00)
+/* Bit 0 - Diag Buffer Released */
+#define MPT3_DIAG_BUFFER_RELEASED (0x01)
+
+/*
+ * Bit 1 - Diag Buffer Released by IOCTL,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_IOCTL (0x02 | MPT3_DIAG_BUFFER_RELEASED)
+
+/*
+ * Bit 2 - Diag Buffer Released by Trigger,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_TRIGGER (0x04 | MPT3_DIAG_BUFFER_RELEASED)
+
+/*
+ * Bit 3 - Diag Buffer Released by SysFs,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_SYSFS (0x08 | MPT3_DIAG_BUFFER_RELEASED)
+
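Because each release-reason define above is OR'ed with
MPT3_DIAG_BUFFER_RELEASED, bit 0 answers whether the buffer was released at
all and bits 1-3 identify the path. A small decoding sketch for
buffer_rel_condition (hypothetical helper, not part of this patch):

	/* Illustrative sketch only -- not part of this patch. */
	static const char *htb_rel_reason(u16 cond)
	{
		if (!(cond & MPT3_DIAG_BUFFER_RELEASED))
			return "not released";
		if ((cond & MPT3_DIAG_BUFFER_REL_IOCTL) ==
		    MPT3_DIAG_BUFFER_REL_IOCTL)
			return "released by ioctl";
		if ((cond & MPT3_DIAG_BUFFER_REL_TRIGGER) ==
		    MPT3_DIAG_BUFFER_REL_TRIGGER)
			return "released by trigger";
		if ((cond & MPT3_DIAG_BUFFER_REL_SYSFS) ==
		    MPT3_DIAG_BUFFER_REL_SYSFS)
			return "released by sysfs";
		return "released";
	}
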
+/* DIAG RESET Master trigger flags */
+#define MPT_DIAG_RESET_ISSUED_BY_DRIVER 0x00000000
+#define MPT_DIAG_RESET_ISSUED_BY_USER 0x00000001
+
typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
/**
* struct MPT3SAS_ADAPTER - per adapter struct
@@ -1026,7 +1157,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
- * @dma_mask: used to set the consistent dma mask
* @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
* pci resource handling
* @fault_reset_work_q_name: fw fault work queue
@@ -1036,6 +1166,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @firmware_event_thread: ""
* @fw_event_lock:
* @fw_event_list: list of fw events
+ * @current_event: firmware event currently being processed
+ * @fw_events_cleanup: set to one while cleaning up the fw events
* @aen_event_read_flag: event log was read
* @broadcast_aen_busy: broadcast aen waiting to be serviced
* @shost_recovery: host reset in progress
@@ -1056,6 +1188,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @cpu_msix_table_sz: table size
* @total_io_cnt: Gives total IO count, used to load balance the interrupts
* @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
+ * @timestamp_update_count: Counter to fire timeSync command
+ * @time_sync_interval: Time sync interval read from Manufacturing Page 11
* @high_iops_outstanding: used to load balance the interrupts
* within high iops reply queues
* @msix_load_balance: Enables load balancing of interrupts across
@@ -1063,7 +1197,11 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
+ * @iopoll_q_start_index: starting index of io uring poll queues
+ * in reply queue list
+ * @drv_internal_flags: Bit map internal to driver
* @drv_support_bitmap: driver's supported feature bit map
+ * @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -1188,6 +1326,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* which ensures the syncrhonization between cli/sysfs_show path.
* @atomic_desc_capable: Atomic Request Descriptor support.
* @GET_MSIX_INDEX: Get the msix index of high iops queues.
+ * @multipath_on_hba: flag to determine multipath on hba is enabled or not
+ * @port_table_list: list containing HBA's wide/narrow port's info
*/
struct MPT3SAS_ADAPTER {
struct list_head list;
@@ -1205,7 +1345,6 @@ struct MPT3SAS_ADAPTER {
u8 ir_firmware;
int bars;
u8 mask_interrupts;
- int dma_mask;
/* fw fault handler */
char fault_reset_work_q_name[20];
@@ -1217,6 +1356,8 @@ struct MPT3SAS_ADAPTER {
struct workqueue_struct *firmware_event_thread;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
+ struct fw_event_work *current_event;
+ u8 fw_events_cleanup;
/* misc flags */
int aen_event_read_flag;
@@ -1247,13 +1388,20 @@ struct MPT3SAS_ADAPTER {
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
u32 non_operational_loop;
u8 ioc_coredump_loop;
+ u32 timestamp_update_count;
+ u32 time_sync_interval;
atomic64_t total_io_cnt;
atomic64_t high_iops_outstanding;
bool msix_load_balance;
u16 thresh_hold;
u8 high_iops_queues;
+ u8 iopoll_q_start_index;
+ u32 drv_internal_flags;
u32 drv_support_bitmap;
+ u32 dma_mask;
bool enable_sdev_max_qd;
+ bool use_32bit_dma;
+ struct io_uring_poll_queue *io_uring_poll_queues;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1300,6 +1448,10 @@ struct MPT3SAS_ADAPTER {
u8 tm_custom_handling;
u8 nvme_abort_timeout;
u16 max_shutdown_latency;
+ u16 max_wideport_qd;
+ u16 max_narrowport_qd;
+ u16 max_nvme_qd;
+ u8 max_sata_qd;
/* static config pages */
struct mpt3sas_facts facts;
@@ -1364,6 +1516,7 @@ struct MPT3SAS_ADAPTER {
spinlock_t scsi_lookup_lock;
int pending_io_count;
wait_queue_head_t reset_wq;
+ u16 *io_queue_num;
/* PCIe SGL */
struct dma_pool *pcie_sgl_dma_pool;
@@ -1435,7 +1588,7 @@ struct MPT3SAS_ADAPTER {
u8 combined_reply_index_count;
u8 smp_affinity_enable;
/* reply post register index */
- resource_size_t **replyPostRegisterIndex;
+ resource_size_t __iomem **replyPostRegisterIndex;
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
@@ -1454,6 +1607,8 @@ struct MPT3SAS_ADAPTER {
u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
u32 ring_buffer_offset;
u32 ring_buffer_sz;
+ struct htb_rel_query htb_rel;
+ u8 reset_from_user;
u8 is_warpdrive;
u8 is_mcpu_endpoint;
u8 hide_ir_msg;
@@ -1467,22 +1622,62 @@ struct MPT3SAS_ADAPTER {
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ u8 supports_trigger_pages;
void *device_remove_in_progress;
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
u8 is_aero_ioc;
+ struct dentry *debugfs_root;
+ struct dentry *ioc_dump;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
PUT_SMID_DEFAULT put_smid_default;
GET_MSIX_INDEX get_msix_index_for_smlio;
+
+ u8 multipath_on_hba;
+ struct list_head port_table_list;
+};
+
+struct mpt3sas_debugfs_buffer {
+ void *buf;
+ u32 len;
};
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
+#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002
+
+#define MPT_DRV_INTERNAL_FIRST_PE_ISSUED 0x00000001
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
+/*
+ * struct ATTO_SAS_NVRAM - ATTO NVRAM settings, stored in
+ * Manufacturing page 1 and used to obtain the ATTO SasAddr.
+ */
+struct ATTO_SAS_NVRAM {
+ u8 Signature[4];
+ u8 Version;
+#define ATTO_SASNVR_VERSION 0
+
+ u8 Checksum;
+#define ATTO_SASNVR_CKSUM_SEED 0x5A
+ u8 Pad[10];
+ u8 SasAddr[8];
+#define ATTO_SAS_ADDR_ALIGN 64
+ u8 Reserved[232];
+};
+
+#define ATTO_SAS_ADDR_DEVNAME_BIAS 63
+
+union ATTO_SAS_ADDRESS {
+ U8 b[8];
+ U16 w[4];
+ U32 d[2];
+ U64 q;
+};
/* base shared API */
extern struct list_head mpt3sas_ioc_list;
@@ -1516,7 +1711,9 @@ __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll);
+void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
@@ -1572,6 +1769,9 @@ void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
u16 device_missing_delay, u8 io_missing_delay);
+int mpt3sas_base_check_for_fault_and_issue_reset(
+ struct MPT3SAS_ADAPTER *ioc);
+
int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
void
@@ -1585,6 +1785,12 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \
status, mpi_request, sz); } while (0)
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
+int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
@@ -1596,28 +1802,36 @@ void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
-int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
- u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task,
- u8 timeout, u8 tr_method);
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
-void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address);
+ u64 sas_address, struct hba_port *port);
u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
+struct hba_port *
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port,
+ u8 bypass_dirty_port_flag);
struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
struct MPT3SAS_ADAPTER *ioc, u16 handle);
struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address(
- struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
struct _sas_device *mpt3sas_get_sdev_by_addr(
- struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
struct _sas_device *__mpt3sas_get_sdev_by_addr(
- struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
u16 handle);
struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
@@ -1627,6 +1841,11 @@ void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
+struct _sas_device *
+__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, struct sas_rphy *rphy);
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port, u32 phy);
/* config shared API */
u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1635,6 +1854,9 @@ int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
u8 *num_phys);
int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
+
int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
u16 sz);
@@ -1653,6 +1875,12 @@ int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage2_t *config_page);
int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
+int mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage0_t *config_page);
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1667,6 +1895,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
u32 form, u32 handle);
+int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+ u16 sz);
int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
u16 sz);
@@ -1718,10 +1949,37 @@ int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
u16 *volume_handle);
int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc,
u16 volume_handle, u64 *wwid);
+int
+mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page);
+int
+mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set);
+int
+mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set);
+int
+mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set);
+int
+mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set);
/* ctl shared API */
-extern struct device_attribute *mpt3sas_host_attrs[];
-extern struct device_attribute *mpt3sas_dev_attrs[];
+extern const struct attribute_group *mpt3sas_host_groups[];
+extern const struct attribute_group *mpt3sas_dev_groups[];
void mpt3sas_ctl_init(ushort hbas_to_enumerate);
void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1744,25 +2002,33 @@ extern struct scsi_transport_template *mpt3sas_transport_template;
u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
- u16 handle, u64 sas_address);
+ u16 handle, u64 sas_address, struct hba_port *port);
void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u64 sas_address_parent);
+ u64 sas_address_parent, struct hba_port *port);
int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
*mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
struct device *parent_dev);
void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate,
+ struct hba_port *port);
extern struct sas_function_template mpt3sas_transport_functions;
extern struct scsi_transport_template *mpt3sas_transport_template;
+void
+mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy);
+void
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address, struct hba_port *port);
/* trigger data externs */
void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc,
- u32 tigger_bitmask);
+ u32 trigger_bitmask);
void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
u16 log_entry_qualifier);
void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key,
@@ -1781,6 +2047,11 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_init_debugfs(void);
+void mpt3sas_exit_debugfs(void);
+
/**
* _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device
* @device_info: bitfield providing information about the device.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 62ddf53ab3ae..d114ef381c44 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -303,7 +303,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
u8 retry_count, issue_host_reset = 0;
struct config_request mem;
u32 ioc_status = UINT_MAX;
- u8 issue_reset = 0;
mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
@@ -360,8 +359,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
}
r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
- if (r)
+ if (r) {
+ if (r == -ETIME)
+ issue_host_reset = 1;
goto free_mem;
+ }
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
@@ -372,7 +374,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
}
r = 0;
- memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t));
ioc->config_cmds.status = MPT3_CMD_PENDING;
config_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->config_cmds.smid = smid;
@@ -386,17 +388,19 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
_config_display_some_debug(ioc,
smid, "config_request", NULL);
- mpt3sas_check_cmd_timeout(ioc,
- ioc->config_cmds.status, mpi_request,
- sizeof(Mpi2ConfigRequest_t)/4, issue_reset);
+ ioc_err(ioc, "%s: command timeout\n", __func__);
+ mpt3sas_base_check_cmd_timeout(ioc, ioc->config_cmds.status,
+ mpi_request, sizeof(Mpi2ConfigRequest_t) / 4);
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
- if ((ioc->shost_recovery) || (ioc->config_cmds.status &
- MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ if (ioc->config_cmds.status & MPT3_CMD_RESET)
goto retry_config;
- issue_host_reset = 1;
- r = -EFAULT;
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ issue_host_reset = 0;
+ r = -EFAULT;
+ } else
+ issue_host_reset = 1;
goto free_mem;
}
@@ -487,8 +491,16 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
mutex_unlock(&ioc->config_cmds.mutex);
- if (issue_host_reset)
- mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ if (issue_host_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) {
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ r = -EFAULT;
+ } else {
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
+ }
return r;
}
@@ -529,6 +541,42 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_manufacturing_pg1 - obtain manufacturing page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -745,11 +793,99 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
sizeof(*config_page));
+
out:
return r;
}
/**
+ * mpt3sas_config_set_bios_pg4 - write out bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg4 - read bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ /*
+ * The size of the page is variable. Allow the caller to pass a
+ * NULL buffer so that only the size is returned.
+ */
+ if (config_page && sz_config_pg) {
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ }
+
+out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
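Because BIOS page 4 is variable length, mpt3sas_config_get_bios_pg4() accepts a NULL buffer to run only the header step. A sketch of the probe-then-read pattern this enables, assuming the returned PageLength is in 4-byte units as elsewhere in MPI2:

static int example_read_bios_pg4(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2BiosPage4_t *pg4;
	int sz, rc;

	/* First call with a NULL buffer: header only, page size comes back. */
	rc = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
	if (rc)
		return rc;

	sz = mpi_reply.Header.PageLength * 4;
	pg4 = kzalloc(sz, GFP_KERNEL);
	if (!pg4)
		return -ENOMEM;

	rc = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, pg4, sz);
	kfree(pg4);
	return rc;
}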
@@ -1160,6 +1296,43 @@ out:
}
/**
+ * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT;
+ mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -1744,6 +1917,766 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
}
/**
+ * mpt3sas_config_get_driver_trigger_pg0 - obtain driver trigger page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg0 - write driver trigger page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg0 - update driver trigger page 0
+ * @ioc: per adapter object
+ * @trigger_flag: trigger type bit map
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_config_update_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ u16 trigger_flag, bool set)
+{
+ Mpi26DriverTriggerPage0_t tg_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc;
+ u16 flags, ioc_status;
+
+ rc = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0);
+ if (rc)
+ return rc;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg0, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ if (set)
+ flags = le16_to_cpu(tg_pg0.TriggerFlags) | trigger_flag;
+ else
+ flags = le16_to_cpu(tg_pg0.TriggerFlags) & ~trigger_flag;
+
+ tg_pg0.TriggerFlags = cpu_to_le16(flags);
+
+ rc = _config_set_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0);
+ if (rc)
+ return rc;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to update trigger pg0, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg1 - obtain driver trigger page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg1 - write driver trigger page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg1 - update driver trigger page 1
+ * @ioc: per adapter object
+ * @master_tg: Master trigger bit map
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set)
+{
+ Mpi26DriverTriggerPage1_t tg_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ tg_pg1.NumMasterTrigger = cpu_to_le16(1);
+ tg_pg1.MasterTriggers[0].MasterTriggerFlags = cpu_to_le32(
+ master_tg->MasterData);
+ } else {
+ tg_pg1.NumMasterTrigger = 0;
+ tg_pg1.MasterTriggers[0].MasterTriggerFlags = 0;
+ }
+
+ rc = _config_set_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg2 - obtain driver trigger page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg2 - write driver trigger page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg2 - update driver trigger page 2
+ * @ioc: per adapter object
+ * @event_tg: list of Event Triggers
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set)
+{
+ Mpi26DriverTriggerPage2_t tg_pg2;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = event_tg->ValidEntries;
+ tg_pg2.NumMPIEventTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg2.MPIEventTriggers[i].MPIEventCode =
+ cpu_to_le16(
+ event_tg->EventTriggerEntry[i].EventValue);
+ tg_pg2.MPIEventTriggers[i].MPIEventCodeSpecific =
+ cpu_to_le16(
+ event_tg->EventTriggerEntry[i].LogEntryQualifier);
+ }
+ } else {
+ tg_pg2.NumMPIEventTrigger = 0;
+ memset(&tg_pg2.MPIEventTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg3 - obtain driver trigger page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg3 - write driver trigger page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg3 - update driver trigger page 3
+ * @ioc: per adapter object
+ * @scsi_tg: scsi trigger list
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set)
+{
+ Mpi26DriverTriggerPage3_t tg_pg3;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ if (set) {
+ count = scsi_tg->ValidEntries;
+ tg_pg3.NumSCSISenseTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg3.SCSISenseTriggers[i].ASCQ =
+ scsi_tg->SCSITriggerEntry[i].ASCQ;
+ tg_pg3.SCSISenseTriggers[i].ASC =
+ scsi_tg->SCSITriggerEntry[i].ASC;
+ tg_pg3.SCSISenseTriggers[i].SenseKey =
+ scsi_tg->SCSITriggerEntry[i].SenseKey;
+ }
+ } else {
+ tg_pg3.NumSCSISenseTrigger = 0;
+ memset(&tg_pg3.SCSISenseTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ return 0;
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg4 - obtain driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg4 - write driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg4 - update driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_tg: mpi trigger list
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set)
+{
+ Mpi26DriverTriggerPage4_t tg_pg4;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = mpi_tg->ValidEntries;
+ tg_pg4.NumIOCStatusLogInfoTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg4.IOCStatusLoginfoTriggers[i].IOCStatus =
+ cpu_to_le16(mpi_tg->MPITriggerEntry[i].IOCStatus);
+ tg_pg4.IOCStatusLoginfoTriggers[i].LogInfo =
+ cpu_to_le32(mpi_tg->MPITriggerEntry[i].IocLogInfo);
+ }
+ } else {
+ tg_pg4.NumIOCStatusLogInfoTrigger = 0;
+ memset(&tg_pg4.IOCStatusLoginfoTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
* mpt3sas_config_get_volume_handle - returns volume handle for given hidden
* raid components
* @ioc: per adapter object
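The trigger pages work as a unit: page 0 carries a TriggerFlags bitmap marking which of pages 1-4 hold valid data, and each mpt3sas_config_update_driver_trigger_pgX() helper sets the pg0 flag first, then rolls it back if the page write fails. A sketch of enabling and later clearing a master trigger through the new helpers, assuming the existing MASTER_TRIGGER_FW_FAULT bit from mpt3sas_trigger_diag.h:

static int example_toggle_master_trigger(struct MPT3SAS_ADAPTER *ioc)
{
	struct SL_WH_MASTER_TRIGGER_T master_tg = {
		.MasterData = MASTER_TRIGGER_FW_FAULT,
	};
	int rc;

	/* Persist the trigger in pg1 and mark it valid in pg0. */
	rc = mpt3sas_config_update_driver_trigger_pg1(ioc, &master_tg, true);
	if (rc)
		return rc;

	/* Clearing writes zeroed entries and drops the pg0 valid bit. */
	return mpt3sas_config_update_driver_trigger_pg1(ioc, &master_tg, false);
}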
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 62e552838565..0d8b1e942ded 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -454,7 +454,7 @@ out:
}
/**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -479,12 +479,14 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc,
"%s: Releasing the trace buffer due to adapter reset.",
__func__);
+ ioc->htb_rel.buffer_rel_condition =
+ MPT3_DIAG_BUFFER_REL_TRIGGER;
mpt3sas_send_diag_release(ioc, i, &issue_reset);
}
}
/**
- * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd.
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -501,7 +503,7 @@ void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -576,7 +578,7 @@ static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
- u8 found = 0;
+ bool found = false;
u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
@@ -598,6 +600,7 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
handle = le16_to_cpu(tm_request->DevHandle);
for (smid = ioc->scsiio_depth; smid && !found; smid--) {
struct scsiio_tracker *st;
+ __le16 task_mid;
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
@@ -616,10 +619,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
* first outstanding smid will be picked up. Otherwise,
* targeted smid will be the one.
*/
- if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
- tm_request->TaskMID = cpu_to_le16(st->smid);
- found = 1;
- }
+ task_mid = cpu_to_le16(st->smid);
+ if (!tm_request->TaskMID)
+ tm_request->TaskMID = task_mid;
+ found = tm_request->TaskMID == task_mid;
}
if (!found) {
@@ -664,7 +667,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
struct _pcie_device *pcie_device = NULL;
u16 smid;
- u8 timeout;
+ unsigned long timeout;
u8 issue_reset;
u32 sz, sz_arg;
void *psge;
@@ -902,8 +905,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
(Mpi2SmpPassthroughRequest_t *)mpi_request;
u8 *data;
- /* ioc determines which port to use */
- smp_request->PhysicalPort = 0xFF;
+ if (!ioc->multipath_on_hba) {
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ }
if (smp_request->PassthroughFlags &
MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
data = (u8 *)&smp_request->SGL;
@@ -943,6 +948,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
break;
}
case MPI2_FUNCTION_FW_DOWNLOAD:
+ {
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
+ ret = -EPERM;
+ break;
+ }
+ fallthrough;
+ }
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
@@ -1002,7 +1015,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
/* drop to default case for posting the request */
}
- /* fall through */
+ fallthrough;
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
@@ -1109,13 +1122,15 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
pcie_device->device_info))))
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
@@ -1330,6 +1345,7 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
__func__));
+ ioc->reset_from_user = 1;
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc_info(ioc,
"Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
@@ -1678,11 +1694,15 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
request_data = ioc->diag_buffer[buffer_type];
request_data_sz = diag_register->requested_buffer_size;
ioc->unique_id[buffer_type] = diag_register->unique_id;
+ /* Reset ioc variables used for additional query commands */
+ ioc->reset_from_user = 0;
+ memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query));
ioc->diag_buffer_status[buffer_type] &=
MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
memcpy(ioc->product_specific[buffer_type],
@@ -1776,6 +1796,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
}
@@ -2152,6 +2173,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
@@ -2168,7 +2190,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpt3sas_check_cmd_timeout(ioc,
ioc->ctl_cmds.status, mpi_request,
sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
- *issue_reset = reset_needed;
+ *issue_reset = reset_needed;
rc = -EFAULT;
goto out;
}
@@ -2406,6 +2428,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
@@ -2465,7 +2488,60 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return rc;
}
+/**
+ * _ctl_addnl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg: user space buffer containing ioctl content
+ *
+ * The application sends only the unique_id. The driver inspects the
+ * unique_id first and, if it is valid, fills in the details about the
+ * cause of the diag buffer release.
+ */
+static long
+_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_addnl_diag_query karg;
+ u32 buffer_type = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("%s: failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
+ if (karg.unique_id == 0) {
+ ioc_err(ioc, "%s: unique_id is(0x%08x)\n",
+ __func__, karg.unique_id);
+ return -EPERM;
+ }
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EPERM;
+ }
+ memset(&karg.rel_query, 0, sizeof(karg.rel_query));
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
+ goto out;
+ }
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n",
+ __func__, buffer_type);
+ return -EPERM;
+ }
+ memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
+out:
+ if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
+ ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
+ __func__, arg);
+ return -EFAULT;
+ }
+ return 0;
+}
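A hypothetical userspace caller of the new MPT3ADDNLDIAGQUERY ioctl; the file descriptor is assumed to be open on the mpt3ctl character device, ioc_number 0 is an assumption for illustration, and the struct and command number come from the driver's mpt3sas_ctl.h:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int query_release_reason(int fd, uint32_t unique_id)
{
	struct mpt3_addnl_diag_query karg;

	memset(&karg, 0, sizeof(karg));
	karg.hdr.ioc_number = 0;	/* first adapter, assumed */
	karg.unique_id = unique_id;

	if (ioctl(fd, MPT3ADDNLDIAGQUERY, &karg) < 0)
		return -1;

	printf("release condition: 0x%x\n",
	    karg.rel_query.buffer_rel_condition);
	return 0;
}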
#ifdef CONFIG_COMPAT
/**
@@ -2529,7 +2605,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
struct MPT3SAS_ADAPTER *ioc;
struct mpt3_ioctl_header ioctl_header;
enum block_state state;
- long ret = -EINVAL;
+ long ret = -ENOIOCTLCMD;
/* get IOCTL header */
if (copy_from_user(&ioctl_header, (char __user *)arg,
@@ -2639,6 +2715,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
ret = _ctl_diag_read_buffer(ioc, arg);
break;
+ case MPT3ADDNLDIAGQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
+ ret = _ctl_addnl_diag_query(ioc, arg);
+ break;
default:
dctlprintk(ioc,
ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
@@ -2691,7 +2771,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
/**
- *_ ctl_ioctl_compat - main ioctl entry point (compat)
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
* @file: ?
* @cmd: ?
* @arg: ?
@@ -2709,7 +2789,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
}
/**
- *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
+ * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
* @file: ?
* @cmd: ?
* @arg: ?
@@ -2977,7 +3057,7 @@ fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
static DEVICE_ATTR_RO(fw_queue_depth);
/**
- * sas_address_show - sas address
+ * host_sas_address_show - sas address
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3135,7 +3215,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
+ Mpi2IOUnitPage3_t io_unit_pg3;
Mpi2ConfigReply_t mpi_reply;
u16 backup_rail_monitor_status = 0;
u16 ioc_status;
@@ -3145,28 +3225,21 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (!ioc->is_warpdrive) {
ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
__func__);
- goto out;
+ return 0;
}
/* pci_access_mutex lock acquired by sysfs show path */
mutex_lock(&ioc->pci_access_mutex);
- if (ioc->pci_error_recovery || ioc->remove_host) {
- mutex_unlock(&ioc->pci_access_mutex);
- return 0;
- }
-
- /* allocate upto GPIOVal 36 entries */
- sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
- io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
- if (!io_unit_pg3) {
- ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
- __func__, sz);
+ if (ioc->pci_error_recovery || ioc->remove_host)
goto out;
- }
- if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
+ sz = sizeof(io_unit_pg3);
+ memset(&io_unit_pg3, 0, sz);
+
+ if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
0) {
ioc_err(ioc, "%s: failed reading iounit_pg3\n",
__func__);
+ rc = -EINVAL;
goto out;
}
@@ -3174,21 +3247,22 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
__func__, ioc_status);
+ rc = -EINVAL;
goto out;
}
- if (io_unit_pg3->GPIOCount < 25) {
- ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
- __func__, io_unit_pg3->GPIOCount);
+ if (io_unit_pg3.GPIOCount < 25) {
+ ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
+ __func__, io_unit_pg3.GPIOCount);
+ rc = -EINVAL;
goto out;
}
/* BRM status is in bit zero of GPIOVal[24] */
- backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
+ backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
out:
- kfree(io_unit_pg3);
mutex_unlock(&ioc->pci_access_mutex);
return rc;
}
@@ -3382,12 +3456,10 @@ host_trace_buffer_enable_store(struct device *cdev,
&&
(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
- pci_free_consistent(ioc->pdev,
- ioc->diag_buffer_sz[
- MPI2_DIAG_BUF_TYPE_TRACE],
- ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
- ioc->diag_buffer_dma[
- MPI2_DIAG_BUF_TYPE_TRACE]);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
NULL;
}
@@ -3421,6 +3493,7 @@ host_trace_buffer_enable_store(struct device *cdev,
MPT3_DIAG_BUFFER_IS_RELEASED))
goto out;
ioc_info(ioc, "releasing host trace buffer\n");
+ ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
@@ -3473,11 +3546,31 @@ diag_trigger_master_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MASTER_TRIGGER_T *master_tg;
unsigned long flags;
ssize_t rc;
+ bool set = true;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+
+ if (ioc->supports_trigger_pages) {
+ master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
+ GFP_KERNEL);
+ if (!master_tg)
+ return -ENOMEM;
+
+ memcpy(master_tg, buf, rc);
+ if (!master_tg->MasterData)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
+ set)) {
+ kfree(master_tg);
+ return -EFAULT;
+ }
+ kfree(master_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_master, 0,
sizeof(struct SL_WH_MASTER_TRIGGER_T));
memcpy(&ioc->diag_trigger_master, buf, rc);
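The diag_trigger_* attributes take the raw trigger structure as a binary sysfs write; with this change the values also land in the persistent trigger pages when the firmware supports them. A userspace sketch, assuming the SL_WH_MASTER_TRIGGER_T definition from mpt3sas_trigger_diag.h is available and the usual scsi_host sysfs path:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_master_trigger(const char *host, uint32_t master_data)
{
	struct SL_WH_MASTER_TRIGGER_T tg;
	char path[128];
	ssize_t n;
	int fd;

	memset(&tg, 0, sizeof(tg));
	tg.MasterData = master_data;	/* 0 clears the trigger */

	snprintf(path, sizeof(path),
	    "/sys/class/scsi_host/%s/diag_trigger_master", host);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, &tg, sizeof(tg));
	close(fd);
	return n == (ssize_t)sizeof(tg) ? 0 : -1;
}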
@@ -3529,11 +3622,31 @@ diag_trigger_event_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg;
unsigned long flags;
ssize_t sz;
+ bool set = true;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!event_tg)
+ return -ENOMEM;
+
+ memcpy(event_tg, buf, sz);
+ if (!event_tg->ValidEntries)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
+ set)) {
+ kfree(event_tg);
+ return -EFAULT;
+ }
+ kfree(event_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
memset(&ioc->diag_trigger_event, 0,
sizeof(struct SL_WH_EVENT_TRIGGERS_T));
memcpy(&ioc->diag_trigger_event, buf, sz);
@@ -3584,11 +3697,31 @@ diag_trigger_scsi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = true;
+
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!scsi_tg)
+ return -ENOMEM;
+
+ memcpy(scsi_tg, buf, sz);
+ if (!scsi_tg->ValidEntries)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
+ set)) {
+ kfree(scsi_tg);
+ return -EFAULT;
+ }
+ kfree(scsi_tg);
+ }
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- sz = min(sizeof(ioc->diag_trigger_scsi), count);
+
memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
memcpy(&ioc->diag_trigger_scsi, buf, sz);
if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
@@ -3600,7 +3733,7 @@ static DEVICE_ATTR_RW(diag_trigger_scsi);
/**
- * diag_trigger_scsi_show - show the diag_trigger_mpi attribute
+ * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3638,11 +3771,30 @@ diag_trigger_mpi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = true;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!mpi_tg)
+ return -ENOMEM;
+
+ memcpy(mpi_tg, buf, sz);
+ if (!mpi_tg->ValidEntries)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
+ set)) {
+ kfree(mpi_tg);
+ return -EFAULT;
+ }
+ kfree(mpi_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_mpi, 0,
sizeof(ioc->diag_trigger_mpi));
memcpy(&ioc->diag_trigger_mpi, buf, sz);
@@ -3660,8 +3812,9 @@ static DEVICE_ATTR_RW(diag_trigger_mpi);
/**
* drv_support_bitmap_show - driver supported feature bitmap
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: unused
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -3678,8 +3831,9 @@ static DEVICE_ATTR_RO(drv_support_bitmap);
/**
* enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: unused
+ * @buf: the buffer returned
*
* A sysfs read/write shost attribute. This attribute is used to set the
* targets queue depth to HBA IO queue depth if this attribute is enabled.
@@ -3696,8 +3850,10 @@ enable_sdev_max_qd_show(struct device *cdev,
/**
* enable_sdev_max_qd_store - Enable/disable sdev max qd
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: unused
+ * @buf: the buffer returned
+ * @count: unused
*
* A sysfs read/write shost attribute. This attribute is used to set the
* targets queue depth to HBA IO queue depth if this attribute is enabled.
@@ -3756,9 +3912,10 @@ enable_sdev_max_qd_store(struct device *cdev,
}
} else if (sas_target_priv_data->flags &
MPT_TARGET_FLAGS_PCIE_DEVICE)
- qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ qdepth = ioc->max_nvme_qd;
else
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
mpt3sas_scsih_change_queue_depth(sdev, qdepth);
}
@@ -3777,37 +3934,46 @@ enable_sdev_max_qd_store(struct device *cdev,
}
static DEVICE_ATTR_RW(enable_sdev_max_qd);
-struct device_attribute *mpt3sas_host_attrs[] = {
- &dev_attr_version_fw,
- &dev_attr_version_bios,
- &dev_attr_version_mpi,
- &dev_attr_version_product,
- &dev_attr_version_nvdata_persistent,
- &dev_attr_version_nvdata_default,
- &dev_attr_board_name,
- &dev_attr_board_assembly,
- &dev_attr_board_tracer,
- &dev_attr_io_delay,
- &dev_attr_device_delay,
- &dev_attr_logging_level,
- &dev_attr_fwfault_debug,
- &dev_attr_fw_queue_depth,
- &dev_attr_host_sas_address,
- &dev_attr_ioc_reset_count,
- &dev_attr_host_trace_buffer_size,
- &dev_attr_host_trace_buffer,
- &dev_attr_host_trace_buffer_enable,
- &dev_attr_reply_queue_count,
- &dev_attr_diag_trigger_master,
- &dev_attr_diag_trigger_event,
- &dev_attr_diag_trigger_scsi,
- &dev_attr_diag_trigger_mpi,
- &dev_attr_drv_support_bitmap,
- &dev_attr_BRM_status,
- &dev_attr_enable_sdev_max_qd,
+static struct attribute *mpt3sas_host_attrs[] = {
+ &dev_attr_version_fw.attr,
+ &dev_attr_version_bios.attr,
+ &dev_attr_version_mpi.attr,
+ &dev_attr_version_product.attr,
+ &dev_attr_version_nvdata_persistent.attr,
+ &dev_attr_version_nvdata_default.attr,
+ &dev_attr_board_name.attr,
+ &dev_attr_board_assembly.attr,
+ &dev_attr_board_tracer.attr,
+ &dev_attr_io_delay.attr,
+ &dev_attr_device_delay.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_fwfault_debug.attr,
+ &dev_attr_fw_queue_depth.attr,
+ &dev_attr_host_sas_address.attr,
+ &dev_attr_ioc_reset_count.attr,
+ &dev_attr_host_trace_buffer_size.attr,
+ &dev_attr_host_trace_buffer.attr,
+ &dev_attr_host_trace_buffer_enable.attr,
+ &dev_attr_reply_queue_count.attr,
+ &dev_attr_diag_trigger_master.attr,
+ &dev_attr_diag_trigger_event.attr,
+ &dev_attr_diag_trigger_scsi.attr,
+ &dev_attr_diag_trigger_mpi.attr,
+ &dev_attr_drv_support_bitmap.attr,
+ &dev_attr_BRM_status.attr,
+ &dev_attr_enable_sdev_max_qd.attr,
NULL,
};
+static const struct attribute_group mpt3sas_host_attr_group = {
+ .attrs = mpt3sas_host_attrs
+};
+
+const struct attribute_group *mpt3sas_host_groups[] = {
+ &mpt3sas_host_attr_group,
+ NULL
+};
+
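On the consumer side the exported group array plugs into the host template, so the SCSI core creates the attribute files itself. A sketch of the assumed wiring, with only the relevant fields shown (the template name matches the driver's convention but the hookup itself is outside this hunk):

static struct scsi_host_template mpt3sas_driver_template = {
	.module		= THIS_MODULE,
	.name		= "Fusion MPT SAS Host",
	.shost_groups	= mpt3sas_host_groups,
	.sdev_groups	= mpt3sas_dev_groups,
	/* remaining callbacks unchanged and omitted here */
};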
/* device attributes */
/**
@@ -3855,7 +4021,25 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(sas_device_handle);
/**
- * sas_ncq_io_prio_show - send prioritized io commands to device
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
* @dev: pointer to embedded device
* @attr: ?
* @buf: the buffer returned
@@ -3893,13 +4077,23 @@ sas_ncq_prio_enable_store(struct device *dev,
}
static DEVICE_ATTR_RW(sas_ncq_prio_enable);
-struct device_attribute *mpt3sas_dev_attrs[] = {
- &dev_attr_sas_address,
- &dev_attr_sas_device_handle,
- &dev_attr_sas_ncq_prio_enable,
+static struct attribute *mpt3sas_dev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_sas_device_handle.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
+static const struct attribute_group mpt3sas_dev_attr_group = {
+ .attrs = mpt3sas_dev_attrs
+};
+
+const struct attribute_group *mpt3sas_dev_groups[] = {
+ &mpt3sas_dev_attr_group,
+ NULL
+};
+
/* file operations table for mpt3ctl device */
static const struct file_operations ctl_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 0f7aa4ddade0..8f6ffb40261c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -50,6 +50,8 @@
#include <linux/miscdevice.h>
#endif
+#include "mpt3sas_base.h"
+
#ifndef MPT2SAS_MINOR
#define MPT2SAS_MINOR (MPT_MINOR + 1)
#endif
@@ -94,6 +96,8 @@
struct mpt3_diag_query)
#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
struct mpt3_diag_read_buffer)
+#define MPT3ADDNLDIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 32, \
+ struct mpt3_addnl_diag_query)
/* Trace Buffer default UniqueId */
#define MPT2DIAGBUFFUNIQUEID (0x07075900)
@@ -430,4 +434,18 @@ struct mpt3_diag_read_buffer {
uint32_t diagnostic_data[1];
};
+/**
+ * struct mpt3_addnl_diag_query - diagnostic buffer release reason
+ * @hdr: generic header
+ * @unique_id: unique id associated with this buffer.
+ * @rel_query: release query.
+ * @reserved2: reserved
+ */
+struct mpt3_addnl_diag_query {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+ struct htb_rel_query rel_query;
+ uint32_t reserved2[2];
+};
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
new file mode 100644
index 000000000000..a6ab1db81167
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface Support for MPT (Message Passing Technology) based
+ * controllers.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ *
+ * Authors: Broadcom Inc.
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ * Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ *
+ **/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "mpt3sas_base.h"
+#include <linux/debugfs.h>
+
+static struct dentry *mpt3sas_debugfs_root;
+
+/*
+ * _debugfs_iocdump_read - copy ioc dump from debugfs buffer
+ * @filp: file pointer
+ * @ubuf: buffer to fill data into
+ * @cnt: length of the buffer
+ * @ppos: offset in the file
+ */
+
+static ssize_t
+_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mpt3sas_debugfs_buffer *debug = filp->private_data;
+
+ if (!debug || !debug->buf)
+ return 0;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+/*
+ * _debugfs_iocdump_open - open the ioc_dump debugfs attribute file
+ */
+static int
+_debugfs_iocdump_open(struct inode *inode, struct file *file)
+{
+ struct MPT3SAS_ADAPTER *ioc = inode->i_private;
+ struct mpt3sas_debugfs_buffer *debug;
+
+ debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->buf = (void *)ioc;
+ debug->len = sizeof(struct MPT3SAS_ADAPTER);
+ file->private_data = debug;
+ return 0;
+}
+
+/*
+ * _debugfs_iocdump_release - release the ioc_dump debugfs attribute
+ * @inode: inode of the corresponding device
+ * @file: file pointer
+ */
+static int
+_debugfs_iocdump_release(struct inode *inode, struct file *file)
+{
+ struct mpt3sas_debugfs_buffer *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
+ .owner = THIS_MODULE,
+ .open = _debugfs_iocdump_open,
+ .read = _debugfs_iocdump_read,
+ .release = _debugfs_iocdump_release,
+};
+
+/*
+ * mpt3sas_init_debugfs - create the debugfs root for the mpt3sas driver
+ */
+void mpt3sas_init_debugfs(void)
+{
+ mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
+ if (!mpt3sas_debugfs_root)
+ pr_info("mpt3sas: Cannot create debugfs root\n");
+}
+
+/*
+ * mpt3sas_exit_debugfs - remove the debugfs root for the mpt3sas driver
+ */
+void mpt3sas_exit_debugfs(void)
+{
+ debugfs_remove_recursive(mpt3sas_debugfs_root);
+}
+
+/*
+ * mpt3sas_setup_debugfs - set up debugfs for one HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void
+mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ char name[64];
+
+ snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
+ if (!ioc->debugfs_root) {
+ ioc->debugfs_root =
+ debugfs_create_dir(name, mpt3sas_debugfs_root);
+ if (!ioc->debugfs_root) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create per adapter debugfs directory\n");
+ return;
+ }
+ }
+
+ snprintf(name, sizeof(name), "ioc_dump");
+ ioc->ioc_dump = debugfs_create_file(name, 0444,
+ ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
+ if (!ioc->ioc_dump) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create ioc_dump debugfs file\n");
+ debugfs_remove(ioc->debugfs_root);
+ return;
+ }
+
+ snprintf(name, sizeof(name), "host_recovery");
+ debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery);
+}
+
+/*
+ * mpt3sas_destroy_debugfs - destroy debugfs for one HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ debugfs_remove_recursive(ioc->debugfs_root);
+}
+
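A minimal userspace read of the new attribute, assuming debugfs is mounted at /sys/kernel/debug and the directory layout created above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int dump_ioc(int host_no)
{
	char path[96], buf[4096];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path),
	    "/sys/kernel/debug/mpt3sas/scsi_host%d/ioc_dump", host_no);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}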
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c597d544eb39..8e24ebcebfe5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -54,6 +54,7 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
+#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>
#include "mpt3sas_base.h"
@@ -77,6 +78,7 @@ static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
@@ -159,6 +161,20 @@ module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
"Enable sdev max qd as can_queue, def=disabled(0)");
+static int multipath_on_hba = -1;
+module_param(multipath_on_hba, int, 0);
+MODULE_PARM_DESC(multipath_on_hba,
+ "Multipath support to add same target device\n\t\t"
+ "as many times as it is visible to HBA from various paths\n\t\t"
+ "(by default:\n\t\t"
+ "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
+ "\t SAS 3.5 HBA - This will be enabled)");
+
+static int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable,
+ "Shared host tagset enable/disable Default: enable(1)");
+
/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
@@ -207,7 +223,7 @@ struct fw_event_work {
u8 ignore;
u16 event;
struct kref refcount;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
static void fw_event_work_free(struct kref *r)
@@ -357,6 +373,87 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
}
/**
+ * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
+ * port number from port list
+ * @ioc: per adapter object
+ * @port_id: port number
+ * @bypass_dirty_port_flag: when set, return the matching hba port entry
+ * even if it is marked as dirty.
+ *
+ * Search for the hba port entry corresponding to the provided port number;
+ * if found, return the port object, otherwise return NULL.
+ */
+struct hba_port *
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
+ u8 port_id, u8 bypass_dirty_port_flag)
+{
+ struct hba_port *port, *port_next;
+
+ /*
+ * When multipath_on_hba is disabled, search for the
+ * hba_port entry using the default port id (i.e. 255).
+ */
+ if (!ioc->multipath_on_hba)
+ port_id = MULTIPATH_DISABLED_PORT_ID;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
+ if (bypass_dirty_port_flag)
+ return port;
+ if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
+ continue;
+ return port;
+ }
+
+ /*
+ * Allocate an hba_port object for the default port id (i.e. 255)
+ * when multipath_on_hba is disabled for the HBA,
+ * and add this object to port_table_list.
+ */
+ if (!ioc->multipath_on_hba) {
+ port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
+ if (!port)
+ return NULL;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ list_add_tail(&port->list,
+ &ioc->port_table_list);
+ return port;
+ }
+ return NULL;
+}
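A minimal userspace sketch of the lookup policy implemented above: match on port id, and skip dirty entries unless the caller asked to bypass the dirty flag. The list layout and names here are simplified stand-ins for the driver's structures, not the kernel definitions:

#include <stddef.h>

#define HBA_PORT_FLAG_DIRTY_PORT   0x01
#define MULTIPATH_DISABLED_PORT_ID 0xFF   /* default port id 255 */

struct hba_port_s {
        struct hba_port_s *next;
        unsigned char port_id;
        unsigned char flags;
};

/* find a port by id; take dirty entries only when bypass is set */
static struct hba_port_s *
find_port(struct hba_port_s *head, unsigned char port_id, int bypass_dirty)
{
        struct hba_port_s *p;

        for (p = head; p; p = p->next) {
                if (p->port_id != port_id)
                        continue;
                if (!bypass_dirty && (p->flags & HBA_PORT_FLAG_DIRTY_PORT))
                        continue;
                return p;
        }
        return NULL;
}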
+
+/**
+ * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
+ * @ioc: per adapter object
+ * @port: hba_port object
+ * @phy: phy number
+ *
+ * Return virtual_phy object corresponding to phy number.
+ */
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port, u32 phy)
+{
+ struct virtual_phy *vphy, *vphy_next;
+
+ if (!port->vphys_mask)
+ return NULL;
+
+ list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
+ if (vphy->phy_mask & (1 << phy))
+ return vphy;
+ }
+ return NULL;
+}
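The vphy lookup reduces to a bit test of the phy number against each entry's phy_mask. A small sketch under the same simplified list layout (names are illustrative):

#include <stddef.h>

struct virtual_phy_s {
        struct virtual_phy_s *next;
        unsigned int phy_mask;   /* bit N set => phy N maps to this vphy */
};

/* return the vphy whose mask covers the given phy number, if any */
static struct virtual_phy_s *
find_vphy(struct virtual_phy_s *head, unsigned int phy)
{
        struct virtual_phy_s *v;

        for (v = head; v; v = v->next)
                if (v->phy_mask & (1u << phy))
                        return v;
        return NULL;
}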
+
+/**
* _scsih_is_boot_device - search for matching boot device.
* @sas_address: sas address
* @device_name: device name specified in IDENTIFY frame
@@ -614,48 +711,106 @@ mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
return ret;
}
+
+/**
+ * __mpt3sas_get_sdev_by_rphy - sas device search
+ * @ioc: per adapter object
+ * @rphy: sas_rphy pointer
+ *
+ * Context: Calling function should acquire ioc->sas_device_lock.
+ *
+ * This searches for the sas_device matching the rphy object and
+ * returns the sas_device object.
+ */
struct _sas_device *
-__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_rphy *rphy)
{
struct _sas_device *sas_device;
assert_spin_locked(&ioc->sas_device_lock);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->sas_address == sas_address)
- goto found_device;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->rphy != rphy)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->sas_address == sas_address)
- goto found_device;
+ sas_device = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+ if (sas_device->rphy != rphy)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
return NULL;
+}
-found_device:
- sas_device_get(sas_device);
- return sas_device;
+/**
+ * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
+ * sas address from sas_device_list list
+ * @ioc: per adapter object
+ * @sas_address: device sas address
+ * @port: hba port entry
+ *
+ * Search for the _sas_device object corresponding to the provided sas
+ * address and hba port; if found, return its address, otherwise return NULL.
+ */
+struct _sas_device *
+__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
+{
+ struct _sas_device *sas_device;
+
+ if (!port)
+ return NULL;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address != sas_address)
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+ if (sas_device->sas_address != sas_address)
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ return NULL;
}
/**
* mpt3sas_get_sdev_by_addr - sas device search
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_device_lock
*
- * This searches for sas_device based on sas_address, then return sas_device
- * object.
+ * This searches for sas_device based on sas_address & port number,
+ * then returns the sas_device object.
*/
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_device *sas_device;
unsigned long flags;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address, port);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return sas_device;
@@ -824,13 +979,17 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
- * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * mpt3sas_device_remove_by_sas_address - removing device object by
+ * sas address & port number
* @ioc: per adapter object
* @sas_address: device sas_address
+ * @port: hba port entry
+ *
+ * Return nothing.
*/
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_device *sas_device;
unsigned long flags;
@@ -839,7 +998,7 @@ mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
if (sas_device) {
list_del_init(&sas_device->list);
sas_device_put(sas_device);
@@ -884,7 +1043,7 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
}
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
+ sas_device->sas_address_parent, sas_device->port)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
/*
@@ -895,7 +1054,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
_scsih_sas_device_remove(ioc, sas_device);
}
} else
@@ -1432,21 +1592,26 @@ out:
* mpt3sas_scsih_expander_find_by_sas_address - expander device search
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_node_lock.
*
- * This searches for expander device based on sas_address, then returns the
- * sas_node object.
+ * This searches for expander device based on sas_address & port number,
+ * then returns the sas_node object.
*/
struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
- struct _sas_node *sas_expander, *r;
+ struct _sas_node *sas_expander, *r = NULL;
+
+ if (!port)
+ return r;
- r = NULL;
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
+ if (sas_expander->port != port)
+ continue;
r = sas_expander;
goto out;
}
@@ -1513,6 +1678,66 @@ _scsih_is_nvme_pciescsi_device(u32 device_info)
}
/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ int smid;
+ struct scsi_cmnd *scmd;
+
+ for (smid = 1;
+ smid <= ioc->shost->can_queue; smid++) {
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ int smid;
+ struct scsi_cmnd *scmd;
+
+ for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
@@ -1526,10 +1751,12 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
struct scsi_cmnd *scmd = NULL;
struct scsiio_tracker *st;
Mpi25SCSIIORequest_t *mpi_request;
+ u16 tag = smid - 1;
if (smid > 0 &&
smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
- u32 unique_tag = smid - 1;
+ u32 unique_tag =
+ ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
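The two-part unique tag built above is how a shared host tagset addresses a command: the hardware queue index occupies the high bits and the per-queue tag the low bits. A standalone sketch of the composition, assuming the kernel's 16-bit split (BLK_MQ_UNIQUE_TAG_BITS is 16); the hwq value of 3 and smid of 42 are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define BLK_MQ_UNIQUE_TAG_BITS 16   /* matches the kernel's definition */

/* compose a unique tag from the hardware queue index and per-queue tag */
static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
        return ((uint32_t)hwq << BLK_MQ_UNIQUE_TAG_BITS) | tag;
}

int main(void)
{
        uint16_t smid = 42;          /* hypothetical system message id */
        uint16_t tag = smid - 1;     /* driver derives the block tag from smid */
        uint32_t unique = make_unique_tag(3, tag);

        printf("unique_tag=0x%08x (hwq=%u, tag=%u)\n",
               unique, unique >> BLK_MQ_UNIQUE_TAG_BITS,
               unique & ((1u << BLK_MQ_UNIQUE_TAG_BITS) - 1));
        return 0;
}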
@@ -1576,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* limit max device queue for SATA to 32 if enable_sdev_max_qd
* is disabled.
*/
- if (ioc->enable_sdev_max_qd)
+ if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
goto not_sata;
sas_device_priv_data = sdev->hostdata;
@@ -1684,6 +1911,7 @@ scsih_target_alloc(struct scsi_target *starget)
if (pcie_device) {
sas_target_priv_data->handle = pcie_device->handle;
sas_target_priv_data->sas_address = pcie_device->wwid;
+ sas_target_priv_data->port = NULL;
sas_target_priv_data->pcie_dev = pcie_device;
pcie_device->starget = starget;
pcie_device->id = starget->id;
@@ -1701,12 +1929,12 @@ scsih_target_alloc(struct scsi_target *starget)
/* sas/sata devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
sas_target_priv_data->handle = sas_device->handle;
sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_target_priv_data->port = sas_device->port;
sas_target_priv_data->sas_dev = sas_device;
sas_device->starget = starget;
sas_device->id = starget->id;
@@ -1862,7 +2090,8 @@ scsih_slave_alloc(struct scsi_device *sdev)
} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_target_priv_data->sas_address);
+ sas_target_priv_data->sas_address,
+ sas_target_priv_data->port);
if (sas_device && (sas_device->starget == NULL)) {
sdev_printk(KERN_INFO, sdev,
"%s : sas_device->starget set to starget @ %d\n",
@@ -2428,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev)
return 1;
}
- qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ qdepth = ioc->max_nvme_qd;
ds = "NVMe";
sdev_printk(KERN_INFO, sdev,
"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
@@ -2467,7 +2696,8 @@ scsih_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_device_priv_data->sas_target->sas_address);
+ sas_device_priv_data->sas_target->sas_address,
+ sas_device_priv_data->sas_target->port);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
dfailprintk(ioc,
@@ -2479,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device->volume_handle = volume_handle;
sas_device->volume_wwid = volume_wwid;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
@@ -2491,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev)
} else
ds = "SSP";
} else {
- qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ qdepth = ioc->max_sata_qd;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
ds = "STP";
else if (sas_device->device_info &
@@ -2701,9 +2932,101 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Look whether TM has aborted the timed out SCSI command, if
+ * TM has aborted the IO then return SUCCESS else return FAILED.
+ */
+static int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task)
+{
+
+ if (smid_task <= ioc->shost->can_queue) {
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!(_scsih_scsi_lookup_find_by_target(ioc,
+ id, channel)))
+ return SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
+ lun, channel)))
+ return SUCCESS;
+ break;
+ default:
+ return SUCCESS;
+ }
+ } else if (smid_task == ioc->scsih_cmds.smid) {
+ if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ } else if (smid_task == ioc->ctl_cmds.smid) {
+ if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ }
+
+ return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency
+ * issues, it is possible that the interrupt for the aborted IO has
+ * not been received yet. So before returning a failure status, poll
+ * the reply descriptor pools for the reply of the timed out SCSI
+ * command. Return FAILED if the reply for the timed out IO is not
+ * received, otherwise return SUCCESS.
+ */
+static int
+scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task)
+{
+ int rc;
+
+ rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+ if (rc == SUCCESS)
+ return rc;
+
+ ioc_info(ioc,
+ "Poll ReplyDescriptor queues for completion of"
+ " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
+ smid_task, type, handle);
+
+ /*
+ * Due to interrupt latency issues, the driver may receive the
+ * interrupt for the TM first and then for the aborted SCSI IO
+ * command. So, poll all the ReplyDescriptor pools before returning
+ * the FAILED status to SML.
+ */
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 1);
+ mpt3sas_base_unmask_interrupts(ioc);
+
+ return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+}
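A minimal sketch of the check/poll/recheck pattern the function above implements; map_status() stands in for scsih_tm_cmd_map_status() and poll_reply_queues() for the masked-interrupt sync of the reply descriptor queues (all names here are illustrative):

#include <stdio.h>

enum { TM_SUCCESS, TM_FAILED };

static int io_still_outstanding;   /* stands in for the scsi_lookup scan */

static int map_status(void)
{
        return io_still_outstanding ? TM_FAILED : TM_SUCCESS;
}

static void poll_reply_queues(void)
{
        /* pretend the late completion arrives while polling */
        io_still_outstanding = 0;
}

/* check once, poll the reply queues, then check again */
static int tm_post_processing(void)
{
        if (map_status() == TM_SUCCESS)
                return TM_SUCCESS;
        poll_reply_queues();
        return map_status();
}

int main(void)
{
        io_still_outstanding = 1;
        printf("TM result: %s\n",
               tm_post_processing() == TM_SUCCESS ? "SUCCESS" : "FAILED");
        return 0;
}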
+
+/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
* @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
@@ -2720,11 +3043,13 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* Return: SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
- u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
+ u8 timeout, u8 tr_method)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi25SCSIIORequest_t *request;
u16 smid = 0;
u32 ioc_state;
int rc;
@@ -2780,7 +3105,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
- mpi_request->MsgFlags = tr_method;
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ mpi_request->MsgFlags = tr_method;
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
@@ -2800,7 +3127,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
}
/* sync IRQs in case those were busy during flush. */
- mpt3sas_base_sync_reply_irqs(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 0);
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
@@ -2817,7 +3144,44 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
- rc = SUCCESS;
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ /*
+ * If the DevHandle field in smid_task's entry of the request
+ * pool doesn't match the device handle on which this task abort
+ * TM was received, the TM has successfully aborted the timed
+ * out command, since smid_task's entry in the request pool is
+ * memset to zero once the timed out command is returned to the
+ * SML. If the command was not aborted then smid_task's entry
+ * won't be cleared, it will still hold the DevHandle on which
+ * this task abort TM was received, and the driver will return
+ * the TM status as FAILED.
+ */
+ request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+ if (le16_to_cpu(request->DevHandle) != handle)
+ break;
+
+ ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
+ "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+ handle, timeout, tr_method, smid_task, msix_task);
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+ type, smid_task);
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
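A sketch of the DevHandle test described in the abort-task branch above: the request frame is zeroed when the timed out IO is returned to the SML, so a mismatch between the frame's DevHandle and the handle the TM targeted means the abort worked. The handle value and the struct shape are made up for the demo:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct msg_frame { uint16_t dev_handle; /* ... */ };

/* the frame is memset to zero once the timed out IO is returned */
static int abort_succeeded(struct msg_frame *frame, uint16_t handle)
{
        return frame->dev_handle != handle;
}

int main(void)
{
        struct msg_frame frame = { .dev_handle = 0x0009 };

        printf("before completion: %s\n",
               abort_succeeded(&frame, 0x0009) ? "SUCCESS" : "FAILED");
        memset(&frame, 0, sizeof(frame));   /* IO returned to SML */
        printf("after completion:  %s\n",
               abort_succeeded(&frame, 0x0009) ? "SUCCESS" : "FAILED");
        return 0;
}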
@@ -2826,14 +3190,14 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task,
- u8 timeout, u8 tr_method)
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
- msix_task, timeout, tr_method);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+ smid_task, msix_task, timeout, tr_method);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2941,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
"scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
- (scmd->request->timeout / HZ) * 1000);
+ (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
@@ -2950,7 +3314,7 @@ scsih_abort(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -2980,7 +3344,8 @@ scsih_abort(struct scsi_cmnd *scmd)
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
timeout = ioc->nvme_abort_timeout;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
st->smid, st->msix_io, timeout, 0);
/* Command must be cleared after abort */
@@ -3025,7 +3390,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -3056,11 +3421,12 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
- if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ if (r == SUCCESS && scsi_device_busy(scmd->device))
r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
@@ -3104,7 +3470,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
starget_printk(KERN_INFO, starget,
"target been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -3134,7 +3500,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
@@ -3266,8 +3633,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (ioc->is_driver_loading)
- return;
fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
@@ -3305,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3317,17 +3683,64 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
*
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
+ *
+ * Context: task, can sleep
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (list_empty(&ioc->fw_event_list) ||
- !ioc->firmware_event_thread || in_interrupt())
+ if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
+ !ioc->firmware_event_thread)
return;
+ /*
+ * Mark the currently running event as ignored, so that it
+ * exits quickly. As a diag reset has occurred, there is no
+ * use processing the remaining stale event data entries.
+ */
+ if (ioc->shost_recovery && ioc->current_event)
+ ioc->current_event->ignore = 1;
+
+ ioc->fw_events_cleanup = 1;
+ while ((fw_event = dequeue_next_fw_event(ioc)) ||
+ (fw_event = ioc->current_event)) {
+
+ /*
+ * Don't call cancel_work_sync() for a current_event
+ * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ * otherwise we may observe a deadlock if the current
+ * hard reset was issued as part of processing the current_event.
+ *
+ * The original logic of cleaning the current_event was added
+ * for handling back to back host resets issued by the user,
+ * i.e. during back to back host resets, the driver used to
+ * process two instances of the MPT3SAS_REMOVE_UNRESPONDING_DEVICES
+ * event back to back and this made the driver unregister
+ * the devices from the SML.
+ */
+
+ if (fw_event == ioc->current_event &&
+ ioc->current_event->event !=
+ MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
+ ioc->current_event = NULL;
+ continue;
+ }
+
+ /*
+ * The driver has to clear the ioc->start_scan flag when
+ * it cleans up MPT3SAS_PORT_ENABLE_COMPLETE, otherwise
+ * the scsi_scan_host() API waits for the 5 minute timer
+ * to expire. If we exit from scsi_scan_host() early then
+ * we can issue a new port enable request as part of the
+ * current diag reset.
+ */
+ if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ ioc->start_scan = 0;
+ }
- while ((fw_event = dequeue_next_fw_event(ioc))) {
/*
* Wait on the fw_event to complete. If this returns 1, then
* the event was never executed, and we need a put for the
@@ -3339,8 +3752,8 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
+ ioc->fw_events_cleanup = 0;
}
/**
@@ -3443,22 +3856,26 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* _scsih_ublock_io_device - prepare device to be deleted
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
*
* unblock then put device in offline state
*/
static void
-_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
{
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)
continue;
+ if (sas_device_priv_data->sas_target->port != port)
+ continue;
if (sas_device_priv_data->block)
_scsih_internal_device_unblock(sdev,
sas_device_priv_data);
@@ -3559,7 +3976,8 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
SAS_END_DEVICE) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
if (sas_device) {
set_bit(sas_device->handle,
ioc->blocking_handles);
@@ -3578,7 +3996,8 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
SAS_FANOUT_EXPANDER_DEVICE) {
expander_sibling =
mpt3sas_scsih_expander_find_by_sas_address(
- ioc, mpt3sas_port->remote_identify.sas_address);
+ ioc, mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
_scsih_block_io_to_children_attached_to_ex(ioc,
expander_sibling);
}
@@ -3667,6 +4086,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _tr_list *delayed_tr;
u32 ioc_state;
u8 tr_method = 0;
+ struct hba_port *port = NULL;
if (ioc->pci_error_recovery) {
dewtprintk(ioc,
@@ -3695,6 +4115,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
sas_address = sas_device->sas_address;
+ port = sas_device->port;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!sas_device) {
@@ -3742,7 +4163,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->enclosure_level,
pcie_device->connector_name));
}
- _scsih_ublock_io_device(ioc, sas_address);
+ _scsih_ublock_io_device(ioc, sas_address, port);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -4141,7 +4562,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _scsih_check_for_pending_internal_cmds - check for pending internal messages
+ * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
* @ioc: per adapter object
* @smid: system request message index
*
@@ -4609,7 +5030,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
scmd->result = DID_NO_CONNECT << 16;
else
scmd->result = DID_RESET << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
@@ -4627,48 +5048,34 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
Mpi25SCSIIORequest_t *mpi_request)
{
u16 eedp_flags;
- unsigned char prot_op = scsi_get_prot_op(scmd);
- unsigned char prot_type = scsi_get_prot_type(scmd);
Mpi25SCSIIORequest_t *mpi_request_3v =
(Mpi25SCSIIORequest_t *)mpi_request;
- if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
- return;
-
- if (prot_op == SCSI_PROT_READ_STRIP)
+ switch (scsi_get_prot_op(scmd)) {
+ case SCSI_PROT_READ_STRIP:
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
- else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ break;
+ case SCSI_PROT_WRITE_INSERT:
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
- else
+ break;
+ default:
return;
+ }
- switch (prot_type) {
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
+ if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- /*
- * enable ref/guard checking
- * auto increment ref tag
- */
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(t10_pi_ref_tag(scmd->request));
- break;
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
- case SCSI_PROT_DIF_TYPE3:
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
- /*
- * enable guard checking
- */
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-
- break;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
}
- mpi_request_3v->EEDPBlockSize =
- cpu_to_le16(scmd->device->sector_size);
+ mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
if (ioc->is_gen35_ioc)
eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
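The rewritten _scsih_setup_eedp() above maps per-command protection flags straight onto controller EEDP flag bits. A standalone sketch of that mapping; the flag constants here are illustrative placeholders, not the MPI2_SCSIIO_EEDPFLAGS_* or SCSI_PROT_* values:

#include <stdint.h>

/* illustrative placeholders, not the real MPI2/SCSI constants */
#define EEDP_CHECK_GUARD    (1u << 0)
#define EEDP_CHECK_REFTAG   (1u << 1)
#define EEDP_INC_PRI_REFTAG (1u << 2)

#define PROT_GUARD_CHECK    (1u << 0)
#define PROT_REF_CHECK      (1u << 1)
#define PROT_REF_INCREMENT  (1u << 2)

/* translate per-command protection flags into controller EEDP flags */
static uint32_t prot_to_eedp(uint32_t prot_flags)
{
        uint32_t eedp = 0;

        if (prot_flags & PROT_GUARD_CHECK)
                eedp |= EEDP_CHECK_GUARD;
        if (prot_flags & PROT_REF_CHECK)
                eedp |= EEDP_CHECK_REFTAG;
        if (prot_flags & PROT_REF_INCREMENT)
                eedp |= EEDP_INC_PRI_REFTAG;
        return eedp;
}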
@@ -4699,10 +5106,8 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
ascq = 0x00;
break;
}
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
- ascq);
- scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
+ set_host_byte(scmd, DID_ABORT);
}
/**
@@ -4723,7 +5128,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
- struct request *rq = scmd->request;
+ struct request *rq = scsi_cmd_to_rq(scmd);
int class;
Mpi25SCSIIORequest_t *mpi_request;
struct _pcie_device *pcie_device = NULL;
@@ -4737,13 +5142,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -4751,9 +5156,22 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* invalid device handle */
handle = sas_target_priv_data->handle;
+
+ /*
+ * Avoid error handling escalation when the device is disconnected
+ */
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
+ if (scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
+ scsi_done(scmd);
+ return 0;
+ }
+ }
+
if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -4764,7 +5182,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
} else if (sas_target_priv_data->deleted) {
/* device has been deleted */
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
} else if (sas_target_priv_data->tm_busy ||
sas_device_priv_data->block) {
@@ -4889,7 +5307,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
}
/**
- * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
@@ -5459,18 +5877,14 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->sense_buffer[0] = 0x70;
- scmd->sense_buffer[2] = ILLEGAL_REQUEST;
- scmd->sense_buffer[12] = 0x20;
- scmd->sense_buffer[13] = 0;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
+ 0x20, 0);
}
break;
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
- /* fall through */
+ fallthrough;
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
@@ -5511,11 +5925,636 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scsi_dma_unmap(scmd);
mpt3sas_base_free_smid(ioc, smid);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
/**
+ * _scsih_update_vphys_after_reset - update the Port's
+ * vphys_list after reset
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz, ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u64 attached_sas_addr;
+ u8 found = 0, port_id;
+ Mpi2SasPhyPage0_t phy_pg0;
+ struct hba_port *port, *port_next, *mport;
+ struct virtual_phy *vphy, *vphy_next;
+ struct _sas_device *sas_device;
+
+ /*
+ * Mark all the vphys objects as dirty.
+ */
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
+ }
+ }
+
+ /*
+ * Read SASIOUnitPage0 to get each HBA Phy's data.
+ */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ /*
+ * Loop over each HBA Phy.
+ */
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ /*
+ * Check whether the Phy's Negotiated Link Rate is > 1.5G or not.
+ */
+ if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
+ MPI2_SAS_NEG_LINK_RATE_1_5)
+ continue;
+ /*
+ * Check whether the Phy is connected to an SEP device or not.
+ * If it is an SEP device then read the Phy's SASPHYPage0 data to
+ * determine whether the Phy is a virtual Phy or not. If it is a
+ * virtual phy then it is confirmed that the attached remote
+ * device is an HBA vSES device.
+ */
+ if (!(le32_to_cpu(
+ sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP))
+ continue;
+
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ if (!(le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY))
+ continue;
+ /*
+ * Get the vSES device's SAS Address.
+ */
+ attached_handle = le16_to_cpu(
+ sas_iounit_pg0->PhyData[i].AttachedDevHandle);
+ if (_scsih_get_sas_address(ioc, attached_handle,
+ &attached_sas_addr) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ found = 0;
+ port = port_next = NULL;
+ /*
+ * Loop over each virtual_phy object from
+ * each port's vphys_list.
+ */
+ list_for_each_entry_safe(port,
+ port_next, &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ /*
+ * Continue with next virtual_phy object
+ * if the object is not marked as dirty.
+ */
+ if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
+ continue;
+
+ /*
+ * Continue with the next virtual_phy object
+ * if the object's SAS Address is not equal
+ * to the current Phy's vSES device SAS Address.
+ */
+ if (vphy->sas_address != attached_sas_addr)
+ continue;
+ /*
+ * Enable current Phy number bit in object's
+ * phy_mask field.
+ */
+ if (!(vphy->phy_mask & (1 << i)))
+ vphy->phy_mask = (1 << i);
+ /*
+ * Get the hba_port object from the hba_port table
+ * corresponding to the current phy's Port ID.
+ * If there is no hba_port object corresponding
+ * to the Phy's Port ID then create a new hba_port
+ * object & add it to the hba_port table.
+ */
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
+ if (!mport) {
+ mport = kzalloc(
+ sizeof(struct hba_port), GFP_KERNEL);
+ if (!mport)
+ break;
+ mport->port_id = port_id;
+ ioc_info(ioc,
+ "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
+ __func__, mport, mport->port_id);
+ list_add_tail(&mport->list,
+ &ioc->port_table_list);
+ }
+ /*
+ * If the mport & port pointers are not pointing to
+ * the same hba_port object then the vSES device's
+ * Port ID got changed after reset; hence move the
+ * current virtual_phy object from port's vphys_list
+ * to mport's vphys_list.
+ */
+ if (port != mport) {
+ if (!mport->vphys_mask)
+ INIT_LIST_HEAD(
+ &mport->vphys_list);
+ mport->vphys_mask |= (1 << i);
+ port->vphys_mask &= ~(1 << i);
+ list_move(&vphy->list,
+ &mport->vphys_list);
+ sas_device = mpt3sas_get_sdev_by_addr(
+ ioc, attached_sas_addr, port);
+ if (sas_device)
+ sas_device->port = mport;
+ }
+ /*
+ * Earlier, while updating the hba_port table,
+ * it was determined that there is no other
+ * directly attached device with mport's Port ID;
+ * hence mport was marked as dirty. Only the vSES
+ * device has this Port ID, so unmark the mport
+ * as dirty.
+ */
+ if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
+ mport->sas_address = 0;
+ mport->phy_mask = 0;
+ mport->flags &=
+ ~HBA_PORT_FLAG_DIRTY_PORT;
+ }
+ /*
+ * Unmark current virtual_phy object as dirty.
+ */
+ vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
+ found = 1;
+ break;
+ }
+ if (found)
+ break;
+ }
+ }
+out:
+ kfree(sas_iounit_pg0);
+}
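The function above is a mark-and-sweep pass: flag every vphy dirty, clear the flag on each one the new scan confirms, and whatever stays dirty is stale. A compact sketch of the pattern with a plain array standing in for the vphys lists (the data is invented):

#include <stdio.h>

#define FLAG_DIRTY 0x1

struct entry { int flags; int id; };

int main(void)
{
        struct entry entries[3] = { {0, 0}, {0, 1}, {0, 2} };
        int rediscovered[] = { 1, 0, 1 };   /* entry 1 was not seen again */
        int i;

        /* mark every entry dirty before rediscovery */
        for (i = 0; i < 3; i++)
                entries[i].flags |= FLAG_DIRTY;

        /* clear the flag on entries confirmed by the new scan */
        for (i = 0; i < 3; i++)
                if (rediscovered[i])
                        entries[i].flags &= ~FLAG_DIRTY;

        /* whatever stays dirty is stale and can be reaped */
        for (i = 0; i < 3; i++)
                if (entries[i].flags & FLAG_DIRTY)
                        printf("entry %d is stale\n", entries[i].id);
        return 0;
}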
+
+/**
+ * _scsih_get_port_table_after_reset - Construct temporary port table
+ * @ioc: per adapter object
+ * @port_table: address where port table needs to be constructed
+ *
+ * Return the number of HBA port entries available after reset.
+ */
+static int
+_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_table)
+{
+ u16 sz, ioc_status;
+ int i, j;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u64 attached_sas_addr;
+ u8 found = 0, port_count = 0, port_id;
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return port_count;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ found = 0;
+ if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
+ MPI2_SAS_NEG_LINK_RATE_1_5)
+ continue;
+ attached_handle =
+ le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
+ if (_scsih_get_sas_address(
+ ioc, attached_handle, &attached_sas_addr) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ for (j = 0; j < port_count; j++) {
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (port_table[j].port_id == port_id &&
+ port_table[j].sas_address == attached_sas_addr) {
+ port_table[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ port_table[port_count].port_id = port_id;
+ port_table[port_count].phy_mask = (1 << i);
+ port_table[port_count].sas_address = attached_sas_addr;
+ port_count++;
+ }
+out:
+ kfree(sas_iounit_pg0);
+ return port_count;
+}
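The temporary port table above is built by folding each live phy into an entry keyed by (port id, attached SAS address), starting a new entry when no key matches. A hedged userspace sketch of that grouping; the SAS addresses are invented examples:

#include <stdint.h>
#include <stdio.h>

struct port { uint8_t port_id; uint64_t sas_address; uint32_t phy_mask; };

/* fold a phy into an existing entry keyed by (port_id, sas_address),
 * or start a new entry; returns the updated entry count */
static int add_phy(struct port *table, int count, uint8_t port_id,
                   uint64_t sas_addr, int phy)
{
        int j;

        for (j = 0; j < count; j++) {
                if (table[j].port_id == port_id &&
                    table[j].sas_address == sas_addr) {
                        table[j].phy_mask |= 1u << phy;
                        return count;
                }
        }
        table[count].port_id = port_id;
        table[count].sas_address = sas_addr;
        table[count].phy_mask = 1u << phy;
        return count + 1;
}

int main(void)
{
        struct port table[4] = { { 0 } };
        int n = 0;

        n = add_phy(table, n, 0, 0x500605b0000272d0ULL, 0);
        n = add_phy(table, n, 0, 0x500605b0000272d0ULL, 1);  /* same port */
        n = add_phy(table, n, 1, 0x500605b0000272d8ULL, 4);  /* new port */
        printf("%d ports, port0 mask 0x%x\n", n, (unsigned)table[0].phy_mask);
        return 0;
}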
+
+enum hba_port_matched_codes {
+ NOT_MATCHED = 0,
+ MATCHED_WITH_ADDR_AND_PHYMASK,
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
+ MATCHED_WITH_ADDR_AND_SUBPHYMASK,
+ MATCHED_WITH_ADDR,
+};
+
+/**
+ * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
+ * from HBA port table
+ * @ioc: per adapter object
+ * @port_entry: hba port entry from temporary port table which needs to be
+ * searched for matched entry in the HBA port table
+ * @matched_port_entry: save matched hba port entry here
+ * @count: count of matched entries
+ *
+ * Return the type of matched entry found.
+ */
+static enum hba_port_matched_codes
+_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_entry,
+ struct hba_port **matched_port_entry, int *count)
+{
+ struct hba_port *port_table_entry, *matched_port = NULL;
+ enum hba_port_matched_codes matched_code = NOT_MATCHED;
+ int lcount = 0;
+ *matched_port_entry = NULL;
+
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
+ continue;
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask == port_entry->phy_mask)) {
+ matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
+ matched_port = port_table_entry;
+ break;
+ }
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask & port_entry->phy_mask)
+ && (port_table_entry->port_id == port_entry->port_id)) {
+ matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
+ matched_port = port_table_entry;
+ continue;
+ }
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask & port_entry->phy_mask)) {
+ if (matched_code ==
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
+ continue;
+ matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
+ matched_port = port_table_entry;
+ continue;
+ }
+
+ if (port_table_entry->sas_address == port_entry->sas_address) {
+ if (matched_code ==
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
+ continue;
+ if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
+ continue;
+ matched_code = MATCHED_WITH_ADDR;
+ matched_port = port_table_entry;
+ lcount++;
+ }
+ }
+
+ *matched_port_entry = matched_port;
+ if (matched_code == MATCHED_WITH_ADDR)
+ *count = lcount;
+ return matched_code;
+}
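The matcher above prefers stronger matches: an exact address+phymask match wins outright, address+subset-phymask+port beats address+subset-phymask, which beats a bare address match. A small sketch of that precedence; note the enum here is deliberately reordered so that a larger value means a stronger match, unlike the driver's enum ordering:

#include <stdio.h>

enum match {
        NO_MATCH = 0,
        ADDR_ONLY,                 /* weakest */
        ADDR_AND_SUBPHYMASK,
        ADDR_SUBPHYMASK_AND_PORT,
        ADDR_AND_PHYMASK,          /* strongest: exact match */
};

static enum match best(enum match a, enum match b)
{
        return a > b ? a : b;
}

int main(void)
{
        enum match m = NO_MATCH;

        /* candidate entries are folded in; the strongest match survives */
        m = best(m, ADDR_ONLY);
        m = best(m, ADDR_SUBPHYMASK_AND_PORT);
        printf("best match code: %d\n", m);
        return 0;
}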
+
+/**
+ * _scsih_del_phy_part_of_anther_port - remove the phy if it
+ * is a part of another port
+ * @ioc: per adapter object
+ * @port_table: port table after reset
+ * @index: hba port entry index
+ * @port_count: number of ports available after host reset
+ * @offset: HBA phy bit offset
+ *
+ */
+static void
+_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_table,
+ int index, u8 port_count, int offset)
+{
+ struct _sas_node *sas_node = &ioc->sas_hba;
+ u32 i, found = 0;
+
+ for (i = 0; i < port_count; i++) {
+ if (i == index)
+ continue;
+
+ if (port_table[i].phy_mask & (1 << offset)) {
+ mpt3sas_transport_del_phy_from_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset]);
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ port_table[index].phy_mask |= (1 << offset);
+}
+
+/**
+ * _scsih_add_or_del_phys_from_existing_port - add/remove a phy to/from
+ * the right port
+ * @ioc: per adapter object
+ * @hba_port_entry: hba port table entry
+ * @port_table: temporary port table
+ * @index: hba port entry index
+ * @port_count: number of ports available after host reset
+ *
+ */
+static void
+_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *hba_port_entry, struct hba_port *port_table,
+ int index, int port_count)
+{
+ u32 phy_mask, offset = 0;
+ struct _sas_node *sas_node = &ioc->sas_hba;
+
+ phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
+
+ for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
+ if (phy_mask & (1 << offset)) {
+ if (!(port_table[index].phy_mask & (1 << offset))) {
+ _scsih_del_phy_part_of_anther_port(
+ ioc, port_table, index, port_count,
+ offset);
+ continue;
+ }
+ if (sas_node->phy[offset].phy_belongs_to_port)
+ mpt3sas_transport_del_phy_from_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset]);
+ mpt3sas_transport_add_phy_to_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset],
+ hba_port_entry->sas_address,
+ hba_port_entry);
+ }
+ }
+}
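XOR of the old and new phy masks yields exactly the phys whose membership changed, and presence in the new mask then decides add versus delete, which is the core of the function above. A runnable sketch with made-up masks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t old_mask = 0x0F;   /* phys 0-3 previously in the port */
        uint32_t new_mask = 0x3C;   /* phys 2-5 after reset */
        uint32_t changed = old_mask ^ new_mask;
        int phy;

        for (phy = 0; phy < 8; phy++) {
                if (!(changed & (1u << phy)))
                        continue;
                if (new_mask & (1u << phy))
                        printf("phy %d: add to port\n", phy);
                else
                        printf("phy %d: remove from port\n", phy);
        }
        return 0;
}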
+
+/**
+ * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct hba_port *port, *port_next;
+ struct virtual_phy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
+ drsprintk(ioc, ioc_info(ioc,
+ "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
+ vphy, port->port_id,
+ vphy->phy_mask));
+ port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ }
+ if (!port->vphys_mask && !port->sas_address)
+ port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
+ }
+}
+
+/**
+ * _scsih_del_dirty_port_entries - delete dirty port entries from the port
+ * list after host reset
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
+ port->flags & HBA_PORT_FLAG_NEW_PORT)
+ continue;
+
+ drsprintk(ioc, ioc_info(ioc,
+ "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
+ port, port->port_id, port->phy_mask));
+ list_del(&port->list);
+ kfree(port);
+ }
+}
+
+/**
+ * _scsih_sas_port_refresh - Update HBA port table after host reset
+ * @ioc: per adapter object
+ */
+static void
+_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 port_count = 0;
+ struct hba_port *port_table;
+ struct hba_port *port_table_entry;
+ struct hba_port *port_entry = NULL;
+ int i, j, count = 0, lcount = 0;
+ int ret;
+ u64 sas_addr;
+ u8 num_phys;
+
+ drsprintk(ioc, ioc_info(ioc,
+ "updating ports for sas_host(0x%016llx)\n",
+ (unsigned long long)ioc->sas_hba.sas_address));
+
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (num_phys > ioc->sas_hba.nr_phys_allocated) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc->sas_hba.num_phys = num_phys;
+
+ port_table = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct hba_port), GFP_KERNEL);
+ if (!port_table)
+ return;
+
+ port_count = _scsih_get_port_table_after_reset(ioc, port_table);
+ if (!port_count) {
+ kfree(port_table);
+ return;
+ }
+
+ drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
+ for (j = 0; j < port_count; j++)
+ drsprintk(ioc, ioc_info(ioc,
+ "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ port_table[j].port_id,
+ port_table[j].phy_mask, port_table[j].sas_address));
+
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
+ port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
+
+ drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
+ port_table_entry = NULL;
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ drsprintk(ioc, ioc_info(ioc,
+ "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ port_table_entry->port_id,
+ port_table_entry->phy_mask,
+ port_table_entry->sas_address));
+ }
+
+ for (j = 0; j < port_count; j++) {
+ ret = _scsih_look_and_get_matched_port_entry(ioc,
+ &port_table[j], &port_entry, &count);
+ if (!port_entry) {
+ drsprintk(ioc, ioc_info(ioc,
+ "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
+ port_table[j].sas_address,
+ port_table[j].port_id));
+ continue;
+ }
+
+ switch (ret) {
+ case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
+ case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
+ _scsih_add_or_del_phys_from_existing_port(ioc,
+ port_entry, port_table, j, port_count);
+ break;
+ case MATCHED_WITH_ADDR:
+ sas_addr = port_table[j].sas_address;
+ for (i = 0; i < port_count; i++) {
+ if (port_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+
+ if (count > 1 || lcount > 1)
+ port_entry = NULL;
+ else
+ _scsih_add_or_del_phys_from_existing_port(ioc,
+ port_entry, port_table, j, port_count);
+ }
+
+ if (!port_entry)
+ continue;
+
+ if (port_entry->port_id != port_table[j].port_id)
+ port_entry->port_id = port_table[j].port_id;
+ port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
+ port_entry->phy_mask = port_table[j].phy_mask;
+ }
+
+ kfree(port_table);
+}
+
+/**
+ * _scsih_alloc_vphy - allocate virtual_phy object
+ * @ioc: per adapter object
+ * @port_id: Port ID number
+ * @phy_num: HBA Phy number
+ *
+ * Returns allocated virtual_phy object.
+ */
+static struct virtual_phy *
+_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+{
+ struct virtual_phy *vphy;
+ struct hba_port *port;
+
+ port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!port)
+ return NULL;
+
+ vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
+ if (!vphy) {
+ vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+
+ /*
+ * Enable bit corresponding to HBA phy number on its
+ * parent hba_port object's vphys_mask field.
+ */
+ port->vphys_mask |= (1 << phy_num);
+ vphy->phy_mask |= (1 << phy_num);
+
+ list_add_tail(&vphy->list, &port->vphys_list);
+
+ ioc_info(ioc,
+ "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
+ vphy, port->port_id, phy_num);
+ }
+ return vphy;
+}
+
+/**
* _scsih_sas_host_refresh - refreshing sas host object contents
* @ioc: per adapter object
* Context: user
@@ -5533,7 +6572,9 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
u16 attached_handle;
- u8 link_rate;
+ u8 link_rate, port_id;
+ struct hba_port *port;
+ Mpi2SasPhyPage0_t phy_pg0;
dtmprintk(ioc,
ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
@@ -5557,15 +6598,94 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
if (i == 0)
- ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
- PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.handle = le16_to_cpu(
+ sas_iounit_pg0->PhyData[0].ControllerDevHandle);
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ if (ioc->shost_recovery)
+ port->flags = HBA_PORT_FLAG_NEW_PORT;
+ list_add_tail(&port->list, &ioc->port_table_list);
+ }
+ /*
+ * Check whether current Phy belongs to HBA vSES device or not.
+ */
+ if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP &&
+ (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+ &phy_pg0, i))) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (!(le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY))
+ continue;
+ /*
+ * Allocate a virtual_phy object for vSES device, if
+ * this vSES device is hot added.
+ */
+ if (!_scsih_alloc_vphy(ioc, port_id, i))
+ goto out;
+ ioc->sas_hba.phy[i].hba_vphy = 1;
+ }
+
+ /*
+ * Add new HBA phys to STL if these new phys got added as part
+ * of HBA Firmware upgrade/downgrade operation.
+ */
+ if (!ioc->sas_hba.phy[i].phy) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+ &phy_pg0, i))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc,
+ &ioc->sas_hba.phy[i], phy_pg0,
+ ioc->sas_hba.parent_dev);
+ continue;
+ }
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle);
if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ ioc->sas_hba.phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
- attached_handle, i, link_rate);
+ attached_handle, i, link_rate,
+ ioc->sas_hba.phy[i].port);
+ }
+ /*
+ * Clear the phy details if this phy got disabled as part of
+ * HBA Firmware upgrade/downgrade operation.
+ */
+ for (i = ioc->sas_hba.num_phys;
+ i < ioc->sas_hba.nr_phys_allocated; i++) {
+ if (ioc->sas_hba.phy[i].phy &&
+ ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
+ SAS_LINK_RATE_1_5_GBPS)
+ mpt3sas_transport_update_links(ioc,
+ ioc->sas_hba.sas_address, 0, i,
+ MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
}
out:
kfree(sas_iounit_pg0);
@@ -5590,7 +6710,8 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u16 sz;
u8 device_missing_delay;
- u8 num_phys;
+ u8 num_phys, port_id;
+ struct hba_port *port;
mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
if (!num_phys) {
@@ -5598,7 +6719,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
__FILE__, __LINE__, __func__);
return;
}
- ioc->sas_hba.phy = kcalloc(num_phys,
+
+ ioc->sas_hba.nr_phys_allocated = max_t(u8,
+ MPT_MAX_HBA_NUM_PHYS, num_phys);
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!ioc->sas_hba.phy) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -5683,8 +6807,40 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
if (i == 0)
ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
PhyData[0].ControllerDevHandle);
+
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ list_add_tail(&port->list,
+ &ioc->port_table_list);
+ }
+
+ /*
+ * Check whether current Phy belongs to HBA vSES device or not.
+ */
+ if ((le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
+ (phy_pg0.NegotiatedLinkRate >> 4) >=
+ MPI2_SAS_NEG_LINK_RATE_1_5) {
+ /*
+ * Allocate a virtual_phy object for vSES device.
+ */
+ if (!_scsih_alloc_vphy(ioc, port_id, i))
+ goto out;
+ ioc->sas_hba.phy[i].hba_vphy = 1;
+ }
+
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
ioc->sas_hba.phy[i].phy_id = i;
+ ioc->sas_hba.phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
phy_pg0, ioc->sas_hba.parent_dev);
}
@@ -5738,6 +6894,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
int i;
unsigned long flags;
struct _sas_port *mpt3sas_port = NULL;
+ u8 port_id;
int rc = 0;
@@ -5770,10 +6927,13 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
__FILE__, __LINE__, __func__);
return -1;
}
+
+ port_id = expander_pg0.PhysicalPort;
if (sas_address_parent != ioc->sas_hba.sas_address) {
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address_parent);
+ sas_address_parent,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_expander) {
rc = _scsih_expander_add(ioc, parent_handle);
@@ -5785,7 +6945,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_address = le64_to_cpu(expander_pg0.SASAddress);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
@@ -5803,13 +6963,22 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander->num_phys = expander_pg0.NumPhys;
sas_expander->sas_address_parent = sas_address_parent;
sas_expander->sas_address = sas_address;
+ sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!sas_expander->port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
handle, parent_handle,
(u64)sas_expander->sas_address, sas_expander->num_phys);
- if (!sas_expander->num_phys)
+ if (!sas_expander->num_phys) {
+ rc = -1;
goto out_fail;
+ }
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
@@ -5821,7 +6990,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&sas_expander->sas_port_list);
mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
- sas_address_parent);
+ sas_address_parent, sas_expander->port);
if (!mpt3sas_port) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -5829,6 +6998,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
goto out_fail;
}
sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+ sas_expander->rphy = mpt3sas_port->rphy;
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
@@ -5840,6 +7010,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
sas_expander->phy[i].handle = handle;
sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
if ((mpt3sas_transport_add_expander_phy(ioc,
&sas_expander->phy[i], expander_pg1,
@@ -5867,7 +7039,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (mpt3sas_port)
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
- sas_address_parent);
+ sas_address_parent, sas_expander->port);
kfree(sas_expander);
return rc;
}
@@ -5876,9 +7048,11 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_expander_remove - removing expander object
* @ioc: per adapter object
* @sas_address: expander sas_address
+ * @port: hba port entry
*/
void
-mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port)
{
struct _sas_node *sas_expander;
unsigned long flags;
@@ -5886,9 +7060,12 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
if (ioc->shost_recovery)
return;
+ if (!port)
+ return;
+
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, port);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
_scsih_expander_node_remove(ioc, sas_expander);
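From this hunk on, every expander and end-device lookup carries a struct hba_port * alongside the SAS address, so the same address reachable through two HBA ports resolves to distinct objects. A minimal standalone model of that two-key search; the types and values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Devices are keyed by (sas_address, port) after this patch, so the
 * same address seen through two HBA ports maps to distinct objects. */
struct node {
	uint64_t sas_address;
	int port_id;
	struct node *next;
};

static struct node *find_node(struct node *head, uint64_t addr, int port_id)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (n->sas_address == addr && n->port_id == port_id)
			return n;
	return NULL;
}

int main(void)
{
	struct node b = { 0x5000c50012345678ULL, 1, NULL };
	struct node a = { 0x5000c50012345678ULL, 0, &b };

	printf("port0 -> %p\nport1 -> %p\n",
	       (void *)find_node(&a, a.sas_address, 0),
	       (void *)find_node(&a, a.sas_address, 1));
	return 0;
}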
@@ -6011,7 +7188,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
unsigned long flags;
@@ -6019,6 +7196,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
struct scsi_target *starget;
struct MPT3SAS_TARGET *sas_target_priv_data;
u32 device_info;
+ struct hba_port *port;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
@@ -6041,8 +7219,11 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
+ if (!port)
+ goto out_unlock;
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address, port);
if (!sas_device)
goto out_unlock;
@@ -6098,7 +7279,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
goto out_unlock;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- _scsih_ublock_io_device(ioc, sas_address);
+ _scsih_ublock_io_device(ioc, sas_address, port);
if (sas_device)
sas_device_put(sas_device);
@@ -6132,6 +7313,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
u32 ioc_status;
u64 sas_address;
u32 device_info;
+ u8 port_id;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6168,8 +7350,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device_pg0.AccessStatus))
return -1;
+ port_id = sas_device_pg0.PhysicalPort;
sas_device = mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
if (sas_device) {
clear_bit(handle, ioc->pend_os_device_add);
sas_device_put(sas_device);
@@ -6210,6 +7393,12 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->phy = sas_device_pg0.PhyNum;
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+ sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!sas_device->port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
if (le16_to_cpu(sas_device_pg0.Flags)
& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
@@ -6237,12 +7426,17 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+ sas_device->port_type = sas_device_pg0.MaxPortConnections;
+ ioc_info(ioc,
+ "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
+ handle, sas_device->sas_address, sas_device->port_type);
if (ioc->wait_for_discovery_to_complete)
_scsih_sas_device_init_add(ioc, sas_device);
else
_scsih_sas_device_add(ioc, sas_device);
+out:
sas_device_put(sas_device);
return 0;
}
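The out: label above funnels every exit of _scsih_add_device() through sas_device_put(). A minimal refcount sketch of that get/put discipline follows; it is pure illustration, not the kernel's kref API:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcount mirroring the sas_device get/put discipline. */
struct sas_dev { int refcount; };

static struct sas_dev *sas_dev_get(struct sas_dev *d)
{
	if (d)
		d->refcount++;
	return d;
}

static void sas_dev_put(struct sas_dev *d)
{
	if (d && --d->refcount == 0) {
		printf("last reference dropped, freeing\n");
		free(d);
	}
}

int main(void)
{
	struct sas_dev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->refcount = 1;	/* creation reference */
	sas_dev_get(d);		/* a lookup takes its own reference */
	sas_dev_put(d);		/* ...and drops it on every exit path */
	sas_dev_put(d);		/* final put frees the object */
	return 0;
}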
@@ -6275,7 +7469,8 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
if (sas_device->starget && sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ _scsih_ublock_io_device(ioc, sas_device->sas_address,
+ sas_device->port);
sas_target_priv_data->handle =
MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -6283,7 +7478,8 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->hide_drives)
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
sas_device->handle, (u64)sas_device->sas_address);
@@ -6394,6 +7590,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
u8 link_rate, prev_link_rate;
+ struct hba_port *port;
Mpi2EventDataSasTopologyChangeList_t *event_data =
(Mpi2EventDataSasTopologyChangeList_t *)
fw_event->event_data;
@@ -6415,6 +7612,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
}
parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
/* handle expander add */
if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
@@ -6427,6 +7625,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_expander) {
sas_address = sas_expander->sas_address;
max_phys = sas_expander->num_phys;
+ port = sas_expander->port;
} else if (parent_handle < ioc->sas_hba.num_phys) {
sas_address = ioc->sas_hba.sas_address;
max_phys = ioc->sas_hba.num_phys;
@@ -6469,7 +7668,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
mpt3sas_transport_update_links(ioc, sas_address,
- handle, phy_number, link_rate);
+ handle, phy_number, link_rate, port);
if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
break;
@@ -6480,7 +7679,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
- /* fall through */
+ fallthrough;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6488,7 +7687,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
mpt3sas_transport_update_links(ioc, sas_address,
- handle, phy_number, link_rate);
+ handle, phy_number, link_rate, port);
_scsih_add_device(ioc, handle, phy_number, 0);
@@ -6503,7 +7702,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle expander removal */
if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
sas_expander)
- mpt3sas_expander_remove(ioc, sas_address);
+ mpt3sas_expander_remove(ioc, sas_address, port);
return 0;
}
@@ -6604,7 +7803,8 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(event_data->SASAddress);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address,
+ mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
if (!sas_device || !sas_device->starget)
goto out;
@@ -6753,7 +7953,7 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
if (pcie_device->starget && pcie_device->starget->hostdata) {
sas_target_priv_data = pcie_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- _scsih_ublock_io_device(ioc, pcie_device->wwid);
+ _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -6875,7 +8075,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
pcie_device_put(pcie_device);
- _scsih_ublock_io_device(ioc, wwid);
+ _scsih_ublock_io_device(ioc, wwid, NULL);
return;
}
@@ -7208,7 +8408,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
- /* fall through */
+ fallthrough;
case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
if (ioc->shost_recovery)
break;
@@ -7527,7 +8727,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
st->msix_io, 30, 0);
if (r == FAILED) {
@@ -7568,9 +8768,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
- st->msix_io, 30, 0);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30, 0);
if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -8044,7 +9244,9 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc,
+ sas_device_pg0.PhysicalPort, 0));
_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
_scsih_add_device(ioc, handle, 0, 1);
@@ -8350,7 +9552,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc,
+ sas_device_pg0.PhysicalPort, 0));
_scsih_add_device(ioc, handle, 0, 1);
@@ -8459,6 +9663,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_update_device_qdepth - update the device queue depth during reset
+ * @ioc: per adapter object
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ sas_device = sas_device_priv_data->sas_target->sas_dev;
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+ qdepth = ioc->max_nvme_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ qdepth = ioc->max_sata_qd;
+ else
+ continue;
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+ }
+ }
+}
+
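The branch chain above reduces to a four-way decision on device type. A minimal userspace sketch of the same selection follows; the qd limits stand in for the driver's ioc->max_*_qd fields and the values are illustrative, not the driver's defaults:

#include <stdio.h>

/* Sketch of the qdepth selection in _scsih_update_device_qdepth(). */
enum dev_kind { DEV_PCIE, DEV_SSP_WIDE, DEV_SSP_NARROW, DEV_SATA };

static int pick_qdepth(enum dev_kind kind)
{
	/* Illustrative stand-ins for ioc->max_*_qd. */
	const int max_nvme_qd = 128, max_wideport_qd = 256;
	const int max_narrowport_qd = 64, max_sata_qd = 32;

	switch (kind) {
	case DEV_PCIE:		return max_nvme_qd;
	case DEV_SSP_WIDE:	return max_wideport_qd;	/* port_type > 1 */
	case DEV_SSP_NARROW:	return max_narrowport_qd;
	case DEV_SATA:		return max_sata_qd;
	}
	return 0;
}

int main(void)
{
	printf("wide SSP qd = %d\n", pick_qdepth(DEV_SSP_WIDE));
	printf("SATA qd     = %d\n", pick_qdepth(DEV_SATA));
	return 0;
}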
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
* @sas_device_pg0: SAS Device page 0
@@ -8475,6 +9715,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
struct _sas_device *sas_device = NULL;
struct _enclosure_node *enclosure_dev = NULL;
unsigned long flags;
+ struct hba_port *port = mpt3sas_get_port_by_id(
+ ioc, sas_device_pg0->PhysicalPort, 0);
if (sas_device_pg0->EnclosureHandle) {
enclosure_dev =
@@ -8486,69 +9728,71 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if ((sas_device->sas_address == le64_to_cpu(
- sas_device_pg0->SASAddress)) && (sas_device->slot ==
- le16_to_cpu(sas_device_pg0->Slot))) {
- sas_device->responding = 1;
- starget = sas_device->starget;
- if (starget && starget->hostdata) {
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- sas_target_priv_data->deleted = 0;
- } else
- sas_target_priv_data = NULL;
- if (starget) {
- starget_printk(KERN_INFO, starget,
- "handle(0x%04x), sas_addr(0x%016llx)\n",
- le16_to_cpu(sas_device_pg0->DevHandle),
- (unsigned long long)
- sas_device->sas_address);
+ if (sas_device->sas_address != le64_to_cpu(
+ sas_device_pg0->SASAddress))
+ continue;
+ if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx)\n",
+ le16_to_cpu(sas_device_pg0->DevHandle),
+ (unsigned long long)
+ sas_device->sas_address);
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget,
- "enclosure logical id(0x%016llx),"
- " slot(%d)\n",
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- }
- if (le16_to_cpu(sas_device_pg0->Flags) &
- MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
- sas_device->enclosure_level =
- sas_device_pg0->EnclosureLevel;
- memcpy(&sas_device->connector_name[0],
- &sas_device_pg0->ConnectorName[0], 4);
- } else {
- sas_device->enclosure_level = 0;
- sas_device->connector_name[0] = '\0';
- }
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ if (le16_to_cpu(sas_device_pg0->Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ sas_device_pg0->EnclosureLevel;
+ memcpy(&sas_device->connector_name[0],
+ &sas_device_pg0->ConnectorName[0], 4);
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
- sas_device->enclosure_handle =
- le16_to_cpu(sas_device_pg0->EnclosureHandle);
- sas_device->is_chassis_slot_valid = 0;
- if (enclosure_dev) {
- sas_device->enclosure_logical_id = le64_to_cpu(
- enclosure_dev->pg0.EnclosureLogicalID);
- if (le16_to_cpu(enclosure_dev->pg0.Flags) &
- MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
- sas_device->is_chassis_slot_valid = 1;
- sas_device->chassis_slot =
- enclosure_dev->pg0.ChassisSlot;
- }
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0->EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+ if (enclosure_dev) {
+ sas_device->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
}
+ }
- if (sas_device->handle == le16_to_cpu(
- sas_device_pg0->DevHandle))
- goto out;
- pr_info("\thandle changed from(0x%04x)!!!\n",
- sas_device->handle);
- sas_device->handle = le16_to_cpu(
- sas_device_pg0->DevHandle);
- if (sas_target_priv_data)
- sas_target_priv_data->handle =
- le16_to_cpu(sas_device_pg0->DevHandle);
+ if (sas_device->handle == le16_to_cpu(
+ sas_device_pg0->DevHandle))
goto out;
- }
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = le16_to_cpu(
+ sas_device_pg0->DevHandle);
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle =
+ le16_to_cpu(sas_device_pg0->DevHandle);
+ goto out;
}
out:
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -8899,6 +10143,8 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
u16 handle = le16_to_cpu(expander_pg0->DevHandle);
u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
+ struct hba_port *port = mpt3sas_get_port_by_id(
+ ioc, expander_pg0->PhysicalPort, 0);
if (enclosure_handle)
enclosure_dev =
@@ -8909,6 +10155,8 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
+ if (sas_expander->port != port)
+ continue;
sas_expander->responding = 1;
if (enclosure_dev) {
@@ -8948,6 +10196,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u64 sas_address;
u16 handle;
+ u8 port;
ioc_info(ioc, "search for expanders: start\n");
@@ -8965,9 +10214,12 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
handle = le16_to_cpu(expander_pg0.DevHandle);
sas_address = le64_to_cpu(expander_pg0.SASAddress);
- pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
- handle,
- (unsigned long long)sas_address);
+ port = expander_pg0.PhysicalPort;
+ pr_info(
+ "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ handle, (unsigned long long)sas_address,
+ (ioc->multipath_on_hba ?
+ port : MULTIPATH_DISABLED_PORT_ID));
_scsih_mark_responding_expander(ioc, &expander_pg0);
}
@@ -8999,6 +10251,17 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
* owner for the reference the list had on any object we prune.
*/
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+ /*
+ * Clean up the sas_device_init_list, as the
+ * driver does a fresh scan as part of diag reset.
+ */
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_init_list, list) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
if (!sas_device->responding)
@@ -9020,6 +10283,16 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
INIT_LIST_HEAD(&head);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ /*
+ * Clean up the pcie_device_init_list, as the
+ * driver does a fresh scan as part of diag reset.
+ */
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_init_list, list) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+
list_for_each_entry_safe(pcie_device, pcie_device_next,
&ioc->pcie_device_list, list) {
if (!pcie_device->responding)
@@ -9089,7 +10362,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
le16_to_cpu(expander_pg1.AttachedDevHandle), i,
- expander_pg1.NegotiatedLinkRate >> 4);
+ expander_pg1.NegotiatedLinkRate >> 4,
+ sas_expander->port);
}
}
@@ -9103,12 +10377,12 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
Mpi2ExpanderPage0_t expander_pg0;
Mpi2SasDevicePage0_t sas_device_pg0;
Mpi26PCIeDevicePage0_t pcie_device_pg0;
- Mpi2RaidVolPage1_t volume_pg1;
- Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidVolPage1_t *volume_pg1;
+ Mpi2RaidVolPage0_t *volume_pg0;
Mpi2RaidPhysDiskPage0_t pd_pg0;
Mpi2EventIrConfigElement_t element;
Mpi2ConfigReply_t mpi_reply;
- u8 phys_disk_num;
+ u8 phys_disk_num, port_id;
u16 ioc_status;
u16 handle, parent_handle;
u64 sas_address;
@@ -9119,6 +10393,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u8 retry_count;
unsigned long flags;
+ volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
+ if (!volume_pg0)
+ return;
+
+ volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
+ if (!volume_pg1) {
+ kfree(volume_pg0);
+ return;
+ }
+
ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
@@ -9138,8 +10422,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
handle = le16_to_cpu(expander_pg0.DevHandle);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ port_id = expander_pg0.PhysicalPort;
expander_device = mpt3sas_scsih_expander_find_by_sas_address(
- ioc, le64_to_cpu(expander_pg0.SASAddress));
+ ioc, le64_to_cpu(expander_pg0.SASAddress),
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (expander_device)
_scsih_refresh_expander_links(ioc, expander_device,
@@ -9198,9 +10484,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
handle,
(u64)le64_to_cpu(sas_device_pg0.SASAddress));
+ port_id = sas_device_pg0.PhysicalPort;
mpt3sas_transport_update_links(ioc, sas_address,
handle, sas_device_pg0.PhyNum,
- MPI2_SAS_NEG_LINK_RATE_1_5);
+ MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
set_bit(handle, ioc->pd_handles);
retry_count = 0;
/* This will retry adding the end device.
@@ -9224,7 +10512,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
/* volumes */
handle = 0xFFFF;
while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
- &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
@@ -9232,15 +10520,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
- handle = le16_to_cpu(volume_pg1.DevHandle);
+ handle = le16_to_cpu(volume_pg1->DevHandle);
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_wwid(ioc,
- le64_to_cpu(volume_pg1.WWID));
+ le64_to_cpu(volume_pg1->WWID));
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (raid_device)
continue;
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
- &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t)))
continue;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@@ -9250,17 +10538,17 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
- if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
- volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
- volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
- element.VolDevHandle = volume_pg1.DevHandle;
+ element.VolDevHandle = volume_pg1->DevHandle;
ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
- volume_pg1.DevHandle);
+ volume_pg1->DevHandle);
_scsih_sas_volume_add(ioc, &element);
ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
- volume_pg1.DevHandle);
+ volume_pg1->DevHandle);
}
}
@@ -9286,8 +10574,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
if (!(_scsih_is_end_device(
le32_to_cpu(sas_device_pg0.DeviceInfo))))
continue;
+ port_id = sas_device_pg0.PhysicalPort;
sas_device = mpt3sas_get_sdev_by_addr(ioc,
- le64_to_cpu(sas_device_pg0.SASAddress));
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
if (sas_device) {
sas_device_put(sas_device);
continue;
@@ -9298,7 +10588,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
handle,
(u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
retry_count = 0;
/* This will retry adding the end device.
* _scsih_add_device() will decide on retries and
@@ -9345,12 +10636,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
+
+ kfree(volume_pg0);
+ kfree(volume_pg1);
+
ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
ioc_info(ioc, "scan devices: complete\n");
}
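The volume_pg0/volume_pg1 change in the function above moves two sizeable config pages off the kernel stack onto the heap. A userspace sketch of that pattern, including the paired unwind when the second allocation fails; the page size is an illustrative stand-in:

#include <stdio.h>
#include <stdlib.h>

struct big_page { char data[4096]; };	/* illustrative size */

/* Two heap pages replace what used to be large stack locals; note the
 * unwind of the first allocation when the second fails. */
static int scan_after_reset(void)
{
	struct big_page *pg0, *pg1;

	pg0 = calloc(1, sizeof(*pg0));
	if (!pg0)
		return -1;
	pg1 = calloc(1, sizeof(*pg1));
	if (!pg1) {
		free(pg0);
		return -1;
	}
	/* ...use pg0/pg1 where the stack locals used to be... */
	free(pg0);
	free(pg1);
	return 0;
}

int main(void)
{
	printf("scan rc = %d\n", scan_after_reset());
	return 0;
}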
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -9391,7 +10686,7 @@ mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -9400,8 +10695,11 @@ void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
+ if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
+ if (ioc->multipath_on_hba) {
+ _scsih_sas_port_refresh(ioc);
+ _scsih_update_vphys_after_reset(ioc);
+ }
_scsih_prep_device_scan(ioc);
_scsih_create_enclosure_list_after_reset(ioc);
_scsih_search_responding_sas_devices(ioc);
@@ -9421,11 +10719,13 @@ mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
+ ioc->current_event = fw_event;
_scsih_fw_event_del_from_list(ioc, fw_event);
/* the queue is being flushed so ignore this event */
if (ioc->remove_host || ioc->pci_error_recovery) {
fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
return;
}
@@ -9439,15 +10739,31 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
while (scsi_host_in_recovery(ioc->shost) ||
ioc->shost_recovery) {
/*
- * If we're unloading, bail. Otherwise, this can become
- * an infinite loop.
+ * If we're unloading or cancelling the work, bail.
+ * Otherwise, this can become an infinite loop.
*/
- if (ioc->remove_host)
+ if (ioc->remove_host || ioc->fw_events_cleanup)
goto out;
ssleep(1);
}
_scsih_remove_unresponding_devices(ioc);
+ _scsih_del_dirty_vphy(ioc);
+ _scsih_del_dirty_port_entries(ioc);
+ if (ioc->is_gen35_ioc)
+ _scsih_update_device_qdepth(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ /*
+ * If a diag reset has occurred during the driver load,
+ * then the driver has to complete the driver load operation
+ * by executing the following items:
+ * - register the devices from sas_device_init_list to SML,
+ * - clear the is_driver_loading flag,
+ * - start the watchdog thread.
+ * In the happy driver load path, these are taken care of when
+ * the driver executes scsih_scan_finished().
+ */
+ if (ioc->is_driver_loading)
+ _scsih_complete_devices_scanning(ioc);
_scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
@@ -9503,11 +10819,12 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event);
- return;
- break;
+ ioc->current_event = NULL;
+ return;
}
out:
fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
}
/**
@@ -9591,11 +10908,23 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
_scsih_check_topo_delete_events(ioc,
(Mpi2EventDataSasTopologyChangeList_t *)
mpi_reply->EventData);
+ /*
+ * No need to add the topology change list
+ * event to the fw event work queue while a
+ * diag reset is going on, since during diag
+ * reset the driver scans devices by reading
+ * SAS device page 0, not by processing
+ * events.
+ */
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_check_pcie_topo_remove_events(ioc,
(Mpi26EventDataPCIeTopologyChangeList_t *)
mpi_reply->EventData);
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
_scsih_check_ir_config_unhide_events(ioc,
@@ -9610,20 +10939,20 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_LOG_ENTRY_ADDED:
{
Mpi2EventDataLogEntryAdded_t *log_entry;
- u32 *log_code;
+ u32 log_code;
if (!ioc->is_warpdrive)
break;
log_entry = (Mpi2EventDataLogEntryAdded_t *)
mpi_reply->EventData;
- log_code = (u32 *)log_entry->LogData;
+ log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
if (le16_to_cpu(log_entry->LogEntryQualifier)
!= MPT2_WARPDRIVE_LOGENTRY)
break;
- switch (le32_to_cpu(*log_code)) {
+ switch (log_code) {
case MPT2_WARPDRIVE_LC_SSDT:
ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
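The LogData fix above decodes the 32-bit little-endian log code once, up front, instead of holding a pointer into the event payload. A userspace equivalent of that byte-order-safe read; get_le32 is a local helper, not a kernel API (the driver uses le32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit read from a byte buffer. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t log_data[4] = { 0x01, 0x00, 0x00, 0x00 };

	printf("log_code = 0x%08x\n", (unsigned)get_le32(log_data));
	return 0;
}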
@@ -9669,7 +10998,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
pr_notice("cannot be powered and devices connected\n");
pr_notice("to this active cable will not be seen\n");
pr_notice("This active cable requires %d mW of power\n",
- ActiveCableEventData->ActiveCablePowerRequirement);
+ le32_to_cpu(
+ ActiveCableEventData->ActiveCablePowerRequirement));
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
@@ -9718,6 +11048,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_port *mpt3sas_port, *next;
unsigned long flags;
+ int port_id;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -9727,21 +11058,27 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
mpt3sas_device_remove_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
else if (mpt3sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
mpt3sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE)
mpt3sas_expander_remove(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
}
+ port_id = sas_expander->port->port_id;
+
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
- sas_expander->sas_address_parent);
+ sas_expander->sas_address_parent, sas_expander->port);
- ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
- sas_expander->handle, (unsigned long long)
- sas_expander->sas_address);
+ ioc_info(ioc,
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address,
+ port_id);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
@@ -9889,6 +11226,34 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_get_shost_and_ioc - get the shost and ioc pointers
+ * and verify that they are not NULL
+ * @pdev: PCI device struct
+ * @shost: address of scsi host pointer
+ * @ioc: address of HBA adapter pointer
+ *
+ * Return: zero if *shost and *ioc are not NULL, otherwise an error number.
+ */
+static int
+_scsih_get_shost_and_ioc(struct pci_dev *pdev,
+ struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
+{
+ *shost = pci_get_drvdata(pdev);
+ if (*shost == NULL) {
+ dev_err(&pdev->dev, "pdev's driver data is null\n");
+ return -ENXIO;
+ }
+
+ *ioc = shost_priv(*shost);
+ if (*ioc == NULL) {
+ dev_err(&pdev->dev, "shost's private data is null\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
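A standalone model of the validation _scsih_get_shost_and_ioc() performs before the PCI callbacks proceed, assuming nothing beyond two nullable links; the struct names here are stand-ins for pci_dev, Scsi_Host, and the ioc:

#include <errno.h>
#include <stdio.h>

/* Stand-in types; the driver resolves pci_dev -> Scsi_Host -> ioc. */
struct host { void *priv; };
struct dev { struct host *drvdata; };

static int get_host_and_priv(struct dev *d, struct host **h, void **p)
{
	*h = d ? d->drvdata : NULL;
	if (*h == NULL)
		return -ENXIO;	/* driver data already torn down */
	*p = (*h)->priv;
	if (*p == NULL)
		return -ENXIO;	/* private data missing */
	return 0;
}

int main(void)
{
	struct dev d = { 0 };
	struct host *h;
	void *p;

	printf("rc = %d (expected %d)\n",
	       get_host_and_priv(&d, &h, &p), -ENXIO);
	return 0;
}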
+/**
* scsih_remove - detach and remove add host
* @pdev: PCI device struct
*
@@ -9896,8 +11261,8 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
*/
static void scsih_remove(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct _sas_port *mpt3sas_port, *next_port;
struct _raid_device *raid_device, *next;
struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -9905,11 +11270,17 @@ static void scsih_remove(struct pci_dev *pdev)
struct workqueue_struct *wq;
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ struct hba_port *port, *port_next;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
+ _scsih_flush_running_cmds(ioc);
+ }
_scsih_fw_event_cleanup_queue(ioc);
@@ -9928,6 +11299,7 @@ static void scsih_remove(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ mpt3sas_destroy_debugfs(ioc);
sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
@@ -9954,13 +11326,21 @@ static void scsih_remove(struct pci_dev *pdev)
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
mpt3sas_device_remove_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
else if (mpt3sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
mpt3sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE)
mpt3sas_expander_remove(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
+ }
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ list_del(&port->list);
+ kfree(port);
}
/* free phys attached to the sas_host */
@@ -9984,16 +11364,21 @@ static void scsih_remove(struct pci_dev *pdev)
static void
scsih_shutdown(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct workqueue_struct *wq;
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
+
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
+ _scsih_flush_running_cmds(ioc);
+ }
_scsih_fw_event_cleanup_queue(ioc);
@@ -10013,7 +11398,13 @@ scsih_shutdown(struct pci_dev *pdev)
_scsih_ir_shutdown(ioc);
_scsih_nvme_shutdown(ioc);
- mpt3sas_base_detach(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_stop_watchdog(ioc);
+ ioc->shost_recovery = 1;
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
}
@@ -10039,6 +11430,7 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
unsigned long flags;
int rc;
int tid;
+ struct hba_port *port;
/* no Bios, return immediately */
if (!ioc->bios_pg3.BiosVersion)
@@ -10061,13 +11453,27 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
if (channel == RAID_CHANNEL) {
raid_device = device;
+ /*
+ * If this boot vd is already registered with SML then
+ * there is no need to register it again as part of device
+ * scanning after a diag reset during the driver load operation.
+ */
+ if (raid_device->starget)
+ return;
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
} else if (channel == PCIE_CHANNEL) {
- spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device = device;
+ /*
+ * If this boot NVMe device is already registered with SML then
+ * there is no need to register it again as part of device
+ * scanning after a diag reset during the driver load operation.
+ */
+ if (pcie_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
tid = pcie_device->id;
list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
@@ -10075,24 +11481,36 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
if (rc)
_scsih_pcie_device_remove(ioc, pcie_device);
} else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
+ /*
+ * If this boot sas/sata device is already registered with SML
+ * then there is no need to register it again as part of device
+ * scanning after a diag reset during the driver load operation.
+ */
+ if (sas_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
handle = sas_device->handle;
sas_address_parent = sas_device->sas_address_parent;
sas_address = sas_device->sas_address;
+ port = sas_device->port;
list_move_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->hide_drives)
return;
+
+ if (!port)
+ return;
+
if (!mpt3sas_transport_port_add(ioc, handle,
- sas_address_parent)) {
+ sas_address_parent, port)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_address,
- sas_address_parent);
+ sas_address_parent, port);
_scsih_sas_device_remove(ioc, sas_device);
}
}
@@ -10180,7 +11598,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
while ((sas_device = get_next_sas_device(ioc))) {
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
+ sas_device->sas_address_parent, sas_device->port)) {
_scsih_sas_device_remove(ioc, sas_device);
sas_device_put(sas_device);
continue;
@@ -10194,7 +11612,8 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
_scsih_sas_device_remove(ioc, sas_device);
sas_device_put(sas_device);
continue;
@@ -10369,6 +11788,25 @@ scsih_scan_start(struct Scsi_Host *shost)
}
/**
+ * _scsih_complete_devices_scanning - add the devices to sml and
+ * complete ioc initialization.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
+{
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+}
+
+/**
* scsih_scan_finished - scsi lld callback for .scan_finished
* @shost: SCSI host pointer
* @time: elapsed time of the scan in jiffies
@@ -10381,6 +11819,8 @@ static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 ioc_state;
+ int issue_hard_reset = 0;
if (disable_discovery > 0) {
ioc->is_driver_loading = 0;
@@ -10395,9 +11835,30 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 1;
}
- if (ioc->start_scan)
+ if (ioc->start_scan) {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ issue_hard_reset = 1;
+ goto out;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_base_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ issue_hard_reset = 1;
+ goto out;
+ }
return 0;
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
+ ioc_info(ioc,
+ "port enable: aborted due to diag reset\n");
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
if (ioc->start_scan_failed) {
ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
ioc->start_scan_failed);
@@ -10409,16 +11870,61 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
ioc_info(ioc, "port enable: SUCCESS\n");
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ _scsih_complete_devices_scanning(ioc);
- if (ioc->wait_for_discovery_to_complete) {
- ioc->wait_for_discovery_to_complete = 0;
- _scsih_probe_devices(ioc);
+out:
+ if (issue_hard_reset) {
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
+ ioc->is_driver_loading = 0;
}
- mpt3sas_base_start_watchdog(ioc);
- ioc->is_driver_loading = 0;
return 1;
}
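scsih_scan_finished() now decodes the doorbell before trusting the scan result. A sketch of that mask-and-branch decode; the mask and state values below are illustrative stand-ins for the MPI2_* constants, not the spec values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MPI2_* doorbell constants. */
#define STATE_MASK	0xF0000000u
#define STATE_FAULT	0x40000000u
#define STATE_COREDUMP	0x60000000u
#define DATA_MASK	0x0000FFFFu

int main(void)
{
	uint32_t doorbell = STATE_FAULT | 0x2622;	/* fault code in data */

	switch (doorbell & STATE_MASK) {
	case STATE_FAULT:
		printf("fault 0x%04x -> issue hard reset\n",
		       (unsigned)(doorbell & DATA_MASK));
		break;
	case STATE_COREDUMP:
		printf("coredump 0x%04x -> wait for completion, then reset\n",
		       (unsigned)(doorbell & DATA_MASK));
		break;
	default:
		printf("scan completed normally\n");
	}
	return 0;
}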
+/**
+ * scsih_map_queues - map reply queues with request queues
+ * @shost: SCSI host pointer
+ */
+static void scsih_map_queues(struct Scsi_Host *shost)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ (struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct blk_mq_queue_map *map;
+ int i, qoff, offset;
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+ int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
+
+ if (shost->nr_hw_queues == 1)
+ return;
+
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ map = &shost->tag_set.map[i];
+ map->nr_queues = 0;
+ offset = 0;
+ if (i == HCTX_TYPE_DEFAULT) {
+ map->nr_queues =
+ nr_msix_vectors - ioc->high_iops_queues;
+ offset = ioc->high_iops_queues;
+ } else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = iopoll_q_count;
+
+ if (!map->nr_queues)
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ }
+}
+
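The offsets scsih_map_queues() computes follow directly from three counters: total reply queues, high-iops queues, and the index of the first poll queue. A small model of that arithmetic with illustrative queue counts (not driver defaults):

#include <stdio.h>

int main(void)
{
	/* Illustrative counts, not driver defaults. */
	int reply_queue_count = 16;
	int high_iops_queues = 8;
	int iopoll_q_start_index = 12;

	int nr_msix_vectors = iopoll_q_start_index;
	int iopoll_q_count = reply_queue_count - nr_msix_vectors;
	int qoff = 0;

	int default_queues = nr_msix_vectors - high_iops_queues;
	printf("HCTX_TYPE_DEFAULT: nr_queues=%d queue_offset=%d\n",
	       default_queues, qoff);
	qoff += default_queues;

	printf("HCTX_TYPE_POLL:    nr_queues=%d queue_offset=%d\n",
	       iopoll_q_count, qoff);
	return 0;
}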
/* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
.module = THIS_MODULE,
@@ -10443,8 +11949,8 @@ static struct scsi_host_template mpt2sas_driver_template = {
.sg_tablesize = MPT2SAS_SG_DEPTH,
.max_sectors = 32767,
.cmd_per_lun = 7,
- .shost_attrs = mpt3sas_host_attrs,
- .sdev_attrs = mpt3sas_dev_attrs,
+ .shost_groups = mpt3sas_host_groups,
+ .sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
};
@@ -10481,11 +11987,13 @@ static struct scsi_host_template mpt3sas_driver_template = {
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
.max_segment_size = 0xffffffff,
- .cmd_per_lun = 7,
- .shost_attrs = mpt3sas_host_attrs,
- .sdev_attrs = mpt3sas_dev_attrs,
+ .cmd_per_lun = 128,
+ .shost_groups = mpt3sas_host_groups,
+ .sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
+ .map_queues = scsih_map_queues,
+ .mq_poll = mpt3sas_blk_mq_poll,
};
/* raid transport support for SAS 3.0 HBA devices */
@@ -10559,6 +12067,10 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
return MPI26_VERSION;
}
return 0;
@@ -10578,6 +12090,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost = NULL;
int rv;
u16 hba_mpi_version;
+ int iopoll_q_count = 0;
/* Determine in which MPI version class this pci device belongs */
hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
@@ -10624,6 +12137,12 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
break;
}
+
+ if (multipath_on_hba == -1 || multipath_on_hba == 0)
+ ioc->multipath_on_hba = 0;
+ else
+ ioc->multipath_on_hba = 1;
+
break;
case MPI25_VERSION:
case MPI26_VERSION:
@@ -10648,11 +12167,25 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_ATLAS_PCIe_SWITCH_DEVID:
ioc->is_gen35_ioc = 1;
break;
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
dev_info(&pdev->dev,
"HBA is in Configurable Secure mode\n");
- /* fall through */
+ fallthrough;
case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
@@ -10671,6 +12204,24 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->combined_reply_index_count =
MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
}
+
+ switch (ioc->is_gen35_ioc) {
+ case 0:
+ if (multipath_on_hba == -1 || multipath_on_hba == 0)
+ ioc->multipath_on_hba = 0;
+ else
+ ioc->multipath_on_hba = 1;
+ break;
+ case 1:
+ if (multipath_on_hba == -1 || multipath_on_hba > 0)
+ ioc->multipath_on_hba = 1;
+ else
+ ioc->multipath_on_hba = 0;
+ break;
+ default:
+ break;
+ }
+
break;
default:
return -ENODEV;
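The switch above resolves the tri-state multipath_on_hba module parameter against the HBA generation. A compact model of that resolution, with the behavior inferred from the two branches shown:

#include <stdbool.h>
#include <stdio.h>

/* Tri-state parameter: -1 = per-generation default, 0 = off, >0 = on. */
static bool resolve_multipath(int multipath_on_hba, bool is_gen35_ioc)
{
	if (multipath_on_hba == -1)
		return is_gen35_ioc;	/* default follows HBA generation */
	return multipath_on_hba > 0;
}

int main(void)
{
	printf("gen3.5, default : %d\n", resolve_multipath(-1, true));
	printf("gen3,   default : %d\n", resolve_multipath(-1, false));
	printf("gen3,   forced 1: %d\n", resolve_multipath(1, false));
	return 0;
}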
@@ -10701,6 +12252,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* Enable MEMORY MOVE support flag.
*/
ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
+ /* Enable ADDITIONAL QUERY support flag. */
+ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
ioc->enable_sdev_max_qd = enable_sdev_max_qd;
@@ -10731,6 +12284,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
INIT_LIST_HEAD(&ioc->reply_queue_list);
+ INIT_LIST_HEAD(&ioc->port_table_list);
sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
@@ -10763,8 +12317,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
/* register EEDP capabilities with SCSI layer */
- if (prot_mask > 0)
- scsi_host_set_prot(shost, prot_mask);
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, (prot_mask & 0x07));
else
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -10784,6 +12338,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_thread_fail;
}
+ shost->host_tagset = 0;
+
+ if (ioc->is_gen35_ioc && host_tagset_enable)
+ shost->host_tagset = 1;
+
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -10806,6 +12365,22 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} else
ioc->hide_drives = 0;
+ shost->nr_hw_queues = 1;
+
+ if (shost->host_tagset) {
+ shost->nr_hw_queues =
+ ioc->reply_queue_count - ioc->high_iops_queues;
+
+ iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+
+ shost->nr_maps = iopoll_q_count ? 3 : 1;
+
+ dev_info(&ioc->pdev->dev,
+ "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
+ shost->can_queue, shost->nr_hw_queues);
+ }
+
rv = scsi_add_host(shost, &pdev->dev);
if (rv) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -10814,6 +12389,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
scsi_scan_host(shost);
+ mpt3sas_setup_debugfs(ioc);
return 0;
out_add_shost_fail:
mpt3sas_base_detach(ioc);
@@ -10827,55 +12403,56 @@ out_add_shost_fail:
return rv;
}
-#ifdef CONFIG_PM
/**
* scsih_suspend - power management suspend main entry point
- * @pdev: PCI device struct
- * @state: PM state change to (usually PCI_D3)
+ * @dev: Device struct
*
* Return: 0 success, anything else error.
*/
-static int
-scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+scsih_suspend(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pci_power_t device_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ int rc;
+
+ rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (rc)
+ return rc;
mpt3sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
_scsih_nvme_shutdown(ioc);
- device_state = pci_choose_state(pdev, state);
- ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
- pci_save_state(pdev);
mpt3sas_base_free_resources(ioc);
- pci_set_power_state(pdev, device_state);
return 0;
}
/**
* scsih_resume - power management resume main entry point
- * @pdev: PCI device struct
+ * @dev: Device struct
*
* Return: 0 success, anything else error.
*/
-static int
-scsih_resume(struct pci_dev *pdev)
+static int __maybe_unused
+scsih_resume(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state = pdev->current_state;
int r;
+ r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (r)
+ return r;
+
ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
ioc->pdev = pdev;
r = mpt3sas_base_map_resources(ioc);
if (r)
@@ -10886,7 +12463,6 @@ scsih_resume(struct pci_dev *pdev)
mpt3sas_base_start_watchdog(ioc);
return 0;
}
-#endif /* CONFIG_PM */
/**
* scsih_pci_error_detected - Called when a PCI error is detected.
@@ -10900,8 +12476,11 @@ scsih_resume(struct pci_dev *pdev)
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
@@ -10919,6 +12498,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* Permanent error, prepare for device removal */
ioc->pci_error_recovery = 1;
mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -10936,10 +12516,13 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
int rc;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
+
ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
@@ -10972,8 +12555,11 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
static void
scsih_pci_resume(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
ioc_info(ioc, "PCI error: resume callback!!\n");
@@ -10988,8 +12574,11 @@ scsih_pci_resume(struct pci_dev *pdev)
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
@@ -11003,7 +12592,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
}
/**
- * scsih__ncq_prio_supp - Check for NCQ command priority support
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
* @sdev: scsi device struct
*
* This is called when a user indicates they would like to enable
@@ -11011,20 +12600,18 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
*/
bool scsih_ncq_prio_supp(struct scsi_device *sdev)
{
- unsigned char *buf;
+ struct scsi_vpd *vpd;
bool ncq_prio_supp = false;
- if (!scsi_device_supports_vpd(sdev))
- return ncq_prio_supp;
-
- buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
- if (!buf)
- return ncq_prio_supp;
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (!vpd || vpd->len < 214)
+ goto out;
- if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
- ncq_prio_supp = (buf[213] >> 4) & 1;
+ ncq_prio_supp = (vpd->data[213] >> 4) & 1;
+out:
+ rcu_read_unlock();
- kfree(buf);
return ncq_prio_supp;
}
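The rewritten probe above pulls the cached VPD page 89h under RCU and tests bit 4 of byte 213. A standalone sketch of that extraction over a plain buffer; the 214-byte minimum-length check mirrors the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Extract the NCQ-priority bit the way the driver does: byte 213,
 * bit 4, guarded by the 214-byte minimum length check. */
static bool ncq_prio_from_vpd(const unsigned char *vpd, size_t len)
{
	if (vpd == NULL || len < 214)
		return false;
	return (vpd[213] >> 4) & 1;
}

int main(void)
{
	unsigned char page89[256];

	memset(page89, 0, sizeof(page89));
	page89[213] = 0x10;	/* bit 4 set: priority supported */
	printf("ncq prio supported: %d\n",
	       ncq_prio_from_vpd(page89, sizeof(page89)));
	return 0;
}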
/*
@@ -11137,6 +12724,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
PCI_ANY_ID, PCI_ANY_ID },
+ /*
+ * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+
/* Atlas PCIe Switch Management Port */
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
PCI_ANY_ID, PCI_ANY_ID },
@@ -11149,6 +12744,20 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
PCI_ANY_ID, PCI_ANY_ID },
+ /*
+ * ATTO Branded ExpressSAS H12xx GT
+ */
+ { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
+ * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@ -11160,6 +12769,8 @@ static struct pci_error_handlers _mpt3sas_err_handler = {
.resume = scsih_pci_resume,
};
+static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
+
static struct pci_driver mpt3sas_driver = {
.name = MPT3SAS_DRIVER_NAME,
.id_table = mpt3sas_pci_table,
@@ -11167,10 +12778,7 @@ static struct pci_driver mpt3sas_driver = {
.remove = scsih_remove,
.shutdown = scsih_shutdown,
.err_handler = &_mpt3sas_err_handler,
-#ifdef CONFIG_PM
- .suspend = scsih_suspend,
- .resume = scsih_resume,
-#endif
+ .driver.pm = &scsih_pm_ops,
};
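For reference, the generic shape of this legacy-PM-to-dev_pm_ops conversion (all names hypothetical). The PCI core now saves/restores config space and changes power state around the callbacks, so the driver only quiesces and restarts its own hardware:

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            struct foo_adapter *adap = dev_get_drvdata(dev);

            /* No pci_save_state()/pci_set_power_state() here: the
             * PCI core handles that for dev_pm_ops drivers. */
            foo_quiesce(adap);
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            struct foo_adapter *adap = dev_get_drvdata(dev);

            foo_restart(adap);
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

Because the ops are referenced unconditionally via .driver.pm, the #ifdef CONFIG_PM block becomes unnecessary; __maybe_unused keeps !CONFIG_PM builds warning-free.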
/**
@@ -11220,6 +12828,7 @@ scsih_init(void)
tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
_scsih_sas_control_complete);
+ mpt3sas_init_debugfs();
return 0;
}
@@ -11251,6 +12860,7 @@ scsih_exit(void)
if (hbas_to_enumerate != 2)
raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt3sas_transport_template);
+ mpt3sas_exit_debugfs();
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 6ec5b7f33dfd..0681daee6c14 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -61,9 +61,28 @@
#include "mpt3sas_base.h"
/**
+ * _transport_get_port_id_by_sas_phy - get the zone's port id that the phy belongs to
+ * @phy: sas_phy object
+ *
+ * Returns Port number.
+ */
+static inline u8
+_transport_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct hba_port *port = phy->hostdata;
+
+ if (port)
+ port_id = port->port_id;
+
+ return port_id;
+}
+
+/**
* _transport_sas_node_find_by_sas_address - sas node search
* @ioc: per adapter object
* @sas_address: sas address of expander or sas host
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_node_lock.
*
* Search for either hba phys or expander device based on handle, then returns
@@ -71,13 +90,56 @@
*/
static struct _sas_node *
_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
if (ioc->sas_hba.sas_address == sas_address)
return &ioc->sas_hba;
else
return mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, port);
+}
+
+/**
+ * _transport_get_port_id_by_rphy - Get Port number from rphy object
+ * @ioc: per adapter object
+ * @rphy: sas_rphy object
+ *
+ * Returns Port number.
+ */
+static u8
+_transport_get_port_id_by_rphy(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_rphy *rphy)
+{
+ struct _sas_node *sas_expander;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander,
+ &ioc->sas_expander_list, list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
+ if (sas_device) {
+ port_id = sas_device->port->port_id;
+ sas_device_put(sas_device);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ return port_id;
}
/**
@@ -277,10 +339,11 @@ struct rep_manu_reply {
};
/**
- * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
+ * @port_id: Port ID number
*
* Fills in the sas_expander_device object when SMP port is created.
*
@@ -288,7 +351,7 @@ struct rep_manu_reply {
*/
static int
_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address, struct sas_expander_device *edev)
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
{
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
@@ -355,7 +418,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = port_id;
mpi_request->SASAddress = cpu_to_le64(sas_address);
mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
psge = &mpi_request->SGL;
@@ -439,6 +502,7 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
struct _sas_port *mpt3sas_port)
{
u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ struct hba_port *port = mpt3sas_port->hba_port;
enum sas_device_type device_type =
mpt3sas_port->remote_identify.device_type;
@@ -448,10 +512,11 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
ioc->logging_level |= MPT_DEBUG_TRANSPORT;
if (device_type == SAS_END_DEVICE)
- mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ mpt3sas_device_remove_by_sas_address(ioc,
+ sas_address, port);
else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
device_type == SAS_FANOUT_EXPANDER_DEVICE)
- mpt3sas_expander_remove(ioc, sas_address);
+ mpt3sas_expander_remove(ioc, sas_address, port);
ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
}
@@ -500,16 +565,17 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
}
/**
- * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * mpt3sas_transport_add_phy_to_an_existing_port - adding new phy to existing port
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
* @sas_address: sas address of the device/expander where the phy needs to be added
+ * @port: hba port entry
*/
-static void
-_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+void
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_port *mpt3sas_port;
struct _sas_phy *phy_srch;
@@ -517,11 +583,16 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_phy->phy_belongs_to_port == 1)
return;
+ if (!port)
+ return;
+
list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
port_list) {
if (mpt3sas_port->remote_identify.sas_address !=
sas_address)
continue;
+ if (mpt3sas_port->hba_port != port)
+ continue;
list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
port_siblings) {
if (phy_srch == mpt3sas_phy)
@@ -534,13 +605,13 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * mpt3sas_transport_del_phy_from_an_existing_port - delete phy from existing port
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
*/
-static void
-_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+void
+mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy)
{
struct _sas_port *mpt3sas_port, *next;
@@ -556,7 +627,11 @@ _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
if (phy_srch != mpt3sas_phy)
continue;
- if (mpt3sas_port->num_phys == 1)
+ /*
+ * Don't delete port during host reset,
+ * just delete phy.
+ */
+ if (mpt3sas_port->num_phys == 1 && !ioc->shost_recovery)
_transport_delete_port(ioc, mpt3sas_port);
else
_transport_delete_phy(ioc, mpt3sas_port,
@@ -571,21 +646,24 @@ _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @sas_address: sas address of device being added
+ * @port: hba port entry
*
* See the explanation above from _transport_delete_duplicate_port
*/
static void
_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
int i;
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address != sas_address)
continue;
+ if (sas_node->phy[i].port != port)
+ continue;
if (sas_node->phy[i].phy_belongs_to_port == 1)
- _transport_del_phy_from_an_existing_port(ioc, sas_node,
- &sas_node->phy[i]);
+ mpt3sas_transport_del_phy_from_an_existing_port(ioc,
+ sas_node, &sas_node->phy[i]);
}
}
@@ -594,6 +672,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
* @ioc: per adapter object
* @handle: handle of attached device
* @sas_address: sas address of parent expander or sas host
+ * @hba_port: hba port entry
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new port object to the sas_node->sas_port_list.
@@ -602,7 +681,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
*/
struct _sas_port *
mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 sas_address)
+ u64 sas_address, struct hba_port *hba_port)
{
struct _sas_phy *mpt3sas_phy, *next;
struct _sas_port *mpt3sas_port;
@@ -612,6 +691,13 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
struct _sas_device *sas_device = NULL;
int i;
struct sas_port *port;
+ struct virtual_phy *vphy = NULL;
+
+ if (!hba_port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
mpt3sas_port = kzalloc(sizeof(struct _sas_port),
GFP_KERNEL);
@@ -624,7 +710,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
INIT_LIST_HEAD(&mpt3sas_port->port_list);
INIT_LIST_HEAD(&mpt3sas_port->phy_list);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address, hba_port);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_node) {
@@ -646,16 +733,32 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ mpt3sas_port->hba_port = hba_port;
_transport_sanity_check(ioc, sas_node,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address, hba_port);
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address !=
mpt3sas_port->remote_identify.sas_address)
continue;
+ if (sas_node->phy[i].port != hba_port)
+ continue;
list_add_tail(&sas_node->phy[i].port_siblings,
&mpt3sas_port->phy_list);
mpt3sas_port->num_phys++;
+ if (sas_node->handle <= ioc->sas_hba.num_phys) {
+ if (!sas_node->phy[i].hba_vphy) {
+ hba_port->phy_mask |= (1 << i);
+ continue;
+ }
+
+ vphy = mpt3sas_get_vphy_by_phy(ioc, hba_port, i);
+ if (!vphy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ }
}
if (!mpt3sas_port->num_phys) {
@@ -664,6 +767,18 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ sas_device = mpt3sas_get_sdev_by_addr(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
+ if (!sas_device) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ sas_device->pend_sas_rphy_add = 1;
+ }
+
if (!sas_node->parent_dev) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -686,29 +801,31 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_phy->phy_id);
sas_port_add_phy(port, mpt3sas_phy->phy);
mpt3sas_phy->phy_belongs_to_port = 1;
+ mpt3sas_phy->port = hba_port;
}
mpt3sas_port->port = port;
- if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
rphy = sas_end_device_alloc(port);
- else
+ sas_device->rphy = rphy;
+ if (sas_node->handle <= ioc->sas_hba.num_phys) {
+ if (!vphy)
+ hba_port->sas_address =
+ sas_device->sas_address;
+ else
+ vphy->sas_address =
+ sas_device->sas_address;
+ }
+ } else {
rphy = sas_expander_alloc(port,
mpt3sas_port->remote_identify.device_type);
+ if (sas_node->handle <= ioc->sas_hba.num_phys)
+ hba_port->sas_address =
+ mpt3sas_port->remote_identify.sas_address;
+ }
rphy->identify = mpt3sas_port->remote_identify;
- if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
- sas_device = mpt3sas_get_sdev_by_addr(ioc,
- mpt3sas_port->remote_identify.sas_address);
- if (!sas_device) {
- dfailprintk(ioc,
- ioc_info(ioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__));
- goto out_fail;
- }
- sas_device->pend_sas_rphy_add = 1;
- }
-
if ((sas_rphy_add(rphy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -735,7 +852,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
_transport_expander_report_manufacture(ioc,
mpt3sas_port->remote_identify.sas_address,
- rphy_to_expander_device(rphy));
+ rphy_to_expander_device(rphy), hba_port->port_id);
return mpt3sas_port;
out_fail:
@@ -751,6 +868,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* @ioc: per adapter object
* @sas_address: sas address of attached device
* @sas_address_parent: sas address of parent expander or sas host
+ * @port: hba port entry
* Context: This function will acquire ioc->sas_node_lock.
*
* Removing object and freeing associated memory from the
@@ -758,7 +876,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*/
void
mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u64 sas_address_parent)
+ u64 sas_address_parent, struct hba_port *port)
{
int i;
unsigned long flags;
@@ -766,10 +884,15 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
struct _sas_node *sas_node;
u8 found = 0;
struct _sas_phy *mpt3sas_phy, *next_phy;
+ struct hba_port *hba_port_next, *hba_port = NULL;
+ struct virtual_phy *vphy, *vphy_next = NULL;
+
+ if (!port)
+ return;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_node = _transport_sas_node_find_by_sas_address(ioc,
- sas_address_parent);
+ sas_address_parent, port);
if (!sas_node) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
@@ -778,6 +901,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
port_list) {
if (mpt3sas_port->remote_identify.sas_address != sas_address)
continue;
+ if (mpt3sas_port->hba_port != port)
+ continue;
found = 1;
list_del(&mpt3sas_port->port_list);
goto out;
@@ -788,6 +913,61 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
return;
}
+ if (sas_node->handle <= ioc->sas_hba.num_phys &&
+ (ioc->multipath_on_hba)) {
+ if (port->vphys_mask) {
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ if (vphy->sas_address != sas_address)
+ continue;
+ ioc_info(ioc,
+ "remove vphy entry: %p of port:%p,from %d port's vphys list\n",
+ vphy, port, port->port_id);
+ port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ }
+
+ list_for_each_entry_safe(hba_port, hba_port_next,
+ &ioc->port_table_list, list) {
+ if (hba_port != port)
+ continue;
+ /*
+ * Delete the hba_port object if
+ * - its sas address matches the removed device's sas
+ *  address and no vphys are associated with it, or
+ * - the removed device is a vSES device and none of the
+ *  other direct-attached devices has this vSES device's
+ *  port number (hence the hba_port object's sas_address
+ *  field will be zero).
+ */
+ if ((hba_port->sas_address == sas_address ||
+ !hba_port->sas_address) && !hba_port->vphys_mask) {
+ ioc_info(ioc,
+ "remove hba_port entry: %p port: %d from hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ } else if (hba_port->sas_address == sas_address &&
+ hba_port->vphys_mask) {
+ /*
+ * The removed device is a non-vSES device and a
+ * vSES device has the same port number. Hence
+ * only clear the sas_address field; don't
+ * delete the hba_port object.
+ */
+ ioc_info(ioc,
+ "clearing sas_address from hba_port entry: %p port: %d from hba_port list\n",
+ hba_port, hba_port->port_id);
+ port->sas_address = 0;
+ }
+ break;
+ }
+ }
+
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address == sas_address)
memset(&sas_node->phy[i].remote_identify, 0 ,
@@ -864,6 +1044,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
phy->maximum_linkrate = _transport_convert_phy_link_rate(
phy_pg0.ProgrammedLinkRate >> 4);
+ phy->hostdata = mpt3sas_phy->port;
if ((sas_phy_add(phy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -934,6 +1115,7 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
phy->maximum_linkrate = _transport_convert_phy_link_rate(
expander_pg1.ProgrammedLinkRate >> 4);
+ phy->hostdata = mpt3sas_phy->port;
if ((sas_phy_add(phy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -961,20 +1143,26 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @handle: attached device handle
* @phy_number: phy number
* @link_rate: new link rate
+ * @port: hba port entry
+ *
+ * Return nothing.
*/
void
mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate,
+ struct hba_port *port)
{
unsigned long flags;
struct _sas_node *sas_node;
struct _sas_phy *mpt3sas_phy;
+ struct hba_port *hba_port = NULL;
if (ioc->shost_recovery || ioc->pci_error_recovery)
return;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address, port);
if (!sas_node) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
@@ -986,8 +1174,19 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
_transport_set_identify(ioc, handle,
&mpt3sas_phy->remote_identify);
- _transport_add_phy_to_an_existing_port(ioc, sas_node,
- mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+ if ((sas_node->handle <= ioc->sas_hba.num_phys) &&
+ (ioc->multipath_on_hba)) {
+ list_for_each_entry(hba_port,
+ &ioc->port_table_list, list) {
+ if (hba_port->sas_address == sas_address &&
+ hba_port == port)
+ hba_port->phy_mask |=
+ (1 << mpt3sas_phy->phy_id);
+ }
+ }
+ mpt3sas_transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address,
+ port);
} else
memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
@@ -1122,7 +1321,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy);
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
@@ -1212,10 +1411,13 @@ _transport_get_linkerrors(struct sas_phy *phy)
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasPhyPage1_t phy_pg1;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1265,8 +1467,7 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
*identifier = sas_device->enclosure_logical_id;
rc = 0;
@@ -1295,8 +1496,7 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
rc = sas_device->slot;
sas_device_put(sas_device);
@@ -1417,7 +1617,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy);
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
@@ -1499,11 +1699,14 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
unsigned long flags;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1556,10 +1759,13 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
int rc = 0;
unsigned long flags;
int i, discovery_active;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1693,10 +1899,13 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
int i;
int rc = 0;
unsigned long flags;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1898,7 +2107,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_rphy(ioc, rphy);
mpi_request->SASAddress = (rphy) ?
cpu_to_le64(rphy->identify.sas_address) :
cpu_to_le64(ioc->sas_hba.sas_address);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index 8ec9bab20ec4..d9b7d0ee25b0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -132,6 +132,35 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
&issue_reset);
}
+ ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_TRIGGER;
+ if (event_data) {
+ ioc->htb_rel.trigger_type = event_data->trigger_type;
+ switch (event_data->trigger_type) {
+ case MPT3SAS_TRIGGER_SCSI:
+ memcpy(&ioc->htb_rel.trigger_info_dwords,
+ &event_data->u.scsi,
+ sizeof(struct SL_WH_SCSI_TRIGGER_T));
+ break;
+ case MPT3SAS_TRIGGER_MPI:
+ memcpy(&ioc->htb_rel.trigger_info_dwords,
+ &event_data->u.mpi,
+ sizeof(struct SL_WH_MPI_TRIGGER_T));
+ break;
+ case MPT3SAS_TRIGGER_MASTER:
+ ioc->htb_rel.trigger_info_dwords[0] =
+ event_data->u.master.MasterData;
+ break;
+ case MPT3SAS_TRIGGER_EVENT:
+ memcpy(&ioc->htb_rel.trigger_info_dwords,
+ &event_data->u.event,
+ sizeof(struct SL_WH_EVENT_TRIGGER_T));
+ break;
+ default:
+ ioc_err(ioc, "%d - is not a valid trigger type\n",
+ event_data->trigger_type);
+ break;
+ }
+ }
_mpt3sas_raise_sigio(ioc, event_data);
dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
@@ -201,9 +230,14 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
event_data.u.master.MasterData = trigger_bitmask;
if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
- trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) {
+ ioc->htb_rel.trigger_type = MPT3SAS_TRIGGER_MASTER;
+ ioc->htb_rel.trigger_info_dwords[0] = trigger_bitmask;
+ if (ioc->reset_from_user)
+ ioc->htb_rel.trigger_info_dwords[1] =
+ MPT_DIAG_RESET_ISSUED_BY_USER;
_mpt3sas_raise_sigio(ioc, &event_data);
- else
+ } else
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
index 6586a463bea9..405eada2669d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -69,7 +69,7 @@
#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004)
#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008)
-/* fake firmware event for tigger */
+/* fake firmware event for trigger */
#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E)
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
new file mode 100644
index 000000000000..5f3328f011a2
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to store diag trigger values into the persistent driver trigger pages
+ * for MPT (Message Passing Technology) based controllers.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ *
+ * Authors: Broadcom Inc.
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ */
+
+#include "mpi/mpi2_cnfg.h"
+
+#ifndef MPI2_TRIGGER_PAGES_H
+#define MPI2_TRIGGER_PAGES_H
+
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER (0xE0)
+#define MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION (0x01)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 TriggerFlags; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ U32 Reserved0xC[61]; /* 0x0C */
+} _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, Mpi26DriverTriggerPage0_t;
+
+/* Trigger Flags */
+#define MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID (0x0001)
+#define MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID (0x0002)
+#define MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID (0x0004)
+#define MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID (0x0008)
+
+#define MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_MASTER_TIGGER_ENTRY {
+ U32 MasterTriggerFlags;
+} MPI26_DRIVER_MASTER_TIGGER_ENTRY;
+
+#define MPI26_MAX_MASTER_TRIGGERS (1)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumMasterTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_MASTER_TIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, Mpi26DriverTriggerPage1_t;
+
+#define MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY {
+ U16 MPIEventCode; /* 0x00 */
+ U16 MPIEventCodeSpecific; /* 0x02 */
+} MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY;
+
+#define MPI26_MAX_MPI_EVENT_TRIGGERS (20)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumMPIEventTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, Mpi26DriverTriggerPage2_t;
+
+#define MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY {
+ U8 ASCQ; /* 0x00 */
+ U8 ASC; /* 0x01 */
+ U8 SenseKey; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY;
+
+#define MPI26_MAX_SCSI_SENSE_TRIGGERS (20)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumSCSISenseTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, Mpi26DriverTriggerPage3_t;
+
+#define MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY {
+ U16 IOCStatus; /* 0x00 */
+ U16 Reserved; /* 0x02 */
+ U32 LogInfo; /* 0x04 */
+} MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY;
+
+#define MPI26_MAX_LOGINFO_TRIGGERS (20)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumIOCStatusLogInfoTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, Mpi26DriverTriggerPage4_t;
+
+#endif
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index ca96d6d9c350..472fa043094f 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -8,12 +8,15 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mvme147hw.h>
#include <asm/irq.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "mvme147.h"
@@ -30,10 +33,11 @@ static irqreturn_t mvme147_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct Scsi_Host *instance = cmd->device->host;
struct WD33C93_hostdata *hdata = shost_priv(instance);
unsigned char flags = 0x01;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ unsigned long addr = virt_to_bus(scsi_pointer->ptr);
/* setup dma direction */
if (!dir_in)
@@ -44,14 +48,14 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
if (dir_in) {
/* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
+ cache_clear(addr, scsi_pointer->this_residual);
} else {
/* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
+ cache_push(addr, scsi_pointer->this_residual);
}
/* start DMA */
- m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_bcr = scsi_pointer->this_residual | (1 << 24);
m147_pcc->dma_dadr = addr;
m147_pcc->dma_cntrl = flags;
@@ -78,6 +82,7 @@ static struct scsi_host_template mvme147_host_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
+ .cmd_size = sizeof(struct scsi_pointer),
};
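WD33C93_scsi_pointer() above presumably resolves to the per-command private area that the new .cmd_size member reserves; a sketch of that accessor:

    static inline struct scsi_pointer *WD33C93_scsi_pointer(struct scsi_cmnd *cmd)
    {
            /* scsi_cmd_priv() returns the cmd_size bytes the midlayer
             * allocates immediately behind every scsi_cmnd. */
            return scsi_cmd_priv(cmd);
    }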
static struct Scsi_Host *mvme147_shost;
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 199ab49aa047..7123a2efbf58 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -486,9 +486,4 @@ enum datapres_field {
SENSE_DATA = 2,
};
-/* define task management IU */
-struct mvs_tmf_task{
- u8 tmf;
- u16 tag_of_task_to_be_managed;
-};
#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7af9173c4925..2fde496fff5f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -25,7 +25,7 @@ static const struct mvs_chip_info mvs_chips[] = {
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
-struct device_attribute *mvst_host_attrs[];
+static const struct attribute_group *mvst_host_groups[];
#define SOC_SAS_NUM 2
@@ -33,6 +33,7 @@ static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = mvs_scan_finished,
@@ -45,12 +46,13 @@ static struct scsi_host_template mvs_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = mvst_host_attrs,
+ .shost_groups = mvst_host_groups,
.track_queue_depth = 1,
};
@@ -61,9 +63,8 @@ static struct sas_domain_function_template mvs_transport_ops = {
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
- .lldd_abort_task_set = mvs_abort_task_set,
- .lldd_clear_aca = mvs_clear_aca,
- .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_abort_task_set = sas_abort_task_set,
+ .lldd_clear_task_set = sas_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
@@ -176,15 +177,16 @@ out:
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
- u32 core_nr;
u32 stat;
struct mvs_info *mvi;
struct sas_ha_struct *sha = opaque;
#ifndef CONFIG_SCSI_MVSAS_TASKLET
u32 i;
-#endif
+ u32 core_nr;
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+#endif
+
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
if (unlikely(!mvi))
@@ -244,19 +246,16 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
&mvi->tx_dma, GFP_KERNEL);
if (!mvi->tx)
goto err_out;
- memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
&mvi->rx_fis_dma, GFP_KERNEL);
if (!mvi->rx_fis)
goto err_out;
- memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
mvi->rx = dma_alloc_coherent(mvi->dev,
sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
&mvi->rx_dma, GFP_KERNEL);
if (!mvi->rx)
goto err_out;
- memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
mvi->rx[0] = cpu_to_le32(0xfff);
mvi->rx_cons = 0xfff;
@@ -265,7 +264,6 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
&mvi->slot_dma, GFP_KERNEL);
if (!mvi->slot)
goto err_out;
- memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
TRASH_BUCKET_SIZE,
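The memset() calls deleted above are redundant because dma_alloc_coherent() has returned zero-filled memory since v5.0, leaving the bare allocation (sketch, names hypothetical):

    static __le32 *alloc_ring(struct device *dev, size_t sz, dma_addr_t *dma)
    {
            /* Buffer arrives already zeroed; no memset() needed. */
            return dma_alloc_coherent(dev, sz, dma, GFP_KERNEL);
    }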
@@ -298,7 +296,7 @@ err_out:
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
- unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
+ unsigned long res_start, res_len, res_flag_ex = 0;
struct pci_dev *pdev = mvi->pdev;
if (bar_ex != -1) {
/*
@@ -326,7 +324,6 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
goto err_out;
}
- res_flag = pci_resource_flags(pdev, bar);
mvi->regs = ioremap(res_start, res_len);
if (!mvi->regs) {
@@ -496,7 +493,6 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
- struct mvs_prv_info *mpi;
irq_handler_t irq_handler = mvs_interrupt;
struct Scsi_Host *shost = NULL;
const struct mvs_chip_info *chip;
@@ -561,10 +557,13 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
}
nhost++;
} while (nhost < chip->n_host);
- mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ {
+ struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
(unsigned long)SHOST_TO_SAS_HA(shost));
+ }
#endif
mvs_post_sas_ha_init(shost, chip);
@@ -648,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
@@ -695,22 +695,17 @@ static struct pci_driver mvs_pci_driver = {
.remove = mvs_pci_remove,
};
-static ssize_t
-mvs_show_driver_version(struct device *cdev,
- struct device_attribute *attr, char *buffer)
+static ssize_t driver_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+ return sysfs_emit(buffer, "%s\n", DRV_VERSION);
}
-static DEVICE_ATTR(driver_version,
- S_IRUGO,
- mvs_show_driver_version,
- NULL);
+static DEVICE_ATTR_RO(driver_version);
-static ssize_t
-mvs_store_interrupt_coalescing(struct device *cdev,
- struct device_attribute *attr,
- const char *buffer, size_t size)
+static ssize_t interrupt_coalescing_store(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buffer, size_t size)
{
unsigned int val = 0;
struct mvs_info *mvi = NULL;
@@ -748,19 +743,14 @@ mvs_store_interrupt_coalescing(struct device *cdev,
return strlen(buffer);
}
-static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
- struct device_attribute *attr, char *buffer)
+static ssize_t interrupt_coalescing_show(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+ return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
}
-static DEVICE_ATTR(interrupt_coalescing,
- S_IRUGO|S_IWUSR,
- mvs_show_interrupt_coalescing,
- mvs_store_interrupt_coalescing);
+static DEVICE_ATTR_RW(interrupt_coalescing);
-/* task handler */
-struct task_struct *mvs_th;
static int __init mvs_init(void)
{
int rc;
@@ -785,12 +775,14 @@ static void __exit mvs_exit(void)
sas_release_transport(mvs_stt);
}
-struct device_attribute *mvst_host_attrs[] = {
- &dev_attr_driver_version,
- &dev_attr_interrupt_coalescing,
+static struct attribute *mvst_host_attrs[] = {
+ &dev_attr_driver_version.attr,
+ &dev_attr_interrupt_coalescing.attr,
NULL,
};
+ATTRIBUTE_GROUPS(mvst_host);
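ATTRIBUTE_GROUPS(mvst_host) expands, approximately, to the group wrapper that the new .shost_groups member expects:

    static const struct attribute_group mvst_host_group = {
            .attrs = mvst_host_attrs,
    };

    static const struct attribute_group *mvst_host_groups[] = {
            &mvst_host_group,
            NULL,
    };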
+
module_init(mvs_init);
module_exit(mvs_exit);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a920eced92ec..a6867dae0e7c 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
phy = container_of(sha->sas_port[i]->phy_list.next,
struct asd_sas_phy, port_phy_el);
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
j = 0;
while (sha->sas_phy[j]) {
if (sha->sas_phy[j] == phy)
@@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
struct asd_sas_phy *phy;
+
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
list_for_each_entry(phy,
&sha->sas_port[i]->phy_list, port_phy_el) {
j = 0;
@@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
num++;
n++;
}
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
break;
}
i++;
@@ -216,11 +221,11 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags)
{
struct mvs_phy *phy = &mvi->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha;
+
if (!phy->phy_attached)
return;
@@ -229,8 +234,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
return;
}
- sas_ha = mvi->sas;
- sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
struct sas_phy *sphy = sas_phy->phy;
@@ -262,8 +266,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
- mvi->sas->notify_port_event(sas_phy,
- PORTE_BYTES_DMAED);
+ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}
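The new gfp_t parameter threads the caller's allocation context down to the libsas event queue instead of letting it assume a sleepable context; the convention, sketched with a hypothetical helper:

    static void report_oob_done(struct asd_sas_phy *sas_phy, bool atomic_ctx)
    {
            /* Interrupt or spinlocked work paths must pass GFP_ATOMIC;
             * scan/probe paths may sleep and pass GFP_KERNEL. */
            sas_notify_phy_event(sas_phy, PHYE_OOB_DONE,
                                 atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
    }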
void mvs_scan_start(struct Scsi_Host *shost)
@@ -279,7 +282,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
for (j = 0; j < core_nr; j++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
for (i = 0; i < mvi->chip->n_phy; ++i)
- mvs_bytes_dmaed(mvi, i);
+ mvs_bytes_dmaed(mvi, i, GFP_KERNEL);
}
mvs_prv->scan_finished = 1;
}
@@ -553,7 +556,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
static int mvs_task_prep_ssp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei, int is_tmf,
- struct mvs_tmf_task *tmf)
+ struct sas_tmf_task *tmf)
{
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
@@ -693,7 +696,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
- struct mvs_tmf_task *tmf, int *pass)
+ struct sas_tmf_task *tmf, int *pass)
{
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
@@ -812,9 +815,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
slot->port = tei.port;
task->lldd_task = slot;
list_add_tail(&slot->entry, &tei.port->list);
- spin_lock(&task->task_state_lock);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&task->task_state_lock);
mvi_dev->running_req++;
++(*pass);
@@ -837,14 +837,14 @@ prep_out:
return rc;
}
-static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
- struct completion *completion, int is_tmf,
- struct mvs_tmf_task *tmf)
+int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
struct mvs_info *mvi = NULL;
u32 rc = 0;
u32 pass = 0;
unsigned long flags = 0;
+ struct sas_tmf_task *tmf = task->tmf;
+ int is_tmf = !!task->tmf;
mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
@@ -861,11 +861,6 @@ static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
return rc;
}
-int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
-}
-
static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
@@ -1259,112 +1254,6 @@ void mvs_dev_gone(struct domain_device *dev)
mvs_dev_gone_notify(dev);
}
-static void mvs_task_done(struct sas_task *task)
-{
- if (!del_timer(&task->slow_task->timer))
- return;
- complete(&task->slow_task->completion);
-}
-
-static void mvs_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
-
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
-}
-
-#define MVS_TASK_TIMEOUT 20
-static int mvs_exec_internal_tmf_task(struct domain_device *dev,
- void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
-{
- int res, retry;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
-
- memcpy(&task->ssp_task, parameter, para_len);
- task->task_done = mvs_task_done;
-
- task->slow_task->timer.function = mvs_tmf_timedout;
- task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
- add_timer(&task->slow_task->timer);
-
- res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- mv_printk("executing internal task failed:%d\n", res);
- goto ex_err;
- }
-
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
- goto ex_err;
- }
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- mv_dprintk("blocked task error.\n");
- res = -EMSGSIZE;
- break;
- } else {
- mv_dprintk(" task to dev %016llx response: 0x%x "
- "status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
-
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
-
-static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
- u8 *lun, struct mvs_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
- if (!(dev->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy(ssp_task.LUN, lun, 8);
-
- return mvs_exec_internal_tmf_task(dev, &ssp_task,
- sizeof(ssp_task), tmf);
-}
-
-
/* Standard mandates link reset for ATA (type 0)
and hard reset for SSP (type 1), only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
@@ -1384,13 +1273,11 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
struct mvs_device * mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- tmf_task.tmf = TMF_LU_RESET;
mvi_dev->dev_status = MVS_DEV_EH;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+ rc = sas_lu_reset(dev, lun);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&mvi->lock, flags);
mvs_release_task(mvi, dev);
@@ -1427,27 +1314,20 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
int mvs_query_task(struct sas_task *task)
{
u32 tag;
- struct scsi_lun lun;
- struct mvs_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- int_to_scsilun(cmnd->device->lun, &lun);
rc = mvs_find_tag(mvi, task, &tag);
if (rc == 0) {
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
@@ -1464,8 +1344,6 @@ int mvs_query_task(struct sas_task *task)
/* mandatory SAM-3, still need free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct mvs_tmf_task tmf_task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi;
@@ -1489,9 +1367,6 @@ int mvs_abort_task(struct sas_task *task)
spin_unlock_irqrestore(&task->task_state_lock, flags);
mvi_dev->dev_status = MVS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
-
- int_to_scsilun(cmnd->device->lun, &lun);
rc = mvs_find_tag(mvi, task, &tag);
if (rc == 0) {
mv_printk("No such tag in %s\n", __func__);
@@ -1499,10 +1374,7 @@ int mvs_abort_task(struct sas_task *task)
return rc;
}
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_abort_task(task, tag);
/* if successful, clear the task and callback forwards.*/
if (rc == TMF_RESP_FUNC_COMPLETE) {
@@ -1539,39 +1411,6 @@ out:
return rc;
}
-int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
-{
- int rc;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
-int mvs_clear_aca(struct domain_device *dev, u8 *lun)
-{
- int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
-int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
-{
- int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
u32 slot_idx, int err)
{
@@ -1638,7 +1477,7 @@ static void mvs_set_sense(u8 *buffer, int len, int d_sense,
static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
u8 key, u8 asc, u8 asc_q)
{
- iu->datapres = 2;
+ iu->datapres = SAS_DATAPRES_SENSE_DATA;
iu->response_data_len = 0;
iu->sense_data_len = 17;
iu->status = 02;
@@ -1718,8 +1557,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
mvi_dev = dev->lldd_dev;
spin_lock(&task->task_state_lock);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
/* race condition*/
aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
@@ -1766,7 +1604,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
case SAS_PROTOCOL_SSP:
/* hw says status == 0, datapres == 0 */
if (rx_desc & RXQ_GOOD) {
- tstat->stat = SAM_STAT_GOOD;
+ tstat->stat = SAS_SAM_STAT_GOOD;
tstat->resp = SAS_TASK_COMPLETE;
}
/* response frame present */
@@ -1775,12 +1613,12 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
sizeof(struct mvs_err_info);
sas_ssp_task_response(mvi->dev, task, iu);
} else
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SMP: {
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
- tstat->stat = SAM_STAT_GOOD;
+ tstat->stat = SAS_SAM_STAT_GOOD;
to = kmap_atomic(sg_page(sg_resp));
memcpy(to + sg_resp->offset,
slot->response + sizeof(struct mvs_err_info),
@@ -1797,7 +1635,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
}
default:
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
if (!slot->port->port_attached) {
@@ -1880,7 +1718,6 @@ static void mvs_work_queue(struct work_struct *work)
struct mvs_info *mvi = mwq->mvi;
unsigned long flags;
u32 phy_no = (unsigned long) mwq->data;
- struct sas_ha_struct *sas_ha = mvi->sas;
struct mvs_phy *phy = &mvi->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -1895,21 +1732,21 @@ static void mvs_work_queue(struct work_struct *work)
if (!(tmp & PHY_READY_MASK)) {
sas_phy_disconnected(sas_phy);
mvs_phy_disconnected(phy);
- sas_ha->notify_phy_event(sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
mv_dprintk("phy%d Removed Device\n", phy_no);
} else {
MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
mvs_update_phyinfo(mvi, phy_no, 1);
- mvs_bytes_dmaed(mvi, phy_no);
+ mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
mvs_port_notify_formed(sas_phy, 0);
mv_dprintk("phy%d Attached Device\n", phy_no);
}
}
} else if (mwq->handler & EXP_BRCT_CHG) {
phy->phy_event &= ~EXP_BRCT_CHG;
- sas_ha->notify_port_event(sas_phy,
- PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD, GFP_ATOMIC);
mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
}
list_del(&mwq->entry);
@@ -2026,7 +1863,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
mdelay(10);
}
- mvs_bytes_dmaed(mvi, phy_no);
+ mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
mvs_port_notify_formed(&phy->sas_phy, 0);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 519edc796691..509d8f32a04f 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -40,7 +40,7 @@
#define mv_dprintk(format, arg...) \
printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
#else
-#define mv_dprintk(format, arg...)
+#define mv_dprintk(format, arg...) no_printk(format, ## arg)
#endif
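Mapping the disabled macro to no_printk() instead of an empty expansion keeps the format string and arguments type-checked (and "used") in non-debug builds while still generating no code; the same idiom in isolation:

    /* Compiles to nothing, but %d/%s mismatches are still diagnosed. */
    #define my_dbg(format, arg...) no_printk(format, ## arg)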
#define MV_MAX_U32 0xffffffff
@@ -394,7 +394,7 @@ struct mvs_info {
dma_addr_t bulk_buffer_dma1;
#define TRASH_BUCKET_SIZE 0x20000
void *dma_pool;
- struct mvs_slot_info slot_info[0];
+ struct mvs_slot_info slot_info[];
};
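With a C99 flexible array member, the allocation site typically computes the size with struct_size() from <linux/overflow.h>; a sketch:

    static struct mvs_info *mvs_info_alloc(int slot_nr)
    {
            struct mvs_info *mvi;

            /* Header plus slot_nr trailing slot_info entries, with
             * integer-overflow checking built in. */
            mvi = kzalloc(struct_size(mvi, slot_info, slot_nr), GFP_KERNEL);
            return mvi;
    }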
struct mvs_prv_info{
@@ -440,9 +440,6 @@ void mvs_scan_start(struct Scsi_Host *shost);
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
int mvs_abort_task(struct sas_task *task);
-int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
-int mvs_clear_aca(struct domain_device *dev, u8 *lun);
-int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
void mvs_port_formed(struct asd_sas_phy *sas_phy);
void mvs_port_deformed(struct asd_sas_phy *sas_phy);
int mvs_dev_found(struct domain_device *dev);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 8906aceda4c4..05d3ce9b72db 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -66,9 +66,9 @@ static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
static bool tag_is_empty(struct mvumi_tag *st)
{
if (st->top == 0)
- return 1;
+ return true;
else
- return 0;
+ return false;
}
static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
@@ -182,7 +182,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
* @mhba: Adapter soft state
* @scmd: SCSI command from the mid-layer
* @sgl_p: SGL to be filled in
- * @sg_count return the number of SG elements
+ * @sg_count: return the number of SG elements
*
* If successful, this function returns 0. otherwise, it returns -1.
*/
@@ -702,7 +702,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
- scmd->request->tag, scmd->cmnd[0], scmd->retries);
+ scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);
return mhba->instancet->reset_host(mhba);
}
@@ -1295,13 +1295,14 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba)
* mvumi_complete_cmd - Completes a command
* @mhba: Adapter soft state
* @cmd: Command to be completed
+ * @ob_frame: Command response
*/
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
struct mvumi_rsp_frame *ob_frame)
{
struct scsi_cmnd *scmd = cmd->scmd;
- cmd->scmd->SCp.ptr = NULL;
+ mvumi_priv(cmd->scmd)->cmd_priv = NULL;
scmd->result = ob_frame->req_status;
switch (ob_frame->req_status) {
@@ -1316,11 +1317,10 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
sizeof(struct mvumi_sense_data));
- scmd->result |= (DRIVER_SENSE << 24);
}
break;
default:
- scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->result |= (DID_ABORT << 16);
break;
}
@@ -1328,7 +1328,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
scmd->sc_data_direction);
- cmd->scmd->scsi_done(scmd);
+ scsi_done(scmd);
mvumi_return_cmd(mhba, cmd);
}
@@ -2067,17 +2067,14 @@ static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
return 0;
error:
- scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
- 0);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
return -1;
}
/**
* mvumi_queue_command - Queue entry point
+ * @shost: Scsi host to queue command on
* @scmd: SCSI command to be queued
- * @done: Callback entry point
*/
static int mvumi_queue_command(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
@@ -2100,21 +2097,21 @@ static int mvumi_queue_command(struct Scsi_Host *shost,
goto out_return_cmd;
cmd->scmd = scmd;
- scmd->SCp.ptr = (char *) cmd;
+ mvumi_priv(scmd)->cmd_priv = cmd;
mhba->instancet->fire_cmd(mhba, cmd);
spin_unlock_irqrestore(shost->host_lock, irq_flags);
return 0;
out_return_cmd:
mvumi_return_cmd(mhba, cmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
spin_unlock_irqrestore(shost->host_lock, irq_flags);
return 0;
}
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
- struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
+ struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv;
struct Scsi_Host *host = scmd->device->host;
struct mvumi_hba *mhba = shost_priv(host);
unsigned long flags;
@@ -2130,8 +2127,8 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
else
atomic_dec(&mhba->fw_outstanding);
- scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
- scmd->SCp.ptr = NULL;
+ scmd->result = (DID_ABORT << 16);
+ mvumi_priv(scmd)->cmd_priv = NULL;
if (scsi_bufflen(scmd)) {
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
@@ -2182,6 +2179,7 @@ static struct scsi_host_template mvumi_template = {
.bios_param = mvumi_bios_param,
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
+ .cmd_size = sizeof(struct mvumi_cmd_priv),
};
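
Setting .cmd_size makes the mid-layer reserve that many driver-private bytes behind every struct scsi_cmnd, which scsi_cmd_priv() returns a pointer to; this is what backs the mvumi_priv() accessor added in mvumi.h further down and retires scmd->SCp.ptr. The pattern in isolation, using hypothetical example_* names:

	struct example_cmd_priv {
		void *cookie;		/* stands in for the legacy SCp.ptr */
	};

	static struct scsi_host_template example_template = {
		.cmd_size = sizeof(struct example_cmd_priv),
		/* ... */
	};

	static void example_stash(struct scsi_cmnd *scmd, void *cookie)
	{
		struct example_cmd_priv *priv = scsi_cmd_priv(scmd);

		priv->cookie = cookie;	/* storage allocated by the mid-layer */
	}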
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
@@ -2296,7 +2294,6 @@ static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
break;
default:
return -1;
- break;
}
return 0;
@@ -2425,6 +2422,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
if (IS_ERR(mhba->dm_thread)) {
dev_err(&mhba->pdev->dev,
"failed to create device scan thread\n");
+ ret = PTR_ERR(mhba->dm_thread);
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_create_thread;
}
@@ -2558,7 +2556,7 @@ static void mvumi_detach_one(struct pci_dev *pdev)
/**
* mvumi_shutdown - Shutdown entry point
- * @device: Generic device structure
+ * @pdev: PCI device structure
*/
static void mvumi_shutdown(struct pci_dev *pdev)
{
@@ -2567,49 +2565,28 @@ static void mvumi_shutdown(struct pci_dev *pdev)
mvumi_flush_cache(mhba);
}
-static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused mvumi_suspend(struct device *dev)
{
- struct mvumi_hba *mhba = NULL;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
- mhba = pci_get_drvdata(pdev);
mvumi_flush_cache(mhba);
- pci_set_drvdata(pdev, mhba);
mhba->instancet->disable_intr(mhba);
- free_irq(mhba->pdev->irq, mhba);
mvumi_unmap_pci_addr(pdev, mhba->base_addr);
- pci_release_regions(pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
+static int __maybe_unused mvumi_resume(struct device *dev)
{
int ret;
- struct mvumi_hba *mhba = NULL;
-
- mhba = pci_get_drvdata(pdev);
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev, "enable device failed\n");
- return ret;
- }
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
- ret = mvumi_pci_set_master(pdev);
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto fail;
- ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
- if (ret)
- goto fail;
ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
if (ret)
goto release_regions;
@@ -2627,12 +2604,6 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
goto unmap_pci_addr;
}
- ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
- "mvumi", mhba);
- if (ret) {
- dev_err(&pdev->dev, "failed to register IRQ\n");
- goto unmap_pci_addr;
- }
mhba->instancet->enable_intr(mhba);
return 0;
@@ -2642,11 +2613,12 @@ unmap_pci_addr:
release_regions:
pci_release_regions(pdev);
fail:
- pci_disable_device(pdev);
return ret;
}
+static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume);
+
static struct pci_driver mvumi_pci_driver = {
.name = MV_DRIVER_NAME,
@@ -2654,10 +2626,7 @@ static struct pci_driver mvumi_pci_driver = {
.probe = mvumi_probe_one,
.remove = mvumi_detach_one,
.shutdown = mvumi_shutdown,
-#ifdef CONFIG_PM
- .suspend = mvumi_suspend,
- .resume = mvumi_resume,
-#endif
+ .driver.pm = &mvumi_pm_ops,
};
module_pci_driver(mvumi_pci_driver);
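
For orientation, the shape of the generic power-management conversion: the legacy pci_driver .suspend/.resume pair takes a pci_dev and must juggle power states itself, while dev_pm_ops callbacks take a struct device and leave config-space save/restore and D-state selection to the PCI core — which is why the pci_save_state()/pci_set_power_state()/pci_enable_device() calls disappear above. A minimal sketch, with hypothetical example_* names:

	static int __maybe_unused example_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		/* Quiesce the hardware here (mask interrupts, flush caches);
		 * the PCI core saves config space and picks the low-power
		 * state after this returns.
		 */
		dev_dbg(&pdev->dev, "suspending\n");
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, NULL);

	static struct pci_driver example_driver = {
		.driver.pm = &example_pm_ops,	/* replaces .suspend/.resume */
	};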
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index ec8cc2207536..a88c58787b68 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -130,7 +130,7 @@ enum {
struct mvumi_hotplug_event {
u16 size;
u8 dummy[2];
- u8 bitmap[0];
+ u8 bitmap[];
};
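
The bitmap[0] → bitmap[] change swaps a GCC zero-length array for a C99 flexible array member, which is what lets struct_size() and fortified memcpy() reason about the trailing allocation. A sketch, assuming the payload length is known at allocation time:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct mvumi_hotplug_event *example_alloc_event(size_t nbytes)
	{
		struct mvumi_hotplug_event *ev;

		/* struct_size() = sizeof(*ev) + nbytes, with overflow checking */
		ev = kzalloc(struct_size(ev, bitmap, nbytes), GFP_KERNEL);
		return ev;
	}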
struct mvumi_driver_event {
@@ -254,6 +254,15 @@ struct mvumi_cmd {
unsigned char cmd_status;
};
+struct mvumi_cmd_priv {
+ struct mvumi_cmd *cmd_priv;
+};
+
+static inline struct mvumi_cmd_priv *mvumi_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/*
* the function type of the in bound frame
*/
@@ -290,7 +299,7 @@ struct mvumi_rsp_frame {
struct mvumi_ob_data {
struct list_head list;
- unsigned char data[0];
+ unsigned char data[];
};
struct version_info {
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index d4bd31a75b9d..e885c1dbf61f 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -82,7 +82,7 @@ static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
return NULL;
}
-/**
+/*
* myrb_create_mempools - allocates auxiliary data structures
*
* Return: true on success, false otherwise.
@@ -134,7 +134,7 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
return true;
}
-/**
+/*
* myrb_destroy_mempools - tears down the memory pools for the controller
*/
static void myrb_destroy_mempools(struct myrb_hba *cb)
@@ -146,7 +146,7 @@ static void myrb_destroy_mempools(struct myrb_hba *cb)
dma_pool_destroy(cb->dcdb_pool);
}
-/**
+/*
* myrb_reset_cmd - reset command block
*/
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
@@ -157,7 +157,7 @@ static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
cmd_blk->status = 0;
}
-/**
+/*
* myrb_qcmd - queues command block for execution
*/
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
@@ -177,7 +177,7 @@ static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
cb->next_cmd_mbox = next_mbox;
}
-/**
+/*
* myrb_exec_cmd - executes command block and waits for completion.
*
* Return: command status
@@ -194,12 +194,11 @@ static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
cb->qcmd(cb, cmd_blk);
spin_unlock_irqrestore(&cb->queue_lock, flags);
- WARN_ON(in_interrupt());
wait_for_completion(&cmpl);
return cmd_blk->status;
}
-/**
+/*
* myrb_exec_type3 - executes a type 3 command and waits for completion.
*
* Return: command status
@@ -221,7 +220,7 @@ static unsigned short myrb_exec_type3(struct myrb_hba *cb,
return status;
}
-/**
+/*
* myrb_exec_type3D - executes a type 3D command and waits for completion.
*
* Return: command status
@@ -333,7 +332,7 @@ static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
ev_buf, ev_addr);
}
-/**
+/*
* myrb_get_errtable - retrieves the error table from the controller
*
* Executes a type 3 command and logs the error table from the controller.
@@ -378,7 +377,7 @@ static void myrb_get_errtable(struct myrb_hba *cb)
}
}
-/**
+/*
* myrb_get_ldev_info - retrieves the logical device table from the controller
*
* Executes a type 3 command and updates the logical device table.
@@ -428,7 +427,7 @@ static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
return status;
}
-/**
+/*
* myrb_get_rbld_progress - get rebuild progress information
*
* Executes a type 3 command and returns the rebuild progress
@@ -463,11 +462,10 @@ static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
return status;
}
-/**
+/*
* myrb_update_rbld_progress - updates the rebuild status
*
* Updates the rebuild status for the attached logical devices.
- *
*/
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
@@ -524,7 +522,7 @@ static void myrb_update_rbld_progress(struct myrb_hba *cb)
cb->last_rbld_status = status;
}
-/**
+/*
* myrb_get_cc_progress - retrieve the rebuild status
*
* Execute a type 3 Command and fetch the rebuild / consistency check
@@ -572,7 +570,7 @@ static void myrb_get_cc_progress(struct myrb_hba *cb)
rbld_buf, rbld_addr);
}
-/**
+/*
* myrb_bgi_control - updates background initialisation status
*
* Executes a type 3B command and updates the background initialisation status
@@ -650,7 +648,7 @@ static void myrb_bgi_control(struct myrb_hba *cb)
if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
sdev_printk(KERN_INFO, sdev,
"Background Initialization Aborted\n");
- /* Fallthrough */
+ fallthrough;
case MYRB_STATUS_NO_BGI_INPROGRESS:
cb->bgi_status.status = MYRB_BGI_INVALID;
break;
@@ -661,7 +659,7 @@ static void myrb_bgi_control(struct myrb_hba *cb)
bgi, bgi_addr);
}
-/**
+/*
* myrb_hba_enquiry - updates the controller status
*
* Executes a DAC_V1_Enquiry command and updates the controller status.
@@ -773,7 +771,7 @@ static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
return MYRB_STATUS_SUCCESS;
}
-/**
+/*
* myrb_set_pdev_state - sets the device state for a physical device
*
* Return: command status
@@ -797,7 +795,7 @@ static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
return status;
}
-/**
+/*
* myrb_enable_mmio - enables the Memory Mailbox Interface
*
* PD and P controller types have no memory mailbox, but still need the
@@ -902,7 +900,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
return true;
}
-/**
+/*
* myrb_get_hba_config - reads the configuration information
*
* Reads the configuration information from the controller and
@@ -1050,7 +1048,7 @@ static int myrb_get_hba_config(struct myrb_hba *cb)
enquiry2->fw.turn_id = 0;
}
snprintf(cb->fw_version, sizeof(cb->fw_version),
- "%d.%02d-%c-%02d",
+ "%u.%02u-%c-%02u",
enquiry2->fw.major_version,
enquiry2->fw.minor_version,
enquiry2->fw.firmware_type,
@@ -1194,7 +1192,7 @@ out_free:
return ret;
}
-/**
+/*
* myrb_unmap - unmaps controller structures
*/
static void myrb_unmap(struct myrb_hba *cb)
@@ -1230,7 +1228,7 @@ static void myrb_unmap(struct myrb_hba *cb)
}
}
-/**
+/*
* myrb_cleanup - cleanup controller structures
*/
static void myrb_cleanup(struct myrb_hba *cb)
@@ -1241,7 +1239,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
myrb_unmap(cb);
if (cb->mmio_base) {
- cb->disable_intr(cb->io_base);
+ if (cb->disable_intr)
+ cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)
@@ -1265,6 +1264,7 @@ static int myrb_host_reset(struct scsi_cmnd *scmd)
static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
+ struct request *rq = scsi_cmd_to_rq(scmd);
struct myrb_hba *cb = shost_priv(shost);
struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1283,12 +1283,12 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
if (nsge > 1) {
dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
scmd->result = (DID_ERROR << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
mbox->type3.opcode = MYRB_CMD_DCDB;
- mbox->type3.id = scmd->request->tag + 3;
+ mbox->type3.id = rq->tag + 3;
mbox->type3.addr = dcdb_addr;
dcdb->channel = sdev->channel;
dcdb->target = sdev->id;
@@ -1307,11 +1307,11 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
break;
}
dcdb->early_status = false;
- if (scmd->request->timeout <= 10)
+ if (rq->timeout <= 10)
dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
- else if (scmd->request->timeout <= 60)
+ else if (rq->timeout <= 60)
dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
- else if (scmd->request->timeout <= 600)
+ else if (rq->timeout <= 600)
dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
else
dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
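
The chain above collapses the request's timeout value into the four discrete classes the DCDB block can encode. The same mapping as a hypothetical standalone helper:

	/* Hypothetical helper mirroring the bucketing above. */
	static u8 example_dcdb_timeout(unsigned int timeout)
	{
		if (timeout <= 10)
			return MYRB_DCDB_TMO_10_SECS;
		if (timeout <= 60)
			return MYRB_DCDB_TMO_60_SECS;
		if (timeout <= 600)
			return MYRB_DCDB_TMO_10_MINS;
		return MYRB_DCDB_TMO_24_HRS;
	}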
@@ -1399,8 +1399,7 @@ myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
static void myrb_request_sense(struct myrb_hba *cb,
struct scsi_cmnd *scmd)
{
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- NO_SENSE, 0, 0);
+ scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
}
@@ -1438,68 +1437,56 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
sdev->id, ldev_info ? ldev_info->state : 0xff);
scmd->result = (DID_BAD_TARGET << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
switch (scmd->cmnd[0]) {
case TEST_UNIT_READY:
scmd->result = (DID_OK << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case INQUIRY:
if (scmd->cmnd[1] & 1) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrb_inquiry(cb, scmd);
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case SYNCHRONIZE_CACHE:
scmd->result = (DID_OK << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case MODE_SENSE:
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
(scmd->cmnd[2] & 0x3F) != 0x08) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrb_mode_sense(cb, scmd, ldev_info);
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case READ_CAPACITY:
if ((scmd->cmnd[1] & 1) ||
(scmd->cmnd[8] & 1)) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
+ scsi_done(scmd);
return 0;
}
lba = get_unaligned_be32(&scmd->cmnd[2]);
if (lba) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
+ scsi_done(scmd);
return 0;
}
myrb_read_capacity(cb, scmd, ldev_info);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case REQUEST_SENSE:
myrb_request_sense(cb, scmd);
@@ -1508,27 +1495,21 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case SEND_DIAGNOSTIC:
if (scmd->cmnd[1] != 0x04) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
/* Assume good status */
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
case READ_6:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
+ scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_6:
lba = (((scmd->cmnd[1] & 0x1F) << 16) |
(scmd->cmnd[2] << 8) |
@@ -1538,14 +1519,11 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_10:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
+ scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_10:
case VERIFY: /* 0x2F */
case WRITE_VERIFY: /* 0x2E */
@@ -1555,14 +1533,11 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_12:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
+ scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_12:
case VERIFY_12: /* 0xAF */
case WRITE_VERIFY_12: /* 0xAE */
@@ -1571,15 +1546,13 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
break;
default:
/* Illegal request, invalid opcode */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x20, 0);
- scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
+ scsi_done(scmd);
return 0;
}
myrb_reset_cmd(cmd_blk);
- mbox->type5.id = scmd->request->tag + 3;
+ mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
if (scmd->sc_data_direction == DMA_NONE)
goto submit;
nsge = scsi_dma_map(scmd);
@@ -1638,7 +1611,7 @@ static int myrb_queuecommand(struct Scsi_Host *shost,
if (sdev->channel > myrb_logical_channel(shost)) {
scmd->result = (DID_BAD_TARGET << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
if (sdev->channel == myrb_logical_channel(shost))
@@ -1702,7 +1675,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
if (sdev->id > MYRB_MAX_TARGETS)
return -ENXIO;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
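
Dropping GFP_DMA here reflects that these buffers reach the controller through the DMA API, which honours the device's actual addressing mask (bouncing if it must), so restricting the allocation to the 16 MB ISA DMA zone gains nothing. The idiomatic shape, as a sketch:

	/* Plain GFP_KERNEL allocation plus an explicit streaming mapping; any
	 * bouncing needed for the device's DMA mask is the DMA core's job.
	 */
	static void *example_alloc_for_dma(struct device *dev, size_t len,
					   dma_addr_t *handle)
	{
		void *buf = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return NULL;
		*handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *handle)) {
			kfree(buf);
			return NULL;
		}
		return buf;
	}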
@@ -2167,7 +2140,7 @@ static ssize_t ctlr_num_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct myrb_hba *cb = shost_priv(shost);
- return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+ return snprintf(buf, 20, "%u\n", cb->ctlr_num);
}
static DEVICE_ATTR_RO(ctlr_num);
@@ -2210,23 +2183,27 @@ static ssize_t flush_cache_store(struct device *dev,
}
static DEVICE_ATTR_WO(flush_cache);
-static struct device_attribute *myrb_sdev_attrs[] = {
- &dev_attr_rebuild,
- &dev_attr_consistency_check,
- &dev_attr_raid_state,
- &dev_attr_raid_level,
+static struct attribute *myrb_sdev_attrs[] = {
+ &dev_attr_rebuild.attr,
+ &dev_attr_consistency_check.attr,
+ &dev_attr_raid_state.attr,
+ &dev_attr_raid_level.attr,
NULL,
};
-static struct device_attribute *myrb_shost_attrs[] = {
- &dev_attr_ctlr_num,
- &dev_attr_model,
- &dev_attr_firmware,
- &dev_attr_flush_cache,
+ATTRIBUTE_GROUPS(myrb_sdev);
+
+static struct attribute *myrb_shost_attrs[] = {
+ &dev_attr_ctlr_num.attr,
+ &dev_attr_model.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_flush_cache.attr,
NULL,
};
-struct scsi_host_template myrb_template = {
+ATTRIBUTE_GROUPS(myrb_shost);
+
+static struct scsi_host_template myrb_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrb",
@@ -2237,14 +2214,14 @@ struct scsi_host_template myrb_template = {
.slave_destroy = myrb_slave_destroy,
.bios_param = myrb_biosparam,
.cmd_size = sizeof(struct myrb_cmdblk),
- .shost_attrs = myrb_shost_attrs,
- .sdev_attrs = myrb_sdev_attrs,
+ .shost_groups = myrb_shost_groups,
+ .sdev_groups = myrb_sdev_groups,
.this_id = -1,
};
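
ATTRIBUTE_GROUPS(myrb_sdev) generates the group plumbing that the new .sdev_groups/.shost_groups template fields expect from the flat attribute arrays above. Its approximate expansion, for orientation:

	static const struct attribute_group myrb_sdev_group = {
		.attrs = myrb_sdev_attrs,
	};

	static const struct attribute_group *myrb_sdev_groups[] = {
		&myrb_sdev_group,
		NULL,
	};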
/**
* myrb_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int myrb_is_raid(struct device *dev)
{
@@ -2255,7 +2232,7 @@ static int myrb_is_raid(struct device *dev)
/**
* myrb_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void myrb_get_resync(struct device *dev)
{
@@ -2282,7 +2259,7 @@ static void myrb_get_resync(struct device *dev)
/**
* myrb_get_state - get raid volume status
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void myrb_get_state(struct device *dev)
{
@@ -2315,7 +2292,7 @@ static void myrb_get_state(struct device *dev)
raid_set_state(myrb_raid_template, dev, state);
}
-struct raid_function_template myrb_raid_functions = {
+static struct raid_function_template myrb_raid_functions = {
.cookie = &myrb_template,
.is_raid = myrb_is_raid,
.get_resync = myrb_get_resync,
@@ -2354,25 +2331,19 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
"Bad Data Encountered\n");
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
/* Unrecovered read error */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x11, 0);
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
else
/* Write error */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x0C, 0);
- scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
break;
case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
/* Unrecovered read error, auto-reallocation failed */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x11, 0x04);
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
else
/* Write error, auto-reallocation failed */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x0C, 0x02);
- scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
break;
case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
dev_dbg(&scmd->device->sdev_gendev,
@@ -2383,8 +2354,7 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
dev_dbg(&scmd->device->sdev_gendev,
"Attempt to Access Beyond End of Logical Drive");
/* Logical block address out of range */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- NOT_READY, 0x21, 0);
+ scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
break;
case MYRB_STATUS_DEVICE_NONRESPONSIVE:
dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
@@ -2396,7 +2366,7 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
scmd->result = (DID_ERROR << 16);
break;
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
@@ -2481,7 +2451,7 @@ static void myrb_monitor(struct work_struct *work)
queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
-/**
+/*
* myrb_err_status - reports controller BIOS messages
*
* Controller BIOS messages are passed through the Error Status Register
@@ -2489,7 +2459,7 @@ static void myrb_monitor(struct work_struct *work)
*
* Return: true for fatal errors and false otherwise.
*/
-bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
unsigned char parm0, unsigned char parm1)
{
struct pci_dev *pdev = cb->pdev;
@@ -2554,11 +2524,6 @@ static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}
-static inline void DAC960_LA_gen_intr(void __iomem *base)
-{
- writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
-}
-
static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
@@ -2588,11 +2553,6 @@ static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}
-static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
-{
- writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
-}
-
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
@@ -2606,13 +2566,6 @@ static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
-{
- unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
-
- return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
unsigned char odb = 0xFF;
@@ -2629,13 +2582,6 @@ static inline void DAC960_LA_disable_intr(void __iomem *base)
writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
-static inline bool DAC960_LA_intr_enabled(void __iomem *base)
-{
- unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
-
- return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
union myrb_cmd_mbox *mbox)
{
@@ -2658,11 +2604,6 @@ static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}
-static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
-{
- return readb(base + DAC960_LA_STSID_OFFSET);
-}
-
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
return readw(base + DAC960_LA_STS_OFFSET);
@@ -2732,7 +2673,6 @@ static int DAC960_LA_hw_init(struct pci_dev *pdev,
DAC960_LA_disable_intr(base);
DAC960_LA_ack_hw_mbox_status(base);
udelay(1000);
- timeout = 0;
while (DAC960_LA_init_in_progress(base) &&
timeout < MYRB_MAILBOX_TIMEOUT) {
if (DAC960_LA_read_error_status(base, &error,
@@ -2812,7 +2752,7 @@ static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_LA_privdata = {
+static struct myrb_privdata DAC960_LA_privdata = {
.hw_init = DAC960_LA_hw_init,
.irq_handler = DAC960_LA_intr_handler,
.mmio_size = DAC960_LA_mmio_size,
@@ -2831,11 +2771,6 @@ static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}
-static inline void DAC960_PG_gen_intr(void __iomem *base)
-{
- writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
-}
-
static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
@@ -2865,11 +2800,6 @@ static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}
-static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
-{
- writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
-}
-
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
@@ -2883,13 +2813,6 @@ static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
-{
- unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
-
- return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
unsigned int imask = (unsigned int)-1;
@@ -2905,13 +2828,6 @@ static inline void DAC960_PG_disable_intr(void __iomem *base)
writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
-static inline bool DAC960_PG_intr_enabled(void __iomem *base)
-{
- unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
-
- return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
union myrb_cmd_mbox *mbox)
{
@@ -2934,12 +2850,6 @@ static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}
-static inline unsigned char
-DAC960_PG_read_status_cmd_ident(void __iomem *base)
-{
- return readb(base + DAC960_PG_STSID_OFFSET);
-}
-
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
@@ -3088,7 +2998,7 @@ static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_PG_privdata = {
+static struct myrb_privdata DAC960_PG_privdata = {
.hw_init = DAC960_PG_hw_init,
.irq_handler = DAC960_PG_intr_handler,
.mmio_size = DAC960_PG_mmio_size,
@@ -3109,11 +3019,6 @@ static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
-static inline void DAC960_PD_gen_intr(void __iomem *base)
-{
- writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
-}
-
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
@@ -3155,13 +3060,6 @@ static inline void DAC960_PD_disable_intr(void __iomem *base)
writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
-static inline bool DAC960_PD_intr_enabled(void __iomem *base)
-{
- unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
-
- return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
-}
-
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
union myrb_cmd_mbox *mbox)
{
@@ -3291,7 +3189,7 @@ static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_PD_privdata = {
+static struct myrb_privdata DAC960_PD_privdata = {
.hw_init = DAC960_PD_hw_init,
.irq_handler = DAC960_PD_intr_handler,
.mmio_size = DAC960_PD_mmio_size,
@@ -3489,7 +3387,7 @@ static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_P_privdata = {
+static struct myrb_privdata DAC960_P_privdata = {
.hw_init = DAC960_P_hw_init,
.irq_handler = DAC960_P_intr_handler,
.mmio_size = DAC960_PD_mmio_size,
@@ -3516,9 +3414,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
+ cb->host = shost;
- if (pci_enable_device(pdev))
- goto failure;
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ scsi_host_put(shost);
+ return NULL;
+ }
if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 5c5666491c2e..7eb8c39da366 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -87,7 +87,7 @@ static char *myrs_raid_level_name(enum myrs_raid_level level)
return NULL;
}
-/**
+/*
* myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
*/
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
@@ -98,7 +98,7 @@ static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
cmd_blk->status = 0;
}
-/**
+/*
* myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
*/
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
@@ -122,7 +122,7 @@ static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
cs->next_cmd_mbox = next_mbox;
}
-/**
+/*
* myrs_exec_cmd - executes V2 Command and waits for completion.
*/
static void myrs_exec_cmd(struct myrs_hba *cs,
@@ -136,11 +136,10 @@ static void myrs_exec_cmd(struct myrs_hba *cs,
myrs_qcmd(cs, cmd_blk);
spin_unlock_irqrestore(&cs->queue_lock, flags);
- WARN_ON(in_interrupt());
wait_for_completion(&complete);
}
-/**
+/*
* myrs_report_progress - prints progress message
*/
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
@@ -153,7 +152,7 @@ static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
(100 * (int)(blocks >> 7)) / (int)(size >> 7));
}
-/**
+/*
* myrs_get_ctlr_info - executes a Controller Information IOCTL Command
*/
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
@@ -214,7 +213,7 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
return status;
}
-/**
+/*
* myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
*/
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
@@ -301,7 +300,7 @@ static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
return status;
}
-/**
+/*
* myrs_get_pdev_info - executes a "Read Physical Device Information" Command
*/
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
@@ -345,7 +344,7 @@ static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
return status;
}
-/**
+/*
* myrs_dev_op - executes a "Device Operation" Command
*/
static unsigned char myrs_dev_op(struct myrs_hba *cs,
@@ -369,7 +368,7 @@ static unsigned char myrs_dev_op(struct myrs_hba *cs,
return status;
}
-/**
+/*
* myrs_translate_pdev - translates a Physical Device Channel and
* TargetID into a Logical Device.
*/
@@ -414,7 +413,7 @@ static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
return status;
}
-/**
+/*
* myrs_get_event - executes a Get Event Command
*/
static unsigned char myrs_get_event(struct myrs_hba *cs,
@@ -476,7 +475,7 @@ static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
return status;
}
-/**
+/*
* myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
*/
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
@@ -539,13 +538,11 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = NULL;
goto out_free;
}
- cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
- GFP_KERNEL | GFP_DMA);
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
if (!cs->ctlr_info)
goto out_free;
- cs->event_buf = kzalloc(sizeof(struct myrs_event),
- GFP_KERNEL | GFP_DMA);
+ cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
if (!cs->event_buf)
goto out_free;
@@ -577,7 +574,7 @@ out_free:
return (status == MYRS_STATUS_SUCCESS);
}
-/**
+/*
* myrs_get_config - reads the Configuration Information
*/
static int myrs_get_config(struct myrs_hba *cs)
@@ -682,7 +679,7 @@ static int myrs_get_config(struct myrs_hba *cs)
return 0;
}
-/**
+/*
* myrs_log_event - prints a Controller Event message
*/
static struct {
@@ -1191,7 +1188,6 @@ static ssize_t consistency_check_show(struct device *dev,
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info;
unsigned short ldev_num;
- unsigned char status;
if (sdev->channel < cs->ctlr_info->physchan_present)
return snprintf(buf, 32, "physical device - not checking\n");
@@ -1200,7 +1196,7 @@ static ssize_t consistency_check_show(struct device *dev,
if (!ldev_info)
return -ENXIO;
ldev_num = ldev_info->ldev_num;
- status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
if (ldev_info->cc_active)
return snprintf(buf, 32, "checking block %zu of %zu\n",
(size_t)ldev_info->cc_lba,
@@ -1288,14 +1284,16 @@ static ssize_t consistency_check_store(struct device *dev,
}
static DEVICE_ATTR_RW(consistency_check);
-static struct device_attribute *myrs_sdev_attrs[] = {
- &dev_attr_consistency_check,
- &dev_attr_rebuild,
- &dev_attr_raid_state,
- &dev_attr_raid_level,
+static struct attribute *myrs_sdev_attrs[] = {
+ &dev_attr_consistency_check.attr,
+ &dev_attr_rebuild.attr,
+ &dev_attr_raid_state.attr,
+ &dev_attr_raid_level.attr,
NULL,
};
+ATTRIBUTE_GROUPS(myrs_sdev);
+
static ssize_t serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1512,24 +1510,26 @@ static ssize_t disable_enclosure_messages_store(struct device *dev,
}
static DEVICE_ATTR_RW(disable_enclosure_messages);
-static struct device_attribute *myrs_shost_attrs[] = {
- &dev_attr_serial,
- &dev_attr_ctlr_num,
- &dev_attr_processor,
- &dev_attr_model,
- &dev_attr_ctlr_type,
- &dev_attr_cache_size,
- &dev_attr_firmware,
- &dev_attr_discovery,
- &dev_attr_flush_cache,
- &dev_attr_disable_enclosure_messages,
+static struct attribute *myrs_shost_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_ctlr_num.attr,
+ &dev_attr_processor.attr,
+ &dev_attr_model.attr,
+ &dev_attr_ctlr_type.attr,
+ &dev_attr_cache_size.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_discovery.attr,
+ &dev_attr_flush_cache.attr,
+ &dev_attr_disable_enclosure_messages.attr,
NULL,
};
+ATTRIBUTE_GROUPS(myrs_shost);
+
/*
* SCSI midlayer interface
*/
-int myrs_host_reset(struct scsi_cmnd *scmd)
+static int myrs_host_reset(struct scsi_cmnd *scmd)
{
struct Scsi_Host *shost = scmd->device->host;
struct myrs_hba *cs = shost_priv(shost);
@@ -1584,6 +1584,7 @@ static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
static int myrs_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
+ struct request *rq = scsi_cmd_to_rq(scmd);
struct myrs_hba *cs = shost_priv(shost);
struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1596,16 +1597,14 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if (!scmd->device->hostdata) {
scmd->result = (DID_NO_CONNECT << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
switch (scmd->cmnd[0]) {
case REPORT_LUNS:
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
- 0x20, 0x0);
- scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
- scmd->scsi_done(scmd);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
+ scsi_done(scmd);
return 0;
case MODE_SENSE:
if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
@@ -1614,15 +1613,12 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
(scmd->cmnd[2] & 0x3F) != 0x08) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrs_mode_sense(cs, scmd, ldev_info);
scmd->result = (DID_OK << 16);
}
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
break;
@@ -1635,7 +1631,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
return SCSI_MLQUEUE_HOST_BUSY;
cmd_blk->sense_addr = sense_addr;
- timeout = scmd->request->timeout;
+ timeout = rq->timeout;
if (scmd->cmd_len <= 10) {
if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
struct myrs_ldev_info *ldev_info = sdev->hostdata;
@@ -1651,10 +1647,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
mbox->SCSI_10.pdev.target = sdev->id;
mbox->SCSI_10.pdev.channel = sdev->channel;
}
- mbox->SCSI_10.id = scmd->request->tag + 3;
+ mbox->SCSI_10.id = rq->tag + 3;
mbox->SCSI_10.control.dma_ctrl_to_host =
(scmd->sc_data_direction == DMA_FROM_DEVICE);
- if (scmd->request->cmd_flags & REQ_FUA)
+ if (rq->cmd_flags & REQ_FUA)
mbox->SCSI_10.control.fua = true;
mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
@@ -1697,10 +1693,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
mbox->SCSI_255.pdev.target = sdev->id;
mbox->SCSI_255.pdev.channel = sdev->channel;
}
- mbox->SCSI_255.id = scmd->request->tag + 3;
+ mbox->SCSI_255.id = rq->tag + 3;
mbox->SCSI_255.control.dma_ctrl_to_host =
(scmd->sc_data_direction == DMA_FROM_DEVICE);
- if (scmd->request->cmd_flags & REQ_FUA)
+ if (rq->cmd_flags & REQ_FUA)
mbox->SCSI_255.control.fua = true;
mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
@@ -1762,7 +1758,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if (WARN_ON(!hw_sgl)) {
scsi_dma_unmap(scmd);
scmd->result = (DID_ERROR << 16);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
@@ -1807,7 +1803,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
ldev_num = myrs_translate_ldev(cs, sdev);
- ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!ldev_info)
return -ENOMEM;
@@ -1869,7 +1865,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
} else {
struct myrs_pdev_info *pdev_info;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
@@ -1919,7 +1915,7 @@ static void myrs_slave_destroy(struct scsi_device *sdev)
kfree(sdev->hostdata);
}
-struct scsi_host_template myrs_template = {
+static struct scsi_host_template myrs_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrs",
@@ -1929,8 +1925,8 @@ struct scsi_host_template myrs_template = {
.slave_configure = myrs_slave_configure,
.slave_destroy = myrs_slave_destroy,
.cmd_size = sizeof(struct myrs_cmdblk),
- .shost_attrs = myrs_shost_attrs,
- .sdev_attrs = myrs_sdev_attrs,
+ .shost_groups = myrs_shost_groups,
+ .sdev_groups = myrs_sdev_groups,
.this_id = -1,
};
@@ -1960,7 +1956,7 @@ static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
/**
* myrs_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int
myrs_is_raid(struct device *dev)
@@ -1973,7 +1969,7 @@ myrs_is_raid(struct device *dev)
/**
* myrs_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
myrs_get_resync(struct device *dev)
@@ -1982,14 +1978,13 @@ myrs_get_resync(struct device *dev)
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info = sdev->hostdata;
u64 percent_complete = 0;
- u8 status;
if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
return;
if (ldev_info->rbld_active) {
unsigned short ldev_num = ldev_info->ldev_num;
- status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
percent_complete = ldev_info->rbld_lba * 100;
do_div(percent_complete, ldev_info->cfg_devsize);
}
@@ -1998,7 +1993,7 @@ myrs_get_resync(struct device *dev)
/**
* myrs_get_state - get raid volume status
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
myrs_get_state(struct device *dev)
@@ -2033,7 +2028,7 @@ myrs_get_state(struct device *dev)
raid_set_state(myrs_raid_template, dev, state);
}
-struct raid_function_template myrs_raid_functions = {
+static struct raid_function_template myrs_raid_functions = {
.cookie = &myrs_template,
.is_raid = myrs_is_raid,
.get_resync = myrs_get_resync,
@@ -2043,7 +2038,7 @@ struct raid_function_template myrs_raid_functions = {
/*
* PCI interface functions
*/
-void myrs_flush_cache(struct myrs_hba *cs)
+static void myrs_flush_cache(struct myrs_hba *cs)
{
myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
}
@@ -2090,7 +2085,7 @@ static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
scmd->result = (DID_BAD_TARGET << 16);
else
scmd->result = (DID_OK << 16) | status;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
@@ -2272,14 +2267,15 @@ static void myrs_cleanup(struct myrs_hba *cs)
myrs_unmap(cs);
if (cs->mmio_base) {
- cs->disable_intr(cs);
+ if (cs->disable_intr)
+ cs->disable_intr(cs);
iounmap(cs->mmio_base);
+ cs->mmio_base = NULL;
}
if (cs->irq)
free_irq(cs->irq, cs);
if (cs->io_addr)
release_region(cs->io_addr, 0x80);
- iounmap(cs->mmio_base);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
scsi_host_put(cs->host);
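
The cleanup hunk above fixes two latent bugs at once: disable_intr could be called before it was ever assigned, and mmio_base was unmapped twice (once under the guard, once unconditionally below it). The general defensive pattern, as a sketch:

	/* Guard the callback, then free-and-NULL so a second pass through
	 * the teardown path cannot touch the same resource again.
	 */
	if (cs->mmio_base) {
		if (cs->disable_intr)
			cs->disable_intr(cs);
		iounmap(cs->mmio_base);
		cs->mmio_base = NULL;
	}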
@@ -2338,11 +2334,11 @@ Failure:
return NULL;
}
-/**
+/*
* myrs_err_status reports Controller BIOS Messages passed through
- the Error Status Register when the driver performs the BIOS handshaking.
- It returns true for fatal errors and false otherwise.
-*/
+ * the Error Status Register when the driver performs the BIOS handshaking.
+ * It returns true for fatal errors and false otherwise.
+ */
static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
unsigned char parm0, unsigned char parm1)
@@ -2413,13 +2409,6 @@ static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
}
-static inline void DAC960_GEM_gen_intr(void __iomem *base)
-{
- __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
-
- writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
-}
-
static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
{
__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
@@ -2457,13 +2446,6 @@ static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}
-static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
-{
- __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
-
- writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
-}
-
static inline void DAC960_GEM_ack_intr(void __iomem *base)
{
__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
@@ -2480,14 +2462,6 @@ static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
-{
- __le32 val;
-
- val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
- return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_GEM_enable_intr(void __iomem *base)
{
__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
@@ -2502,16 +2476,6 @@ static inline void DAC960_GEM_disable_intr(void __iomem *base)
writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
}
-static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
-{
- __le32 val;
-
- val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
- return !((le32_to_cpu(val) >> 24) &
- (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
- DAC960_GEM_IRQMASK_MMBOX_IRQ));
-}
-
static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
union myrs_cmd_mbox *mbox)
{
@@ -2530,11 +2494,6 @@ static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}
-static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
-{
- return readw(base + DAC960_GEM_CMDSTS_OFFSET);
-}
-
static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
{
return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
@@ -2659,7 +2618,7 @@ static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrs_privdata DAC960_GEM_privdata = {
+static struct myrs_privdata DAC960_GEM_privdata = {
.hw_init = DAC960_GEM_hw_init,
.irq_handler = DAC960_GEM_intr_handler,
.mmio_size = DAC960_GEM_mmio_size,
@@ -2679,11 +2638,6 @@ static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
}
-static inline void DAC960_BA_gen_intr(void __iomem *base)
-{
- writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
-}
-
static inline void DAC960_BA_reset_ctrl(void __iomem *base)
{
writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
@@ -2715,11 +2669,6 @@ static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
}
-static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
-{
- writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
-}
-
static inline void DAC960_BA_ack_intr(void __iomem *base)
{
writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
@@ -2734,14 +2683,6 @@ static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_BA_ODB_OFFSET);
- return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_BA_enable_intr(void __iomem *base)
{
writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
@@ -2752,14 +2693,6 @@ static inline void DAC960_BA_disable_intr(void __iomem *base)
writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
}
-static inline bool DAC960_BA_intr_enabled(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_BA_IRQMASK_OFFSET);
- return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
union myrs_cmd_mbox *mbox)
{
@@ -2779,11 +2712,6 @@ static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
}
-static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
-{
- return readw(base + DAC960_BA_CMDSTS_OFFSET);
-}
-
static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
{
return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
@@ -2909,7 +2837,7 @@ static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrs_privdata DAC960_BA_privdata = {
+static struct myrs_privdata DAC960_BA_privdata = {
.hw_init = DAC960_BA_hw_init,
.irq_handler = DAC960_BA_intr_handler,
.mmio_size = DAC960_BA_mmio_size,
@@ -2929,11 +2857,6 @@ static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
}
-static inline void DAC960_LP_gen_intr(void __iomem *base)
-{
- writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
-}
-
static inline void DAC960_LP_reset_ctrl(void __iomem *base)
{
writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
@@ -2965,11 +2888,6 @@ static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
}
-static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
-{
- writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
-}
-
static inline void DAC960_LP_ack_intr(void __iomem *base)
{
writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
@@ -2984,14 +2902,6 @@ static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_LP_ODB_OFFSET);
- return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_LP_enable_intr(void __iomem *base)
{
writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
@@ -3002,14 +2912,6 @@ static inline void DAC960_LP_disable_intr(void __iomem *base)
writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
}
-static inline bool DAC960_LP_intr_enabled(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_LP_IRQMASK_OFFSET);
- return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
union myrs_cmd_mbox *mbox)
{
@@ -3028,11 +2930,6 @@ static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
}
-static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
-{
- return readw(base + DAC960_LP_CMDSTS_OFFSET);
-}
-
static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
{
return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
@@ -3159,7 +3056,7 @@ static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrs_privdata DAC960_LP_privdata = {
+static struct myrs_privdata DAC960_LP_privdata = {
.hw_init = DAC960_LP_hw_init,
.irq_handler = DAC960_LP_intr_handler,
.mmio_size = DAC960_LP_mmio_size,
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 11a2cb844ecb..4458449c960b 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -148,6 +148,11 @@ static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
#define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
#endif
+/*
+ * Locally used status flag
+ */
+#define SAM_STAT_ILLEGAL 0xff
+
static inline struct list_head *ncr_list_pop(struct list_head *head)
{
if (!list_empty(head)) {
@@ -509,30 +514,29 @@ static m_addr_t __vtobus(m_bush_t bush, void *m)
* Deal with DMA mapping/unmapping.
*/
-/* To keep track of the dma mapping (sg/single) that has been set */
-#define __data_mapped SCp.phase
-#define __data_mapping SCp.have_data_in
-
static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
- switch(cmd->__data_mapped) {
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
+
+ switch(cmd_priv->data_mapped) {
case 2:
scsi_dma_unmap(cmd);
break;
}
- cmd->__data_mapped = 0;
+ cmd_priv->data_mapped = 0;
}
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
int use_sg;
use_sg = scsi_dma_map(cmd);
if (!use_sg)
return 0;
- cmd->__data_mapped = 2;
- cmd->__data_mapping = use_sg;
+ cmd_priv->data_mapped = 2;
+ cmd_priv->data_mapping = use_sg;
return use_sg;
}
@@ -998,8 +1002,6 @@ typedef u32 tagmap_t;
** Other definitions
*/
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) + ((scsi_code) & 0x7f))
-
#define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose)
@@ -1450,11 +1452,6 @@ struct head {
#define xerr_status phys.xerr_st
#define nego_status phys.nego_st
-#if 0
-#define sync_status phys.sync_st
-#define wide_status phys.wide_st
-#endif
-
/*==========================================================
**
** Declaration of structs: Data structure block
@@ -1941,11 +1938,8 @@ static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
static void process_waiting_list(struct ncb *np, int sts);
-#define remove_from_waiting_list(np, cmd) \
- retrieve_from_waiting_list(1, (np), (cmd))
#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
@@ -1977,9 +1971,6 @@ static inline char *ncr_name (struct ncb *np)
#define RELOC_SOFTC 0x40000000
#define RELOC_LABEL 0x50000000
#define RELOC_REGISTER 0x60000000
-#if 0
-#define RELOC_KVAR 0x70000000
-#endif
#define RELOC_LABELH 0x80000000
#define RELOC_MASK 0xf0000000
@@ -1988,21 +1979,7 @@ static inline char *ncr_name (struct ncb *np)
#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
#define RADDR(label) (RELOC_REGISTER | REG(label))
#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
-#if 0
-#define KVAR(which) (RELOC_KVAR | (which))
-#endif
-#if 0
-#define SCRIPT_KVAR_JIFFIES (0)
-#define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES
-#define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES
-/*
- * Kernel variables referenced in the scripts.
- * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
- */
-static void *script_kvars[] __initdata =
- { (void *)&jiffies };
-#endif
static struct script script0 __initdata = {
/*--------------------------< START >-----------------------*/ {
@@ -2159,11 +2136,6 @@ static struct script script0 __initdata = {
SCR_COPY (1),
RADDR (scratcha),
NADDR (msgout),
-#if 0
- SCR_COPY (1),
- RADDR (scratcha),
- NADDR (msgin),
-#endif
/*
** Anticipate the COMMAND phase.
** This is the normal case for initial selection.
@@ -2203,7 +2175,7 @@ static struct script script0 __initdata = {
** Possible data corruption during Memory Write and Invalidate.
** This work-around resets the addressing logic prior to the
** start of the first MOVE of a DATA IN phase.
- ** (See Documentation/scsi/ncr53c8xx.txt for more information)
+ ** (See Documentation/scsi/ncr53c8xx.rst for more information)
*/
SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
20,
@@ -2430,7 +2402,7 @@ static struct script script0 __initdata = {
*/
SCR_FROM_REG (SS_REG),
0,
- SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ SCR_CALL ^ IFFALSE (DATA (SAM_STAT_GOOD)),
PADDRH (bad_status),
#ifndef SCSI_NCR_CCB_DONE_SUPPORT
@@ -2879,7 +2851,7 @@ static struct scripth scripth0 __initdata = {
8,
SCR_TO_REG (HS_REG),
0,
- SCR_LOAD_REG (SS_REG, S_GOOD),
+ SCR_LOAD_REG (SS_REG, SAM_STAT_GOOD),
0,
SCR_JUMP,
PADDR (cleanup_ok),
@@ -3341,15 +3313,15 @@ static struct scripth scripth0 __initdata = {
PADDRH (reset),
}/*-------------------------< BAD_STATUS >-----------------*/,{
/*
- ** If command resulted in either QUEUE FULL,
+ ** If command resulted in either TASK_SET FULL,
** CHECK CONDITION or COMMAND TERMINATED,
** call the C code.
*/
- SCR_INT ^ IFTRUE (DATA (S_QUEUE_FULL)),
+ SCR_INT ^ IFTRUE (DATA (SAM_STAT_TASK_SET_FULL)),
SIR_BAD_STATUS,
- SCR_INT ^ IFTRUE (DATA (S_CHECK_COND)),
+ SCR_INT ^ IFTRUE (DATA (SAM_STAT_CHECK_CONDITION)),
SIR_BAD_STATUS,
- SCR_INT ^ IFTRUE (DATA (S_TERMINATED)),
+ SCR_INT ^ IFTRUE (DATA (SAM_STAT_COMMAND_TERMINATED)),
SIR_BAD_STATUS,
SCR_RETURN,
0,
@@ -3640,7 +3612,7 @@ ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len)
new = old;
break;
}
- /* fall through */
+ fallthrough;
default:
panic("ncr_script_copy_and_bind: weird relocation %x\n", old);
break;
@@ -3910,14 +3882,14 @@ static void __init ncr_prepare_setting(struct ncb *np)
np->scsi_mode = SMODE_HVD;
break;
}
- /* fall through */
+ fallthrough;
case 3: /* SYMBIOS controllers report HVD through GPIO3 */
if (INB(nc_gpreg) & 0x08)
break;
- /* fall through */
+ fallthrough;
case 2: /* Set HVD unconditionally */
np->scsi_mode = SMODE_HVD;
- /* fall through */
+ fallthrough;
case 1: /* Trust previous settings for HVD */
if (np->sv_stest2 & 0x20)
np->scsi_mode = SMODE_HVD;
@@ -4030,7 +4002,7 @@ static inline void ncr_flush_done_cmds(struct scsi_cmnd *lcmd)
while (lcmd) {
cmd = lcmd;
lcmd = (struct scsi_cmnd *) cmd->host_scribble;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
@@ -4161,8 +4133,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
**
**----------------------------------------------------
*/
- if (np->settle_time && cmd->request->timeout >= HZ) {
- u_long tlimit = jiffies + cmd->request->timeout - HZ;
+ if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) {
+ u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ;
if (time_after(np->settle_time, tlimit))
np->settle_time = tlimit;
}
@@ -4296,7 +4268,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
break;
cp->phys.header.wgoalp = cpu_to_scr(goalp);
cp->phys.header.wlastp = cpu_to_scr(lastp);
- /* fall through */
+ fallthrough;
case DMA_FROM_DEVICE:
goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
if (segments <= MAX_SCATTERL)
@@ -4371,14 +4343,10 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
*/
cp->actualquirks = 0;
cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
- cp->scsi_status = S_ILLEGAL;
+ cp->scsi_status = SAM_STAT_ILLEGAL;
cp->parity_status = 0;
cp->xerr_status = XE_OK;
-#if 0
- cp->sync_status = tp->sval;
- cp->wide_status = tp->wval;
-#endif
/*----------------------------------------------------
**
@@ -4550,12 +4518,8 @@ static void ncr_start_reset(struct ncb *np)
**
**==========================================================
*/
-static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
+static int ncr_reset_bus (struct ncb *np)
{
-/* struct scsi_device *device = cmd->device; */
- struct ccb *cp;
- int found;
-
/*
* Return immediately if reset is in progress.
*/
@@ -4570,24 +4534,6 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
*/
ncr_start_reset(np);
/*
- * First, look in the wakeup list
- */
- for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
- /*
- ** look for the ccb of this command.
- */
- if (cp->host_status == HS_IDLE) continue;
- if (cp->cmd == cmd) {
- found = 1;
- break;
- }
- }
-/*
- * Then, look in the waiting list
- */
- if (!found && retrieve_from_waiting_list(0, np, cmd))
- found = 1;
-/*
* Wake-up all awaiting commands with DID_RESET.
*/
reset_waiting_list(np);
@@ -4595,103 +4541,10 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
* Wake-up all pending commands with HS_RESET -> DID_RESET.
*/
ncr_wakeup(np, HS_RESET);
-/*
- * If the involved command was not in a driver queue, and the
- * scsi driver told us reset is synchronous, and the command is not
- * currently in the waiting list, complete it with DID_RESET status,
- * in order to keep it alive.
- */
- if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
- cmd->result = DID_RESET << 16;
- ncr_queue_done_cmd(np, cmd);
- }
return SUCCESS;
}
-#if 0 /* unused and broken.. */
-/*==========================================================
-**
-**
-** Abort an SCSI command.
-** This is called from the generic SCSI driver.
-**
-**
-**==========================================================
-*/
-static int ncr_abort_command (struct ncb *np, struct scsi_cmnd *cmd)
-{
-/* struct scsi_device *device = cmd->device; */
- struct ccb *cp;
- int found;
- int retv;
-
-/*
- * First, look for the scsi command in the waiting list
- */
- if (remove_from_waiting_list(np, cmd)) {
- cmd->result = ScsiResult(DID_ABORT, 0);
- ncr_queue_done_cmd(np, cmd);
- return SCSI_ABORT_SUCCESS;
- }
-
-/*
- * Then, look in the wakeup list
- */
- for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
- /*
- ** look for the ccb of this command.
- */
- if (cp->host_status == HS_IDLE) continue;
- if (cp->cmd == cmd) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- return SCSI_ABORT_NOT_RUNNING;
- }
-
- if (np->settle_time) {
- return SCSI_ABORT_SNOOZE;
- }
-
- /*
- ** If the CCB is active, patch schedule jumps for the
- ** script to abort the command.
- */
-
- switch(cp->host_status) {
- case HS_BUSY:
- case HS_NEGOTIATE:
- printk ("%s: abort ccb=%p (cancel)\n", ncr_name (np), cp);
- cp->start.schedule.l_paddr =
- cpu_to_scr(NCB_SCRIPTH_PHYS (np, cancel));
- retv = SCSI_ABORT_PENDING;
- break;
- case HS_DISCONNECT:
- cp->restart.schedule.l_paddr =
- cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
- retv = SCSI_ABORT_PENDING;
- break;
- default:
- retv = SCSI_ABORT_NOT_RUNNING;
- break;
-
- }
-
- /*
- ** If there are no requests, the script
- ** processor will sleep on SEL_WAIT_RESEL.
- ** Let's wake it up, since it may have to work.
- */
- OUTB (nc_istat, SIGP);
-
- return retv;
-}
-#endif
-
static void ncr_detach(struct ncb *np)
{
struct ccb *cp;
@@ -4895,7 +4748,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
** Print out any error for debugging purpose.
*/
if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
- if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD) {
+ if (cp->host_status != HS_COMPLETE ||
+ cp->scsi_status != SAM_STAT_GOOD) {
PRINT_ADDR(cmd, "ERROR: cmd=%x host_status=%x "
"scsi_status=%x\n", cmd->cmnd[0],
cp->host_status, cp->scsi_status);
@@ -4905,15 +4759,16 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Check the status.
*/
+ cmd->result = 0;
if ( (cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_GOOD ||
- cp->scsi_status == S_COND_MET)) {
+ && (cp->scsi_status == SAM_STAT_GOOD ||
+ cp->scsi_status == SAM_STAT_CONDITION_MET)) {
/*
* All went well (GOOD status).
- * CONDITION MET status is returned on
+ * CONDITION MET status is returned on
* `Pre-Fetch' or `Search data' success.
*/
- cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
/*
** @RESID@
@@ -4944,11 +4799,11 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
}
}
} else if ((cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_CHECK_COND)) {
+ && (cp->scsi_status == SAM_STAT_CHECK_CONDITION)) {
/*
** Check condition code
*/
- cmd->result = DID_OK << 16 | S_CHECK_COND;
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
/*
** Copy back sense data to caller's buffer.
@@ -4965,20 +4820,20 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
printk (".\n");
}
} else if ((cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_CONFLICT)) {
+ && (cp->scsi_status == SAM_STAT_RESERVATION_CONFLICT)) {
/*
** Reservation Conflict condition code
*/
- cmd->result = DID_OK << 16 | S_CONFLICT;
-
+ set_status_byte(cmd, SAM_STAT_RESERVATION_CONFLICT);
+
} else if ((cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_BUSY ||
- cp->scsi_status == S_QUEUE_FULL)) {
+ && (cp->scsi_status == SAM_STAT_BUSY ||
+ cp->scsi_status == SAM_STAT_TASK_SET_FULL)) {
/*
** Target is busy.
*/
- cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
} else if ((cp->host_status == HS_SEL_TIMEOUT)
|| (cp->host_status == HS_TIMEOUT)) {
@@ -4986,21 +4841,24 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** No response
*/
- cmd->result = ScsiResult(DID_TIME_OUT, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_TIME_OUT);
} else if (cp->host_status == HS_RESET) {
/*
** SCSI bus reset
*/
- cmd->result = ScsiResult(DID_RESET, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_RESET);
} else if (cp->host_status == HS_ABORTED) {
/*
** Transfer aborted
*/
- cmd->result = ScsiResult(DID_ABORT, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_ABORT);
} else {
@@ -5010,7 +4868,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
PRINT_ADDR(cmd, "COMMAND FAILED (%x %x) @%p.\n",
cp->host_status, cp->scsi_status, cp);
- cmd->result = ScsiResult(DID_ERROR, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_ERROR);
}
/*
@@ -5026,10 +4885,10 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
if (cp->host_status==HS_COMPLETE) {
switch (cp->scsi_status) {
- case S_GOOD:
+ case SAM_STAT_GOOD:
printk (" GOOD");
break;
- case S_CHECK_COND:
+ case SAM_STAT_CHECK_CONDITION:
printk (" SENSE:");
p = (u_char*) &cmd->sense_buffer;
for (i=0; i<14; i++)
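
All the open-coded ScsiResult(host, status) packing in ncr_complete() becomes set_status_byte()/set_host_byte(): the result word is zeroed once up front, then each branch fills in only the bytes it owns (status in bits 0-7, host code in bits 16-23). A sketch of one branch, assuming the <scsi/scsi_cmnd.h> helpers:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    /* Sketch: equivalent of the old ScsiResult(DID_TIME_OUT, status). */
    static void report_timeout(struct scsi_cmnd *cmd, u8 scsi_status)
    {
            cmd->result = 0;
            set_status_byte(cmd, scsi_status);  /* bits 0-7 */
            set_host_byte(cmd, DID_TIME_OUT);   /* bits 16-23 */
    }
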
@@ -5444,27 +5303,6 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
*/
fak = (kpc - 1) / div_10M[div] + 1;
-#if 0 /* This optimization does not seem very useful */
-
- per = (fak * div_10M[div]) / clk;
-
- /*
- ** Why not to try the immediate lower divisor and to choose
- ** the one that allows the fastest output speed ?
- ** We don't want input speed too much greater than output speed.
- */
- if (div >= 1 && fak < 8) {
- u_long fak2, per2;
- fak2 = (kpc - 1) / div_10M[div-1] + 1;
- per2 = (fak2 * div_10M[div-1]) / clk;
- if (per2 < per && fak2 <= 8) {
- fak = fak2;
- per = per2;
- --div;
- }
- }
-#endif
-
if (fak < 4) fak = 4; /* Should never happen, too bad ... */
/*
@@ -5502,10 +5340,6 @@ static void ncr_set_sync_wide_status (struct ncb *np, u_char target)
for (cp = np->ccb; cp; cp = cp->link_ccb) {
if (!cp->cmd) continue;
if (scmd_id(cp->cmd) != target) continue;
-#if 0
- cp->sync_status = tp->sval;
- cp->wide_status = tp->wval;
-#endif
cp->phys.select.sel_scntl3 = tp->wval;
cp->phys.select.sel_sxfer = tp->sval;
}
@@ -6564,7 +6398,7 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
switch(s_status) {
default: /* Just for safety, should never happen */
- case S_QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
/*
** Decrease number of tags to the number of
** disconnected commands.
@@ -6588,15 +6422,15 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
*/
cp->phys.header.savep = cp->startp;
cp->host_status = HS_BUSY;
- cp->scsi_status = S_ILLEGAL;
+ cp->scsi_status = SAM_STAT_ILLEGAL;
ncr_put_start_queue(np, cp);
if (disc_cnt)
INB (nc_ctest2); /* Clear SIGP */
OUTL_DSP (NCB_SCRIPT_PHYS (np, reselect));
return;
- case S_TERMINATED:
- case S_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
/*
** If we were requesting sense, give up.
*/
@@ -6646,7 +6480,7 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
cp->phys.header.wlastp = startp;
cp->host_status = HS_BUSY;
- cp->scsi_status = S_ILLEGAL;
+ cp->scsi_status = SAM_STAT_ILLEGAL;
cp->auto_sense = s_status;
cp->start.schedule.l_paddr =
@@ -6717,7 +6551,7 @@ void ncr_int_sir (struct ncb *np)
OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return;
}
- /* fall through */
+ fallthrough;
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */
@@ -6825,7 +6659,7 @@ void ncr_int_sir (struct ncb *np)
*/
OUTB (HS_PRT, HS_BUSY);
- /* fall through */
+ fallthrough;
case SIR_NEGO_PROTO:
/*-------------------------------------------------------
@@ -8017,8 +7851,10 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device)
return 0;
}
-static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd)
{
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
unsigned long flags;
int sts;
@@ -8027,15 +7863,14 @@ static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(stru
printk("ncr53c8xx_queue_command\n");
#endif
- cmd->scsi_done = done;
cmd->host_scribble = NULL;
- cmd->__data_mapped = 0;
- cmd->__data_mapping = 0;
+ cmd_priv->data_mapped = 0;
+ cmd_priv->data_mapping = 0;
spin_lock_irqsave(&np->smp_lock, flags);
if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
- cmd->result = sts << 16;
+ set_host_byte(cmd, sts);
#ifdef DEBUG_NCR53C8XX
printk("ncr53c8xx : command not queued - result=%d\n", sts);
#endif
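
Two conversions meet in this hunk: the _lck function loses its done callback (completion is the midlayer's scsi_done now, and the function is still wrapped by DEF_SCSI_QCMD), and the __data_mapped/__data_mapping fields move out of struct scsi_cmnd into per-command private data sized by the template's cmd_size and reached through scsi_cmd_priv(). A minimal sketch of the whole pattern, using hypothetical my_* names:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct my_cmd_priv {                    /* sized by .cmd_size below */
            int data_mapped;
            int data_mapping;
    };

    static int my_queuecommand_lck(struct scsi_cmnd *cmd)
    {
            struct my_cmd_priv *priv = scsi_cmd_priv(cmd);

            priv->data_mapped = 0;
            priv->data_mapping = 0;
            /* ... hand the command to the hardware ... */
            return 0;
    }

    static DEF_SCSI_QCMD(my_queuecommand)

    static struct scsi_host_template my_template = {
            .queuecommand = my_queuecommand,
            .cmd_size     = sizeof(struct my_cmd_priv),
    };
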
@@ -8116,7 +7951,7 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
*/
spin_lock_irqsave(&np->smp_lock, flags);
- sts = ncr_reset_bus(np, cmd, 1);
+ sts = ncr_reset_bus(np);
done_list = np->done_list;
np->done_list = NULL;
@@ -8127,30 +7962,6 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
return sts;
}
-#if 0 /* unused and broken */
-static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
-{
- struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
- int sts;
- unsigned long flags;
- struct scsi_cmnd *done_list;
-
- printk("ncr53c8xx_abort\n");
-
- NCR_LOCK_NCB(np, flags);
-
- sts = ncr_abort_command(np, cmd);
-out:
- done_list = np->done_list;
- np->done_list = NULL;
- NCR_UNLOCK_NCB(np, flags);
-
- ncr_flush_done_cmds(done_list);
-
- return sts;
-}
-#endif
-
/*
** Scsi command waiting list management.
@@ -8183,26 +7994,6 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
}
}
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
-{
- struct scsi_cmnd **pcmd = &np->waiting_list;
-
- while (*pcmd) {
- if (cmd == *pcmd) {
- if (to_remove) {
- *pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
- cmd->next_wcmd = NULL;
- }
-#ifdef DEBUG_WAITING_LIST
- printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
-#endif
- return cmd;
- }
- pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
- }
- return NULL;
-}
-
static void process_waiting_list(struct ncb *np, int sts)
{
struct scsi_cmnd *waiting_list, *wcmd;
@@ -8226,7 +8017,7 @@ static void process_waiting_list(struct ncb *np, int sts)
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
#endif
- wcmd->result = sts << 16;
+ set_host_byte(wcmd, sts);
ncr_queue_done_cmd(np, wcmd);
}
}
@@ -8248,11 +8039,13 @@ static struct device_attribute ncr53c8xx_revision_attr = {
.show = show_ncr53c8xx_revision,
};
-static struct device_attribute *ncr53c8xx_host_attrs[] = {
- &ncr53c8xx_revision_attr,
+static struct attribute *ncr53c8xx_host_attrs[] = {
+ &ncr53c8xx_revision_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(ncr53c8xx_host);
+
/*==========================================================
**
** Boot command line.
@@ -8292,10 +8085,12 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
u_long flags = 0;
int i;
+ WARN_ON_ONCE(tpnt->cmd_size < sizeof(struct ncr_cmd_priv));
+
if (!tpnt->name)
tpnt->name = SCSI_NCR_DRIVER_NAME;
- if (!tpnt->shost_attrs)
- tpnt->shost_attrs = ncr53c8xx_host_attrs;
+ if (!tpnt->shost_groups)
+ tpnt->shost_groups = ncr53c8xx_host_groups;
tpnt->queuecommand = ncr53c8xx_queue_command;
tpnt->slave_configure = ncr53c8xx_slave_configure;
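
The template's shost_attrs (a NULL-terminated array of struct device_attribute pointers) becomes shost_groups; ATTRIBUTE_GROUPS(ncr53c8xx_host) generates the struct attribute_group and ncr53c8xx_host_groups[] boilerplate from the _attrs[] array, which now holds plain struct attribute pointers (hence the added .attr). Sketch of the idiom with a hypothetical read-only attribute:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t revision_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "1\n");  /* placeholder value */
    }
    static DEVICE_ATTR_RO(revision);

    static struct attribute *my_host_attrs[] = {
            &dev_attr_revision.attr,        /* note the .attr member */
            NULL
    };
    ATTRIBUTE_GROUPS(my_host);              /* emits my_host_groups[] */

    /* in the host template: .shost_groups = my_host_groups, */
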
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index 8326f5f01e07..be38c902859e 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -1239,22 +1239,6 @@ struct scr_tblsel {
*/
/*
-** Status
-*/
-
-#define S_GOOD (0x00)
-#define S_CHECK_COND (0x02)
-#define S_COND_MET (0x04)
-#define S_BUSY (0x08)
-#define S_INT (0x10)
-#define S_INT_COND_MET (0x14)
-#define S_CONFLICT (0x18)
-#define S_TERMINATED (0x20)
-#define S_QUEUE_FULL (0x28)
-#define S_ILLEGAL (0xff)
-#define S_SENSE (0x80)
-
-/*
* End of ncrreg from FreeBSD
*/
@@ -1304,6 +1288,12 @@ struct ncr_device {
u8 differential;
};
+/* To keep track of the dma mapping (sg/single) that has been set */
+struct ncr_cmd_priv {
+ int data_mapped;
+ int data_mapping;
+};
+
extern struct Scsi_Host *ncr_attach(struct scsi_host_template *tpnt, int unit, struct ncr_device *device);
extern void ncr53c8xx_release(struct Scsi_Host *host);
irqreturn_t ncr53c8xx_intr(int irq, void *dev_id);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index b6e04d14292d..75bb0028ed74 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -176,38 +176,40 @@ static nsp32_sync_table nsp32_sync_table_pci[] = {
* function declaration
*/
/* module entry point */
-static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
-static void nsp32_remove(struct pci_dev *);
+static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
+static void nsp32_remove(struct pci_dev *);
static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
-static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
+static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
-static int nsp32_detect (struct pci_dev *pdev);
-static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static const char *nsp32_info (struct Scsi_Host *);
-static int nsp32_release (struct Scsi_Host *);
+static int nsp32_detect (struct pci_dev *pdev);
+static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static const char *nsp32_info (struct Scsi_Host *);
+static int nsp32_release (struct Scsi_Host *);
/* SCSI error handler */
-static int nsp32_eh_abort (struct scsi_cmnd *);
-static int nsp32_eh_host_reset(struct scsi_cmnd *);
+static int nsp32_eh_abort (struct scsi_cmnd *);
+static int nsp32_eh_host_reset(struct scsi_cmnd *);
/* generate SCSI message */
static void nsp32_build_identify(struct scsi_cmnd *);
static void nsp32_build_nop (struct scsi_cmnd *);
static void nsp32_build_reject (struct scsi_cmnd *);
-static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
+static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char,
+ unsigned char);
/* SCSI message handler */
static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
static void nsp32_msgout_occur (struct scsi_cmnd *);
-static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
+static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long,
+ unsigned short);
static int nsp32_setup_sg_table (struct scsi_cmnd *);
static int nsp32_selection_autopara(struct scsi_cmnd *);
static int nsp32_selection_autoscsi(struct scsi_cmnd *);
-static void nsp32_scsi_done (struct scsi_cmnd *);
+static void nsp32_scsi_done (struct scsi_cmnd *);
static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
@@ -215,10 +217,13 @@ static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
/* SCSI SDTR */
static void nsp32_analyze_sdtr (struct scsi_cmnd *);
-static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
-static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
-static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
-static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
+static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *,
+ unsigned char);
+static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
+static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *,
+ unsigned char *, unsigned char *);
+static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *,
+ int, unsigned char);
/* SCSI bus status handler */
static void nsp32_wait_req (nsp32_hw_data *, int);
@@ -234,16 +239,16 @@ static irqreturn_t do_nsp32_isr(int, void *);
static int nsp32hw_init(nsp32_hw_data *);
/* EEPROM handler */
-static int nsp32_getprom_param (nsp32_hw_data *);
-static int nsp32_getprom_at24 (nsp32_hw_data *);
-static int nsp32_getprom_c16 (nsp32_hw_data *);
-static void nsp32_prom_start (nsp32_hw_data *);
-static void nsp32_prom_stop (nsp32_hw_data *);
-static int nsp32_prom_read (nsp32_hw_data *, int);
-static int nsp32_prom_read_bit (nsp32_hw_data *);
-static void nsp32_prom_write_bit(nsp32_hw_data *, int);
-static void nsp32_prom_set (nsp32_hw_data *, int, int);
-static int nsp32_prom_get (nsp32_hw_data *, int);
+static int nsp32_getprom_param (nsp32_hw_data *);
+static int nsp32_getprom_at24 (nsp32_hw_data *);
+static int nsp32_getprom_c16 (nsp32_hw_data *);
+static void nsp32_prom_start (nsp32_hw_data *);
+static void nsp32_prom_stop (nsp32_hw_data *);
+static int nsp32_prom_read (nsp32_hw_data *, int);
+static int nsp32_prom_read_bit (nsp32_hw_data *);
+static void nsp32_prom_write_bit(nsp32_hw_data *, int);
+static void nsp32_prom_set (nsp32_hw_data *, int, int);
+static int nsp32_prom_get (nsp32_hw_data *, int);
/* debug/warning/info message */
static void nsp32_message (const char *, int, char *, char *, ...);
@@ -268,6 +273,7 @@ static struct scsi_host_template nsp32_template = {
.eh_abort_handler = nsp32_eh_abort,
.eh_host_reset_handler = nsp32_eh_host_reset,
/* .highmem_io = 1, */
+ .cmd_size = sizeof(struct nsp32_cmd_priv),
};
#include "nsp32_io.h"
@@ -309,6 +315,7 @@ static struct scsi_host_template nsp32_template = {
#define NSP32_DEBUG_BUF_LEN 100
+__printf(4, 5)
static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
{
va_list args;
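
The added __printf(4, 5) annotation marks nsp32_message() as printf-like (format string in parameter 4, variadic arguments from parameter 5), so mismatched calls now warn at compile time. Sketch on a hypothetical two-parameter logger:

    #include <linux/compiler.h>

    /* Sketch: format string is parameter 2, its arguments start at 3. */
    __printf(2, 3)
    static void my_log(int level, const char *fmt, ...);

    /* my_log(0, "stat=%d", "oops") now triggers a -Wformat warning. */
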
@@ -355,8 +362,8 @@ static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
- int mode = FALSE;
+ int pos = data->msgout_len;
+ int mode = FALSE;
/* XXX: Auto DiscPriv detection is progressing... */
if (disc_priv == 0) {
@@ -376,13 +383,13 @@ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
unsigned char offset)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR; pos++;
- data->msgoutbuf[pos] = period; pos++;
- data->msgoutbuf[pos] = offset; pos++;
+ data->msgoutbuf[pos] = period; pos++;
+ data->msgoutbuf[pos] = offset; pos++;
data->msgout_len = pos;
}
@@ -393,7 +400,7 @@ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
if (pos != 0) {
nsp32_msg(KERN_WARNING,
@@ -411,12 +418,12 @@ static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
data->msgoutbuf[pos] = MESSAGE_REJECT; pos++;
data->msgout_len = pos;
}
-
+
/*
* timer
*/
@@ -449,7 +456,7 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
unsigned char phase;
int i, ret;
unsigned int msgout;
- u16_le s;
+ u16_le s;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
@@ -481,7 +488,7 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
- * MCNT 1: MSG#2
+ * MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
@@ -493,7 +500,8 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
msgout = 0;
}
- // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
+ // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n",
+ // nsp32_read2(base, SEL_TIME_OUT));
// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
@@ -519,10 +527,10 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
/* command control */
param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
- AUTOSCSI_START |
- AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 |
- AUTO_ATN );
+ AUTOSCSI_START |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 |
+ AUTO_ATN );
/* transfer control */
@@ -554,9 +562,9 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
/*
* transfer parameter to ASIC
*/
- nsp32_write4(base, SGT_ADR, data->auto_paddr);
- nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
- AUTO_PARAMETER );
+ nsp32_write4(base, SGT_ADR, data->auto_paddr);
+ nsp32_write2(base, COMMAND_CONTROL,
+ CLEAR_CDB_FIFO_POINTER | AUTO_PARAMETER );
/*
* Check arbitration
@@ -580,7 +588,6 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
int status;
unsigned short command = 0;
unsigned int msgout = 0;
- unsigned short execph;
int i;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
@@ -599,12 +606,12 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
goto out;
- }
+ }
/*
* clear execph
*/
- execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);
+ nsp32_read2(base, SCSI_EXECUTE_PHASE);
/*
* clear FIFO counter to set CDBs
@@ -616,13 +623,14 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
*/
for (i = 0; i < SCpnt->cmd_len; i++) {
nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
- }
+ }
nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);
/*
* set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
*/
- nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));
+ nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID,
+ BIT(host_id) | BIT(target));
/*
* set SCSI MSGOUT REG
@@ -642,7 +650,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
- * MCNT 1: MSG#2
+ * MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
@@ -662,7 +670,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* set SREQ hazard killer sampling rate
- *
+ *
* TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
* check other internal clock!
*/
@@ -687,7 +695,8 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
"syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
- nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
+ nsp32_read4(base, SGT_ADR),
+ nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
data->msgout_len, msgout);
@@ -716,10 +725,10 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* start AUTO SCSI, kick off arbitration
*/
command = (CLEAR_CDB_FIFO_POINTER |
- AUTOSCSI_START |
+ AUTOSCSI_START |
AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 |
- AUTO_ATN );
+ AUTO_MSGIN_02 |
+ AUTO_ATN);
nsp32_write2(base, COMMAND_CONTROL, command);
/*
@@ -739,9 +748,9 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* Arbitration Status Check
- *
+ *
* Note: Arbitration counter is waited during ARBIT_GO is not lifting.
- * Using udelay(1) consumes CPU time and system time, but
+ * Using udelay(1) consumes CPU time and system time, but
* arbitration delay time is defined minimal 2.4us in SCSI
* specification, thus udelay works as coarse grained wait timer.
*/
@@ -776,7 +785,7 @@ static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
SCpnt->result = DID_NO_CONNECT << 16;
status = FALSE;
- }
+ }
/*
* clear Arbit
@@ -822,7 +831,8 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
* or current nexus is not existed, unexpected
* reselection is occurred. Send reject message.
*/
- if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
+ if (newid >= ARRAY_SIZE(data->lunt) ||
+ newlun >= ARRAY_SIZE(data->lunt[0])) {
nsp32_msg(KERN_WARNING, "unknown id/lun");
return FALSE;
} else if(data->lunt[newid][newlun].SCpnt == NULL) {
@@ -876,7 +886,8 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
- "can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
+ "can't transfer over 64KB at a time, "
+ "size=0x%x", le32_to_cpu(sgt[i].len));
return FALSE;
}
nsp32_dbg(NSP32_DEBUG_SGLIST,
@@ -894,8 +905,9 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
return TRUE;
}
-static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target;
nsp32_lunt *cur_lunt;
@@ -904,8 +916,9 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
- SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
- scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
+ SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0],
+ SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt),
+ scsi_bufflen(SCpnt));
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -933,17 +946,10 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
show_command(SCpnt);
- SCpnt->scsi_done = done;
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = CHECK_CONDITION;
- SCpnt->SCp.Message = 0;
+ nsp32_priv(SCpnt)->status = SAM_STAT_CHECK_CONDITION;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
- SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
-
/* initialize data */
data->msgout_len = 0;
data->msgin_len = 0;
@@ -966,7 +972,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
/* Build IDENTIFY */
nsp32_build_identify(SCpnt);
- /*
+ /*
* If target is the first time to transfer after the reset
* (target don't have SDTR_DONE and SDTR_INITIATOR), sync
* message SDTR is needed to do synchronous transfer.
@@ -1051,9 +1057,9 @@ static int nsp32hw_init(nsp32_hw_data *data)
nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
}
- nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
- nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
do {
@@ -1081,12 +1087,13 @@ static int nsp32hw_init(nsp32_hw_data *data)
nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));
nsp32_index_write1(base, CLOCK_DIV, data->clock);
- nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
+ nsp32_index_write1(base, BM_CYCLE,
+ MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */
/*
* initialize MISC_WRRD register
- *
+ *
* Note: Designated parameters is obeyed as following:
* MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
* MISC_MASTER_TERMINATION_SELECT: It must be set.
@@ -1101,10 +1108,10 @@ static int nsp32hw_init(nsp32_hw_data *data)
*/
nsp32_index_write2(base, MISC_WR,
(SCSI_DIRECTION_DETECTOR_SELECT |
- DELAYED_BMSTART |
- MASTER_TERMINATION_SELECT |
- BMREQ_NEGATE_TIMING_SEL |
- AUTOSEL_TIMING_SEL |
+ DELAYED_BMSTART |
+ MASTER_TERMINATION_SELECT |
+ BMREQ_NEGATE_TIMING_SEL |
+ AUTOSEL_TIMING_SEL |
BMSTOP_CHANGE2_NONDATA_PHASE));
nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
@@ -1125,15 +1132,16 @@ static int nsp32hw_init(nsp32_hw_data *data)
* enable to select designated IRQ (except for
* IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
*/
- nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ |
- IRQSELECT_SCSIRESET_IRQ |
- IRQSELECT_FIFO_SHLD_IRQ |
- IRQSELECT_RESELECT_IRQ |
- IRQSELECT_PHASE_CHANGE_IRQ |
- IRQSELECT_AUTO_SCSI_SEQ_IRQ |
- // IRQSELECT_BMCNTERR_IRQ |
- IRQSELECT_TARGET_ABORT_IRQ |
- IRQSELECT_MASTER_ABORT_IRQ );
+ nsp32_index_write2(base, IRQ_SELECT,
+ IRQSELECT_TIMER_IRQ |
+ IRQSELECT_SCSIRESET_IRQ |
+ IRQSELECT_FIFO_SHLD_IRQ |
+ IRQSELECT_RESELECT_IRQ |
+ IRQSELECT_PHASE_CHANGE_IRQ |
+ IRQSELECT_AUTO_SCSI_SEQ_IRQ |
+ // IRQSELECT_BMCNTERR_IRQ |
+ IRQSELECT_TARGET_ABORT_IRQ |
+ IRQSELECT_MASTER_ABORT_IRQ );
nsp32_write2(base, IRQ_CONTROL, 0);
/* PCI LED off */
@@ -1163,11 +1171,12 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* IRQ check, then enable IRQ mask
*/
irq_stat = nsp32_read2(base, IRQ_STATUS);
- nsp32_dbg(NSP32_DEBUG_INTR,
+ nsp32_dbg(NSP32_DEBUG_INTR,
"enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
/* is this interrupt comes from Ninja asic? */
if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
- nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "shared interrupt: irq other 0x%x", irq_stat);
goto out2;
}
handled = 1;
@@ -1207,7 +1216,8 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
if (SCpnt == NULL) {
nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
- nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x",
+ irq_stat, trans_stat);
goto out;
}
@@ -1247,7 +1257,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* ---> AutoSCSI with MSGOUTreg is processed.
*/
data->msgout_len = 0;
- };
+ }
nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
}
@@ -1265,13 +1275,13 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
"Data in/out phase processed");
/* read BMCNT, SGT pointer addr */
- nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
nsp32_read4(base, BM_CNT));
- nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
nsp32_read4(base, SGT_ADR));
- nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
nsp32_read4(base, SACK_CNT));
- nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
nsp32_read4(base, SAVED_SACK_CNT));
scsi_set_resid(SCpnt, 0); /* all data transferred! */
@@ -1306,7 +1316,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* Read CSB and substitute CSB for SCpnt->result
* to save status phase stutas byte.
* scsi error handler checks host_byte (DID_*:
- * low level driver to indicate status), then checks
+ * low level driver to indicate status), then checks
* status_byte (SCSI status byte).
*/
SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
@@ -1314,7 +1324,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
if (auto_stat & ILLEGAL_PHASE) {
/* Illegal phase is detected. SACK is not back. */
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"AUTO SCSI ILLEGAL PHASE OCCUR!!!!");
/* TODO: currently we don't have any action... bus reset? */
@@ -1362,12 +1372,13 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
case BUSPHASE_STATUS:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
break;
default:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
- nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x",
+ irq_stat, trans_stat);
show_busphase(busphase);
break;
}
@@ -1433,32 +1444,39 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
unsigned long flags;
nsp32_hw_data *data;
- int hostno;
+ int hostno;
unsigned int base;
unsigned char mode_reg;
- int id, speed;
- long model;
+ int id, speed;
+ long model;
hostno = host->host_no;
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
seq_puts(m, "NinjaSCSI-32 status\n\n");
- seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
- seq_printf(m, "SCSI host No.: %d\n", hostno);
- seq_printf(m, "IRQ: %d\n", host->irq);
- seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
- seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
- seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
- seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
+ seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n",
+ nsp32_release_version);
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n",
+ host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n",
+ host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n",
+ host->sg_tablesize);
+ seq_printf(m, "Chip revision: 0x%x\n",
+ (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
mode_reg = nsp32_index_read1(base, CHIP_MODE);
model = data->pci_devid->driver_data;
#ifdef CONFIG_PM
- seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
+ seq_printf(m, "Power Management: %s\n",
+ (mode_reg & OPTF) ? "yes" : "no");
#endif
- seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
+ seq_printf(m, "OEM: %ld, %s\n",
+ (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
spin_lock_irqsave(&(data->Lock), flags);
seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
@@ -1476,7 +1494,7 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
}
if (data->target[id].sync_flag == SDTR_DONE) {
- if (data->target[id].period == 0 &&
+ if (data->target[id].period == 0 &&
data->target[id].offset == ASYNC_OFFSET ) {
seq_puts(m, "async");
} else {
@@ -1518,20 +1536,20 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* clear TRANSFERCONTROL_BM_START
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, BM_CNT, 0);
/*
* call scsi_done
*/
- (*SCpnt->scsi_done)(SCpnt);
+ scsi_done(SCpnt);
/*
* reset parameters
*/
- data->cur_lunt->SCpnt = NULL;
- data->cur_lunt = NULL;
- data->cur_target = NULL;
- data->CurrentSC = NULL;
+ data->cur_lunt->SCpnt = NULL;
+ data->cur_lunt = NULL;
+ data->cur_target = NULL;
+ data->CurrentSC = NULL;
}
@@ -1553,7 +1571,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
show_autophase(execph);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, TRANSFER_CONTROL, 0);
/*
@@ -1561,7 +1579,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
*
* VALID:
* Save Data Pointer is received. Adjust pointer.
- *
+ *
* NO-VALID:
* SCSI-3 says if Save Data Pointer is not received, then we restart
* processing and we can't adjust any SCSI data pointer in next data
@@ -1574,7 +1592,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* Check sack_cnt/saved_sack_cnt, then adjust sg table if
* needed.
*/
- if (!(execph & MSGIN_00_VALID) &&
+ if (!(execph & MSGIN_00_VALID) &&
((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
unsigned int sacklen, s_sacklen;
@@ -1617,7 +1635,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* no processing.
*/
}
-
+
if (execph & MSGIN_03_VALID) {
/* MsgIn03 was valid to be processed. No need processing. */
}
@@ -1639,7 +1657,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* negotiating.
*/
if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
- /*
+ /*
* If valid message is received, then
* negotiation is succeeded.
*/
@@ -1665,30 +1683,25 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
/* MsgIn 00: Command Complete */
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
- SCpnt->SCp.Message = 0;
- nsp32_dbg(NSP32_DEBUG_BUSFREE,
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
- SCpnt->SCp.Status, scsi_get_resid(SCpnt));
- SCpnt->result = (DID_OK << 16) |
- (SCpnt->SCp.Message << 8) |
- (SCpnt->SCp.Status << 0);
+ nsp32_priv(SCpnt)->status, scsi_get_resid(SCpnt));
+ SCpnt->result = (DID_OK << 16) |
+ (nsp32_priv(SCpnt)->status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
- SCpnt->SCp.Message = 4;
-
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
+
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
} else {
/* Unexpected bus free */
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
- /* DID_ERROR? */
- //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
@@ -1706,12 +1719,12 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int old_entry = data->cur_entry;
- int new_entry;
- int sg_num = data->cur_lunt->sg_num;
- nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
- unsigned int restlen, sentlen;
- u32_le len, addr;
+ int old_entry = data->cur_entry;
+ int new_entry;
+ int sg_num = data->cur_lunt->sg_num;
+ nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
+ unsigned int restlen, sentlen;
+ u32_le len, addr;
nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
@@ -1719,7 +1732,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
/*
- * calculate new_entry from sack count and each sgt[].len
+ * calculate new_entry from sack count and each sgt[].len
* calculate the byte which is intent to send
*/
sentlen = 0;
@@ -1737,8 +1750,10 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
if (sentlen == s_sacklen) {
/* XXX: confirm it's ok or not */
- /* In this case, it's ok because we are at
- the head element of the sg. restlen is correctly calculated. */
+ /* In this case, it's ok because we are at
+ * the head element of the sg. restlen is correctly
+ * calculated.
+ */
}
/* calculate the rest length for transferring */
@@ -1753,7 +1768,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
/* set cur_entry with new_entry */
data->cur_entry = new_entry;
-
+
return;
last:
@@ -1780,10 +1795,8 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
- //unsigned short command;
- long new_sgtp;
int i;
-
+
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
"enter: msgout_len: 0x%x", data->msgout_len);
@@ -1796,14 +1809,6 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
}
/*
- * Set SGTP ADDR current entry for restarting AUTOSCSI,
- * because SGTP is incremented next point.
- * There is few statement in the specification...
- */
- new_sgtp = data->cur_lunt->sglun_paddr +
- (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
-
- /*
* send messages
*/
for (i = 0; i < data->msgout_len; i++) {
@@ -1825,10 +1830,10 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
//nsp32_restart_autoscsi(SCpnt, command);
nsp32_write2(base, COMMAND_CONTROL,
(CLEAR_CDB_FIFO_POINTER |
- AUTO_COMMAND_PHASE |
- AUTOSCSI_RESTART |
- AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 ));
+ AUTO_COMMAND_PHASE |
+ AUTOSCSI_RESTART |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 ));
}
/*
* Write data with SACK, then wait sack is
@@ -1839,7 +1844,7 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
nsp32_read1(base, SCSI_BUS_MONITOR));
- };
+ }
data->msgout_len = 0;
@@ -1928,9 +1933,9 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
unsigned char msgtype;
unsigned char newlun;
unsigned short command = 0;
- int msgclear = TRUE;
- long new_sgtp;
- int ret;
+ int msgclear = TRUE;
+ long new_sgtp;
+ int ret;
/*
* read first message
@@ -1970,7 +1975,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
goto reject;
}
}
-
+
/*
* processing messages except for IDENTIFY
*
@@ -1986,10 +1991,10 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* These messages should not be occurred.
* They should be processed on AutoSCSI sequencer.
*/
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: 0x%x", msg);
break;
-
+
case RESTORE_POINTERS:
/*
* AutoMsgIn03 is disabled, and HBA gets this message.
@@ -2015,7 +2020,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
/*
* set new sg pointer
*/
- new_sgtp = data->cur_lunt->sglun_paddr +
+ new_sgtp = data->cur_lunt->sglun_paddr +
(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
nsp32_write4(base, SGT_ADR, new_sgtp);
@@ -2026,13 +2031,13 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* These messages should not be occurred.
* They should be processed on AutoSCSI sequencer.
*/
- nsp32_msg (KERN_WARNING,
+ nsp32_msg (KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
-
+
break;
-
+
case MESSAGE_REJECT:
- /* If previous message_out is sending SDTR, and get
+ /* If previous message_out is sending SDTR, and get
message_reject from target, SDTR negotiation is failed */
if (data->cur_target->sync_flag &
(SDTR_INITIATOR | SDTR_TARGET)) {
@@ -2051,7 +2056,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
case LINKED_CMD_COMPLETE:
case LINKED_FLG_CMD_COMPLETE:
/* queue tag is not supported currently */
- nsp32_msg (KERN_WARNING,
+ nsp32_msg (KERN_WARNING,
"unsupported message: 0x%x", msgtype);
break;
@@ -2104,7 +2109,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
}
/*
- * Reach here means regular length of each type of
+ * Reach here means regular length of each type of
* extended messages.
*/
switch (data->msginbuf[2]) {
@@ -2139,12 +2144,12 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
goto reject; /* not implemented yet */
break;
-
+
default:
goto reject;
}
break;
-
+
default:
goto reject;
}
@@ -2160,7 +2165,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* AutoSCSI restart, at the same time MsgOutOccur should be
* happened (however, such situation is really possible...?).
*/
- if (data->msgout_len > 0) {
+ if (data->msgout_len > 0) {
nsp32_write4(base, SCSI_MSG_OUT, 0);
command |= AUTO_ATN;
}
@@ -2202,7 +2207,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
return;
reject:
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"invalid or unsupported MessageIn, rejected. "
"current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
msg, data->msgin_len, msgtype);
@@ -2213,37 +2218,32 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
}
/*
- *
+ *
*/
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- nsp32_target *target = data->cur_target;
- nsp32_sync_table *synct;
- unsigned char get_period = data->msginbuf[3];
- unsigned char get_offset = data->msginbuf[4];
- int entry;
- int syncnum;
+ nsp32_target *target = data->cur_target;
+ unsigned char get_period = data->msginbuf[3];
+ unsigned char get_offset = data->msginbuf[4];
+ int entry;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
- synct = data->synct;
- syncnum = data->syncnum;
-
/*
* If this inititor sent the SDTR message, then target responds SDTR,
* initiator SYNCREG, ACKWIDTH from SDTR parameter.
* Messages are not appropriate, then send back reject message.
- * If initiator did not send the SDTR, but target sends SDTR,
+ * If initiator did not send the SDTR, but target sends SDTR,
* initiator calculator the appropriate parameter and send back SDTR.
- */
+ */
if (target->sync_flag & SDTR_INITIATOR) {
/*
* Initiator sent SDTR, the target responds and
* send back negotiation SDTR.
*/
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");
-
+
target->sync_flag &= ~SDTR_INITIATOR;
target->sync_flag |= SDTR_DONE;
@@ -2257,7 +2257,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
*/
goto reject;
}
-
+
if (get_offset == ASYNC_OFFSET) {
/*
* Negotiation is succeeded, the target want
@@ -2288,7 +2288,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
if (entry < 0) {
/*
- * Target want to use long period which is not
+ * Target want to use long period which is not
* acceptable NinjaSCSI-32Bi/UDE.
*/
goto reject;
@@ -2301,7 +2301,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
} else {
/* Target send SDTR to initiator. */
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");
-
+
target->sync_flag |= SDTR_INITIATOR;
/* offset: */
@@ -2424,7 +2424,7 @@ static void nsp32_set_max_sync(nsp32_hw_data *data,
*/
static void nsp32_set_sync_entry(nsp32_hw_data *data,
nsp32_target *target,
- int entry,
+ int entry,
unsigned char offset)
{
unsigned char period, ackwidth, sample_rate;
@@ -2453,7 +2453,7 @@ static void nsp32_set_sync_entry(nsp32_hw_data *data,
static void nsp32_wait_req(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
- int wait_time = 0;
+ int wait_time = 0;
unsigned char bus, req_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
@@ -2465,7 +2465,7 @@ static void nsp32_wait_req(nsp32_hw_data *data, int state)
do {
bus = nsp32_read1(base, SCSI_BUS_MONITOR);
if ((bus & BUSMON_REQ) == req_bit) {
- nsp32_dbg(NSP32_DEBUG_WAIT,
+ nsp32_dbg(NSP32_DEBUG_WAIT,
"wait_time: %d", wait_time);
return;
}
@@ -2482,7 +2482,7 @@ static void nsp32_wait_req(nsp32_hw_data *data, int state)
static void nsp32_wait_sack(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
- int wait_time = 0;
+ int wait_time = 0;
unsigned char bus, ack_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
@@ -2547,8 +2547,8 @@ static int nsp32_detect(struct pci_dev *pdev)
struct Scsi_Host *host; /* registered host structure */
struct resource *res;
nsp32_hw_data *data;
- int ret;
- int i, j;
+ int ret;
+ int i, j;
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
@@ -2625,7 +2625,7 @@ static int nsp32_detect(struct pci_dev *pdev)
*/
/*
- * setup DMA
+ * setup DMA
*/
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
@@ -2725,16 +2725,16 @@ static int nsp32_detect(struct pci_dev *pdev)
goto free_sg_list;
}
- /*
+ /*
* PCI IO register
*/
res = request_region(host->io_port, host->n_io_port, "nsp32");
if (res == NULL) {
- nsp32_msg(KERN_ERR,
- "I/O region 0x%lx+0x%lx is already used",
+ nsp32_msg(KERN_ERR,
+ "I/O region 0x%x+0x%x is already used",
data->BaseAddress, data->NumAddress);
goto free_irq;
- }
+ }
ret = scsi_add_host(host, &pdev->dev);
if (ret) {
@@ -2758,7 +2758,7 @@ static int nsp32_detect(struct pci_dev *pdev)
free_autoparam:
dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
-
+
scsi_unregister:
scsi_host_put(host);
@@ -2825,7 +2825,7 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
}
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write2(base, BM_CNT, 0);
+ nsp32_write2(base, BM_CNT, 0);
SCpnt->result = DID_ABORT << 16;
nsp32_scsi_done(SCpnt);
@@ -2837,8 +2837,8 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
unsigned int base = data->BaseAddress;
- unsigned short intrdat;
int i;
+ unsigned short __maybe_unused intrdat;
nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");
@@ -2848,8 +2848,8 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
* clear counter
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
- nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
+ nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
/*
* fall back to asynchronous transfer mode
@@ -2871,7 +2871,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
for(i = 0; i < 5; i++) {
intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
- }
+ }
data->CurrentSC = NULL;
}
@@ -2882,7 +2882,7 @@ static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
unsigned int base = SCpnt->device->host->io_port;
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
- nsp32_msg(KERN_INFO, "Host Reset");
+ nsp32_msg(KERN_INFO, "Host Reset");
nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
spin_lock_irq(SCpnt->device->host->host_lock);
@@ -2908,7 +2908,8 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
{
int vendor = data->pci_devid->vendor;
int device = data->pci_devid->device;
- int ret, val, i;
+ int ret, i;
+ int __maybe_unused val;
/*
* EEPROM checking.
@@ -2956,13 +2957,13 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
* AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
*
* ROMADDR
- * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
* 0x07 : HBA Synchronous Transfer Period
* Value 0: AutoSync, 1: Manual Setting
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Bus Termination
- * Value 0: Auto[ON], 1: ON, 2: OFF
+ * Value 0: Auto[ON], 1: ON, 2: OFF
* 0x11 : Not Used? (0)
* 0x12 : Bus Reset Delay Time (0x03)
* 0x13 : Bootable CD Support
@@ -2970,7 +2971,7 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
* 0x14 : Device Scan
* Bit 7 6 5 4 3 2 1 0
* | <----------------->
- * | SCSI ID: Value 0: Skip, 1: YES
+ * | SCSI ID: Value 0: Skip, 1: YES
* |-> Value 0: ALL scan, Value 1: Manual
* 0x15 - 0x1b : Not Used? (0)
* 0x1c : Constant? (0x01) (clock div?)
@@ -2981,10 +2982,10 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
*/
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
- int ret, i;
- int auto_sync;
+ int ret, i;
+ int auto_sync;
nsp32_target *target;
- int entry;
+ int entry;
/*
* Reset time which is designated by EEPROM.
@@ -3050,7 +3051,7 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
* C16 110 (I-O Data: SC-NBD) data map:
*
* ROMADDR
- * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
* 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
* 0x08 - 0x0f : Not Used? (0x0)
@@ -3058,7 +3059,7 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
* Value 0: PIO, 1: Busmater
* 0x11 : Bus Reset Delay Time (0x00-0x20)
* 0x12 : Bus Termination
- * Value 0: Disable, 1: Enable
+ * Value 0: Disable, 1: Enable
* 0x13 - 0x19 : Disconnection
* Value 0: Disable, 1: Enable
* 0x1a - 0x7c : Not Used? (0)
@@ -3068,9 +3069,9 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
*/
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
- int ret, i;
+ int ret, i;
nsp32_target *target;
- int entry, val;
+ int entry, val;
/*
* Reset time which is designated by EEPROM.
@@ -3170,7 +3171,7 @@ static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
for (i = 7; i >= 0; i--) {
val += (nsp32_prom_read_bit(data) << i);
}
-
+
/* no ack */
nsp32_prom_write_bit(data, 1);
@@ -3278,7 +3279,8 @@ static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
- nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host);
+ nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state.event=%x, slot=%s, host=0x%p",
+ pdev, state.event, pci_name(pdev), host);
pci_save_state (pdev);
pci_disable_device (pdev);
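
The old suspend message passed the whole pm_message_t to a %ld conversion; pm_message_t is a struct wrapping a single int, so the fixed call prints state.event with %x. Sketch:

    #include <linux/pm.h>
    #include <linux/printk.h>

    /* pm_message_t is struct pm_message { int event; }; only the
     * member is printable, not the struct itself.
     */
    static void log_pm_state(pm_message_t state)
    {
            pr_info("pm event=%x\n", state.event);
    }
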
@@ -3294,7 +3296,8 @@ static int nsp32_resume(struct pci_dev *pdev)
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
unsigned short reg;
- nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);
+ nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p",
+ pdev, pci_name(pdev), host);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake (pdev, PCI_D0, 0);
@@ -3329,13 +3332,13 @@ static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
- ret = pci_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to enable pci device");
return ret;
}
- data->Pci = pdev;
+ data->Pci = pdev;
data->pci_devid = id;
data->IrqNumber = pdev->irq;
data->BaseAddress = pci_resource_start(pdev, 0);
@@ -3364,7 +3367,7 @@ static void nsp32_remove(struct pci_dev *pdev)
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
- scsi_remove_host(host);
+ scsi_remove_host(host);
nsp32_release(host);
@@ -3377,8 +3380,8 @@ static struct pci_driver nsp32_driver = {
.probe = nsp32_probe,
.remove = nsp32_remove,
#ifdef CONFIG_PM
- .suspend = nsp32_suspend,
- .resume = nsp32_resume,
+ .suspend = nsp32_suspend,
+ .resume = nsp32_resume,
#endif
};
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
index ab0726c070f7..924889f8bd37 100644
--- a/drivers/scsi/nsp32.h
+++ b/drivers/scsi/nsp32.h
@@ -534,6 +534,15 @@ typedef struct _nsp32_sync_table {
---PERIOD-- ---OFFSET-- */
#define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f))
+struct nsp32_cmd_priv {
+ enum sam_status status;
+};
+
+static inline struct nsp32_cmd_priv *nsp32_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
typedef struct _nsp32_target {
unsigned char syncreg; /* value for SYNCREG */
unsigned char ackwidth; /* value for ACKWIDTH */
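
Same per-command private data pattern as in ncr53c8xx, but with a typed accessor: nsp32_priv() wraps scsi_cmd_priv() so call sites get a struct nsp32_cmd_priv * without casting, and the status byte that used to live in the midlayer's SCp.Status is kept there as an enum sam_status. Usage sketch (nsp32_last_status is a hypothetical helper):

    /* Sketch: read back the status saved by the interrupt handler. */
    static enum sam_status nsp32_last_status(struct scsi_cmnd *cmd)
    {
            return nsp32_priv(cmd)->status;
    }
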
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index dc9b74c9348a..9696b6b5591f 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -36,7 +36,7 @@ config PCMCIA_NINJA_SCSI
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
- <file:Documentation/scsi/NinjaSCSI.txt>.
+ <file:Documentation/scsi/NinjaSCSI.rst>.
Supported cards:
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index df82a349e969..6a6621728c69 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -40,13 +40,16 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ioport.h>
-#include <scsi/scsi.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <scsi/scsi_ioctl.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include "aha152x.h"
#include <pcmcia/cistpl.h>
diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
index e42acf314d06..33df6a9ba9b5 100644
--- a/drivers/scsi/pcmcia/fdomain_cs.c
+++ b/drivers/scsi/pcmcia/fdomain_cs.c
@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
goto fail_disable;
if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
- "fdomain_cs"))
+ "fdomain_cs")) {
+ ret = -EBUSY;
goto fail_disable;
+ }
sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
if (!sh) {
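
The fdomain_cs hunk fixes a silent-failure path: ret still held 0 from the earlier call, so a failed request_region() made probe return success. A minimal sketch of the rule the fix enforces, assuming the usual goto-unwind probe style (function name and region size are hypothetical):

#include <linux/ioport.h>
#include <pcmcia/ds.h>

static int probe_sketch(struct pcmcia_device *link)
{
	int ret = pcmcia_enable_device(link);

	if (ret)
		goto fail;
	if (!request_region(link->resource[0]->start, 16, "sketch")) {
		ret = -EBUSY;	/* every failure goto must set an errno */
		goto fail;
	}
	return 0;
fail:
	return ret;
}
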
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index d79ce97a04bd..48acab03a8a0 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -41,10 +41,9 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <../drivers/scsi/scsi.h>
-#include <scsi/scsi_host.h>
-
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <pcmcia/cistpl.h>
@@ -55,7 +54,6 @@
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
-MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
MODULE_LICENSE("GPL");
#include "nsp_io.h"
@@ -72,6 +70,11 @@ static bool free_ports = 0;
module_param(free_ports, bool, 0);
MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
+static struct scsi_pointer *nsp_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static struct scsi_host_template nsp_driver_template = {
.proc_name = "nsp_cs",
.show_info = nsp_show_info,
@@ -85,6 +88,7 @@ static struct scsi_host_template nsp_driver_template = {
.this_id = NSP_INITIATOR_ID,
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
@@ -134,6 +138,7 @@ static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc)
scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc);
}
+__printf(4, 5)
static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...)
{
va_list args;
@@ -178,12 +183,12 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt)
data->CurrentSC = NULL;
- SCpnt->scsi_done(SCpnt);
+ scsi_done(SCpnt);
}
-static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
+static int nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
#ifdef NSP_DEBUG
/*unsigned int host_id = SCpnt->device->host->this_id;*/
/*unsigned int base = SCpnt->device->host->io_port;*/
@@ -197,8 +202,6 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);
- SCpnt->scsi_done = done;
-
if (data->CurrentSC != NULL) {
nsp_msg(KERN_DEBUG, "CurrentSC != NULL; this can't happen");
SCpnt->result = DID_BAD_TARGET << 16;
@@ -221,11 +224,11 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = CHECK_CONDITION;
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.have_data_in = IO_UNKNOWN;
- SCpnt->SCp.sent_command = 0;
- SCpnt->SCp.phase = PH_UNDETERMINED;
+ scsi_pointer->Status = SAM_STAT_CHECK_CONDITION;
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = IO_UNKNOWN;
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->phase = PH_UNDETERMINED;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
/* setup scratch area
@@ -235,18 +238,18 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
if (scsi_bufflen(SCpnt)) {
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
} else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->buffers_residual = 0;
}
- if (nsphw_start_selection(SCpnt) == FALSE) {
+ if (!nsphw_start_selection(SCpnt)) {
nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail");
SCpnt->result = DID_BUS_BUSY << 16;
nsp_scsi_done(SCpnt);
@@ -266,14 +269,14 @@ static DEF_SCSI_QCMD(nsp_queuecommand)
/*
* setup PIO FIFO transfer mode and enable/disable to data out
*/
-static void nsp_setup_fifo(nsp_hw_data *data, int enabled)
+static void nsp_setup_fifo(nsp_hw_data *data, bool enabled)
{
unsigned int base = data->BaseAddress;
unsigned char transfer_mode_reg;
//nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled);
- if (enabled != FALSE) {
+ if (enabled) {
transfer_mode_reg = TRANSFER_GO | BRAIND;
} else {
transfer_mode_reg = 0;
@@ -301,7 +304,7 @@ static void nsphw_init_sync(nsp_hw_data *data)
/*
* Initialize Ninja hardware
*/
-static int nsphw_init(nsp_hw_data *data)
+static void nsphw_init(nsp_hw_data *data)
{
unsigned int base = data->BaseAddress;
@@ -351,16 +354,15 @@ static int nsphw_init(nsp_hw_data *data)
SCSI_RESET_IRQ_EI );
nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR);
- nsp_setup_fifo(data, FALSE);
-
- return TRUE;
+ nsp_setup_fifo(data, false);
}
/*
* Start selection phase
*/
-static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
+static bool nsphw_start_selection(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int host_id = SCpnt->device->host->this_id;
unsigned int base = SCpnt->device->host->io_port;
unsigned char target = scmd_id(SCpnt);
@@ -373,12 +375,12 @@ static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
phase = nsp_index_read(base, SCSIBUSMON);
if(phase != BUSMON_BUS_FREE) {
//nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy");
- return FALSE;
+ return false;
}
/* start arbitration */
//nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit");
- SCpnt->SCp.phase = PH_ARBSTART;
+ scsi_pointer->phase = PH_ARBSTART;
nsp_index_write(base, SETARBIT, ARBIT_GO);
time_out = 1000;
@@ -393,12 +395,12 @@ static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
if (!(arbit & ARBIT_WIN)) {
//nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail");
nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR);
- return FALSE;
+ return false;
}
/* assert select line */
//nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line");
- SCpnt->SCp.phase = PH_SELSTART;
+ scsi_pointer->phase = PH_SELSTART;
udelay(3); /* wait 2.4us */
nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target));
nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN);
@@ -412,7 +414,7 @@ static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
nsp_start_timer(SCpnt, 1000/51);
data->SelectionTimeOut = 1;
- return TRUE;
+ return true;
}
struct nsp_sync_table {
@@ -482,7 +484,7 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt)
sync->SyncRegister = 0;
sync->AckWidth = 0;
- return FALSE;
+ return false;
}
sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) |
@@ -491,7 +493,7 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt)
nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth);
- return TRUE;
+ return true;
}
@@ -574,8 +576,9 @@ static int nsp_expect_signal(struct scsi_cmnd *SCpnt,
/*
* transfer SCSI message
*/
-static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
+static int nsp_xfer(struct scsi_cmnd *const SCpnt, int phase)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
char *buf = data->MsgBuffer;
@@ -593,7 +596,7 @@ static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
}
/* if last byte, negate ATN */
- if (len == 1 && SCpnt->SCp.phase == PH_MSG_OUT) {
+ if (len == 1 && scsi_pointer->phase == PH_MSG_OUT) {
nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB);
}
@@ -614,14 +617,15 @@ static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
/*
* get extra SCSI data from fifo
*/
-static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
+static int nsp_dataphase_bypass(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
unsigned int count;
//nsp_dbg(NSP_DEBUG_DATA_IO, "in");
- if (SCpnt->SCp.have_data_in != IO_IN) {
+ if (scsi_pointer->have_data_in != IO_IN) {
return 0;
}
@@ -636,9 +640,9 @@ static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
* data phase skip only occurs in the case of SCSI_LOW_READ
*/
nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk");
- SCpnt->SCp.phase = PH_DATA;
+ scsi_pointer->phase = PH_DATA;
nsp_pio_read(SCpnt);
- nsp_setup_fifo(data, FALSE);
+ nsp_setup_fifo(data, false);
return 0;
}
@@ -646,7 +650,7 @@ static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
/*
* accept reselection
*/
-static int nsp_reselected(struct scsi_cmnd *SCpnt)
+static void nsp_reselected(struct scsi_cmnd *SCpnt)
{
unsigned int base = SCpnt->device->host->io_port;
unsigned int host_id = SCpnt->device->host->this_id;
@@ -678,8 +682,6 @@ static int nsp_reselected(struct scsi_cmnd *SCpnt)
bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN);
nsp_index_write(base, SCSIBUSCTRL, bus_reg);
nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB);
-
- return TRUE;
}
/*
@@ -689,14 +691,14 @@ static int nsp_fifo_count(struct scsi_cmnd *SCpnt)
{
unsigned int base = SCpnt->device->host->io_port;
unsigned int count;
- unsigned int l, m, h, dummy;
+ unsigned int l, m, h;
nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER);
l = nsp_index_read(base, TRANSFERCOUNT);
m = nsp_index_read(base, TRANSFERCOUNT);
h = nsp_index_read(base, TRANSFERCOUNT);
- dummy = nsp_index_read(base, TRANSFERCOUNT); /* required this! */
+ nsp_index_read(base, TRANSFERCOUNT); /* this extra read is required */
count = (h << 16) | (m << 8) | (l << 0);
@@ -712,8 +714,9 @@ static int nsp_fifo_count(struct scsi_cmnd *SCpnt)
/*
* read data in DATA IN phase
*/
-static void nsp_pio_read(struct scsi_cmnd *SCpnt)
+static void nsp_pio_read(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
unsigned long mmio_base = SCpnt->device->host->base;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
@@ -724,24 +727,25 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d",
- SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr,
- SCpnt->SCp.this_residual, SCpnt->SCp.buffer,
- SCpnt->SCp.buffers_residual);
+ SCpnt, scsi_get_resid(SCpnt), ocount, scsi_pointer->ptr,
+ scsi_pointer->this_residual, scsi_pointer->buffer,
+ scsi_pointer->buffers_residual);
time_out = 1000;
while ((time_out-- != 0) &&
- (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0 ) ) {
+ (scsi_pointer->this_residual > 0 ||
+ scsi_pointer->buffers_residual > 0)) {
stat = nsp_index_read(base, SCSIBUSMON);
stat &= BUSMON_PHASE_MASK;
res = nsp_fifo_count(SCpnt) - ocount;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount, res);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount, res);
if (res == 0) { /* no data has arrived yet? */
if (stat == BUSPHASE_DATA_IN) { /* phase changed? */
- //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", SCpnt->SCp.this_residual);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", scsi_pointer->this_residual);
continue;
} else {
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat);
@@ -755,20 +759,21 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
continue;
}
- res = min(res, SCpnt->SCp.this_residual);
+ res = min(res, scsi_pointer->this_residual);
switch (data->TransferMode) {
case MODE_IO32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_fifo32_read(base, SCpnt->SCp.ptr, res >> 2);
+ nsp_fifo32_read(base, scsi_pointer->ptr, res >> 2);
break;
case MODE_IO8:
- nsp_fifo8_read (base, SCpnt->SCp.ptr, res );
+ nsp_fifo8_read(base, scsi_pointer->ptr, res);
break;
case MODE_MEM32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_mmio_fifo32_read(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ nsp_mmio_fifo32_read(mmio_base, scsi_pointer->ptr,
+ res >> 2);
break;
default:
@@ -777,22 +782,23 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
}
nsp_inc_resid(SCpnt, -res);
- SCpnt->SCp.ptr += res;
- SCpnt->SCp.this_residual -= res;
+ scsi_pointer->ptr += res;
+ scsi_pointer->this_residual -= res;
ocount += res;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount);
/* go to next scatter list if available */
- if (SCpnt->SCp.this_residual == 0 &&
- SCpnt->SCp.buffers_residual != 0 ) {
+ if (scsi_pointer->this_residual == 0 &&
+ scsi_pointer->buffers_residual != 0 ) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
- SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffers_residual--;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
time_out = 1000;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", SCpnt->SCp.buffer->page, SCpnt->SCp.buffer->offset);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", scsi_pointer->buffer->page, scsi_pointer->buffer->offset);
}
}
@@ -800,8 +806,8 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
- scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
- SCpnt->SCp.buffers_residual);
+ scsi_get_resid(SCpnt), scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual);
}
nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount);
nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId,
@@ -813,6 +819,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
*/
static void nsp_pio_write(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
unsigned long mmio_base = SCpnt->device->host->base;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
@@ -823,14 +830,15 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x",
- data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual,
- SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual,
+ data->FifoCount, scsi_pointer->ptr, scsi_pointer->this_residual,
+ scsi_pointer->buffer, scsi_pointer->buffers_residual,
scsi_get_resid(SCpnt));
time_out = 1000;
while ((time_out-- != 0) &&
- (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0)) {
+ (scsi_pointer->this_residual > 0 ||
+ scsi_pointer->buffers_residual > 0)) {
stat = nsp_index_read(base, SCSIBUSMON);
stat &= BUSMON_PHASE_MASK;
@@ -840,9 +848,9 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res);
/* Put back pointer */
nsp_inc_resid(SCpnt, res);
- SCpnt->SCp.ptr -= res;
- SCpnt->SCp.this_residual += res;
- ocount -= res;
+ scsi_pointer->ptr -= res;
+ scsi_pointer->this_residual += res;
+ ocount -= res;
break;
}
@@ -853,21 +861,22 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
continue;
}
- res = min(SCpnt->SCp.this_residual, WFIFO_CRIT);
+ res = min(scsi_pointer->this_residual, WFIFO_CRIT);
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, res);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, res);
switch (data->TransferMode) {
case MODE_IO32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_fifo32_write(base, SCpnt->SCp.ptr, res >> 2);
+ nsp_fifo32_write(base, scsi_pointer->ptr, res >> 2);
break;
case MODE_IO8:
- nsp_fifo8_write (base, SCpnt->SCp.ptr, res );
+ nsp_fifo8_write(base, scsi_pointer->ptr, res);
break;
case MODE_MEM32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_mmio_fifo32_write(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ nsp_mmio_fifo32_write(mmio_base, scsi_pointer->ptr,
+ res >> 2);
break;
default:
@@ -876,18 +885,19 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
}
nsp_inc_resid(SCpnt, -res);
- SCpnt->SCp.ptr += res;
- SCpnt->SCp.this_residual -= res;
- ocount += res;
+ scsi_pointer->ptr += res;
+ scsi_pointer->this_residual -= res;
+ ocount += res;
/* go to next scatter list if available */
- if (SCpnt->SCp.this_residual == 0 &&
- SCpnt->SCp.buffers_residual != 0 ) {
+ if (scsi_pointer->this_residual == 0 &&
+ scsi_pointer->buffers_residual != 0 ) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
- SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffers_residual--;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
time_out = 1000;
}
}
@@ -934,7 +944,7 @@ static int nsp_nexus(struct scsi_cmnd *SCpnt)
}
/* setup pdma fifo */
- nsp_setup_fifo(data, TRUE);
+ nsp_setup_fifo(data, true);
/* clear ack counter */
data->FifoCount = 0;
@@ -955,6 +965,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
unsigned int base;
unsigned char irq_status, irq_phase, phase;
struct scsi_cmnd *tmpSC;
+ struct scsi_pointer *scsi_pointer;
unsigned char target, lun;
unsigned int *sync_neg;
int i, tmp;
@@ -1033,9 +1044,10 @@ static irqreturn_t nspintr(int irq, void *dev_id)
if(data->CurrentSC != NULL) {
tmpSC = data->CurrentSC;
- tmpSC->result = (DID_RESET << 16) |
- ((tmpSC->SCp.Message & 0xff) << 8) |
- ((tmpSC->SCp.Status & 0xff) << 0);
+ scsi_pointer = nsp_priv(tmpSC);
+ tmpSC->result = (DID_RESET << 16) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ ((scsi_pointer->Status & 0xff) << 0);
nsp_scsi_done(tmpSC);
}
return IRQ_HANDLED;
@@ -1049,6 +1061,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
}
tmpSC = data->CurrentSC;
+ scsi_pointer = nsp_priv(tmpSC);
target = tmpSC->device->id;
lun = tmpSC->device->lun;
sync_neg = &(data->Sync[target].SyncNegotiation);
@@ -1060,9 +1073,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
if (irq_phase & RESELECT_IRQ) {
nsp_dbg(NSP_DEBUG_INTR, "reselect");
nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR);
- if (nsp_reselected(tmpSC) != FALSE) {
- return IRQ_HANDLED;
- }
+ nsp_reselected(tmpSC);
+ return IRQ_HANDLED;
}
if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) {
@@ -1072,7 +1084,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//show_phase(tmpSC);
- switch(tmpSC->SCp.phase) {
+ switch (scsi_pointer->phase) {
case PH_SELSTART:
// *sync_neg = SYNC_NOT_YET;
if ((phase & BUSMON_BSY) == 0) {
@@ -1095,14 +1107,12 @@ static irqreturn_t nspintr(int irq, void *dev_id)
/* attention assert */
//nsp_dbg(NSP_DEBUG_INTR, "attention assert");
data->SelectionTimeOut = 0;
- tmpSC->SCp.phase = PH_SELECTED;
+ scsi_pointer->phase = PH_SELECTED;
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN);
udelay(1);
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB);
return IRQ_HANDLED;
- break;
-
case PH_RESELECT:
//nsp_dbg(NSP_DEBUG_INTR, "phase reselect");
// *sync_neg = SYNC_NOT_YET;
@@ -1112,7 +1122,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
nsp_scsi_done(tmpSC);
return IRQ_HANDLED;
}
- /* fall thru */
+ fallthrough;
default:
if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) {
return IRQ_HANDLED;
@@ -1126,17 +1136,18 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//nsp_dbg(NSP_DEBUG_INTR, "start scsi seq");
/* normal disconnect */
- if (((tmpSC->SCp.phase == PH_MSG_IN) || (tmpSC->SCp.phase == PH_MSG_OUT)) &&
- (irq_phase & LATCHED_BUS_FREE) != 0 ) {
+ if ((scsi_pointer->phase == PH_MSG_IN ||
+ scsi_pointer->phase == PH_MSG_OUT) &&
+ (irq_phase & LATCHED_BUS_FREE) != 0) {
nsp_dbg(NSP_DEBUG_INTR, "normal disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase);
//*sync_neg = SYNC_NOT_YET;
/* all command complete and return status */
- if (tmpSC->SCp.Message == MSG_COMMAND_COMPLETE) {
- tmpSC->result = (DID_OK << 16) |
- ((tmpSC->SCp.Message & 0xff) << 8) |
- ((tmpSC->SCp.Status & 0xff) << 0);
+ if (scsi_pointer->Message == COMMAND_COMPLETE) {
+ tmpSC->result = (DID_OK << 16) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ ((scsi_pointer->Status & 0xff) << 0);
nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result);
nsp_scsi_done(tmpSC);
@@ -1165,7 +1176,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
return IRQ_HANDLED;
}
- tmpSC->SCp.phase = PH_COMMAND;
+ scsi_pointer->phase = PH_COMMAND;
nsp_nexus(tmpSC);
@@ -1181,8 +1192,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
case BUSPHASE_DATA_OUT:
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT");
- tmpSC->SCp.phase = PH_DATA;
- tmpSC->SCp.have_data_in = IO_OUT;
+ scsi_pointer->phase = PH_DATA;
+ scsi_pointer->have_data_in = IO_OUT;
nsp_pio_write(tmpSC);
@@ -1191,8 +1202,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
case BUSPHASE_DATA_IN:
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN");
- tmpSC->SCp.phase = PH_DATA;
- tmpSC->SCp.have_data_in = IO_IN;
+ scsi_pointer->phase = PH_DATA;
+ scsi_pointer->have_data_in = IO_IN;
nsp_pio_read(tmpSC);
@@ -1202,10 +1213,11 @@ static irqreturn_t nspintr(int irq, void *dev_id)
nsp_dataphase_bypass(tmpSC);
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS");
- tmpSC->SCp.phase = PH_STATUS;
+ scsi_pointer->phase = PH_STATUS;
- tmpSC->SCp.Status = nsp_index_read(base, SCSIDATAWITHACK);
- nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", tmpSC->SCp.Message, tmpSC->SCp.Status);
+ scsi_pointer->Status = nsp_index_read(base, SCSIDATAWITHACK);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x",
+ scsi_pointer->Message, scsi_pointer->Status);
break;
@@ -1215,21 +1227,21 @@ static irqreturn_t nspintr(int irq, void *dev_id)
goto timer_out;
}
- tmpSC->SCp.phase = PH_MSG_OUT;
+ scsi_pointer->phase = PH_MSG_OUT;
//*sync_neg = SYNC_NOT_YET;
data->MsgLen = i = 0;
- data->MsgBuffer[i] = IDENTIFY(TRUE, lun); i++;
+ data->MsgBuffer[i] = IDENTIFY(true, lun); i++;
if (*sync_neg == SYNC_NOT_YET) {
data->Sync[target].SyncPeriod = 0;
data->Sync[target].SyncOffset = 0;
/**/
- data->MsgBuffer[i] = MSG_EXTENDED; i++;
+ data->MsgBuffer[i] = EXTENDED_MESSAGE; i++;
data->MsgBuffer[i] = 3; i++;
- data->MsgBuffer[i] = MSG_EXT_SDTR; i++;
+ data->MsgBuffer[i] = EXTENDED_SDTR; i++;
data->MsgBuffer[i] = 0x0c; i++;
data->MsgBuffer[i] = 15; i++;
/**/
@@ -1248,7 +1260,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
goto timer_out;
}
- tmpSC->SCp.phase = PH_MSG_IN;
+ scsi_pointer->phase = PH_MSG_IN;
nsp_message_in(tmpSC);
/**/
@@ -1256,9 +1268,9 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun);
if (data->MsgLen >= 5 &&
- data->MsgBuffer[0] == MSG_EXTENDED &&
+ data->MsgBuffer[0] == EXTENDED_MESSAGE &&
data->MsgBuffer[1] == 3 &&
- data->MsgBuffer[2] == MSG_EXT_SDTR ) {
+ data->MsgBuffer[2] == EXTENDED_SDTR ) {
data->Sync[target].SyncPeriod = data->MsgBuffer[3];
data->Sync[target].SyncOffset = data->MsgBuffer[4];
//nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]);
@@ -1276,13 +1288,14 @@ static irqreturn_t nspintr(int irq, void *dev_id)
tmp = -1;
for (i = 0; i < data->MsgLen; i++) {
tmp = data->MsgBuffer[i];
- if (data->MsgBuffer[i] == MSG_EXTENDED) {
+ if (data->MsgBuffer[i] == EXTENDED_MESSAGE) {
i += (1 + data->MsgBuffer[i+1]);
}
}
- tmpSC->SCp.Message = tmp;
+ scsi_pointer->Message = tmp;
- nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", tmpSC->SCp.Message, data->MsgLen);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d",
+ scsi_pointer->Message, data->MsgLen);
show_message(data);
break;
@@ -1562,6 +1575,9 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
data->MmioAddress = (unsigned long)
ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
+ if (!data->MmioAddress)
+ goto next_entry;
+
data->MmioLength = resource_size(p_dev->resource[2]);
}
/* If we got this far, we're cool! */
@@ -1616,9 +1632,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
data->BaseAddress, data->NumAddress, data->IrqNumber);
- if(nsphw_init(data) == FALSE) {
- goto cs_failed;
- }
+ nsphw_init(data);
host = nsp_detect(&nsp_driver_template);
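
The queuecommand conversion above follows the midlayer change that removed the done() callback argument: completion is now reported through scsi_done(), and DEF_SCSI_QCMD still generates the host-lock wrapper around the _lck variant. A minimal sketch of the new shape (the body is illustrative):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int sketch_queuecommand_lck(struct scsi_cmnd *cmd)
{
	if (0 /* controller busy: ask the midlayer to retry */)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmd->result = DID_OK << 16;
	scsi_done(cmd);		/* replaces the old cmd->scsi_done(cmd) */
	return 0;
}

static DEF_SCSI_QCMD(sketch_queuecommand)
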
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index ea5122f3396d..e1ee8ef90ad3 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -304,8 +304,8 @@ static int nsp_eh_host_reset (struct scsi_cmnd *SCpnt);
static int nsp_bus_reset (nsp_hw_data *data);
/* */
-static int nsphw_init (nsp_hw_data *data);
-static int nsphw_start_selection(struct scsi_cmnd *SCpnt);
+static void nsphw_init (nsp_hw_data *data);
+static bool nsphw_start_selection(struct scsi_cmnd *SCpnt);
static void nsp_start_timer (struct scsi_cmnd *SCpnt, int time);
static int nsp_fifo_count (struct scsi_cmnd *SCpnt);
static void nsp_pio_read (struct scsi_cmnd *SCpnt);
@@ -320,7 +320,7 @@ static int nsp_expect_signal (struct scsi_cmnd *SCpnt,
unsigned char mask);
static int nsp_xfer (struct scsi_cmnd *SCpnt, int phase);
static int nsp_dataphase_bypass (struct scsi_cmnd *SCpnt);
-static int nsp_reselected (struct scsi_cmnd *SCpnt);
+static void nsp_reselected (struct scsi_cmnd *SCpnt);
static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht);
/* Interrupt handler */
@@ -370,19 +370,8 @@ enum _burst_mode {
BURST_MEM32 = 2,
};
-/**************************************************************************
- * SCSI messaage
- */
-#define MSG_COMMAND_COMPLETE 0x00
-#define MSG_EXTENDED 0x01
-#define MSG_ABORT 0x06
-#define MSG_NO_OPERATION 0x08
-#define MSG_BUS_DEVICE_RESET 0x0c
-
-#define MSG_EXT_SDTR 0x01
-
/* scatter-gather table */
-# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
+#define BUFFER_ADDR(SCpnt) ((char *)(sg_virt(nsp_priv(SCpnt)->buffer)))
#endif /*__nsp_cs__*/
/* end */
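
The removed MSG_* block duplicated message codes that the generic SCSI headers already provide, which is why the .c hunks switch to COMMAND_COMPLETE, EXTENDED_MESSAGE and EXTENDED_SDTR. A minimal sketch of the SDTR check in terms of the standard constants (helper name is hypothetical):

#include <scsi/scsi.h>

static inline bool sdtr_sketch(const unsigned char *msg, int len)
{
	/* COMMAND_COMPLETE == 0x00, EXTENDED_MESSAGE == 0x01 and
	 * EXTENDED_SDTR == 0x01, matching the old private defines */
	return len >= 5 && msg[0] == EXTENDED_MESSAGE &&
	       msg[1] == 3 && msg[2] == EXTENDED_SDTR;
}
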
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
index 6aa7d269d3b3..23b68dd26f74 100644
--- a/drivers/scsi/pcmcia/nsp_debug.c
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -145,7 +145,7 @@ static void show_command(struct scsi_cmnd *SCpnt)
static void show_phase(struct scsi_cmnd *SCpnt)
{
- int i = SCpnt->SCp.phase;
+ int i = nsp_scsi_pointer(SCpnt)->phase;
char *ph[] = {
"PH_UNDETERMINED",
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 828d53faf09a..310d0b6586a6 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -38,14 +38,17 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <asm/io.h>
-#include <scsi/scsi.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <scsi/scsi_ioctl.h>
#include <linux/interrupt.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include "../qlogicfas408.h"
#include <pcmcia/cistpl.h>
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index a366ff1a3959..5d7dfefd6f6c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -192,6 +192,12 @@ struct sym53c500_data {
int fast_pio;
};
+struct sym53c500_cmd_priv {
+ int status;
+ int message;
+ int phase;
+};
+
enum Phase {
idle,
data_out,
@@ -351,6 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
struct scsi_cmnd *curSC = data->current_SC;
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
int fast_pio = data->fast_pio;
spin_lock_irqsave(dev->host_lock, flags);
@@ -397,11 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
if (int_reg & 0x20) { /* Disconnect */
DEB(printk("SYM53C500: disconnect intr received\n"));
- if (curSC->SCp.phase != message_in) { /* Unexpected disconnect */
+ if (scp->phase != message_in) { /* Unexpected disconnect */
curSC->result = DID_NO_CONNECT << 16;
} else { /* Command complete, return status and message */
- curSC->result = (curSC->SCp.Status & 0xff)
- | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ curSC->result = (scp->status & 0xff) |
+ ((scp->message & 0xff) << 8) | (DID_OK << 16);
}
goto idle_out;
}
@@ -412,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- curSC->SCp.phase = data_out;
+ scp->phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -431,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- curSC->SCp.phase = data_in;
+ scp->phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -446,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
break;
case 0x02: /* COMMAND */
- curSC->SCp.phase = command_ph;
+ scp->phase = command_ph;
printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
break;
case 0x03: /* STATUS */
- curSC->SCp.phase = status_ph;
+ scp->phase = status_ph;
VDEB(printk("SYM53C500: Status phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -464,22 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x06: /* MESSAGE-OUT */
DEB(printk("SYM53C500: Message-Out phase\n"));
- curSC->SCp.phase = message_out;
+ scp->phase = message_out;
outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
outb(MSG_ACCEPT, port_base + CMD_REG);
break;
case 0x07: /* MESSAGE-IN */
VDEB(printk("SYM53C500: Message-In phase\n"));
- curSC->SCp.phase = message_in;
+ scp->phase = message_in;
- curSC->SCp.Status = inb(port_base + SCSI_FIFO);
- curSC->SCp.Message = inb(port_base + SCSI_FIFO);
+ scp->status = inb(port_base + SCSI_FIFO);
+ scp->message = inb(port_base + SCSI_FIFO);
VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
- DEB(printk("Status = %02x Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message));
+ DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message));
- if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) {
+ if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
outb(SET_ATN, port_base + CMD_REG); /* Reject message */
DEB(printk("Discarding SAVE_POINTERS message\n"));
}
@@ -491,8 +498,8 @@ out:
return IRQ_HANDLED;
idle_out:
- curSC->SCp.phase = idle;
- curSC->scsi_done(curSC);
+ scp->phase = idle;
+ scsi_done(curSC);
goto out;
}
@@ -537,9 +544,9 @@ SYM53C500_info(struct Scsi_Host *SChost)
return (info_msg);
}
-static int
-SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
int i;
int port_base = SCpnt->device->host->io_port;
struct sym53c500_data *data =
@@ -556,10 +563,9 @@ SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
VDEB(printk("\n"));
data->current_SC = SCpnt;
- data->current_SC->scsi_done = done;
- data->current_SC->SCp.phase = command_ph;
- data->current_SC->SCp.Status = 0;
- data->current_SC->SCp.Message = 0;
+ scp->phase = command_ph;
+ scp->status = 0;
+ scp->message = 0;
/* We are locked here already by the mid layer */
REG0(port_base);
@@ -652,11 +658,13 @@ static struct device_attribute SYM53C500_pio_attr = {
.store = SYM53C500_store_pio,
};
-static struct device_attribute *SYM53C500_shost_attrs[] = {
- &SYM53C500_pio_attr,
+static struct attribute *SYM53C500_shost_attrs[] = {
+ &SYM53C500_pio_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(SYM53C500_shost);
+
/*
* scsi_host_template initializer
*/
@@ -671,7 +679,8 @@ static struct scsi_host_template sym53c500_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = 32,
- .shost_attrs = SYM53C500_shost_attrs
+ .shost_groups = SYM53C500_shost_groups,
+ .cmd_size = sizeof(struct sym53c500_cmd_priv),
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
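
The shost_attrs to shost_groups conversion above is the generic sysfs pattern: expose plain struct attribute pointers and let ATTRIBUTE_GROUPS() generate the <name>_group/<name>_groups symbols that the template consumes. A minimal sketch (foo_show() is assumed to exist elsewhere):

#include <linux/device.h>

static DEVICE_ATTR_RO(foo);	/* hypothetical attribute */

static struct attribute *example_shost_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
/* generates example_shost_group and example_shost_groups[] */
ATTRIBUTE_GROUPS(example_shost);
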
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 02b7338999cc..bbb51b7312f1 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -6,9 +6,12 @@
obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+
+CFLAGS_pm80xx_tracepoints.o := -I$(src)
+
pm80xx-y += pm8001_init.o \
pm8001_sas.o \
pm8001_ctl.o \
pm8001_hwi.o \
- pm80xx_hwi.o
-
+ pm80xx_hwi.o \
+ pm80xx_tracepoints.o
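
The extra CFLAGS line is the standard arrangement for a tracepoint translation unit: exactly one object in the module defines CREATE_TRACE_POINTS before including the trace header, and -I$(src) lets that header be found in the driver directory. A sketch of what pm80xx_tracepoints.c is assumed to contain:

/* The single definition point for this module's trace events. */
#define CREATE_TRACE_POINTS
#include "pm80xx_tracepoints.h"
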
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7c6be2ec110d..73f036bed128 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -41,12 +41,14 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_ctl.h"
+#include "pm8001_chips.h"
/* scsi host attributes */
/**
* pm8001_ctl_mpi_interface_rev_show - MPI interface revision number
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -72,9 +74,10 @@ DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
/**
* controller_fatal_error_show - check controller is under fatal err
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
- * A sysfs 'read only' shost attribute.
+ * A sysfs 'read-only' shost attribute.
*/
static ssize_t controller_fatal_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
@@ -91,6 +94,7 @@ static DEVICE_ATTR_RO(controller_fatal_error);
/**
* pm8001_ctl_fw_version_show - firmware version
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -121,6 +125,7 @@ static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
/**
* pm8001_ctl_ila_version_show - ila version
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -144,8 +149,9 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
/**
- * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number
+ * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -172,6 +178,7 @@ DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL);
/**
* pm8001_ctl_max_out_io_show - max outstanding io supported
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -195,6 +202,7 @@ static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
/**
* pm8001_ctl_max_devices_show - max devices support
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -221,6 +229,7 @@ static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
* pm8001_ctl_max_sg_list_show - max supported sg list length (0 means no
* hardware limitation)
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -265,6 +274,7 @@ show_sas_spec_support_status(unsigned int mode, char *buf)
/**
* pm8001_ctl_sas_spec_support_show - sas spec supported
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -290,8 +300,9 @@ static DEVICE_ATTR(sas_spec_support, S_IRUGO,
pm8001_ctl_sas_spec_support_show, NULL);
/**
- * pm8001_ctl_sas_address_show - sas address
+ * pm8001_ctl_host_sas_address_show - sas address
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* This is the controller sas address
@@ -313,6 +324,7 @@ static DEVICE_ATTR(host_sas_address, S_IRUGO,
/**
* pm8001_ctl_logging_level_show - logging level
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
@@ -326,6 +338,7 @@ static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
}
+
static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -346,6 +359,7 @@ static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
/**
* pm8001_ctl_aap_log_show - aap1 event log
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -356,24 +370,22 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ u8 *ptr = (u8 *)pm8001_ha->memoryMap.region[AAP1].virt_ptr;
int i;
-#define AAP1_MEMMAP(r, c) \
- (*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \
- + (c)))
char *str = buf;
int max = 2;
for (i = 0; i < max; i++) {
str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
"0x%08x 0x%08x\n",
- AAP1_MEMMAP(i, 0),
- AAP1_MEMMAP(i, 4),
- AAP1_MEMMAP(i, 8),
- AAP1_MEMMAP(i, 12),
- AAP1_MEMMAP(i, 16),
- AAP1_MEMMAP(i, 20),
- AAP1_MEMMAP(i, 24),
- AAP1_MEMMAP(i, 28));
+ pm8001_ctl_aap1_memmap(ptr, i, 0),
+ pm8001_ctl_aap1_memmap(ptr, i, 4),
+ pm8001_ctl_aap1_memmap(ptr, i, 8),
+ pm8001_ctl_aap1_memmap(ptr, i, 12),
+ pm8001_ctl_aap1_memmap(ptr, i, 16),
+ pm8001_ctl_aap1_memmap(ptr, i, 20),
+ pm8001_ctl_aap1_memmap(ptr, i, 24),
+ pm8001_ctl_aap1_memmap(ptr, i, 28));
}
return str - buf;
@@ -382,7 +394,9 @@ static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
/**
* pm8001_ctl_ib_queue_log_show - Inbound queue log
* @cdev:pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
+ *
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
@@ -394,9 +408,11 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
+ u32 ib_offset = pm8001_ha->ib_offset;
+ u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define IB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[IB].virt_ptr + \
+ memoryMap.region[ib_offset].virt_ptr + \
pm8001_ha->evtlog_ib_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
@@ -404,7 +420,7 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
start = start + 4;
}
pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
- if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0)
pm8001_ha->evtlog_ib_offset = 0;
return str - buf;
@@ -414,7 +430,9 @@ static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
/**
* pm8001_ctl_ob_queue_log_show - Outbound queue log
* @cdev:pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
+ *
* A sysfs 'read-only' shost attribute.
*/
@@ -427,9 +445,11 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
+ u32 ob_offset = pm8001_ha->ob_offset;
+ u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define OB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[OB].virt_ptr + \
+ memoryMap.region[ob_offset].virt_ptr + \
pm8001_ha->evtlog_ob_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
@@ -437,7 +457,7 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
start = start + 4;
}
pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
- if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0)
pm8001_ha->evtlog_ob_offset = 0;
return str - buf;
@@ -446,7 +466,9 @@ static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
/**
* pm8001_ctl_bios_version_show - Bios version Display
* @cdev:pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf:the buffer returned
+ *
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
@@ -463,7 +485,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 7;
payload.offset = 0;
- payload.length = 4096;
+ payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
@@ -483,6 +505,7 @@ static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
/**
* event_log_size_show - event log size
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs read shost attribute.
@@ -499,8 +522,9 @@ static ssize_t event_log_size_show(struct device *cdev,
}
static DEVICE_ATTR_RO(event_log_size);
/**
- * pm8001_ctl_aap_log_show - IOP event log
+ * pm8001_ctl_iop_log_show - IOP event log
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
@@ -536,12 +560,13 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
/**
- ** pm8001_ctl_fatal_log_show - fatal error logging
- ** @cdev:pointer to embedded class device
- ** @buf: the buffer returned
- **
- ** A sysfs 'read-only' shost attribute.
- **/
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
@@ -554,13 +579,59 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+/**
+ * non_fatal_log_show - non fatal error logging
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t non_fatal_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm80xx_get_non_fatal_dump(cdev, attr, buf);
+ return count;
+}
+static DEVICE_ATTR_RO(non_fatal_log);
+
+static ssize_t non_fatal_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%08x",
+ pm8001_ha->non_fatal_count);
+}
+
+static ssize_t non_fatal_count_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int val = 0;
+
+ if (kstrtoint(buf, 16, &val) != 0)
+ return -EINVAL;
+
+ pm8001_ha->non_fatal_count = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(non_fatal_count);
/**
- ** pm8001_ctl_gsm_log_show - gsm dump collection
- ** @cdev:pointer to embedded class device
- ** @buf: the buffer returned
- **A sysfs 'read-only' shost attribute.
- **/
+ * pm8001_ctl_gsm_log_show - gsm dump collection
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -581,8 +652,7 @@ struct flash_command {
int code;
};
-static struct flash_command flash_command_table[] =
-{
+static const struct flash_command flash_command_table[] = {
{"set_nvmd", FLASH_CMD_SET_NVMD},
{"update", FLASH_CMD_UPDATE},
{"", FLASH_CMD_NONE} /* Last entry should be NULL. */
@@ -593,8 +663,7 @@ struct error_fw {
int err_code;
};
-static struct error_fw flash_error_table[] =
-{
+static const struct error_fw flash_error_table[] = {
{"Failed to open fw image file", FAIL_OPEN_BIOS_FILE},
{"image header mismatch", FLASH_UPDATE_HDR_ERR},
{"image offset mismatch", FLASH_UPDATE_OFFSET_ERR},
@@ -631,7 +700,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
pm8001_ha->fw_image->size);
- payload->length = pm8001_ha->fw_image->size;
+ payload->wr_length = pm8001_ha->fw_image->size;
payload->id = 0;
payload->minor_function = 0x1;
pm8001_ha->nvmd_completion = &completion;
@@ -652,12 +721,15 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
struct fw_control_info *fwControl;
- u32 partitionSize, partitionSizeTmp;
+ __be32 partitionSizeTmp;
+ u32 partitionSize;
u32 loopNumber, loopcount;
struct pm8001_fw_image_header *image_hdr;
u32 sizeRead = 0;
u32 ret = 0;
u32 length = 1024 * 16 + sizeof(*payload) - 1;
+ u32 fc_len;
+ u8 *read_buf;
if (pm8001_ha->fw_image->size < 28) {
pm8001_ha->fw_status = FAIL_FILE_SIZE;
@@ -671,13 +743,13 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
while (sizeRead < pm8001_ha->fw_image->size) {
partitionSizeTmp =
- *(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
+ *(__be32 *)((u8 *)&image_hdr->image_length + sizeRead);
partitionSize = be32_to_cpu(partitionSizeTmp);
loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN,
IOCTL_BUF_SIZE);
for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
- payload->length = 1024*16;
+ payload->wr_length = 1024*16;
payload->id = 0;
fwControl =
(struct fw_control_info *)&payload->func_specific;
@@ -686,36 +758,35 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
fwControl->retcode = 0;/* OUT */
fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
- /* for the last chunk of data in case file size is not even with
- 4k, load only the rest*/
- if (((loopcount-loopNumber) == 1) &&
- ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
- fwControl->len =
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
- memcpy((u8 *)fwControl->buffer,
- (u8 *)pm8001_ha->fw_image->data + sizeRead,
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
- sizeRead +=
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
- } else {
- memcpy((u8 *)fwControl->buffer,
- (u8 *)pm8001_ha->fw_image->data + sizeRead,
- IOCTL_BUF_SIZE);
- sizeRead += IOCTL_BUF_SIZE;
- }
-
- pm8001_ha->nvmd_completion = &completion;
- ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
- if (ret) {
- pm8001_ha->fw_status = FAIL_OUT_MEMORY;
- goto out;
- }
- wait_for_completion(&completion);
- if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
- pm8001_ha->fw_status = fwControl->retcode;
- ret = -EFAULT;
- goto out;
- }
+ /*
+ * for the last chunk of data in case file size is
+ * not even with 4k, load only the rest
+ */
+
+ read_buf = (u8 *)pm8001_ha->fw_image->data + sizeRead;
+ fc_len = (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+
+ if (loopcount - loopNumber == 1 && fc_len) {
+ fwControl->len = fc_len;
+ memcpy((u8 *)fwControl->buffer, read_buf, fc_len);
+ sizeRead += fc_len;
+ } else {
+ memcpy((u8 *)fwControl->buffer, read_buf, IOCTL_BUF_SIZE);
+ sizeRead += IOCTL_BUF_SIZE;
+ }
+
+ pm8001_ha->nvmd_completion = &completion;
+ ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
+ wait_for_completion(&completion);
+ if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
+ pm8001_ha->fw_status = fwControl->retcode;
+ ret = -EFAULT;
+ goto out;
+ }
}
}
out:
@@ -775,10 +846,9 @@ static ssize_t pm8001_store_update_fw(struct device *cdev,
pm8001_ha->dev);
if (ret) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(
- "Failed to load firmware image file %s, error %d\n",
- filename_ptr, ret));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Failed to load firmware image file %s, error %d\n",
+ filename_ptr, ret);
pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE;
goto out;
}
@@ -818,30 +888,158 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
flash_error_table[i].err_code,
flash_error_table[i].reason);
}
-
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
-struct device_attribute *pm8001_host_attrs[] = {
- &dev_attr_interface_rev,
- &dev_attr_controller_fatal_error,
- &dev_attr_fw_version,
- &dev_attr_update_fw,
- &dev_attr_aap_log,
- &dev_attr_iop_log,
- &dev_attr_fatal_log,
- &dev_attr_gsm_log,
- &dev_attr_max_out_io,
- &dev_attr_max_devices,
- &dev_attr_max_sg_list,
- &dev_attr_sas_spec_support,
- &dev_attr_logging_level,
- &dev_attr_event_log_size,
- &dev_attr_host_sas_address,
- &dev_attr_bios_version,
- &dev_attr_ib_log,
- &dev_attr_ob_log,
- &dev_attr_ila_version,
- &dev_attr_inc_fw_ver,
+
+static const char *const mpiStateText[] = {
+ "MPI is not initialized",
+ "MPI is successfully initialized",
+ "MPI termination is in progress",
+ "MPI initialization failed with error in [31:16]"
+};
+
+/**
+ * ctl_mpi_state_show - controller MPI state check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t ctl_mpi_state_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int mpidw0;
+
+ mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0);
+ return sysfs_emit(buf, "%s\n", mpiStateText[mpidw0 & 0x0003]);
+}
+static DEVICE_ATTR_RO(ctl_mpi_state);
+
+/**
+ * ctl_hmi_error_show - controller MPI initialization fails
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t ctl_hmi_error_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int mpidw0;
+
+ mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0);
+ return sysfs_emit(buf, "0x%08x\n", (mpidw0 >> 16));
+}
+static DEVICE_ATTR_RO(ctl_hmi_error);
+
+/**
+ * ctl_raae_count_show - controller raae count check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t ctl_raae_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int raaecnt;
+
+ raaecnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 12);
+ return sysfs_emit(buf, "0x%08x\n", raaecnt);
+}
+static DEVICE_ATTR_RO(ctl_raae_count);
+
+/**
+ * ctl_iop0_count_show - controller iop0 count check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t ctl_iop0_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int iop0cnt;
+
+ iop0cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 16);
+ return sysfs_emit(buf, "0x%08x\n", iop0cnt);
+}
+static DEVICE_ATTR_RO(ctl_iop0_count);
+
+/**
+ * ctl_iop1_count_show - controller iop1 count check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t ctl_iop1_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int iop1cnt;
+
+ iop1cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 20);
+ return sysfs_emit(buf, "0x%08x\n", iop1cnt);
+
+}
+static DEVICE_ATTR_RO(ctl_iop1_count);
+
+static struct attribute *pm8001_host_attrs[] = {
+ &dev_attr_interface_rev.attr,
+ &dev_attr_controller_fatal_error.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_update_fw.attr,
+ &dev_attr_aap_log.attr,
+ &dev_attr_iop_log.attr,
+ &dev_attr_fatal_log.attr,
+ &dev_attr_non_fatal_log.attr,
+ &dev_attr_non_fatal_count.attr,
+ &dev_attr_gsm_log.attr,
+ &dev_attr_max_out_io.attr,
+ &dev_attr_max_devices.attr,
+ &dev_attr_max_sg_list.attr,
+ &dev_attr_sas_spec_support.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_event_log_size.attr,
+ &dev_attr_host_sas_address.attr,
+ &dev_attr_bios_version.attr,
+ &dev_attr_ib_log.attr,
+ &dev_attr_ob_log.attr,
+ &dev_attr_ila_version.attr,
+ &dev_attr_inc_fw_ver.attr,
+ &dev_attr_ctl_mpi_state.attr,
+ &dev_attr_ctl_hmi_error.attr,
+ &dev_attr_ctl_raae_count.attr,
+ &dev_attr_ctl_iop0_count.attr,
+ &dev_attr_ctl_iop1_count.attr,
NULL,
};
+static const struct attribute_group pm8001_host_attr_group = {
+ .attrs = pm8001_host_attrs
+};
+
+const struct attribute_group *pm8001_host_groups[] = {
+ &pm8001_host_attr_group,
+ NULL
+};
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index d0d43a250b9e..4743f0de223e 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -59,5 +59,10 @@
#define SYSFS_OFFSET 1024
#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024)
#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024)
+
+static inline u32 pm8001_ctl_aap1_memmap(u8 *ptr, int idx, int off)
+{
+ return *(u32 *)(ptr + idx * 32 + off);
+}
#endif /* PM8001_CTL_H_INCLUDED */
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 48e0624ecc68..501b574239e8 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -75,12 +75,10 @@ enum port_type {
};
/* driver compile-time configuration */
-#define PM8001_MAX_CCB 512 /* max ccbs supported */
+#define PM8001_MAX_CCB 1024 /* max ccbs supported */
#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
-#define PM8001_MAX_INB_NUM 1
-#define PM8001_MAX_OUTB_NUM 1
-#define PM8001_MAX_SPCV_INB_NUM 1
-#define PM8001_MAX_SPCV_OUTB_NUM 4
+#define PM8001_MAX_INB_NUM 64
+#define PM8001_MAX_OUTB_NUM 64
#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
/* Inbound/Outbound queue size */
@@ -92,25 +90,27 @@ enum port_type {
#define PM8001_MAX_PORTS 16 /* max. possible ports */
#define PM8001_MAX_DEVICES 2048 /* max supported device */
#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
+#define PM8001_RESERVE_SLOT 8
+
+#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528
+#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG
-#define USI_MAX_MEMCNT_BASE 5
-#define IB (USI_MAX_MEMCNT_BASE + 1)
-#define CI (IB + PM8001_MAX_SPCV_INB_NUM)
-#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
-#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
-#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
-#define PM8001_MAX_DMA_SG SG_ALL
enum memory_region_num {
AAP1 = 0x0, /* application acceleration processor */
IOP, /* IO processor */
NVMD, /* NVM device */
- DEV_MEM, /* memory for devices */
- CCB_MEM, /* memory for command control block */
FW_FLASH, /* memory for fw flash update */
- FORENSIC_MEM /* memory for fw forensic data */
+ FORENSIC_MEM, /* memory for fw forensic data */
+ USI_MAX_MEMCNT_BASE
};
#define PM8001_EVENT_LOG_SIZE (128 * 1024)
+/*
+ * Maximum number of DMA memory regions: number of IBQ + number of IBQ CI
+ * + number of OBQ + number of OBQ PI
+ */
+#define USI_MAX_MEMCNT (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \
+ + (2 * PM8001_MAX_OUTB_NUM)))
/*error code*/
enum mpi_err {
MPI_IO_STATUS_SUCCESS = 0x0,
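With FORENSIC_MEM now the last fixed region, USI_MAX_MEMCNT_BASE evaluates to 5 (AAP1, IOP, NVMD, FW_FLASH, FORENSIC_MEM), so the total comes to 5 + 2*64 + 2*64 = 261 regions: each inbound queue needs a message buffer plus a consumer-index word, each outbound queue a message buffer plus a producer-index word. A compile-time check of that arithmetic (standalone, C11 _Static_assert):

    #define PM8001_MAX_INB_NUM      64
    #define PM8001_MAX_OUTB_NUM     64
    #define USI_MAX_MEMCNT_BASE     5   /* AAP1, IOP, NVMD, FW_FLASH, FORENSIC_MEM */
    #define USI_MAX_MEMCNT          (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \
                                    + (2 * PM8001_MAX_OUTB_NUM)))

    _Static_assert(USI_MAX_MEMCNT == 261, "5 fixed + 128 inbound + 128 outbound");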
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 2328ff1349ac..628b08ba6770 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -42,6 +42,7 @@
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+ #include "pm80xx_tracepoints.h"
/**
* read_main_config_table - read the configure table and save it.
@@ -189,6 +190,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
u32 offsetib, offsetob;
void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+ u32 ib_offset = pm8001_ha->ib_offset;
+ u32 ob_offset = pm8001_ha->ob_offset;
+ u32 ci_offset = pm8001_ha->ci_offset;
+ u32 pi_offset = pm8001_ha->pi_offset;
pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
@@ -219,23 +224,24 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
PM8001_EVENT_LOG_SIZE;
pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
- for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
pm8001_ha->inbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[IB + i].total_len;
+ pm8001_ha->memoryMap.region[ib_offset + i].total_len;
pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
- pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
@@ -245,25 +251,26 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
}
- for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
pm8001_ha->outbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[OB + i].total_len;
+ pm8001_ha->memoryMap.region[ob_offset + i].total_len;
pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
0 | (10 << 16) | (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
- pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
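These hunks replace the fixed IB/CI/OB/PI indices with per-HBA offsets (ib_offset, ci_offset, ob_offset, pi_offset), so memoryMap.region[] can be partitioned at probe time from the runtime queue count rather than compile-time constants. A sketch of how those offsets would be derived; the function name is hypothetical and the real assignment lives in the allocation path (pm8001_init.c), outside this excerpt:

    /* memoryMap.region[] layout after this change (sketch):
     *   [0 .. USI_MAX_MEMCNT_BASE)              fixed regions (AAP1, IOP, ...)
     *   [ib_offset .. ib_offset + max_q_num)    inbound queue buffers
     *   [ci_offset .. ci_offset + max_q_num)    inbound consumer indices
     *   [ob_offset .. ob_offset + max_q_num)    outbound queue buffers
     *   [pi_offset .. pi_offset + max_q_num)    outbound producer indices
     */
    static void pm8001_partition_regions(struct pm8001_hba_info *pm8001_ha)
    {
            u32 n = pm8001_ha->max_q_num;

            pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE;
            pm8001_ha->ci_offset = pm8001_ha->ib_offset + n;
            pm8001_ha->ob_offset = pm8001_ha->ci_offset + n;
            pm8001_ha->pi_offset = pm8001_ha->ob_offset + n;
    }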
@@ -333,6 +340,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
/**
* update_inbnd_queue_table - update the inbound queue table to the HBA.
* @pm8001_ha: our hba card information
+ * @number: entry in the queue
*/
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
int number)
@@ -354,6 +362,7 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
/**
* update_outbnd_queue_table - update the outbound queue table to the HBA.
* @pm8001_ha: our hba card information
+ * @number: entry in the queue
*/
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
int number)
@@ -376,7 +385,7 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
/**
* pm8001_bar4_shift - function is called to shift BAR base address
- * @pm8001_ha : our hba card infomation
+ * @pm8001_ha : our hba card information
* @shiftValue : shifting value in memory bar.
*/
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
@@ -394,9 +403,9 @@ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
} while ((regVal != shiftValue) && time_before(jiffies, start));
if (regVal != shiftValue) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW = 0x%x\n",
+ regVal);
return -1;
}
return 0;
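pm8001_bar4_shift() writes the AXI translation register and polls it back until the value sticks or a jiffies deadline expires; the time_before() idiom keeps the comparison safe across jiffies wraparound. The same loop shape, reduced to a self-contained sketch (readback is a hypothetical accessor):

    #include <linux/jiffies.h>
    #include <linux/delay.h>

    /* Returns 0 once readback() == want, -1 after timeout_ms elapses. */
    static int poll_for_value(u32 (*readback)(void), u32 want,
                              unsigned int timeout_ms)
    {
            unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
            u32 val;

            do {
                    val = readback();
                    if (val == want)
                            return 0;
                    udelay(10);
            } while (time_before(jiffies, deadline)); /* wrap-safe */

            return -1;
    }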
@@ -410,7 +419,7 @@ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
u32 SSCbit)
{
- u32 value, offset, i;
+ u32 offset, i;
unsigned long flags;
#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
@@ -461,7 +470,7 @@ static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
so that the written value will be 0x8090c016.
This will ensure only down-spreading SSC is enabled on the SPC.
*************************************************************/
- value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+ pm8001_cr32(pm8001_ha, 2, 0xd8);
pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
/*set the shifted destination address to 0x0 to avoid error operation */
@@ -473,7 +482,7 @@ static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
/**
* mpi_set_open_retry_interval_reg
* @pm8001_ha: our hba card information
- * @interval - interval time for each OPEN_REJECT (RETRY). The units are in 1us.
+ * @interval: interval time for each OPEN_REJECT (RETRY). The units are in 1us.
*/
static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
u32 interval)
@@ -617,12 +626,10 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, 0x44);
offset = value & 0x03FFFFFF;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
+ pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 Offset: %x\n", offset);
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+ pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
pm8001_ha->general_stat_tbl_addr =
@@ -639,23 +646,22 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
*/
static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
- u8 i = 0;
+ u32 i = 0;
u16 deviceid;
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
/* 8081 controllers need BAR shift to access MPI space
* as this is shared with BIOS data */
if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GSM_SM_BASE));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Shift Bar4 to 0x%x failed\n",
+ GSM_SM_BASE);
return -1;
}
}
/* check the firmware status */
if (-1 == check_fw_ready(pm8001_ha)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Firmware is not ready!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
return -EBUSY;
}
@@ -668,9 +674,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
read_outbnd_queue_table(pm8001_ha);
/* update main config table ,inbound table and outbound table */
update_main_config_table(pm8001_ha);
- for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+ for (i = 0; i < pm8001_ha->max_q_num; i++)
update_inbnd_queue_table(pm8001_ha, i);
- for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+ for (i = 0; i < pm8001_ha->max_q_num; i++)
update_outbnd_queue_table(pm8001_ha, i);
/* 8081 controller donot require these operations */
if (deviceid != 0x8081 && deviceid != 0x0042) {
@@ -680,8 +686,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
}
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MPI initialize successful!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
} else
return -EBUSY;
/*This register is a 16-bit timer with a resolution of 1us. This is the
@@ -694,6 +699,10 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
return 0;
}
+static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
+}
+
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
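pm8001_chip_post_init() is added as an empty stub, presumably so the SPC and SPCv/SPCve variants can share one dispatch slot even though only the newer chips have post-init work to do. A sketch of that ops-table pattern; the struct and field names below are illustrative (the driver's real table is struct pm8001_dispatch in pm8001_sas.h):

    /* Illustrative shape only: the caller can invoke ->chip_post_init
     * unconditionally; the SPC variant is a no-op. */
    struct chip_ops_sketch {
            int  (*chip_init)(struct pm8001_hba_info *pm8001_ha);
            void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha);
    };

    static const struct chip_ops_sketch pm8001_ops_sketch = {
            .chip_init      = pm8001_chip_init,
            .chip_post_init = pm8001_chip_post_init,  /* empty stub above */
    };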
@@ -703,9 +712,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GSM_SM_BASE));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Shift Bar4 to 0x%x failed\n",
+ GSM_SM_BASE);
return -1;
}
}
@@ -723,8 +732,8 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
} while ((value != 0) && (--max_wait_count));
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=0x%x\n",
+ value);
return -1;
}
@@ -741,9 +750,8 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
break;
} while (--max_wait_count);
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TIME OUT MPI State = 0x%x\n",
- gst_len_mpistate & GST_MPI_STATE_MASK));
+ pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
+ gst_len_mpistate & GST_MPI_STATE_MASK);
return -1;
}
return 0;
@@ -757,25 +765,23 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
{
u32 regVal, regVal1, regVal2;
if (mpi_uninit_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MPI state is not ready\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "MPI state is not ready\n");
return -1;
}
/* read the scratch pad 2 register bit 2 */
regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
& SCRATCH_PAD2_FWRDY_RST;
if (regVal == SCRATCH_PAD2_FWRDY_RST) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Firmware is ready for reset .\n"));
+ pm8001_dbg(pm8001_ha, INIT, "Firmware is ready for reset.\n");
} else {
unsigned long flags;
/* Trigger NMI twice via RB6 */
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- RB6_ACCESS_REG));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Shift Bar4 to 0x%x failed\n",
+ RB6_ACCESS_REG);
return -1;
}
pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
@@ -788,16 +794,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
if (regVal != SCRATCH_PAD2_FWRDY_RST) {
regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
- "=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
- regVal1, regVal2));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MSGU_SCRATCH_PAD1=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
+ regVal1, regVal2);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -822,7 +826,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* step1: Check FW is ready for soft reset */
if (soft_reset_ready_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "FW is not ready\n");
return -1;
}
@@ -832,46 +836,43 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- MBIC_AAP1_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ MBIC_AAP1_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (IOP)= 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
/* map 0x70000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- MBIC_IOP_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ MBIC_IOP_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE -Event Interrupt Enable = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE -Error Interrupt Enable = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt = 0x%x\n", regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);
/* read the scratch pad 1 register bit 2 */
@@ -887,15 +888,13 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GSM_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ GSM_ADDR_BASE);
return -1;
}
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
- " Reset = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x0(0x00007b88)-GSM Configuration and Reset = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
/* step 3: host read GSM Configuration and Reset register */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
@@ -910,59 +909,52 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
regVal &= ~(0x00003b00);
/* host write GSM Configuration and Reset register */
pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
- "Configuration and Reset is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM Configuration and Reset is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
/* step 4: */
/* disable GSM - Read Address Parity Check */
regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity Check "
- "Enable = 0x%x\n", regVal1));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
+ regVal1);
pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
- "is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));
/* disable GSM - Write Address Parity Check */
regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700040 - Write Address Parity Check"
- " Enable = 0x%x\n", regVal2));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700040 - Write Address Parity Check Enable = 0x%x\n",
+ regVal2);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700040 - Write Address Parity Check "
- "Enable is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));
/* disable GSM - Write Data Parity Check */
regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x300048 - Write Data Parity Check"
- " Enable = 0x%x\n", regVal3));
+ pm8001_dbg(pm8001_ha, INIT, "GSM 0x300048 - Write Data Parity Check Enable = 0x%x\n",
+ regVal3);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
- "is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x300048 - Write Data Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));
/* step 5: delay 10 usec */
udelay(10);
/* step 5-b: set GPIO-0 output control to tristate anyway */
if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GPIO_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, INIT, "Shift Bar4 to 0x%x failed\n",
+ GPIO_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GPIO Output Control Register:"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "GPIO Output Control Register: = 0x%x\n",
+ regVal);
/* set GPIO-0 output control to tri-state */
regVal &= 0xFFFFFFFC;
pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
@@ -971,23 +963,20 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* map 0x00000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
- SPC_TOP_LEVEL_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
+ SPC_TOP_LEVEL_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Top Register before resetting IOP/AAP1"
- ":= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting IOP/AAP1:= 0x%x\n",
+ regVal);
regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
/* step 7: Reset the BDMA/OSSP */
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Top Register before resetting BDMA/OSSP"
- ": = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting BDMA/OSSP: = 0x%x\n",
+ regVal);
regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
@@ -996,9 +985,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* step 9: bring the BDMA and OSSP out of reset */
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Top Register before bringing up BDMA/OSSP"
- ":= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Top Register before bringing up BDMA/OSSP:= 0x%x\n",
+ regVal);
regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
@@ -1009,14 +998,13 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
- GSM_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
+ GSM_ADDR_BASE);
return -1;
}
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
- "Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x0 (0x00007b88)-GSM Configuration and Reset = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
/* Put those bits to high */
/* GSM XCBI offset = 0x70 0000
@@ -1028,44 +1016,37 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
*/
regVal |= (GSM_CONFIG_RESET_VALUE);
pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
- " Configuration and Reset is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT, "GSM (0x00004088 ==> 0x00007b88) - GSM Configuration and Reset is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
/* step 12: Restore GSM - Read Address Parity Check */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
/* just for debugging */
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity"
- " Check Enable is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT, "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));
/* Restore GSM - Write Address Parity Check */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700040 - Write Address Parity Check"
- " Enable is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));
/* Restore GSM - Write Data Parity Check */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
- "is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700048 - Write Data Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));
/* step 13: bring the IOP and AAP1 out of reset */
/* map 0x00000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- SPC_TOP_LEVEL_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ SPC_TOP_LEVEL_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
@@ -1088,22 +1069,20 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (!max_wait_count) {
regVal = pm8001_cr32(pm8001_ha, 0,
MSGU_SCRATCH_PAD_1);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
- "MSGU_SCRATCH_PAD1 = 0x%x\n",
- toggleVal, regVal));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_2)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_3)));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT : ToggleVal 0x%x,MSGU_SCRATCH_PAD1 = 0x%x\n",
+ toggleVal, regVal);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD2 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_2));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_3));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -1118,22 +1097,22 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (check_fw_ready(pm8001_ha) == -1) {
regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
/* return error if MPI Configuration Table not ready */
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("FW not ready SCRATCH_PAD1"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "FW not ready SCRATCH_PAD1 = 0x%x\n",
+ regVal);
regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
/* return error if MPI Configuration Table not ready */
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("FW not ready SCRATCH_PAD2"
- " = 0x%x\n", regVal));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_0)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_3)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "FW not ready SCRATCH_PAD2 = 0x%x\n",
+ regVal);
+ pm8001_dbg(pm8001_ha, INIT,
+ "SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, INIT,
+ "SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_3));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -1141,8 +1120,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
pm8001_bar4_shift(pm8001_ha, 0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SPC soft reset Complete\n"));
+ pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
return 0;
}
@@ -1150,8 +1128,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 i;
u32 regVal;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset start\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");
/* do SPC chip reset. */
regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
@@ -1175,12 +1152,11 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
mdelay(1);
} while ((--i) != 0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset finished\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
}
/**
- * pm8001_chip_iounmap - which maped when initialized.
+ * pm8001_chip_iounmap - unmap the regions mapped at initialization.
* @pm8001_ha: our hba card information
*/
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
@@ -1206,7 +1182,7 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
#ifndef PM8001_USE_MSIX
/**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
@@ -1216,10 +1192,10 @@ pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
}
- /**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
+/**
+ * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
static void
pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
@@ -1231,6 +1207,7 @@ pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
/**
* pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
+ * @int_vec_idx: interrupt number to enable
*/
static void
pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
@@ -1249,6 +1226,7 @@ pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
/**
* pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
+ * @int_vec_idx: interrupt number to disable
*/
static void
pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
@@ -1264,6 +1242,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
/**
* pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
+ * @vec: unused
*/
static void
pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
@@ -1276,8 +1255,9 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
/**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
+ * @vec: unused
*/
static void
pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
@@ -1333,25 +1313,36 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
* pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
* FW to tell the fw to get this message from IOMB.
* @pm8001_ha: our hba card information
- * @circularQ: the inbound queue we want to transfer to HBA.
+ * @q_index: the index of the inbound queue to transfer to the HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
* @nb: size in bytes of the command payload
* @responseQueue: queue to interrupt on w/ command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
- struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, size_t nb,
+ u32 q_index, u32 opCode, void *payload, size_t nb,
u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
-
- if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
- &pMessage) < 0) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("No free mpi buffer\n"));
- return -ENOMEM;
+ unsigned long flags;
+ struct inbound_queue_table *circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
+ int rv;
+ u32 htag = le32_to_cpu(*(__le32 *)payload);
+
+ trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
+ circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index));
+
+ if (WARN_ON(q_index >= pm8001_ha->max_q_num))
+ return -EINVAL;
+
+ spin_lock_irqsave(&circularQ->iq_lock, flags);
+ rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+ &pMessage);
+ if (rv < 0) {
+ pm8001_dbg(pm8001_ha, IO, "No free mpi buffer\n");
+ rv = -ENOMEM;
+ goto done;
}
if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
@@ -1370,11 +1361,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
/*Update the PI to the firmware*/
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
- responseQueue, opCode, circularQ->producer_idx,
- circularQ->consumer_index));
- return 0;
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+ responseQueue, opCode, circularQ->producer_idx,
+ circularQ->consumer_index);
+done:
+ spin_unlock_irqrestore(&circularQ->iq_lock, flags);
+ return rv;
}
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
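The reworked pm8001_mpi_build_cmd() takes a queue index rather than a queue pointer, rejects out-of-range indices with WARN_ON(), and holds the queue's iq_lock across the free-slot lookup, payload copy, and producer-index update so concurrent submitters cannot interleave. A userspace sketch of that producer protocol over a fixed-slot ring, with a pthread mutex standing in for iq_lock (all names illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define SLOTS   8
    #define SLOT_SZ 64

    struct ring {
            pthread_mutex_t lock;           /* plays the role of iq_lock */
            uint32_t pi, ci;                /* producer/consumer indices */
            uint8_t slot[SLOTS][SLOT_SZ];
    };

    static struct ring g_ring = { .lock = PTHREAD_MUTEX_INITIALIZER };

    /* Returns 0 on success, -1 if the ring is full (no free MPI buffer). */
    static int ring_post(struct ring *r, const void *msg, size_t len)
    {
            int rv = 0;

            if (len > SLOT_SZ)
                    return -1;
            pthread_mutex_lock(&r->lock);
            if ((r->pi + 1) % SLOTS == r->ci) {     /* full: keep one-slot gap */
                    rv = -1;
            } else {
                    memcpy(r->slot[r->pi], msg, len);
                    r->pi = (r->pi + 1) % SLOTS;    /* publish after the copy */
                    /* the driver additionally writes pi to a doorbell register */
            }
            pthread_mutex_unlock(&r->lock);
            return rv;
    }

    int main(void)
    {
            const char msg[] = "IOMB";

            return ring_post(&g_ring, msg, sizeof(msg)) ? 1 : 0;
    }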
@@ -1388,17 +1381,17 @@ u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
circularQ->consumer_idx * pm8001_ha->iomb_size);
if (pOutBoundMsgHeader != msgHeader) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("consumer_idx = %d msgHeader = %p\n",
- circularQ->consumer_idx, msgHeader));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "consumer_idx = %d msgHeader = %p\n",
+ circularQ->consumer_idx, msgHeader);
/* Update the producer index from SPC */
producer_index = pm8001_read_32(circularQ->pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("consumer_idx = %d producer_index = %d"
- "msgHeader = %p\n", circularQ->consumer_idx,
- circularQ->producer_index, msgHeader));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "consumer_idx = %d producer_index = %dmsgHeader = %p\n",
+ circularQ->consumer_idx,
+ circularQ->producer_index, msgHeader);
return 0;
}
/* free the circular queue buffer elements associated with the message*/
@@ -1410,9 +1403,8 @@ u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
/* Update the producer index from SPC*/
producer_index = pm8001_read_32(circularQ->pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
- circularQ->producer_index));
+ pm8001_dbg(pm8001_ha, IO, " CI=%d PI=%d\n",
+ circularQ->consumer_idx, circularQ->producer_index);
return 0;
}
@@ -1442,10 +1434,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "outbound opcode msgheader:%x ci=%d pi=%d\n",
- msgHeader_tmp, circularQ->consumer_idx,
- circularQ->producer_index));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "outbound opcode msgheader:%x ci=%d pi=%d\n",
+ msgHeader_tmp, circularQ->consumer_idx,
+ circularQ->producer_index);
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1454,12 +1446,11 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
sizeof(struct mpi_msg_hdr);
*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
>> 24) & 0x1f);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(": CI=%d PI=%d "
- "msgHeader=%x\n",
- circularQ->consumer_idx,
- circularQ->producer_index,
- msgHeader_tmp));
+ pm8001_dbg(pm8001_ha, IO,
+ ": CI=%d PI=%d msgHeader=%x\n",
+ circularQ->consumer_idx,
+ circularQ->producer_index,
+ msgHeader_tmp);
return MPI_IO_STATUS_SUCCESS;
} else {
circularQ->consumer_idx =
@@ -1520,19 +1511,20 @@ void pm8001_work_fn(struct work_struct *work)
* was cancelled. This nullification happens when the device
* goes away.
*/
- pm8001_dev = pw->data; /* Most stash device structure */
- if ((pm8001_dev == NULL)
- || ((pw->handler != IO_XFER_ERROR_BREAK)
- && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
- kfree(pw);
- return;
+ if (pw->handler != IO_FATAL_ERROR) {
+ pm8001_dev = pw->data; /* Most stash device structure */
+ if ((pm8001_dev == NULL)
+ || ((pw->handler != IO_XFER_ERROR_BREAK)
+ && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
+ kfree(pw);
+ return;
+ }
}
switch (pw->handler) {
case IO_XFER_ERROR_BREAK:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
- u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
@@ -1554,8 +1546,8 @@ void pm8001_work_fn(struct work_struct *work)
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
- tag = ccb->ccb_tag;
- if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
+ (ccb->task == t))
break;
}
if (!ccb) {
@@ -1568,22 +1560,19 @@ void pm8001_work_fn(struct work_struct *work)
ts->stat = SAS_QUEUE_FULL;
pm8001_dev = ccb->device;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&t->task_state_lock, flags1);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
- " done with event 0x%x resp 0x%x stat 0x%x but"
- " aborted by upper layer!\n",
- t, pw->handler, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, pw->handler, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
@@ -1592,32 +1581,21 @@ void pm8001_work_fn(struct work_struct *work)
case IO_XFER_OPEN_RETRY_TIMEOUT:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
- u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
int i, ret = 0;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ret = pm8001_query_task(t);
- PM8001_IO_DBG(pm8001_ha,
- switch (ret) {
- case TMF_RESP_FUNC_SUCC:
- pm8001_printk("...Task on lu\n");
- break;
-
- case TMF_RESP_FUNC_COMPLETE:
- pm8001_printk("...Task NOT on lu\n");
- break;
-
- default:
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "...query task failed!!!\n"));
- break;
- });
+ if (ret == TMF_RESP_FUNC_SUCC)
+ pm8001_dbg(pm8001_ha, IO, "...Task on lu\n");
+ else if (ret == TMF_RESP_FUNC_COMPLETE)
+ pm8001_dbg(pm8001_ha, IO, "...Task NOT on lu\n");
+ else
+ pm8001_dbg(pm8001_ha, DEVIO, "...query task failed!!!\n");
spin_lock_irqsave(&pm8001_ha->lock, flags);
@@ -1636,8 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
- tag = ccb->ccb_tag;
- if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
+ (ccb->task == t))
break;
}
if (!ccb) {
@@ -1662,8 +1640,7 @@ void pm8001_work_fn(struct work_struct *work)
break;
default: /* device misbehavior */
ret = TMF_RESP_FUNC_FAILED;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("...Reset phy\n"));
+ pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
pm8001_I_T_nexus_reset(dev);
break;
}
@@ -1677,15 +1654,14 @@ void pm8001_work_fn(struct work_struct *work)
default: /* device misbehavior */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
ret = TMF_RESP_FUNC_FAILED;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("...Reset phy\n"));
+ pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
pm8001_I_T_nexus_reset(dev);
}
if (ret == TMF_RESP_FUNC_FAILED)
t = NULL;
pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
+ pm8001_dbg(pm8001_ha, IO, "...Complete\n");
} break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
dev = pm8001_dev->sas_device;
@@ -1703,6 +1679,52 @@ void pm8001_work_fn(struct work_struct *work)
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
+ case IO_FATAL_ERROR:
+ {
+ struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ struct pm8001_ccb_info *ccb;
+ struct task_status_struct *ts;
+ struct sas_task *task;
+ int i;
+ u32 device_id;
+
+ for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ task = ccb->task;
+ ts = &task->task_status;
+
+ if (task != NULL) {
+ dev = task->dev;
+ if (!dev) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "dev is NULL\n");
+ continue;
+ }
+ /*complete sas task and update to top layer */
+ pm8001_ccb_task_free(pm8001_ha, ccb);
+ ts->resp = SAS_TASK_COMPLETE;
+ task->task_done(task);
+ } else if (ccb->ccb_tag != PM8001_INVALID_TAG) {
+ /* complete the internal commands/non-sas task */
+ pm8001_dev = ccb->device;
+ if (pm8001_dev->dcompletion) {
+ complete(pm8001_dev->dcompletion);
+ pm8001_dev->dcompletion = NULL;
+ }
+ complete(pm8001_ha->nvmd_completion);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
+ }
+ /* Deregister all the device ids */
+ for (i = 0; i < PM8001_MAX_DEVICES; i++) {
+ pm8001_dev = &pm8001_ha->devices[i];
+ device_id = pm8001_dev->device_id;
+ if (device_id) {
+ PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_free_dev(pm8001_dev);
+ }
+ }
+ } break;
}
kfree(pw);
}
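The new IO_FATAL_ERROR arm runs inside pm8001_work_fn(), the handler behind pm8001_handle_event(): interrupt context packages a {data, handler} pair into a pm8001_work and punts it to process context. A self-contained sketch of that deferral pattern using the stock workqueue API (names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct deferred_event {
            struct work_struct work;
            void *data;
            u32 handler;
    };

    static void deferred_event_fn(struct work_struct *work)
    {
            struct deferred_event *ev =
                    container_of(work, struct deferred_event, work);

            /* ... dispatch on ev->handler, as pm8001_work_fn() does ... */
            kfree(ev);                      /* the handler owns and frees it */
    }

    static int defer_event(void *data, u32 handler)
    {
            struct deferred_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

            if (!ev)
                    return -ENOMEM;
            ev->data = data;
            ev->handler = handler;
            INIT_WORK(&ev->work, deferred_event_fn);
            schedule_work(&ev->work);
            return 0;
    }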
@@ -1729,51 +1751,40 @@ int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
- int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
+ struct sas_task *task;
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
- if (!pm8001_ha_dev) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
- return;
- }
+ pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG;
+ pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
- "allocate task\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
return;
-
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
-
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ }
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- task_abort.tag = cpu_to_le32(ccb_tag);
-
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- if (ret)
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ task_abort.tag = cpu_to_le32(ccb->ccb_tag);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
+ if (ret) {
+ sas_free_task(task);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
}
static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
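pm8001_send_abort_all() now obtains tag, device binding, and task pointer in one step from pm8001_ccb_alloc() and unwinds with pm8001_ccb_free(), instead of open-coding pm8001_tag_alloc() plus ccb_info[] field setup. The helpers are defined outside this excerpt; a plausible sketch of the allocation side, assuming the tag doubles as the ccb_info[] index as in the old code:

    /* Illustrative only: combined tag + CCB allocation. */
    static struct pm8001_ccb_info *
    pm8001_ccb_alloc_sketch(struct pm8001_hba_info *pm8001_ha,
                            struct pm8001_device *dev, struct sas_task *task)
    {
            struct pm8001_ccb_info *ccb;
            u32 tag;

            if (pm8001_tag_alloc(pm8001_ha, &tag))
                    return NULL;    /* caller frees the task and bails out */

            ccb = &pm8001_ha->ccb_info[tag];
            ccb->ccb_tag = tag;
            ccb->device  = dev;
            ccb->task    = task;
            return ccb;
    }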
@@ -1781,55 +1792,43 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
{
struct sata_start_req sata_cmd;
int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate task !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
- sas_free_task(task);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate tag !!!\n"));
- return;
- }
-
- /* allocate domain device by ourselves as libsas
- * is not going to provide any
- */
+ /*
+ * Allocate domain device by ourselves as libsas is not going to
+ * provide any.
+ */
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Domain device cannot be allocated\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Domain device cannot be allocated\n");
return;
}
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
+ kfree(dev);
+ return;
+ }
+
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
- memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
-
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.fis_type = 0x27;
@@ -1838,16 +1837,17 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
fis.lbal = 0x10;
fis.sector_count = 0x1;
- sata_cmd.tag = cpu_to_le32(ccb_tag);
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ sata_cmd.tag = cpu_to_le32(ccb->ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
kfree(dev);
}
}
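The FIS assembled here is a SATA READ LOG EXT of log page 10h (fis.lbal = 0x10, sector_count = 1): after an NCQ command fails, reading that page reports which queued tag aborted and clears the device's error state so new queued commands are accepted again. A standalone sketch of the 20-byte register host-to-device FIS being filled in; the struct is a hand-rolled stand-in for the driver's host_to_dev_fis, and the READ LOG EXT opcode (0x2F) is set by the driver in context lines not shown in this hunk:

    #include <stdint.h>

    /* Register - Host to Device FIS (fis_type 0x27). */
    struct h2d_fis_sketch {
            uint8_t fis_type;       /* 0x27: register H2D */
            uint8_t flags;          /* bit 7 = C: command register update */
            uint8_t command;        /* 0x2F: READ LOG EXT */
            uint8_t features;
            uint8_t lbal, lbam, lbah, device;
            uint8_t lbal_exp, lbam_exp, lbah_exp, features_exp;
            uint8_t sector_count, sector_count_exp, rsvd, control;
            uint32_t rsvd2;
    };

    static void build_read_log_ext(struct h2d_fis_sketch *fis)
    {
            *fis = (struct h2d_fis_sketch){
                    .fis_type     = 0x27,
                    .flags        = 0x80,   /* C bit: this is a command FIS */
                    .command      = 0x2f,   /* READ LOG EXT */
                    .lbal         = 0x10,   /* log address 10h: NCQ error log */
                    .sector_count = 1,      /* one 512-byte log page */
            };
    }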
@@ -1858,13 +1858,13 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
* @piomb: the message contents of this outbound message.
*
* When FW has completed a ssp request for example a IO request, after it has
- * filled the SG data with the data, it will trigger this event represent
- * that he has finished the job,please check the coresponding buffer.
+ * filled the SG data with the data, it will trigger this event representing
+ * that it has finished the job; please check the corresponding buffer.
* So we will tell the caller who maybe waiting the result to tell upper layer
* that the task has been finished.
*/
static void
-mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -1891,30 +1891,28 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
if (status && status != IO_UNDERFLOW)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
if (status)
- PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
- "status:0x%x, tag:0x%x, task:0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t);
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
- ",param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS,param = %d\n",
+ param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
@@ -1923,69 +1921,63 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
sas_ssp_task_response(pm8001_ha->dev, t, iu);
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
break;
case IO_UNDERFLOW:
/* SSP Completion with error */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
- ",param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW,param = %d\n",
+ param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
/* Force the midlayer to retry */
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -1995,68 +1987,59 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (!t->uldd_task)
@@ -2065,62 +2048,54 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_DS_NON_OPERATIONAL);
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_TM_TAG_NOT_FOUND:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
}
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("scsi_status = %x\n ",
- psspPayload->ssp_resp_iu.status));
+ pm8001_dbg(pm8001_ha, IO, "scsi_status = %x\n",
+ psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
- " io_status 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb(); /* in order to force CPU ordering */
t->task_done(t);
}
}
/* See the comments for mpi_ssp_completion */
-static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
unsigned long flags;
@@ -2138,60 +2113,52 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("port_id = %x,device_id = %x\n",
- port_id, dev_id));
+ pm8001_dbg(pm8001_ha, DEVIO, "port_id = %x,device_id = %x\n",
+ port_id, dev_id);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
return;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
- "_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2201,88 +2168,78 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
return;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
@@ -2290,18 +2247,15 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
- " event 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb(); /* in order to force CPU ordering */
t->task_done(t);
}
@@ -2330,60 +2284,42 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
- if (!tag) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("tag null\n"));
- return;
- }
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ccb null\n"));
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
pm8001_dev = t->dev->lldd_dev;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task null\n");
return;
}
if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
&& unlikely(!t || !t->lldd_task || !t->dev)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task or dev null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
- if (!ts) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ts null\n"));
- return;
- }
if (status)
- PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
- "status:0x%x, tag:0x%x, task::0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t);
/* Print the SAS address of the failed IO device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
- for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
+ for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
- for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
+ for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
@@ -2406,29 +2342,25 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%08x%08x", temp_sata_addr_hi,
- temp_sata_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%08x%08x\n",
+ temp_sata_addr_hi,
+ temp_sata_addr_low);
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
}
}
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
- (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
- /* set new bit for abort_all */
- pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
- /* clear bit for read log */
- pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
pm8001_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
@@ -2440,99 +2372,103 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
ts->residual = param;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
- param));
+ pm8001_dbg(pm8001_ha, IO,
+ "SAS_PROTO_RESPONSE len = %d\n",
+ param);
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("PIO read len = %d\n", len));
- } else if (t->ata_task.use_ncq) {
+ pm8001_dbg(pm8001_ha, IO,
+ "PIO read len = %d\n", len);
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("FPDMA len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
+ len);
} else {
len = sizeof(struct dev_to_host_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("other len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
+ len);
}
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = len;
memcpy(&resp->ending_fis[0], sata_resp, len);
ts->buf_valid_size = sizeof(*resp);
} else
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("response to large\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "response too large\n");
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
/* the following cases are TODO cases */
case IO_UNDERFLOW:
/* SATA Completion with error */
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
- "_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2541,13 +2477,13 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
@@ -2557,22 +2493,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES"
- "_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2581,62 +2515,70 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2644,19 +2586,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(" IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, " IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_IN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2664,44 +2606,45 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
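/*
 * A note on the running_req accounting used throughout these completion
 * handlers: the plain "pm8001_dev->running_req--" decrements are replaced
 * by atomic_dec(), which assumes the per-device counter itself was
 * converted to an atomic_t (sketch of the assumed field change in the
 * device structure; layout not shown in this hunk):
 *
 *	struct pm8001_device {
 *		...
 *		atomic_t running_req;	(formerly a plain counter)
 *	};
 *
 * With an atomic_t, the decrement no longer relies on the completion path
 * holding any particular lock.
 */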
/* See the comments for mpi_ssp_completion */
-static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
@@ -2713,20 +2656,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
-
- ccb = &pm8001_ha->ccb_info[tag];
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("No CCB !!!. returning\n"));
- }
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SATA EVENT 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
@@ -2742,61 +2674,52 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sata IO status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
- port_id, dev_id, tag, event));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, dev_id, tag, event);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- pm8001_dev->running_req--;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
- "_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2805,119 +2728,91 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_PEER_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
break;
case IO_XFER_PIO_SETUP_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
/* See the comments for mpi_ssp_completion */
@@ -2942,86 +2837,79 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts = &t->task_status;
pm8001_dev = ccb->device;
if (status) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("smp IO status 0x%x\n", status));
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status);
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t);
}
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PHY_DOWN;
break;
case IO_ERROR_HW_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -3030,76 +2918,67 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_RX_FRAME:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_ERROR_INTERNAL_SMP_RESOURCE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
/* not allowed case. Therefore, return failed status */
@@ -3107,20 +2986,15 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
- " io_status 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -3136,13 +3010,12 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
u32 device_id = le32_to_cpu(pPayload->device_id);
u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
- "from 0x%x to 0x%x status = 0x%x!\n",
- device_id, pds, nds, status));
+
+ pm8001_dbg(pm8001_ha, MSG,
+ "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
+ device_id, pds, nds, status);
complete(pm8001_dev->setds_completion);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3152,16 +3025,14 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(pPayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+
complete(pm8001_ha->nvmd_completion);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Set nvm data error!\n"));
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
+ dlen_status);
}
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
void
@@ -3178,26 +3049,26 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
fw_control_context = ccb->fw_control_context;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Get nvm data error!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n",
+ dlen_status);
complete(pm8001_ha->nvmd_completion);
+ /* The tag must also be freed on failure, since the requesting
+ * path never frees it.
+ */
+ pm8001_ccb_free(pm8001_ha, ccb);
return;
}
-
if (ir_tds_bn_dps_das_nvm & IPMode) {
/* indirect mode - IR bit set */
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Get NVMD success, IR=1\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n");
if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
memcpy(pm8001_ha->sas_addr,
((u8 *)virt_addr + 4),
SAS_ADDR_SIZE);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Get SAS address"
- " from VPD successfully!\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Get SAS address from VPD successfully!\n");
}
} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
@@ -3208,14 +3079,14 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
;
} else {
/* Should not happen */
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
- ir_tds_bn_dps_das_nvm));
+ pm8001_dbg(pm8001_ha, MSG,
+ "(IR=1)Wrong Device type 0x%x\n",
+ ir_tds_bn_dps_das_nvm);
}
} else /* direct mode */ {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
- (dlen_status & NVMD_LEN) >> 24));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Get NVMD success, IR=0, dataLen=%d\n",
+ (dlen_status & NVMD_LEN) >> 24);
}
/* Though fw_control_context is freed below, usrAddr still needs
* to be updated as this holds the response to the request function
@@ -3224,10 +3095,13 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_ha->memoryMap.region[NVMD].virt_ptr,
fw_control_context->len);
kfree(ccb->fw_control_context);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ /* To avoid a race condition, complete() must be called only
+ * after the message has been copied to
+ * fw_control_context->usrAddr.
+ */
complete(pm8001_ha->nvmd_completion);
+ pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
+ pm8001_ccb_free(pm8001_ha, ccb);
}
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3240,13 +3114,13 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
tag = le32_to_cpu(pPayload->tag);
if (status != 0) {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("%x phy execute %x phy op failed!\n",
- phy_id, phy_op));
+ pm8001_dbg(pm8001_ha, MSG,
+ "%x phy execute %x phy op failed!\n",
+ phy_id, phy_op);
} else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("%x phy execute %x phy op success!\n",
- phy_id, phy_op));
+ pm8001_dbg(pm8001_ha, MSG,
+ "%x phy execute %x phy op success!\n",
+ phy_id, phy_op);
pm8001_ha->phy[phy_id].reset_success = true;
}
if (pm8001_ha->phy[phy_id].enable_completion) {
@@ -3264,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
- * the sas toplogy has formed, please discover the the whole sas domain,
+ * the sas topology has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
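/*
 * A minimal sketch of the two notification paths described above, using
 * the libsas event API this patch converts to (the broadcast path is
 * shown only for illustration; it is raised elsewhere in the driver when
 * a BROADCAST(CHANGE) primitive is received):
 *
 *	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
 *		-> libsas discovers the whole SAS domain
 *	sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
 *		-> libsas revalidates only the changed part of the domain
 */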
@@ -3275,15 +3149,6 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
if (!phy->phy_attached)
return;
- if (sas_phy->phy) {
- struct sas_phy *sphy = sas_phy->phy;
- sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
- sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
- sphy->maximum_linkrate_hw = phy->maximum_linkrate;
- }
-
if (phy->phy_type & PORT_TYPE_SAS) {
struct sas_identify_frame *id;
id = (struct sas_identify_frame *)phy->frame_rcvd;
@@ -3293,10 +3158,10 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
} else if (phy->phy_type & PORT_TYPE_SATA) {
/* Nothing */
}
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d byte dmaded.\n", i));
+ pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i);
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
- pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}
/* Get the link rate speed */
@@ -3307,30 +3172,26 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
switch (link_rate) {
case PHY_SPEED_120:
phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
break;
case PHY_SPEED_60:
phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
break;
case PHY_SPEED_30:
phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
case PHY_SPEED_15:
phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
}
sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
- sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+ sas_phy->maximum_linkrate_hw = phy->maximum_linkrate;
sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
- sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ sas_phy->maximum_linkrate = phy->maximum_linkrate;
+ sas_phy->minimum_linkrate = phy->minimum_linkrate;
}
/**
- * asd_get_attached_sas_addr -- extract/generate attached SAS address
+ * pm8001_get_attached_sas_addr - extract/generate attached SAS address
* @phy: pointer to pm8001_phy
* @sas_addr: pointer to buffer where the SAS address is to be written
*
@@ -3374,17 +3235,14 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
struct hw_event_ack_req payload;
u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
- struct inbound_queue_table *circularQ;
-
memset((u8 *)&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = cpu_to_le32(1);
payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, sizeof(payload), 0);
}
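/*
 * The function above now passes the queue number straight through instead
 * of looking up an inbound_queue_table pointer; this assumes a reworked
 * pm8001_mpi_build_cmd() that takes a queue index (prototype sketch,
 * assumed rather than shown in this hunk):
 *
 *	int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
 *				 u32 q_index, u32 opc, void *payload,
 *				 size_t nb, u32 responseQueue);
 */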
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3410,43 +3268,41 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
u8 portstate = (u8)(npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
- port_id, phy_id));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id);
switch (deviceType) {
case SAS_PHY_UNUSED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("device type no device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "device type no device.\n");
break;
case SAS_END_DEVICE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "end device.\n");
pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
PHY_NOTIFY_ENABLE_SPINUP);
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_EDGE_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("fanout expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("unknown device type(%x)\n", deviceType));
+ pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n",
+ deviceType);
break;
}
phy->phy_type |= PORT_TYPE_SAS;
@@ -3457,7 +3313,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
phy->sas_phy.oob_mode = SAS_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, &pPayload->sas_identify,
sizeof(struct sas_identify_frame)-4);
@@ -3489,12 +3345,12 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
u8 portstate = (u8)(npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
- " phy id = %d\n", port_id, phy_id));
+ pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id);
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
port->port_attached = 1;
@@ -3502,7 +3358,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_type |= PORT_TYPE_SATA;
phy->phy_attached = 1;
phy->sas_phy.oob_mode = SATA_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
sizeof(struct dev_to_host_fis));
@@ -3542,37 +3398,35 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
case PORT_VALID:
break;
case PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInvalid portID %d\n", port_id));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
+ port_id);
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
case PORT_IN_RESET:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Port In Reset portID %d\n", port_id));
+ pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
+ port_id);
break;
case PORT_NOT_ESTABLISHED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ " phy Down and PORT_NOT_ESTABLISHED\n");
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " phy Down and PORT_LOSTCOMM\n");
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
default:
port->port_attached = 0;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk(" phy Down and(default) = %x\n",
- portstate));
+ pm8001_dbg(pm8001_ha, DEVIO, " phy Down and(default) = %x\n",
+ portstate);
break;
}
@@ -3585,7 +3439,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when sas layer find a device it will notify LLDD, then the driver register
* the domain device to FW, this event is the return device ID which the FW
- * has assigned, from now,inter-communication with FW is no longer using the
+ * has assigned; from now on, inter-communication with the FW no longer uses the
* SAS address but the device ID which the FW assigned.
*/
int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3603,50 +3457,46 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" register device is status = %d\n", status));
+ pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n",
+ status);
switch (status) {
case DEVREG_SUCCESS:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
pm8001_dev->device_id = device_id;
break;
case DEVREG_FAILURE_OUT_OF_RESOURCE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_OUT_OF_RESOURCE\n");
break;
case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n");
break;
case DEVREG_FAILURE_INVALID_PHY_ID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
+ pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_INVALID_PHY_ID\n");
break;
case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n");
break;
case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n");
break;
case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_PORT_NOT_VALID_STATE\n");
break;
case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n");
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n");
break;
}
complete(pm8001_dev->dcompletion);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, htag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return 0;
}
@@ -3660,14 +3510,14 @@ int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
if (status != 0)
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" deregister device failed ,status = %x"
- ", device_id = %x\n", status, device_id));
+ pm8001_dbg(pm8001_ha, MSG,
+ " deregister device failed ,status = %x, device_id = %x\n",
+ status, device_id);
return 0;
}
/**
- * fw_flash_update_resp - Response from FW for flash update command.
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3679,70 +3529,61 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
(struct fw_flash_Update_resp *)(piomb + 4);
u32 tag = le32_to_cpu(ppayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+
status = le32_to_cpu(ppayload->status);
switch (status) {
case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ ": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n");
break;
case FLASH_UPDATE_IN_PROGRESS:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_IN_PROGRESS\n");
break;
case FLASH_UPDATE_HDR_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HDR_ERR\n");
break;
case FLASH_UPDATE_OFFSET_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_OFFSET_ERR\n");
break;
case FLASH_UPDATE_CRC_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_CRC_ERR\n");
break;
case FLASH_UPDATE_LENGTH_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_LENGTH_ERR\n");
break;
case FLASH_UPDATE_HW_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HW_ERR\n");
break;
case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ ": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n");
break;
case FLASH_UPDATE_DISABLED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_DISABLED\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("No matched status = %d\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "No matched status = %d\n",
+ status);
break;
}
kfree(ccb->fw_control_context);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
complete(pm8001_ha->nvmd_completion);
return 0;
}
-int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
u32 status;
int i;
struct general_event_resp *pPayload =
(struct general_event_resp *)(piomb + 4);
status = le32_to_cpu(pPayload->status);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" status = 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, MSG, " status = 0x%x\n", status);
for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i,
- pPayload->inb_IOMB_payload[i]));
+ pm8001_dbg(pm8001_ha, MSG, "inb_IOMB_payload[0x%x] 0x%x,\n",
+ i, pPayload->inb_IOMB_payload[i]);
return 0;
}
@@ -3761,11 +3602,6 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status = le32_to_cpu(pPayload->status);
tag = le32_to_cpu(pPayload->tag);
- if (!tag) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TAG NULL. RETURNING !!!"));
- return -1;
- }
scp = le32_to_cpu(pPayload->scp);
ccb = &pm8001_ha->ccb_info[tag];
@@ -3773,41 +3609,41 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device; /* retrieve device */
if (!t) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TASK NULL. RETURNING !!!"));
+ pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
return -1;
}
+
+ if (t->task_proto == SAS_PROTOCOL_INTERNAL_ABORT)
+ atomic_dec(&pm8001_dev->running_req);
+
ts = &t->task_status;
if (status != 0)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task abort failed status 0x%x ,"
- "tag = 0x%x, scp= 0x%x\n", status, tag, scp));
+ pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n",
+ status, tag, scp);
switch (status) {
case IO_SUCCESS:
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
+ pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
ts->resp = TMF_RESP_FUNC_FAILED;
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();
if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
- pm8001_tag_free(pm8001_ha, tag);
sas_free_task(t);
- /* clear the flag */
- pm8001_dev->id &= 0xBFFFFFFF;
- } else
+ pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG;
+ } else {
t->task_done(t);
+ }
return 0;
}
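
Note how the repeated three-line teardown (ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; pm8001_tag_free()) is folded into pm8001_ccb_free() in the completion paths above. A sketch of what such a helper plausibly looks like, assuming PM8001_INVALID_TAG is the driver's sentinel value; the authoritative version ships with the patch series, not here:

static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha,
				   struct pm8001_ccb_info *ccb)
{
	u32 tag = ccb->ccb_tag;

	/* Clear the ccb before releasing the tag: once the tag is free,
	 * the ccb may be handed out again immediately.
	 */
	ccb->task = NULL;
	ccb->ccb_tag = PM8001_INVALID_TAG;

	pm8001_tag_free(pm8001_ha, tag);
}
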
@@ -3817,7 +3653,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
-static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
unsigned long flags;
struct hw_event_resp *pPayload =
@@ -3834,194 +3670,191 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
- port_id, phy_id, eventType, status));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+ port_id, phy_id, eventType, status);
switch (eventType) {
case HW_EVENT_PHY_START_STATUS:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS"
- " status = %x\n", status));
- if (status == 0) {
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
+ status);
+ if (status == 0)
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
- complete(phy->enable_completion);
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
break;
case HW_EVENT_SAS_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_PHY_STOP_STATUS:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_STOP_STATUS "
- "status = %x\n", status));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_STOP_STATUS status = %x\n",
+ status);
if (status == 0)
phy->phy_state = 0;
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_DOWN\n"));
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
+ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
+ GFP_ATOMIC);
phy->phy_attached = 0;
phy->phy_state = 0;
hw_event_phy_down(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
/* the broadcast change primitive was received; tell libsas about this
event so it can revalidate the sas domain */
case HW_EVENT_BROADCAST_CHANGE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n");
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
port_id, phy_id, 1, 0);
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
break;
case HW_EVENT_BROADCAST_EXP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_MALFUNCTION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
break;
case HW_EVENT_BROADCAST_SES:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+ sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RECOVER:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown event type = %x\n", eventType));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type = %x\n",
+ eventType);
break;
}
return 0;
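
Every notifier call in the hunk above switches from the indirect sas_ha->notify_phy_event()/notify_port_event() pointers to the exported libsas helpers, which take an explicit gfp_t. Since this event handler runs in interrupt context, the driver passes GFP_ATOMIC; a process-context caller could sleep and pass GFP_KERNEL instead. A usage sketch:

/* Atomic (interrupt) context: libsas must not sleep while queueing
 * the event, hence GFP_ATOMIC.
 */
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);

/* Process context could use GFP_KERNEL for a more reliable allocation. */
sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
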
@@ -4037,163 +3870,132 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
__le32 pHeader = *(__le32 *)piomb;
u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
+ pm8001_dbg(pm8001_ha, MSG, "process_one_iomb:\n");
switch (opc) {
case OPC_OUB_ECHO:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n");
break;
case OPC_OUB_HW_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_HW_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n");
mpi_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n");
mpi_ssp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SMP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n");
mpi_smp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_LOCAL_PHY_CNTRL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n");
pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_REGIST:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n");
pm8001_mpi_reg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEREG_DEV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unregister the device\n"));
+ pm8001_dbg(pm8001_ha, MSG, "unregister the device\n");
pm8001_mpi_dereg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEV_HANDLE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n");
break;
case OPC_OUB_SATA_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
mpi_sata_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
mpi_sata_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
mpi_ssp_event(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_HANDLE_ARRIV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n");
/* This is for target */
break;
case OPC_OUB_SSP_RECV_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n");
/* This is for target */
break;
case OPC_OUB_DEV_INFO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_INFO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_INFO\n");
break;
case OPC_OUB_FW_FLASH_UPDATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n");
pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GPIO_RESPONSE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n");
break;
case OPC_OUB_GPIO_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n");
break;
case OPC_OUB_GENERAL_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n");
pm8001_mpi_general_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SAS_DIAG_MODE_START_END:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SAS_DIAG_MODE_START_END\n");
break;
case OPC_OUB_SAS_DIAG_EXECUTE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n");
break;
case OPC_OUB_GET_TIME_STAMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n");
break;
case OPC_OUB_SAS_HW_EVENT_ACK:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n");
break;
case OPC_OUB_PORT_CONTROL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n");
break;
case OPC_OUB_SMP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n");
pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n");
pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEVICE_HANDLE_REMOVAL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n");
break;
case OPC_OUB_SET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n");
pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n");
break;
case OPC_OUB_SET_DEV_INFO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
break;
case OPC_OUB_SAS_RE_INITIALIZE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_RE_INITIALIZE\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
- opc));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown outbound Queue IOMB OPC = %x\n",
+ opc);
break;
}
}
@@ -4202,7 +4004,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 uninitialized_var(bc);
+ u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
@@ -4276,7 +4078,6 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
- struct inbound_queue_table *circularQ;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4302,7 +4103,6 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
}
opc = OPC_INB_SMP_REQUEST;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
@@ -4313,8 +4113,8 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
smp_cmd.long_smp_req.long_resp_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &smp_cmd, sizeof(smp_cmd), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc,
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
@@ -4342,9 +4142,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
- int ret;
u64 phys_addr;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -4360,13 +4158,11 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
ssp_cmd.esgl = cpu_to_le32(1<<31);
@@ -4382,9 +4178,9 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
- sizeof(ssp_cmd), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
}
static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
@@ -4394,32 +4190,30 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag;
- int ret;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr;
u32 ATAP = 0x0;
u32 dir;
- struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == DMA_NONE) {
+
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+ pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
- }
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
@@ -4439,8 +4233,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
sata_cmd.esgl = cpu_to_le32(1 << 31);
@@ -4469,49 +4262,43 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
spin_lock_irqsave(&task->task_state_lock, flags);
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags &
SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p resp 0x%x "
- " stat 0x%x but aborted by upper layer "
- "\n", task, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
+ task, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free_done(pm8001_ha, task,
- ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return 0;
}
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
- return ret;
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
}
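
Throughout these request builders, pm8001_mpi_build_cmd() no longer takes a pre-looked-up struct inbound_queue_table pointer; callers pass the inbound queue index (0 here) and the table lookup moves into the helper. A sketch of the shape of that change, with the body reduced to the relevant lookup (the real implementation builds and posts the IOMB):

int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, u32 q_index,
			 u32 opCode, void *payload, size_t nb,
			 u32 responseQueue)
{
	/* The queue lookup now lives here instead of in every caller. */
	struct inbound_queue_table *circularQ =
		&pm8001_ha->inbnd_q_tbl[q_index];

	/* ... obtain a message buffer from circularQ, build the IOMB
	 * header with opCode, copy nb bytes of payload, and ring the
	 * inbound doorbell ...
	 */
	return 0;
}
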
/**
* pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
* @pm8001_ha: our hba card information.
- * @num: the inbound queue number
* @phy_id: the phy id which we wanted to start up.
*/
static int
pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTART;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
/*
@@ -4528,35 +4315,32 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/**
* pm8001_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
* @pm8001_ha: our hba card information.
- * @num: the inbound queue number
* @phy_id: the phy id which we wanted to stop.
*/
static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTOP;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
-/**
+/*
* see comments on pm8001_mpi_reg_resp.
*/
static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
@@ -4565,33 +4349,29 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
struct reg_dev_req payload;
u32 opc;
u32 stp_sspsmp_sata = 0x4;
- struct inbound_queue_table *circularQ;
u32 linkrate, phy_id;
- int rc, tag = 0xdeadbeef;
+ int rc;
struct pm8001_ccb_info *ccb;
u8 retryFlag = 0x1;
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return rc;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = tag;
- payload.tag = cpu_to_le32(tag);
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
if (flag == 1)
stp_sspsmp_sata = 0x02; /*direct attached sata */
else {
if (pm8001_dev->dev_type == SAS_SATA_DEV)
stp_sspsmp_sata = 0x00; /* stp*/
else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
- pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
if (parent_dev && dev_is_expander(parent_dev->dev_type))
@@ -4602,7 +4382,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
payload.phyid_portid =
- cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
+ cpu_to_le32(((port->port_id) & 0x0F) |
((phy_id & 0x0F) << 4));
payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
((linkrate & 0x0F) * 0x1000000) |
@@ -4611,12 +4391,16 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_ccb_free(pm8001_ha, ccb);
+
return rc;
}
-/**
+/*
* see comments on pm8001_mpi_reg_resp.
*/
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
@@ -4624,42 +4408,36 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
{
struct dereg_dev_req payload;
u32 opc = OPC_INB_DEREG_DEV_HANDLE;
- int ret;
- struct inbound_queue_table *circularQ;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unregister device device_id = %d\n", device_id));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+ pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
+ device_id);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
/**
* pm8001_chip_phy_ctl_req - support the local phy operation
* @pm8001_ha: our hba card information.
- * @num: the inbound queue number
- * @phy_id: the phy id which we wanted to operate
- * @phy_op:
+ * @phyId: the phy id which we want to operate on
+ * @phy_op: the phy operation to request
*/
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 phyId, u32 phy_op)
{
struct local_phy_ctl_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+
memset(&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4679,57 +4457,58 @@ static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
/**
* pm8001_chip_isr - PM8001 isr handler.
* @pm8001_ha: our hba card information.
- * @irq: irq number.
- * @stat: stat.
+ * @vec: interrupt vector number
*/
static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm8001_chip_interrupt_disable(pm8001_ha, vec);
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "irq vec %d, ODMR:0x%x\n",
- vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30));
process_oq(pm8001_ha, vec);
pm8001_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
}
static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
- u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
+ u32 dev_id, enum sas_internal_abort type, u32 task_tag, u32 cmd_tag)
{
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
- int ret;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&task_abort, 0, sizeof(task_abort));
- if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+ if (type == SAS_INTERNAL_ABORT_SINGLE) {
task_abort.abort_all = 0;
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag_to_abort = cpu_to_le32(task_tag);
- task_abort.tag = cpu_to_le32(cmd_tag);
- } else if (ABORT_ALL == (flag & ABORT_MASK)) {
+ } else if (type == SAS_INTERNAL_ABORT_DEV) {
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(dev_id);
- task_abort.tag = cpu_to_le32(cmd_tag);
+ } else {
+ pm8001_dbg(pm8001_ha, EH, "unknown type (%d)\n", type);
+ return -EIO;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- return ret;
+
+ task_abort.tag = cpu_to_le32(cmd_tag);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
}
-/**
+/*
* pm8001_chip_abort_task - abort a SAS task when an error or exception occurs.
- * @task: the task we wanted to aborted.
- * @flag: the abort flag.
*/
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
+ struct pm8001_ccb_info *ccb)
{
- u32 opc, device_id;
+ struct sas_task *task = ccb->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
+ struct pm8001_device *pm8001_dev = ccb->device;
int rc = TMF_RESP_FUNC_FAILED;
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
- cmd_tag, task_tag));
+ u32 opc, device_id;
+
+ pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n",
+ ccb->ccb_tag, abort->tag);
if (pm8001_dev->dev_type == SAS_END_DEVICE)
opc = OPC_INB_SSP_ABORT;
else if (pm8001_dev->dev_type == SAS_SATA_DEV)
@@ -4737,10 +4516,10 @@ int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
else
opc = OPC_INB_SMP_ABORT;/* SMP */
device_id = pm8001_dev->device_id;
- rc = send_task_abort(pm8001_ha, opc, device_id, flag,
- task_tag, cmd_tag);
+ rc = send_task_abort(pm8001_ha, opc, device_id, abort->type,
+ abort->tag, ccb->ccb_tag);
if (rc != TMF_RESP_FUNC_COMPLETE)
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
+ pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc);
return rc;
}
@@ -4751,28 +4530,25 @@ int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
* @tmf: task management function.
*/
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
u32 opc = OPC_INB_SSPINITMSTART;
- struct inbound_queue_table *circularQ;
struct ssp_ini_tm_start_req sspTMCmd;
- int ret;
memset(&sspTMCmd, 0, sizeof(sspTMCmd));
sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
- sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
+ sspTMCmd.relate_tag = cpu_to_le32((u32)tmf->tag_of_task_to_be_managed);
sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
if (pm8001_ha->chip_id != chip_8001)
- sspTMCmd.ds_ads_m = 0x08;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
- sizeof(sspTMCmd), 0);
- return ret;
+ sspTMCmd.ds_ads_m = cpu_to_le32(0x08);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
}
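
One subtle fix above: ds_ads_m is a little-endian wire field, so the bare assignment sspTMCmd.ds_ads_m = 0x08 was an endianness bug on big-endian hosts, now corrected to cpu_to_le32(0x08). A minimal illustration of the rule (struct and field names here are reduced stand-ins, not the full IOMB layout):

#include <linux/types.h>

struct tm_req_example {
	__le32 tag;
	__le32 ds_ads_m;
};

static void fill_example(struct tm_req_example *cmd, u32 tag)
{
	cmd->tag = cpu_to_le32(tag);
	/* Correct: convert to little-endian. A bare "= 0x08" would be
	 * wrong on big-endian CPUs and is flagged by sparse.
	 */
	cmd->ds_ads_m = cpu_to_le32(0x08);
}
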
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
@@ -4781,9 +4557,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_GET_NVMD_DATA;
u32 nvmd_type;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
- struct inbound_queue_table *circularQ;
struct get_nvm_data_req nvmd_req;
struct fw_control_ex *fw_control_context;
struct pm8001_ioctl_payload *ioctl_payload = payload;
@@ -4793,18 +4567,17 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
- fw_control_context->len = ioctl_payload->length;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ fw_control_context->len = ioctl_payload->rd_length;
memset(&nvmd_req, 0, sizeof(nvmd_req));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return rc;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
ccb->fw_control_context = fw_control_context;
- nvmd_req.tag = cpu_to_le32(tag);
+
+ nvmd_req.tag = cpu_to_le32(ccb->ccb_tag);
switch (nvmd_type) {
case TWI_DEVICE: {
@@ -4814,7 +4587,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
twi_page_size << 8 | TWI_DEVICE);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4823,7 +4596,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case C_SEEPROM: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4832,7 +4605,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case VPD_FLASH: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4841,7 +4614,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case EXPAN_ROM: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4850,7 +4623,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case IOP_RDUMP: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4861,11 +4634,12 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
- sizeof(nvmd_req), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
return rc;
}
@@ -4876,9 +4650,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SET_NVMD_DATA;
u32 nvmd_type;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
- struct inbound_queue_table *circularQ;
struct set_nvm_data_req nvmd_req;
struct fw_control_ex *fw_control_context;
struct pm8001_ioctl_payload *ioctl_payload = payload;
@@ -4887,20 +4659,20 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
if (!fw_control_context)
return -ENOMEM;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
&ioctl_payload->func_specific,
- ioctl_payload->length);
+ ioctl_payload->wr_length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return -EBUSY;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
- ccb->ccb_tag = tag;
- nvmd_req.tag = cpu_to_le32(tag);
+
+ nvmd_req.tag = cpu_to_le32(ccb->ccb_tag);
switch (nvmd_type) {
case TWI_DEVICE: {
u32 twi_addr, twi_page_size;
@@ -4909,7 +4681,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
twi_page_size << 8 | TWI_DEVICE);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4918,7 +4690,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case C_SEEPROM:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4927,7 +4699,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
case VPD_FLASH:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4936,7 +4708,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
case EXPAN_ROM:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4946,11 +4718,12 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req,
sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
return rc;
}
@@ -4959,6 +4732,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
* pm8001_chip_fw_flash_update_build - support the firmware update operation
* @pm8001_ha: our hba card information.
* @fw_flash_updata_info: firmware flash update param
+ * @tag: Tag to apply to the payload
*/
int
pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
@@ -4966,12 +4740,9 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
{
struct fw_flash_Update_req payload;
struct fw_flash_updata_info *info;
- struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_FW_FLASH_UPDATE;
memset(&payload, 0, sizeof(struct fw_flash_Update_req));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
info = fw_flash_updata_info;
payload.tag = cpu_to_le32(tag);
payload.cur_image_len = cpu_to_le32(info->cur_image_len);
@@ -4982,9 +4753,9 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
int
@@ -4995,7 +4766,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
struct fw_control_info *fw_control;
struct fw_control_ex *fw_control_context;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
@@ -5005,8 +4775,9 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "dma fw_control context input length :%x\n", fw_control->len));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "dma fw_control context input length :%x\n",
+ fw_control->len);
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -5018,16 +4789,21 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
fw_control_context->virtAddr = buffer;
fw_control_context->phys_addr = phys_addr;
fw_control_context->len = fw_control->len;
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return -EBUSY;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
- ccb->ccb_tag = tag;
+
rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
- tag);
+ ccb->ccb_tag);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
+
return rc;
}
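
The allocation paths above all follow the same new pattern: manual pm8001_tag_alloc() plus ccb_info[tag] setup becomes a single pm8001_ccb_alloc() call, and a failed pm8001_mpi_build_cmd() now unwinds through pm8001_ccb_free() so no tag leaks. A condensed sketch of the pattern, where payload and opc stand in for whichever request is being issued:

ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
if (!ccb)
	return -SAS_QUEUE_FULL;

payload.tag = cpu_to_le32(ccb->ccb_tag);

rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, sizeof(payload), 0);
if (rc)
	pm8001_ccb_free(pm8001_ha, ccb);	/* no leaked tag on failure */

return rc;
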
@@ -5114,59 +4890,59 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev, u32 state)
{
struct set_dev_state_req payload;
- struct inbound_queue_table *circularQ;
struct pm8001_ccb_info *ccb;
int rc;
- u32 tag;
u32 opc = OPC_INB_SET_DEVICE_STATE;
+
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return -1;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
- ccb->device = pm8001_dev;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(tag);
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return rc;
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_ccb_free(pm8001_ha, ccb);
+
+ return rc;
}
static int
pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
{
struct sas_re_initialization_req payload;
- struct inbound_queue_table *circularQ;
struct pm8001_ccb_info *ccb;
int rc;
- u32 tag;
u32 opc = OPC_INB_SAS_RE_INITIALIZE;
+
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return -ENOMEM;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(tag);
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
if (rc)
- pm8001_tag_free(pm8001_ha, tag);
- return rc;
+ pm8001_ccb_free(pm8001_ha, ccb);
+ return rc;
}
const struct pm8001_dispatch pm8001_8001_dispatch = {
.name = "pmc8001",
.chip_init = pm8001_chip_init,
+ .chip_post_init = pm8001_chip_post_init,
.chip_soft_rst = pm8001_chip_soft_rst,
.chip_rst = pm8001_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
@@ -5191,4 +4967,5 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
.sas_re_init_req = pm8001_chip_sas_re_initialization,
+ .fatal_errors = pm80xx_fatal_errors,
};
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 6d91e2446542..961d0465b923 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -434,11 +434,6 @@ struct task_abort_req {
u32 reserved[11];
} __attribute__((packed, aligned(4)));
-/* These flags used for SSP SMP & SATA Abort */
-#define ABORT_MASK 0x3
-#define ABORT_SINGLE 0x0
-#define ABORT_ALL 0x1
-
/**
* brief the data structure of SSP SATA SMP Abort Response
* use to describe SSP SMP & SATA Abort Response ( 64 bytes)
@@ -805,6 +800,7 @@ struct set_dev_state_resp {
#define IO_ABORT_IN_PROGRESS 0x40
#define IO_ABORT_DELAYED 0x41
#define IO_INVALID_LENGTH 0x42
+#define IO_FATAL_ERROR 0x51
/* WARNING: This error code must always be the last number.
* If you add error code, modify this code also
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3c6076e4c6d2..7a7d63aa90e2 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,8 +56,9 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
-/**
+/*
* chip info structure to identify chip key functionality as
* encryption available/not, no of ports, hw specific function ref
*/
@@ -80,13 +81,27 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
-/**
+static void pm8001_map_queues(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ if (pm8001_ha->number_of_intr > 1)
+ blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+
+ return blk_mq_map_queues(qmap);
+}
+
+/*
* The main structure which LLDD must register for scsi core.
*/
static struct scsi_host_template pm8001_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = pm8001_scan_finished,
@@ -95,20 +110,23 @@ static struct scsi_host_template pm8001_sht = {
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = PM8001_MAX_DMA_SG,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
- .shost_attrs = pm8001_host_attrs,
+ .shost_groups = pm8001_host_groups,
.track_queue_depth = 1,
+ .cmd_per_lun = 32,
+ .map_queues = pm8001_map_queues,
};
-/**
+/*
* Sas layer call this function to execute specific task.
*/
static struct sas_domain_function_template pm8001_transport_ops = {
@@ -119,18 +137,20 @@ static struct sas_domain_function_template pm8001_transport_ops = {
.lldd_control_phy = pm8001_phy_control,
.lldd_abort_task = pm8001_abort_task,
- .lldd_abort_task_set = pm8001_abort_task_set,
- .lldd_clear_aca = pm8001_clear_aca,
+ .lldd_abort_task_set = sas_abort_task_set,
.lldd_clear_task_set = pm8001_clear_task_set,
.lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
.lldd_lu_reset = pm8001_lu_reset,
.lldd_query_task = pm8001_query_task,
+ .lldd_port_formed = pm8001_port_formed,
+ .lldd_tmf_exec_complete = pm8001_setds_completion,
+ .lldd_tmf_aborted = pm8001_tmf_aborted,
};
/**
- *pm8001_phy_init - initiate our adapter phys
- *@pm8001_ha: our hba structure.
- *@phy_id: phy id.
+ * pm8001_phy_init - initiate our adapter phys
+ * @pm8001_ha: our hba structure.
+ * @phy_id: phy id.
*/
static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
@@ -138,6 +158,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
@@ -154,9 +176,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
}
/**
- *pm8001_free - free hba
- *@pm8001_ha: our hba structure.
- *
+ * pm8001_free - free hba
+ * @pm8001_ha: our hba structure.
*/
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
@@ -176,14 +197,14 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
}
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
flush_workqueue(pm8001_wq);
- kfree(pm8001_ha->tags);
+ bitmap_free(pm8001_ha->tags);
kfree(pm8001_ha);
}
#ifdef PM8001_USE_TASKLET
/**
- * tasklet for 64 msi-x interrupt handler
+ * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler
* @opaque: the passed general host adapter struct
* Note: pm8001_tasklet is common for pm8001 & pm80xx
*/
@@ -204,6 +225,7 @@ static void pm8001_tasklet(unsigned long opaque)
* pm8001_interrupt_handler_msix - main MSIX interrupt handler.
* It obtains the vector number and calls the equivalent bottom
* half or services directly.
+ * @irq: interrupt number
* @opaque: the passed outbound queue/vector. Host structure is
* retrieved from the same.
*/
@@ -229,7 +251,8 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
/**
* pm8001_interrupt_handler_intx - main INTx interrupt handler.
- * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
+ * @irq: interrupt number
+ * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure.
*/
static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
@@ -251,20 +274,47 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
return ret;
}
+static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
+static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+
/**
* pm8001_alloc - initiate our hba structure and 6 DMAs area.
- * @pm8001_ha:our hba structure.
- *
+ * @pm8001_ha: our hba structure.
+ * @ent: PCI device ID structure to match on
*/
static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
const struct pci_device_id *ent)
{
- int i;
+ int i, count = 0, rc = 0;
+ u32 ci_offset, ib_offset, ob_offset, pi_offset;
+ struct inbound_queue_table *ibq;
+ struct outbound_queue_table *obq;
+
spin_lock_init(&pm8001_ha->lock);
spin_lock_init(&pm8001_ha->bitmap_lock);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("pm8001_alloc: PHY:%x\n",
- pm8001_ha->chip->n_phy));
+ pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
+ pm8001_ha->chip->n_phy);
+
+ /* Setup Interrupt */
+ rc = pm8001_setup_irq(pm8001_ha);
+ if (rc) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "pm8001_setup_irq failed [ret: %d]\n", rc);
+ goto err_out;
+ }
+ /* Request Interrupt */
+ rc = pm8001_request_irq(pm8001_ha);
+ if (rc)
+ goto err_out;
+
+ count = pm8001_ha->max_q_num;
+ /* Queues are chosen based on the number of cores and MSI-X vectors available */
+ ib_offset = pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE;
+ ci_offset = pm8001_ha->ci_offset = ib_offset + count;
+ ob_offset = pm8001_ha->ob_offset = ci_offset + count;
+ pi_offset = pm8001_ha->pi_offset = ob_offset + count;
+ pm8001_ha->max_memcnt = pi_offset + count;
+
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_phy_init(pm8001_ha, i);
pm8001_ha->port[i].wide_port_phymap = 0;
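
For orientation, the ib/ci/ob/pi offsets computed a few lines up pack the per-queue MPI regions linearly after the fixed base regions, replacing the old compile-time CI/IB/PI/OB indexes seen in the hunks below. A worked example with illustrative values (USI_MAX_MEMCNT_BASE and the queue count vary by chip and granted vectors):

/* Sketch: 5 fixed base regions, count = max_q_num = 8 queues. */
u32 count = 8;
u32 ib_offset = 5;			/* USI_MAX_MEMCNT_BASE, say   */
u32 ci_offset = ib_offset + count;	/* 13: consumer indexes        */
u32 ob_offset = ci_offset + count;	/* 21: outbound queues         */
u32 pi_offset = ob_offset + count;	/* 29: producer indexes        */
u32 max_memcnt = pi_offset + count;	/* 37 regions allocated total  */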
@@ -273,9 +323,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
INIT_LIST_HEAD(&pm8001_ha->port[i].list);
}
- pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
- if (!pm8001_ha->tags)
- goto err_out;
/* MPI Memory region 1 for AAP Event Log for fw */
pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
@@ -288,54 +335,64 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[IOP].alignment = 32;
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ for (i = 0; i < count; i++) {
+ ibq = &pm8001_ha->inbnd_q_tbl[i];
+ spin_lock_init(&ibq->iq_lock);
/* MPI Memory region 3 for consumer Index of inbound queues */
- pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
- pm8001_ha->memoryMap.region[CI+i].element_size = 4;
- pm8001_ha->memoryMap.region[CI+i].total_len = 4;
- pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+ pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1;
+ pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4;
+ pm8001_ha->memoryMap.region[ci_offset+i].total_len = 4;
+ pm8001_ha->memoryMap.region[ci_offset+i].alignment = 4;
if ((ent->driver_data) != chip_8001) {
/* MPI Memory region 5 inbound queues */
- pm8001_ha->memoryMap.region[IB+i].num_elements =
+ pm8001_ha->memoryMap.region[ib_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[IB+i].element_size = 128;
- pm8001_ha->memoryMap.region[IB+i].total_len =
+ pm8001_ha->memoryMap.region[ib_offset+i].element_size
+ = 128;
+ pm8001_ha->memoryMap.region[ib_offset+i].total_len =
PM8001_MPI_QUEUE * 128;
- pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+ pm8001_ha->memoryMap.region[ib_offset+i].alignment
+ = 128;
} else {
- pm8001_ha->memoryMap.region[IB+i].num_elements =
+ pm8001_ha->memoryMap.region[ib_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[IB+i].element_size = 64;
- pm8001_ha->memoryMap.region[IB+i].total_len =
+ pm8001_ha->memoryMap.region[ib_offset+i].element_size
+ = 64;
+ pm8001_ha->memoryMap.region[ib_offset+i].total_len =
PM8001_MPI_QUEUE * 64;
- pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+ pm8001_ha->memoryMap.region[ib_offset+i].alignment = 64;
}
}
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ for (i = 0; i < count; i++) {
+ obq = &pm8001_ha->outbnd_q_tbl[i];
+ spin_lock_init(&obq->oq_lock);
/* MPI Memory region 4 for producer Index of outbound queues */
- pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
- pm8001_ha->memoryMap.region[PI+i].element_size = 4;
- pm8001_ha->memoryMap.region[PI+i].total_len = 4;
- pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+ pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1;
+ pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4;
+ pm8001_ha->memoryMap.region[pi_offset+i].total_len = 4;
+ pm8001_ha->memoryMap.region[pi_offset+i].alignment = 4;
if (ent->driver_data != chip_8001) {
/* MPI Memory region 6 Outbound queues */
- pm8001_ha->memoryMap.region[OB+i].num_elements =
+ pm8001_ha->memoryMap.region[ob_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[OB+i].element_size = 128;
- pm8001_ha->memoryMap.region[OB+i].total_len =
+ pm8001_ha->memoryMap.region[ob_offset+i].element_size
+ = 128;
+ pm8001_ha->memoryMap.region[ob_offset+i].total_len =
PM8001_MPI_QUEUE * 128;
- pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+ pm8001_ha->memoryMap.region[ob_offset+i].alignment
+ = 128;
} else {
/* MPI Memory region 6 Outbound queues */
- pm8001_ha->memoryMap.region[OB+i].num_elements =
+ pm8001_ha->memoryMap.region[ob_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[OB+i].element_size = 64;
- pm8001_ha->memoryMap.region[OB+i].total_len =
+ pm8001_ha->memoryMap.region[ob_offset+i].element_size
+ = 64;
+ pm8001_ha->memoryMap.region[ob_offset+i].total_len =
PM8001_MPI_QUEUE * 64;
- pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+ pm8001_ha->memoryMap.region[ob_offset+i].alignment = 64;
}
}
@@ -343,19 +400,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
- /* Memory region for devices*/
- pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
- pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
- sizeof(struct pm8001_device);
- pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
- sizeof(struct pm8001_device);
-
- /* Memory region for ccb_info*/
- pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
- pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
- sizeof(struct pm8001_ccb_info);
- pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
- sizeof(struct pm8001_ccb_info);
/* Memory region for fw flash */
pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
@@ -364,50 +408,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
- for (i = 0; i < USI_MAX_MEMCNT; i++) {
+ for (i = 0; i < pm8001_ha->max_memcnt; i++) {
+ struct mpi_mem *region = &pm8001_ha->memoryMap.region[i];
+
if (pm8001_mem_alloc(pm8001_ha->pdev,
- &pm8001_ha->memoryMap.region[i].virt_ptr,
- &pm8001_ha->memoryMap.region[i].phys_addr,
- &pm8001_ha->memoryMap.region[i].phys_addr_hi,
- &pm8001_ha->memoryMap.region[i].phys_addr_lo,
- pm8001_ha->memoryMap.region[i].total_len,
- pm8001_ha->memoryMap.region[i].alignment) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Mem%d alloc failed\n",
- i));
- goto err_out;
+ &region->virt_ptr,
+ &region->phys_addr,
+ &region->phys_addr_hi,
+ &region->phys_addr_lo,
+ region->total_len,
+ region->alignment) != 0) {
+ pm8001_dbg(pm8001_ha, FAIL, "Mem%d alloc failed\n", i);
+ goto err_out;
}
}
- pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
+ /* Memory region for devices*/
+ pm8001_ha->devices = kzalloc(PM8001_MAX_DEVICES
+ * sizeof(struct pm8001_device), GFP_KERNEL);
+ if (!pm8001_ha->devices) {
+ rc = -ENOMEM;
+ goto err_out_nodev;
+ }
for (i = 0; i < PM8001_MAX_DEVICES; i++) {
pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
pm8001_ha->devices[i].id = i;
pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
- pm8001_ha->devices[i].running_req = 0;
- }
- pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
- for (i = 0; i < PM8001_MAX_CCB; i++) {
- pm8001_ha->ccb_info[i].ccb_dma_handle =
- pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
- i * sizeof(struct pm8001_ccb_info);
- pm8001_ha->ccb_info[i].task = NULL;
- pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
- pm8001_ha->ccb_info[i].device = NULL;
- ++pm8001_ha->tags_num;
+ atomic_set(&pm8001_ha->devices[i].running_req, 0);
}
pm8001_ha->flags = PM8001F_INIT_TIME;
/* Initialize tags */
pm8001_tag_init(pm8001_ha);
return 0;
+
+err_out_nodev:
+ for (i = 0; i < pm8001_ha->max_memcnt; i++) {
+ if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
+ dma_free_coherent(&pm8001_ha->pdev->dev,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
+ pm8001_ha->memoryMap.region[i].virt_ptr,
+ pm8001_ha->memoryMap.region[i].phys_addr);
+ }
+ }
err_out:
return 1;
}
/**
- * pm8001_ioremap - remap the pci high physical address to kernal virtual
+ * pm8001_ioremap - remap the pci high physical address to kernel virtual
* address so that we can access them.
- * @pm8001_ha:our hba structure.
+ * @pm8001_ha: our hba structure.
*/
static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
{
@@ -436,15 +487,18 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->io_mem[logicalBar].memvirtaddr =
ioremap(pm8001_ha->io_mem[logicalBar].membase,
pm8001_ha->io_mem[logicalBar].memsize);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCI: bar %d, logicalBar %d ",
- bar, logicalBar));
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "base addr %llx virt_addr=%llx len=%d\n",
- (u64)pm8001_ha->io_mem[logicalBar].membase,
- (u64)(unsigned long)
- pm8001_ha->io_mem[logicalBar].memvirtaddr,
- pm8001_ha->io_mem[logicalBar].memsize));
+ if (!pm8001_ha->io_mem[logicalBar].memvirtaddr) {
+ pm8001_dbg(pm8001_ha, INIT,
+ "Failed to ioremap bar %d, logicalBar %d",
+ bar, logicalBar);
+ return -ENOMEM;
+ }
+ pm8001_dbg(pm8001_ha, INIT,
+ "base addr %llx virt_addr=%llx len=%d\n",
+ (u64)pm8001_ha->io_mem[logicalBar].membase,
+ (u64)(unsigned long)
+ pm8001_ha->io_mem[logicalBar].memvirtaddr,
+ pm8001_ha->io_mem[logicalBar].memsize);
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
@@ -483,13 +537,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = logging_level;
+ pm8001_ha->non_fatal_count = 0;
if (link_rate >= 1 && link_rate <= 15)
pm8001_ha->link_rate = (link_rate << 8);
else {
pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
LINKRATE_60 | LINKRATE_120;
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "Setting link rate to default value\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Setting link rate to default value\n");
}
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
@@ -509,9 +564,11 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
(unsigned long)&(pm8001_ha->irq_vector[j]));
#endif
- pm8001_ioremap(pm8001_ha);
+ if (pm8001_ioremap(pm8001_ha))
+ goto failed_pci_alloc;
if (!pm8001_alloc(pm8001_ha, ent))
return pm8001_ha;
+failed_pci_alloc:
pm8001_free(pm8001_ha);
return NULL;
}
@@ -565,12 +622,8 @@ static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
shost->transportt = pm8001_stt;
shost->max_id = PM8001_MAX_DEVICES;
- shost->max_lun = 8;
- shost->max_channel = 0;
shost->unique_id = pm8001_id;
shost->max_cmd_len = 16;
- shost->can_queue = PM8001_CAN_QUEUE;
- shost->cmd_per_lun = 32;
return 0;
exit_free1:
kfree(arr_port);
@@ -610,9 +663,9 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
/**
* pm8001_init_sas_add - initialize sas address
- * @chip_info: our ha struct.
+ * @pm8001_ha: our ha struct.
*
- * Currently we just set the fixed SAS address to our HBA,for manufacture,
+ * Currently we just set a fixed SAS address for our HBA; in production
* it should be read from the EEPROM.
*/
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
@@ -635,30 +688,30 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
if (pm8001_ha->chip_id == chip_8001) {
if (deviceid == 0x8081 || deviceid == 0x0042) {
payload.minor_function = 4;
- payload.length = 4096;
+ payload.rd_length = 4096;
} else {
payload.minor_function = 0;
- payload.length = 128;
+ payload.rd_length = 128;
}
} else if ((pm8001_ha->chip_id == chip_8070 ||
pm8001_ha->chip_id == chip_8072) &&
pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
payload.minor_function = 4;
- payload.length = 4096;
+ payload.rd_length = 4096;
} else {
payload.minor_function = 1;
- payload.length = 4096;
+ payload.rd_length = 4096;
}
payload.offset = 0;
- payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
+ payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
+ pm8001_dbg(pm8001_ha, INIT, "mem alloc fail\n");
return;
}
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
return;
}
wait_for_completion(&completion);
@@ -686,9 +739,8 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
sas_add[7] = sas_add[7] + 4;
memcpy(&pm8001_ha->phy[i].dev_sas_addr,
sas_add, SAS_ADDR_SIZE);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("phy %d sas_addr = %016llx\n", i,
- pm8001_ha->phy[i].dev_sas_addr));
+ pm8001_dbg(pm8001_ha, INIT, "phy %d sas_addr = %016llx\n", i,
+ pm8001_ha->phy[i].dev_sas_addr);
}
kfree(payload.func_specific);
#else
@@ -720,7 +772,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
/* SAS ADDRESS read from flash / EEPROM */
payload.minor_function = 6;
payload.offset = 0;
- payload.length = 4096;
+ payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
@@ -728,7 +780,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
return -ENOMEM;
}
wait_for_completion(&completion);
@@ -751,7 +803,7 @@ struct pm8001_mpi3_phy_pg_trx_config {
};
/**
- * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings
+ * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings
* @pm8001_ha : our adapter
* @phycfg : PHY config page to populate
*/
@@ -771,7 +823,7 @@ void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha,
}
/**
- * pm8001_get_external_phy_settings : Retrieves the external PHY settings
+ * pm8001_get_external_phy_settings - Retrieves the external PHY settings
* @pm8001_ha : our adapter
* @phycfg : PHY config page to populate
*/
@@ -791,7 +843,7 @@ void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha,
}
/**
- * pm8001_get_phy_mask : Retrieves the mask that denotes if a PHY is int/ext
+ * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext
* @pm8001_ha : our adapter
* @phymask : The PHY mask
*/
@@ -822,14 +874,14 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
break;
default:
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Unknown subsystem device=0x%.04x",
- pm8001_ha->pdev->subsystem_device));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Unknown subsystem device=0x%.04x\n",
+ pm8001_ha->pdev->subsystem_device);
}
}
/**
- * pm8001_set_phy_settings_ven_117c_12Gb : Configure ATTO 12Gb PHY settings
+ * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings
* @pm8001_ha : our adapter
*/
static
@@ -864,7 +916,7 @@ int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID.
+ * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID.
* @pm8001_ha : our hba.
*/
static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
@@ -888,35 +940,63 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
#ifdef PM8001_USE_MSIX
/**
* pm8001_setup_msix - enable MSI-X interrupt
- * @chip_info: our ha struct.
- * @irq_handler: irq_handler
+ * @pm8001_ha: our ha struct.
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 i = 0, j = 0;
- u32 number_of_intr;
- int flag = 0;
+ unsigned int allocated_irq_vectors;
int rc;
/* SPCv controllers support 64 MSI-X vectors */
if (pm8001_ha->chip_id == chip_8001) {
- number_of_intr = 1;
+ rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1,
+ PCI_IRQ_MSIX);
} else {
- number_of_intr = PM8001_MAX_MSIX_VEC;
- flag &= ~IRQF_SHARED;
+ /*
+ * Queue index #0 is used always for housekeeping, so don't
+ * include in the affinity spreading.
+ */
+ struct irq_affinity desc = {
+ .pre_vectors = 1,
+ };
+ rc = pci_alloc_irq_vectors_affinity(
+ pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}
- rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
- number_of_intr, PCI_IRQ_MSIX);
+ allocated_irq_vectors = rc;
if (rc < 0)
return rc;
- pm8001_ha->number_of_intr = number_of_intr;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
- rc, pm8001_ha->number_of_intr));
+ /* Assigns the number of interrupts */
+ pm8001_ha->number_of_intr = allocated_irq_vectors;
+
+ /* Maximum queue number updating in HBA structure */
+ pm8001_ha->max_q_num = allocated_irq_vectors;
+
+ pm8001_dbg(pm8001_ha, INIT,
+ "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
+ rc, pm8001_ha->number_of_intr);
+ return 0;
+}
- for (i = 0; i < number_of_intr; i++) {
+static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 i = 0, j = 0;
+ int flag = 0, rc = 0;
+ int nr_irqs = pm8001_ha->number_of_intr;
+
+ if (pm8001_ha->chip_id != chip_8001)
+ flag &= ~IRQF_SHARED;
+
+ pm8001_dbg(pm8001_ha, INIT,
+ "pci_enable_msix request number of intr %d\n",
+ pm8001_ha->number_of_intr);
+
+ if (nr_irqs > ARRAY_SIZE(pm8001_ha->intr_drvname))
+ nr_irqs = ARRAY_SIZE(pm8001_ha->intr_drvname);
+
+ for (i = 0; i < nr_irqs; i++) {
snprintf(pm8001_ha->intr_drvname[i],
sizeof(pm8001_ha->intr_drvname[0]),
"%s-%d", pm8001_ha->name, i);
@@ -941,9 +1021,23 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
}
#endif
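
The MSI-X rework above uses the managed-affinity allocator: irq_affinity.pre_vectors excludes vector 0 (the housekeeping queue) from CPU spreading, and the PCI core distributes the remaining vectors. A minimal sketch of that call, assuming a generic pci_dev and illustrative vector bounds:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int alloc_spread_vectors(struct pci_dev *pdev)
{
	struct irq_affinity desc = {
		.pre_vectors = 1,	/* vector 0: housekeeping, not spread */
	};
	int nvec;

	/* Ask for 2..64 MSI-X vectors, affinity-managed past vector 0. */
	nvec = pci_alloc_irq_vectors_affinity(pdev, 2, 64,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	return nvec;	/* >= 2 vectors granted, or a negative errno */
}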
+static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pci_dev *pdev;
+
+ pdev = pm8001_ha->pdev;
+
+#ifdef PM8001_USE_MSIX
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ return pm8001_setup_msix(pm8001_ha);
+ pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+#endif
+ return 0;
+}
+
/**
* pm8001_request_irq - register interrupt
- * @chip_info: our ha struct.
+ * @pm8001_ha: our ha struct.
*/
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
@@ -954,10 +1048,9 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
#ifdef PM8001_USE_MSIX
if (pdev->msix_cap && pci_msi_enabled())
- return pm8001_setup_msix(pm8001_ha);
+ return pm8001_request_msix(pm8001_ha);
else {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MSIX not supported!!!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
goto intx;
}
#endif
@@ -977,8 +1070,8 @@ intx:
* @ent: pci device id
*
* This function is the main initialization function; it is invoked when a new
- * pci driver it is invoked, all struct an hardware initilization should be done
- * here, also, register interrupt
+ * pci driver is registered. All structure and hardware initialization should
+ * be done here, and the interrupt is registered.
*/
static int pm8001_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -989,6 +1082,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
struct pm8001_hba_info *pm8001_ha;
struct Scsi_Host *shost = NULL;
const struct pm8001_chip_info *chip;
+ struct sas_ha_struct *sha;
dev_printk(KERN_INFO, &pdev->dev,
"pm80xx: driver version %s\n", DRV_VERSION);
@@ -1017,12 +1111,12 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_regions;
}
chip = &pm8001_chips[ent->driver_data];
- SHOST_TO_SAS_HA(shost) =
- kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
- if (!SHOST_TO_SAS_HA(shost)) {
+ sha = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+ if (!sha) {
rc = -ENOMEM;
goto err_out_free_host;
}
+ SHOST_TO_SAS_HA(shost) = sha;
rc = pm8001_prep_sas_ha_init(shost, chip);
if (rc) {
@@ -1036,24 +1130,35 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
rc = -ENOMEM;
goto err_out_free;
}
- list_add_tail(&pm8001_ha->list, &hba_list);
+
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "chip_init failed [ret: %d]\n", rc));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "chip_init failed [ret: %d]\n", rc);
goto err_out_ha_free;
}
+ rc = pm8001_init_ccb_tag(pm8001_ha);
+ if (rc)
+ goto err_out_enable;
+
+
+ PM8001_CHIP_DISP->chip_post_init(pm8001_ha);
+
+ if (pm8001_ha->number_of_intr > 1) {
+ shost->nr_hw_queues = pm8001_ha->number_of_intr - 1;
+ /*
+ * For now, ensure we're not sent too many commands by setting
+ * host_tagset. This is also required if we start using request
+ * tag.
+ */
+ shost->host_tagset = 1;
+ }
+
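
The nr_hw_queues/host_tagset pair set just above is the usual shape for exposing one block-layer hardware queue per I/O vector while keeping a single shared tag space, so the midlayer can never exceed can_queue across queues. Reduced to its core (shost fields are the generic SCSI host ones; nvec is illustrative):

/* Sketch: nvec interrupt vectors, vector 0 reserved for housekeeping. */
if (nvec > 1) {
	shost->nr_hw_queues = nvec - 1;	/* one hw queue per I/O vector     */
	shost->host_tagset = 1;		/* one tag space across all queues */
}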
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
- rc = pm8001_request_irq(pm8001_ha);
- if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "pm8001_request_irq failed [ret: %d]\n", rc));
- goto err_out_shost;
- }
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
if (pm8001_ha->chip_id != chip_8001) {
@@ -1065,15 +1170,20 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm8001_init_sas_add(pm8001_ha);
/* phy setting support for motherboard controller */
- if (pm8001_configure_phy_settings(pm8001_ha))
+ rc = pm8001_configure_phy_settings(pm8001_ha);
+ if (rc)
goto err_out_shost;
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
- if (rc)
+ if (rc) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "sas_register_ha failed [ret: %d]\n", rc);
goto err_out_shost;
- scsi_scan_host(pm8001_ha->shost);
+ }
+ list_add_tail(&pm8001_ha->list, &hba_list);
pm8001_ha->flags = PM8001F_RUN_TIME;
+ scsi_scan_host(pm8001_ha->shost);
return 0;
err_out_shost:
@@ -1081,7 +1191,7 @@ err_out_shost:
err_out_ha_free:
pm8001_free(pm8001_ha);
err_out_free:
- kfree(SHOST_TO_SAS_HA(shost));
+ kfree(sha);
err_out_free_host:
scsi_host_put(shost);
err_out_regions:
@@ -1092,6 +1202,62 @@ err_out_enable:
return rc;
}
+/**
+ * pm8001_init_ccb_tag - allocate memory to CCB and tag.
+ * @pm8001_ha: our hba card information.
+ */
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha)
+{
+ struct Scsi_Host *shost = pm8001_ha->shost;
+ struct device *dev = pm8001_ha->dev;
+ u32 max_out_io, ccb_count;
+ u32 can_queue;
+ int i;
+
+ max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
+ ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
+
+ /* Update to the scsi host*/
+ can_queue = ccb_count - PM8001_RESERVE_SLOT;
+ shost->can_queue = can_queue;
+
+ pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL);
+ if (!pm8001_ha->tags)
+ goto err_out;
+
+ /* Memory region for ccb_info*/
+ pm8001_ha->ccb_count = ccb_count;
+ pm8001_ha->ccb_info =
+ kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL);
+ if (!pm8001_ha->ccb_info) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Unable to allocate memory for ccb\n");
+ goto err_out_noccb;
+ }
+ for (i = 0; i < ccb_count; i++) {
+ pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev,
+ sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
+ &pm8001_ha->ccb_info[i].ccb_dma_handle,
+ GFP_KERNEL);
+ if (!pm8001_ha->ccb_info[i].buf_prd) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "ccb prd memory allocation error\n");
+ goto err_out;
+ }
+ pm8001_ha->ccb_info[i].task = NULL;
+ pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG;
+ pm8001_ha->ccb_info[i].device = NULL;
+ ++pm8001_ha->tags_num;
+ }
+
+ return 0;
+
+err_out_noccb:
+ kfree(pm8001_ha->devices);
+err_out:
+ return -ENOMEM;
+}
+
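
A worked example of the sizing logic in pm8001_init_ccb_tag() above, with illustrative numbers (the real PM8001_MAX_CCB and PM8001_RESERVE_SLOT values live in the driver headers):

/* Illustrative values only. */
u32 max_out_io = 1024;				/* firmware-reported limit */
u32 ccb_count = min_t(u32, 256, max_out_io);	/* 256 ~ PM8001_MAX_CCB */
u32 can_queue = ccb_count - 8;			/* 8 ~ PM8001_RESERVE_SLOT */
/* => ccb_count = 256; the SCSI midlayer sees can_queue = 248 */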
static void pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -1123,6 +1289,16 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
scsi_host_put(pm8001_ha->shost);
+
+ for (i = 0; i < pm8001_ha->ccb_count; i++) {
+ dma_free_coherent(&pm8001_ha->pdev->dev,
+ sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
+ pm8001_ha->ccb_info[i].buf_prd,
+ pm8001_ha->ccb_info[i].ccb_dma_handle);
+ }
+ kfree(pm8001_ha->ccb_info);
+ kfree(pm8001_ha->devices);
+
pm8001_free(pm8001_ha);
kfree(sha->sas_phy);
kfree(sha->sas_port);
@@ -1133,23 +1309,21 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
/**
* pm8001_pci_suspend - power management suspend main entry point
- * @pdev: PCI device struct
- * @state: PM state change to (usually PCI_D3)
+ * @dev: Device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 on success, anything else on error.
*/
-static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pm8001_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
- struct pm8001_hba_info *pm8001_ha;
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
int i, j;
- u32 device_state;
- pm8001_ha = sha->lldd_ha;
sas_suspend_ha(sha);
flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
if (!pdev->pm_cap) {
- dev_err(&pdev->dev, " PCI PM not supported\n");
+ dev_err(dev, " PCI PM not supported\n");
return -ENODEV;
}
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
@@ -1172,47 +1346,33 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
- device_state = pci_choose_state(pdev, state);
- pm8001_printk("pdev=0x%p, slot=%s, entering "
- "operating state [D%d]\n", pdev,
- pm8001_ha->name, device_state);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, device_state);
+ pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering "
+ "suspended state\n", pdev,
+ pm8001_ha->name);
return 0;
}
/**
* pm8001_pci_resume - power management resume main entry point
- * @pdev: PCI device struct
+ * @dev: Device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 on success, anything else on error.
*/
-static int pm8001_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused pm8001_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int rc;
u8 i = 0, j;
- u32 device_state;
DECLARE_COMPLETION_ONSTACK(completion);
+
pm8001_ha = sha->lldd_ha;
- device_state = pdev->current_state;
- pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
- "operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
+ pm8001_info(pm8001_ha,
+ "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
+ pdev, pm8001_ha->name, pdev->current_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- pm8001_printk("slot=%s Enable device failed during resume\n",
- pm8001_ha->name);
- goto err_out_enable;
- }
-
- pci_set_master(pdev);
rc = pci_go_44(pdev);
if (rc)
goto err_out_disable;
@@ -1220,8 +1380,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
/* chip soft rst only for spc */
if (pm8001_ha->chip_id == chip_8001) {
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip soft reset successful\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip soft reset successful\n");
}
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc)
@@ -1273,8 +1432,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
err_out_disable:
scsi_remove_host(pm8001_ha->shost);
- pci_disable_device(pdev);
-err_out_enable:
+
return rc;
}
@@ -1357,13 +1515,16 @@ static struct pci_device_id pm8001_pci_table[] = {
{} /* terminate list */
};
+static SIMPLE_DEV_PM_OPS(pm8001_pci_pm_ops,
+ pm8001_pci_suspend,
+ pm8001_pci_resume);
+
static struct pci_driver pm8001_pci_driver = {
.name = DRV_NAME,
.id_table = pm8001_pci_table,
.probe = pm8001_pci_probe,
.remove = pm8001_pci_remove,
- .suspend = pm8001_pci_suspend,
- .resume = pm8001_pci_resume,
+ .driver.pm = &pm8001_pci_pm_ops,
};
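
The suspend/resume hunks above and this pci_driver change follow the standard legacy-PM to dev_pm_ops conversion: the PCI core now performs the state save/restore and D-state transitions, leaving only device-specific work in the callbacks. A minimal sketch of the shape, for a hypothetical driver named foo:

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	dev_dbg(&pdev->dev, "quiescing\n");	/* device-specific work only */
	return 0;	/* PCI core saves state and picks the D-state */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	dev_dbg(dev, "reinitializing\n");	/* core already restored state */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name      = "foo",
	.driver.pm = &foo_pm_ops,
};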
/**
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b7cbc312843e..8e3f2f9ddaac 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
+#include "pm80xx_tracepoints.h"
/**
* pm8001_find_tag - from sas task to find out tag that belongs to this task
@@ -65,7 +66,11 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag)
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
void *bitmap = pm8001_ha->tags;
- clear_bit(tag, bitmap);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ __clear_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
/**
@@ -73,11 +78,11 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
* @pm8001_ha: our hba struct
* @tag_out: the found empty tag .
*/
-inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
+int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
- unsigned int tag;
void *bitmap = pm8001_ha->tags;
unsigned long flags;
+ unsigned int tag;
spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
@@ -85,7 +90,7 @@ inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
return -SAS_QUEUE_FULL;
}
- set_bit(tag, bitmap);
+ __set_bit(tag, bitmap);
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
*tag_out = tag;
return 0;
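
Note the set_bit()/clear_bit() to __set_bit()/__clear_bit() change in the two hunks above: inside the bitmap_lock critical section the atomic read-modify-write is redundant, so the non-atomic variants are safe and cheaper. The allocation pattern, sketched as a fragment:

spin_lock_irqsave(&bitmap_lock, flags);
tag = find_first_zero_bit(tags, nr_tags);
if (tag >= nr_tags) {
	spin_unlock_irqrestore(&bitmap_lock, flags);
	return -SAS_QUEUE_FULL;
}
__set_bit(tag, tags);	/* non-atomic is safe: the lock serializes writers */
spin_unlock_irqrestore(&bitmap_lock, flags);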
@@ -98,14 +103,16 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
pm8001_tag_free(pm8001_ha, i);
}
- /**
- * pm8001_mem_alloc - allocate memory for pm8001.
- * @pdev: pci device.
- * @virt_addr: the allocated virtual address
- * @pphys_addr_hi: the physical address high byte address.
- * @pphys_addr_lo: the physical address low byte address.
- * @mem_size: memory size.
- */
+/**
+ * pm8001_mem_alloc - allocate memory for pm8001.
+ * @pdev: pci device.
+ * @virt_addr: the allocated virtual address
+ * @pphys_addr: DMA address for this device
+ * @pphys_addr_hi: the physical address high byte address.
+ * @pphys_addr_lo: the physical address low byte address.
+ * @mem_size: memory size.
+ * @align: requested byte alignment
+ */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
u32 *pphys_addr_lo, u32 mem_size, u32 align)
@@ -118,10 +125,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
align_offset = (dma_addr_t)align - 1;
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
- if (!mem_virt_alloc) {
- pr_err("pm80xx: memory allocation error\n");
- return -1;
- }
+ if (!mem_virt_alloc)
+ return -ENOMEM;
*pphys_addr = mem_dma_handle;
phys_align = (*pphys_addr + align_offset) & ~align_offset;
*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
@@ -129,6 +134,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
*pphys_addr_lo = lower_32_bits(phys_align);
return 0;
}
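
The alignment logic in pm8001_mem_alloc() above over-allocates by `align` bytes, then rounds the returned DMA handle up to the next boundary and applies the same shift to the virtual address. A worked example with illustrative numbers (the mask trick assumes align is a power of two):

/* Say align = 128 (0x80) and dma_alloc_coherent() returned 0x1040. */
dma_addr_t align_offset = 128 - 1;		/* 0x7f */
dma_addr_t phys_align = (0x1040 + align_offset) & ~align_offset;
/* phys_align = 0x1080, the first 128-byte boundary at or above 0x1040;
 * virt_addr = base_virt + (0x1080 - 0x1040), i.e. the same +0x40 shift.
 */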
+
/**
* pm8001_find_ha_by_dev - from domain device which come from sas layer to
* find out our hba struct.
@@ -157,7 +163,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
int rc = 0, phy_id = sas_phy->id;
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
- struct sas_ha_struct *sas_ha;
struct pm8001_phy *phy;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
@@ -206,19 +211,17 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
if (pm8001_ha->chip_id != chip_8001) {
if (pm8001_ha->phy[phy_id].phy_state ==
PHY_STATE_LINK_UP_SPCV) {
- sas_ha = pm8001_ha->sas;
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
phy->phy_attached = 0;
}
} else {
if (pm8001_ha->phy[phy_id].phy_state ==
PHY_STATE_LINK_UP_SPC) {
- sas_ha = pm8001_ha->sas;
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
phy->phy_attached = 0;
}
}
@@ -235,22 +238,20 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
}
{
struct sas_phy *phy = sas_phy->phy;
- uint32_t *qp = (uint32_t *)(((char *)
- pm8001_ha->io_mem[2].memvirtaddr)
- + 0x1034 + (0x4000 * (phy_id & 3)));
-
- phy->invalid_dword_count = qp[0];
- phy->running_disparity_error_count = qp[1];
- phy->loss_of_dword_sync_count = qp[3];
- phy->phy_reset_problem_count = qp[4];
+ u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
+ + 0x1034 + (0x4000 * (phy_id & 3));
+
+ phy->invalid_dword_count = readl(qp);
+ phy->running_disparity_error_count = readl(&qp[1]);
+ phy->loss_of_dword_sync_count = readl(&qp[3]);
+ phy->phy_reset_problem_count = readl(&qp[4]);
}
if (pm8001_ha->chip_id == chip_8001)
pm8001_bar4_shift(pm8001_ha, 0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("func 0x%x\n", func));
+ pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
rc = -EOPNOTSUPP;
}
msleep(300);
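
The phy-counter hunk above is a sparse/annotation fix as much as a cleanup: memory mapped with ioremap() is __iomem and must go through the MMIO accessors instead of being indexed like an ordinary array. The pattern, as a fragment (the 0x1034 offset comes from the hunk; `base` stands for an already-mapped BAR):

void __iomem *base;			/* assumed: set up via ioremap() */
u32 __iomem *qp = base + 0x1034;	/* byte offset into the BAR */

u32 invalid_dwords = readl(qp);		/* not qp[0] */
u32 disparity_errs = readl(&qp[1]);	/* not qp[1] */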
@@ -267,12 +268,17 @@ void pm8001_scan_start(struct Scsi_Host *shost)
int i;
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sha->lldd_ha;
/* SAS_RE_INITIALIZATION not available in SPCv/ve */
if (pm8001_ha->chip_id == chip_8001)
PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
- for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+ for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
+ pm8001_ha->phy[i].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ wait_for_completion(&completion);
+ msleep(300);
+ }
}
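
The scan-start change above serializes phy bring-up: each phy_start_req() now parks on an on-stack completion that the enable path signals, instead of firing all phys at once. The waiting pattern, reduced to a fragment (start_phy() stands in for the dispatch call):

DECLARE_COMPLETION_ONSTACK(completion);

for (i = 0; i < n_phy; i++) {
	phy[i].enable_completion = &completion;	/* signalled from IRQ path */
	start_phy(i);
	wait_for_completion(&completion);	/* one phy at a time */
}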
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
@@ -302,16 +308,12 @@ static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
struct ata_queued_cmd *qc = task->uldd_task;
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ ||
- qc->tf.command == ATA_CMD_FPDMA_RECV ||
- qc->tf.command == ATA_CMD_FPDMA_SEND ||
- qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
- *tag = qc->tag;
- return 1;
- }
+
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ *tag = qc->tag;
+ return 1;
}
+
return 0;
}
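
The NCQ-tag hunk above swaps an open-coded FPDMA opcode list for a protocol check: ata_is_ncq() classifies the queued command by its taskfile protocol, so future NCQ opcodes are covered automatically. A minimal sketch of the helper in use:

#include <linux/libata.h>

static bool get_ncq_tag(struct ata_queued_cmd *qc, u32 *tag)
{
	if (qc && ata_is_ncq(qc->tf.protocol)) {
		*tag = qc->tag;		/* the hardware NCQ tag */
		return true;
	}
	return false;
}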
@@ -327,19 +329,31 @@ static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
}
/**
+ * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
+ * for internal abort task
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to sata task
+ */
+static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
+}
+
+/**
* pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
* @pm8001_ha: our hba card information
* @ccb: the ccb which attached to TM
* @tmf: the task management IU
*/
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}
/**
- * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task
+ * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
* @pm8001_ha: our hba card information
* @ccb: the ccb which attached to ssp task
*/
@@ -366,171 +380,155 @@ static int sas_find_local_port_id(struct domain_device *dev)
return 0;
}
+#define DEV_IS_GONE(pm8001_dev) \
+ ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
+
+
+static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ enum sas_protocol task_proto = task->task_proto;
+ struct sas_tmf_task *tmf = task->tmf;
+ int is_tmf = !!tmf;
+
+ switch (task_proto) {
+ case SAS_PROTOCOL_SMP:
+ return pm8001_task_prep_smp(pm8001_ha, ccb);
+ case SAS_PROTOCOL_SSP:
+ if (is_tmf)
+ return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
+ return pm8001_task_prep_ssp(pm8001_ha, ccb);
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ return pm8001_task_prep_ata(pm8001_ha, ccb);
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
+ default:
+ dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
+ task_proto);
+ }
+
+ return -EINVAL;
+}
+
/**
- * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
+ * pm8001_queue_command - register for upper layer used, all IO commands sent
+ * to HBA are from this interface.
* @task: the task to be executed.
- * @num: if can_queue great than 1, the task can be queued up. for SMP task,
- * we always execute one one time.
- * @gfp_flags: gfp_flags.
- * @is_tmf: if it is task management task.
- * @tmf: the task management IU
+ * @gfp_flags: gfp_flags
*/
-#define DEV_IS_GONE(pm8001_dev) \
- ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
-static int pm8001_task_exec(struct sas_task *task,
- gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
+int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
+ struct task_status_struct *ts = &task->task_status;
+ enum sas_protocol task_proto = task->task_proto;
struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
- struct pm8001_device *pm8001_dev;
struct pm8001_port *port = NULL;
- struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
- unsigned long flags = 0;
- enum sas_protocol task_proto = t->task_proto;
+ unsigned long flags;
+ u32 n_elem = 0;
+ int rc = 0;
- if (!dev->port) {
- struct task_status_struct *tsm = &t->task_status;
- tsm->resp = SAS_TASK_UNDELIVERED;
- tsm->stat = SAS_PHY_DOWN;
+ if (!internal_abort && !dev->port) {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
if (dev->dev_type != SAS_SATA_DEV)
- t->task_done(t);
+ task->task_done(task);
return 0;
}
- pm8001_ha = pm8001_find_ha_by_dev(task->dev);
- if (pm8001_ha->controller_fatal_error) {
- struct task_status_struct *ts = &t->task_status;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ if (pm8001_ha->controller_fatal_error) {
ts->resp = SAS_TASK_UNDELIVERED;
- t->task_done(t);
+ task->task_done(task);
return 0;
}
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
+
+ pm8001_dbg(pm8001_ha, IO, "pm8001_queue_command device\n");
+
spin_lock_irqsave(&pm8001_ha->lock, flags);
- do {
- dev = t->dev;
- pm8001_dev = dev->lldd_dev;
- port = &pm8001_ha->port[sas_find_local_port_id(dev)];
- if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(task_proto)) {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
- continue;
- } else {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- t->task_done(t);
- continue;
- }
- }
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- goto err_out;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (!sas_protocol_ata(task_proto)) {
- if (t->num_scatter) {
- n_elem = dma_map_sg(pm8001_ha->dev,
- t->scatter,
- t->num_scatter,
- t->data_dir);
- if (!n_elem) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
- }
+ pm8001_dev = dev->lldd_dev;
+ port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+
+ if (!internal_abort &&
+ (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ if (sas_protocol_ata(task_proto)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ task->task_done(task);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
} else {
- n_elem = t->num_scatter;
+ task->task_done(task);
}
+ rc = -ENODEV;
+ goto err_out;
+ }
- t->lldd_task = ccb;
- ccb->n_elem = n_elem;
- ccb->ccb_tag = tag;
- ccb->task = t;
- ccb->device = pm8001_dev;
- switch (task_proto) {
- case SAS_PROTOCOL_SMP:
- rc = pm8001_task_prep_smp(pm8001_ha, ccb);
- break;
- case SAS_PROTOCOL_SSP:
- if (is_tmf)
- rc = pm8001_task_prep_ssp_tm(pm8001_ha,
- ccb, tmf);
- else
- rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- rc = pm8001_task_prep_ata(pm8001_ha, ccb);
- break;
- default:
- dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n", task_proto);
- rc = -EINVAL;
- break;
- }
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+ if (!ccb) {
+ rc = -SAS_QUEUE_FULL;
+ goto err_out;
+ }
- if (rc) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("rc is %x\n", rc));
- goto err_out_tag;
+ if (!sas_protocol_ata(task_proto)) {
+ if (task->num_scatter) {
+ n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+ if (!n_elem) {
+ rc = -ENOMEM;
+ goto err_out_ccb;
+ }
}
- /* TODO: select normal or high priority */
- spin_lock(&t->task_state_lock);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&t->task_state_lock);
- pm8001_dev->running_req++;
- } while (0);
- rc = 0;
- goto out_done;
-
-err_out_tag:
- pm8001_tag_free(pm8001_ha, tag);
+ } else {
+ n_elem = task->num_scatter;
+ }
+
+ task->lldd_task = ccb;
+ ccb->n_elem = n_elem;
+
+ atomic_inc(&pm8001_dev->running_req);
+
+ rc = pm8001_deliver_command(pm8001_ha, ccb);
+ if (rc) {
+ atomic_dec(&pm8001_dev->running_req);
+ if (!sas_protocol_ata(task_proto) && n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+err_out_ccb:
+ pm8001_ccb_free(pm8001_ha, ccb);
+
err_out:
- dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(task_proto))
- if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
- t->data_dir);
-out_done:
+ pm8001_dbg(pm8001_ha, IO, "pm8001_queue_command failed[%d]!\n", rc);
+ }
+
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- return rc;
-}
-/**
- * pm8001_queue_command - register for upper layer used, all IO commands sent
- * to HBA are from this interface.
- * @task: the task to be execute.
- * @gfp_flags: gfp_flags
- */
-int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return pm8001_task_exec(task, gfp_flags, 0, NULL);
+ return rc;
}
/**
* pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
* @pm8001_ha: our hba card information
- * @ccb: the ccb which attached to ssp task
- * @task: the task to be free.
- * @ccb_idx: ccb index.
+ * @ccb: the ccb which attached to ssp task to free
*/
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
+ struct pm8001_ccb_info *ccb)
{
- if (!ccb->task)
+ struct sas_task *task = ccb->task;
+ struct ata_queued_cmd *qc;
+ struct pm8001_device *pm8001_dev;
+
+ if (!task)
return;
- if (!sas_protocol_ata(task->task_proto))
- if (ccb->n_elem)
- dma_unmap_sg(pm8001_ha->dev, task->scatter,
- task->num_scatter, task->data_dir);
+
+ if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
@@ -547,17 +545,26 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
/* do nothing */
break;
}
+
+ if (sas_protocol_ata(task->task_proto)) {
+ /* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
+ qc = task->uldd_task;
+ pm8001_dev = ccb->device;
+ trace_pm80xx_request_complete(pm8001_ha->id,
+ pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
+ ccb->ccb_tag, 0 /* ctlr_opcode not known */,
+ qc ? qc->tf.command : 0, // ata opcode
+ pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
+ }
+
task->lldd_task = NULL;
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- ccb->open_retry = 0;
- pm8001_tag_free(pm8001_ha, ccb_idx);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
- /**
- * pm8001_alloc_dev - find a empty pm8001_device
- * @pm8001_ha: our hba card information
- */
+/**
+ * pm8001_alloc_dev - find an empty pm8001_device
+ * @pm8001_ha: our hba card information
+ */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
u32 dev;
@@ -568,15 +575,16 @@ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
}
}
if (dev == PM8001_MAX_DEVICES) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("max support %d devices, ignore ..\n",
- PM8001_MAX_DEVICES));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "max support %d devices, ignore ..\n",
+ PM8001_MAX_DEVICES);
}
return NULL;
}
/**
* pm8001_find_dev - find a matching pm8001_device
* @pm8001_ha: our hba card information
+ * @device_id: device ID to match against
*/
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
u32 device_id)
@@ -587,13 +595,12 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
return &pm8001_ha->devices[dev];
}
if (dev == PM8001_MAX_DEVICES) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
- "DEVICE FOUND !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
}
return NULL;
}
-static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
+void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
u32 id = pm8001_dev->id;
memset(pm8001_dev, 0, sizeof(*pm8001_dev));
@@ -649,10 +656,10 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
}
}
if (phy_id == parent_dev->ex_dev.num_phys) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Error: no attached dev:%016llx"
- " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
- SAS_ADDR(parent_dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Error: no attached dev:%016llx at ex:%016llx.\n",
+ SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(parent_dev->sas_addr));
res = -1;
}
} else {
@@ -662,7 +669,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
flag = 1; /* directly sata */
}
} /*register this device to HBA*/
- PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
+ pm8001_dbg(pm8001_ha, DISC, "Found device\n");
PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
wait_for_completion(&completion);
@@ -682,191 +689,11 @@ int pm8001_dev_found(struct domain_device *dev)
void pm8001_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
- complete(&task->slow_task->completion);
-}
-
-static void pm8001_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
-
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
#define PM8001_TASK_TIMEOUT 20
-/**
- * pm8001_exec_internal_tmf_task - execute some task management commands.
- * @dev: the wanted device.
- * @tmf: which task management wanted to be take.
- * @para_len: para_len.
- * @parameter: ssp task parameter.
- *
- * when errors or exception happened, we may want to do something, for example
- * abort the issued task which result in this execption, it is done by calling
- * this function, note it is also with the task execute interface.
- */
-static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
- void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
-{
- int res, retry;
- struct sas_task *task = NULL;
- struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
- struct pm8001_device *pm8001_dev = dev->lldd_dev;
- DECLARE_COMPLETION_ONSTACK(completion_setstate);
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- memcpy(&task->ssp_task, parameter, para_len);
- task->task_done = pm8001_task_done;
- task->slow_task->timer.function = pm8001_tmf_timedout;
- task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
- add_timer(&task->slow_task->timer);
-
- res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Executing internal task "
- "failed\n"));
- goto ex_err;
- }
- wait_for_completion(&task->slow_task->completion);
- if (pm8001_ha->chip_id != chip_8001) {
- pm8001_dev->setds_completion = &completion_setstate;
- PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
- wait_for_completion(&completion_setstate);
- }
- res = -TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TMF task[%x]timeout.\n",
- tmf->tmf));
- goto ex_err;
- }
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Blocked task error.\n"));
- res = -EMSGSIZE;
- break;
- } else {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk(" Task to dev %016llx response:"
- "0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat));
- sas_free_task(task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
-
-static int
-pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
- u32 task_tag)
-{
- int res, retry;
- u32 ccb_tag;
- struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- task->task_done = pm8001_task_done;
- task->slow_task->timer.function = pm8001_tmf_timedout;
- task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
- add_timer(&task->slow_task->timer);
-
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
- return res;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
- ccb->n_elem = 0;
-
- res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
- pm8001_dev, flag, task_tag, ccb_tag);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Executing internal task "
- "failed\n"));
- goto ex_err;
- }
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TMF task timeout.\n"));
- goto ex_err;
- }
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
-
- } else {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk(" Task to dev %016llx response: "
- "0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat));
- sas_free_task(task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
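
The big removal above is the heart of this part of the series: the driver-private TMF engine is dropped in favor of the generic libsas helpers, which the rest of the patch calls directly. The correspondences, as used later in this diff:

/* driver-private path (removed)      ->  generic libsas helper
 * pm8001_exec_internal_tmf_task()    ->  sas_lu_reset(dev, lun),
 *                                        sas_query_task(task, tag), ...
 * pm8001_exec_internal_task_abort()  ->  sas_execute_internal_abort_dev(dev, 0, NULL)
 */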
/**
* pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
@@ -883,22 +710,19 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
if (pm8001_dev) {
u32 device_id = pm8001_dev->device_id;
- PM8001_DISC_DBG(pm8001_ha,
- pm8001_printk("found dev[%d:%x] is gone.\n",
- pm8001_dev->device_id, pm8001_dev->dev_type));
- if (pm8001_dev->running_req) {
+ pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
+ pm8001_dev->device_id, pm8001_dev->dev_type);
+ if (atomic_read(&pm8001_dev->running_req)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
- while (pm8001_dev->running_req)
+ sas_execute_internal_abort_dev(dev, 0, NULL);
+ while (atomic_read(&pm8001_dev->running_req))
msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
pm8001_free_dev(pm8001_dev);
} else {
- PM8001_DISC_DBG(pm8001_ha,
- pm8001_printk("Found dev has gone.\n"));
+ pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
}
dev->lldd_dev = NULL;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -909,18 +733,6 @@ void pm8001_dev_gone(struct domain_device *dev)
pm8001_dev_gone_notify(dev);
}
-static int pm8001_issue_ssp_tmf(struct domain_device *dev,
- u8 *lun, struct pm8001_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
- if (!(dev->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- strncpy((u8 *)&ssp_task.LUN, lun, 8);
- return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
- tmf);
-}
-
/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
@@ -940,9 +752,11 @@ void pm8001_open_reject_retry(
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
unsigned long flags1;
- u32 tag;
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+ if (ccb->ccb_tag == PM8001_INVALID_TAG)
+ continue;
+
pm8001_dev = ccb->device;
if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
continue;
@@ -954,9 +768,6 @@ void pm8001_open_reject_retry(
continue;
} else if (pm8001_dev != device_to_close)
continue;
- tag = ccb->ccb_tag;
- if (!tag || (tag == 0xFFFFFFFF))
- continue;
task = ccb->task;
if (!task || !task->task_done)
continue;
@@ -968,20 +779,19 @@ void pm8001_open_reject_retry(
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&task->task_state_lock, flags1);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags
& SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
task->task_done(task);
@@ -993,9 +803,12 @@ void pm8001_open_reject_retry(
}
/**
- * Standard mandates link reset for ATA (type 0) and hard reset for
- * SSP (type 1) , only for RECOVERY
- */
+ * pm8001_I_T_nexus_reset() - reset the initiator/target connection
+ * @dev: the device structure for the device to reset.
+ *
+ * Standard mandates link reset for ATA (type 0) and hard reset for
+ * SSP (type 1), only for RECOVERY
+ */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
int rc = TMF_RESP_FUNC_FAILED;
@@ -1017,27 +830,25 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
}
rc = sas_phy_reset(phy, 1);
if (rc) {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("phy reset failed for device %x\n"
- "with rc %d\n", pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH,
+ "phy reset failed for device %x\n"
+ "with rc %d\n", pm8001_dev->device_id, rc);
rc = TMF_RESP_FUNC_FAILED;
goto out;
}
msleep(2000);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ rc = sas_execute_internal_abort_dev(dev, 0, NULL);
if (rc) {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("task abort failed %x\n"
- "with rc %d\n", pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
+ "with rc %d\n", pm8001_dev->device_id, rc);
rc = TMF_RESP_FUNC_FAILED;
}
} else {
rc = sas_phy_reset(phy, 1);
msleep(2000);
}
- PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
- pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc);
out:
sas_put_local_phy(phy);
return rc;
@@ -1060,8 +871,7 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("I_T_Nexus handler invoked !!"));
+ pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
phy = sas_get_local_phy(dev);
@@ -1072,8 +882,7 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
goto out;
}
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
msleep(100);
/* deregister the target device */
@@ -1088,8 +897,7 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
wait_for_completion(&completion_setstate);
} else {
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
msleep(100);
/* deregister the target device */
@@ -1100,8 +908,8 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
rc = sas_phy_reset(phy, 1);
msleep(2000);
}
- PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
- pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc);
out:
sas_put_local_phy(phy);
@@ -1111,27 +919,24 @@ out:
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
int rc = TMF_RESP_FUNC_FAILED;
- struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (dev_is_sata(dev)) {
struct sas_phy *phy = sas_get_local_phy(dev);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
rc = sas_phy_reset(phy, 1);
sas_put_local_phy(phy);
pm8001_dev->setds_completion = &completion_setstate;
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
+ pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion_setstate);
} else {
- tmf_task.tmf = TMF_LU_RESET;
- rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ rc = sas_lu_reset(dev, lun);
}
/* If failed, fall-through I_T_Nexus reset */
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
- pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc);
return rc;
}
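The magic device-state values passed to set_dev_state_req() are replaced by named constants throughout this patch. The correspondence, collected here for reference (glosses inferred from the surrounding call sites):

	/* 0x01 -> DS_OPERATIONAL   (return the device to normal operation) */
	/* 0x03 -> DS_IN_RECOVERY   (park the device while recovery runs)   */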
@@ -1139,9 +944,6 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
int pm8001_query_task(struct sas_task *task)
{
u32 tag = 0xdeadbeef;
- int i = 0;
- struct scsi_lun lun;
- struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (unlikely(!task || !task->lldd_task || !task->dev))
return rc;
@@ -1152,32 +954,25 @@ int pm8001_query_task(struct sas_task *task)
struct pm8001_hba_info *pm8001_ha =
pm8001_find_ha_by_dev(dev);
- int_to_scsilun(cmnd->device->lun, &lun);
rc = pm8001_find_tag(task, &tag);
if (rc == 0) {
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
- for (i = 0; i < 16; i++)
- printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
- printk(KERN_INFO "]\n");
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
+
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("The task is still in Lun\n"));
+ pm8001_dbg(pm8001_ha, EH,
+ "The task is still in Lun\n");
break;
/* The task is not in Lun or failed, reset the phy */
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("The task is not in Lun or failed,"
- " reset the phy\n"));
+ pm8001_dbg(pm8001_ha, EH,
+ "The task is not in Lun or failed, reset the phy\n");
break;
}
}
@@ -1192,21 +987,28 @@ int pm8001_abort_task(struct sas_task *task)
u32 tag;
struct domain_device *dev ;
struct pm8001_hba_info *pm8001_ha;
- struct scsi_lun lun;
struct pm8001_device *pm8001_dev;
- struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id;
+ u32 phy_id, port_id;
struct sas_task_slow slow_task;
+
if (unlikely(!task || !task->lldd_task || !task->dev))
return TMF_RESP_FUNC_FAILED;
+
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ /*
+  * If the controller is seeing fatal errors, abort task
+  * will not get a response from the controller.
+  */
+ return TMF_RESP_FUNC_FAILED;
+ }
+
ret = pm8001_find_tag(task, &tag);
if (ret == 0) {
- pm8001_printk("no tag for task:%p\n", task);
+ pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
spin_lock_irqsave(&task->task_state_lock, flags);
@@ -1221,24 +1023,20 @@ int pm8001_abort_task(struct sas_task *task)
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
- rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = sas_abort_task(task, tag);
+ sas_execute_internal_abort_single(dev, tag, 0, NULL);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (pm8001_ha->chip_id == chip_8006) {
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
pm8001_dev->setds_completion = &completion;
PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x03);
+ pm8001_dev, DS_IN_RECOVERY);
wait_for_completion(&completion);
/* 2. Send Phy Control Hard Reset */
@@ -1263,8 +1061,8 @@ int pm8001_abort_task(struct sas_task *task)
* leaking the task in libsas or losing the race and
* getting a double free.
*/
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Waiting for local phy ctl\n");
ret = wait_for_completion_timeout(&completion,
PM8001_TASK_TIMEOUT * HZ);
if (!ret || !phy->reset_success) {
@@ -1274,8 +1072,8 @@ int pm8001_abort_task(struct sas_task *task)
/* 3. Wait for Port Reset complete or
* Port reset TMO
*/
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for Port reset\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Waiting for Port reset\n");
ret = wait_for_completion_timeout(
&completion_reset,
PM8001_TASK_TIMEOUT * HZ);
@@ -1285,6 +1083,10 @@ int pm8001_abort_task(struct sas_task *task)
PORT_RESET_TMO);
if (phy->port_reset_status == PORT_RESET_TMO) {
pm8001_dev_gone_notify(dev);
+ PM8001_CHIP_DISP->hw_event_ack_req(
+ pm8001_ha, 0,
+ 0x07, /*HW_EVENT_PHY_DOWN ack*/
+ port_id, phy_id, 0, 0);
goto out;
}
}
@@ -1295,8 +1097,7 @@ int pm8001_abort_task(struct sas_task *task)
* is removed from the ccb. on success the caller is
* going to free the task.
*/
- ret = pm8001_exec_internal_task_abort(pm8001_ha,
- pm8001_dev, pm8001_dev->sas_device, 1, tag);
+ ret = sas_execute_internal_abort_dev(dev, 0, NULL);
if (ret)
goto out;
ret = wait_for_completion_timeout(
@@ -1309,17 +1110,15 @@ int pm8001_abort_task(struct sas_task *task)
reinit_completion(&completion);
pm8001_dev->setds_completion = &completion;
PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
+ pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion);
} else {
- rc = pm8001_exec_internal_task_abort(pm8001_ha,
- pm8001_dev, pm8001_dev->sas_device, 0, tag);
+ ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
}
rc = TMF_RESP_FUNC_COMPLETE;
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
}
out:
@@ -1328,36 +1127,53 @@ out:
task->slow_task = NULL;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (rc != TMF_RESP_FUNC_COMPLETE)
- pm8001_printk("rc= %d\n", rc);
+ pm8001_info(pm8001_ha, "rc= %d\n", rc);
return rc;
}
-int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
+int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
- struct pm8001_tmf_task tmf_task;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
+ pm8001_dev->device_id);
+ return sas_clear_task_set(dev, lun);
}
-int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
+void pm8001_port_formed(struct asd_sas_phy *sas_phy)
{
- struct pm8001_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ struct sas_ha_struct *sas_ha = sas_phy->ha;
+ struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
+ struct pm8001_phy *phy = sas_phy->lldd_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct pm8001_port *port = phy->port;
+
+ if (!sas_port) {
+ pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
+ return;
+ }
+ sas_port->lldd_port = port;
}
-int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
+void pm8001_setds_completion(struct domain_device *dev)
{
- struct pm8001_tmf_task tmf_task;
- struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("I_T_L_Q clear task set[%x]\n",
- pm8001_dev->device_id));
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ if (pm8001_ha->chip_id != chip_8001) {
+ pm8001_dev->setds_completion = &completion_setstate;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, DS_OPERATIONAL);
+ wait_for_completion(&completion_setstate);
+ }
}
+void pm8001_tmf_aborted(struct sas_task *task)
+{
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
+ if (ccb)
+ ccb->task = NULL;
+}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 93438c8f67da..b08f52673889 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -55,10 +55,12 @@
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
-#define DRV_VERSION "0.1.39"
+#define DRV_VERSION "0.1.40"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -69,45 +71,16 @@
#define PM8001_DEV_LOGGING 0x80 /* development message logging */
#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
-#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \
- format, pm8001_ha->name, __func__, __LINE__, ## arg)
-#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
-do { \
- if (unlikely(HBA->logging_level & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
-} while (0);
-#define PM8001_EH_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)
+#define pm8001_info(HBA, fmt, ...) \
+ pr_info("%s:: %s %d: " fmt, \
+ (HBA)->name, __func__, __LINE__, ##__VA_ARGS__)
-#define PM8001_INIT_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)
-
-#define PM8001_DISC_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)
-
-#define PM8001_IO_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)
-
-#define PM8001_FAIL_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)
-
-#define PM8001_IOCTL_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)
-
-#define PM8001_MSG_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
-
-#define PM8001_DEV_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
-
-#define PM8001_DEVIO_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
-
-#define PM8001_IOERR_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
+#define pm8001_dbg(HBA, level, fmt, ...) \
+do { \
+ if (unlikely((HBA)->logging_level & PM8001_##level##_LOGGING)) \
+ pm8001_info(HBA, fmt, ##__VA_ARGS__); \
+} while (0)
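The new macro token-pastes the level name into the corresponding logging-bit constant, so one definition replaces the ten per-level wrappers deleted above. A minimal usage sketch (the call site is illustrative):

	pm8001_dbg(pm8001_ha, FAIL, "soft reset failed: 0x%x\n", reg_val);
	/* expands to:
	 *   if (unlikely(pm8001_ha->logging_level & PM8001_FAIL_LOGGING))
	 *           pm8001_info(pm8001_ha, "soft reset failed: 0x%x\n", reg_val);
	 */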
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
@@ -128,19 +101,16 @@ extern const struct pm8001_dispatch pm8001_80xx_dispatch;
struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
-/* define task management IU */
-struct pm8001_tmf_task {
- u8 tmf;
- u32 tag_of_task_to_be_managed;
-};
+
struct pm8001_ioctl_payload {
u32 signature;
u16 major_function;
u16 minor_function;
- u16 length;
u16 status;
u16 offset;
u16 id;
+ u32 wr_length;
+ u32 rd_length;
u8 *func_specific;
};
@@ -204,6 +174,7 @@ struct forensic_data {
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
@@ -228,10 +199,9 @@ struct pm8001_dispatch {
int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
u32 phy_id, u32 phy_op);
int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
- u32 cmd_tag);
+ struct pm8001_ccb_info *ccb);
int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf);
int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
@@ -243,6 +213,10 @@ struct pm8001_dispatch {
int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
u32 state);
int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
+ int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha);
+ void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0,
+ u32 param1);
};
struct pm8001_chip_info {
@@ -257,6 +231,7 @@ struct pm8001_port {
u8 port_attached;
u16 wide_port_phymap;
u8 port_state;
+ u8 port_id;
struct list_head list;
};
@@ -292,7 +267,7 @@ struct pm8001_device {
struct completion *dcompletion;
struct completion *setds_completion;
u32 device_id;
- u32 running_req;
+ atomic_t running_req;
};
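Converting running_req from u32 to atomic_t lets the per-device in-flight counter be updated without holding the HBA lock. The expected usage pattern, sketched from the call sites visible in this patch:

	atomic_inc(&pm8001_dev->running_req);	/* when a command is queued */
	atomic_dec(&pm8001_dev->running_req);	/* on completion; cf. the
						 * pm8001_open_reject_retry() hunk */
	if (atomic_read(&pm8001_dev->running_req))
		;	/* device still has outstanding I/O */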
struct pm8001_prd_imt {
@@ -308,13 +283,12 @@ struct pm8001_prd {
* CCB(Command Control Block)
*/
struct pm8001_ccb_info {
- struct list_head entry;
struct sas_task *task;
u32 n_elem;
u32 ccb_tag;
dma_addr_t ccb_dma_handle;
struct pm8001_device *device;
- struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
+ struct pm8001_prd *buf_prd;
struct fw_control_ex *fw_control_context;
u8 open_retry;
};
@@ -467,6 +441,7 @@ struct inbound_queue_table {
u32 reserved;
__le32 consumer_index;
u32 producer_idx;
+ spinlock_t iq_lock;
};
struct outbound_queue_table {
u32 element_size_cnt;
@@ -483,6 +458,8 @@ struct outbound_queue_table {
u32 dinterrup_to_pci_offset;
__le32 producer_index;
u32 consumer_idx;
+ spinlock_t oq_lock;
+ unsigned long lock_flags;
};
struct pm8001_hba_memspace {
void __iomem *memvirtaddr;
@@ -523,8 +500,8 @@ struct pm8001_hba_info {
void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */
union main_cfg_table main_cfg_tbl;
union general_status_table gs_tbl;
- struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
- struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+ struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM];
+ struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
struct sas_phy_attribute_table phy_attr_table;
/* MPI SAS PHY attributes */
u8 sas_addr[SAS_ADDR_SIZE];
@@ -542,6 +519,7 @@ struct pm8001_hba_info {
u32 iomb_size; /* SPC and SPCV IOMB size */
struct pm8001_device *devices;
struct pm8001_ccb_info *ccb_info;
+ u32 ccb_count;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
char intr_drvname[PM8001_MAX_MSIX_VEC]
@@ -558,6 +536,14 @@ struct pm8001_hba_info {
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
u32 reset_in_progress;
+ u32 non_fatal_count;
+ u32 non_fatal_read_length;
+ u32 max_q_num;
+ u32 ib_offset;
+ u32 ob_offset;
+ u32 ci_offset;
+ u32 pi_offset;
+ u32 max_memcnt;
};
struct pm8001_work {
@@ -626,7 +612,7 @@ struct fw_control_info {
operations.*/
u32 reserved;/* padding required for 64 bit
alignment */
- u8 buffer[1];/* Start of buffer */
+ u8 buffer[];/* Start of buffer */
};
struct fw_control_ex {
struct fw_control_info *fw_control;
@@ -653,15 +639,13 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
+ struct pm8001_ccb_info *ccb);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void pm8001_scan_start(struct Scsi_Host *shost);
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
int pm8001_abort_task(struct sas_task *task);
-int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
-int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
int pm8001_dev_found(struct domain_device *dev);
void pm8001_dev_gone(struct domain_device *dev);
@@ -669,6 +653,7 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
+void pm8001_port_formed(struct asd_sas_phy *sas_phy);
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
struct sas_task *task_to_close,
@@ -679,8 +664,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
- struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, size_t nb,
+ u32 q_index, u32 opCode, void *payload, size_t nb,
u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
@@ -699,10 +683,9 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_ccb_info *ccb,
- struct pm8001_tmf_task *tmf);
+ struct sas_tmf_task *tmf);
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev,
- u8 flag, u32 task_tag, u32 cmd_tag);
+ struct pm8001_ccb_info *ccb);
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
void pm8001_work_fn(struct work_struct *work);
@@ -723,7 +706,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
-int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
struct sas_task *pm8001_alloc_task(void);
void pm8001_task_done(struct sas_task *task);
@@ -741,21 +724,74 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf);
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
+int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha);
+void pm8001_free_dev(struct pm8001_device *pm8001_dev);
/* ctl shared API */
-extern struct device_attribute *pm8001_host_attrs[];
+extern const struct attribute_group *pm8001_host_groups[];
-static inline void
-pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb,
- u32 ccb_idx)
+#define PM8001_INVALID_TAG ((u32)-1)
+
+/*
+ * Allocate a new tag and return the corresponding ccb after initializing it.
+ */
+static inline struct pm8001_ccb_info *
+pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *dev, struct sas_task *task)
+{
+ struct pm8001_ccb_info *ccb;
+ u32 tag;
+
+ if (pm8001_tag_alloc(pm8001_ha, &tag)) {
+ pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n");
+ return NULL;
+ }
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->task = task;
+ ccb->n_elem = 0;
+ ccb->ccb_tag = tag;
+ ccb->device = dev;
+ ccb->fw_control_context = NULL;
+ ccb->open_retry = 0;
+
+ return ccb;
+}
+
+/*
+ * Free the tag of an initialized ccb.
+ */
+static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ u32 tag = ccb->ccb_tag;
+
+ /*
+ * Cleanup the ccb to make sure that a manual scan of the adapter
+ * ccb_info array can detect ccbs that are in use.
+ * Cf. pm8001_open_reject_retry()
+ */
+ ccb->task = NULL;
+ ccb->ccb_tag = PM8001_INVALID_TAG;
+ ccb->device = NULL;
+ ccb->fw_control_context = NULL;
+
+ pm8001_tag_free(pm8001_ha, tag);
+}
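Taken together, the two helpers above give submission paths a single pattern for tag and CCB lifetime. An illustrative error-handling sketch (build_and_post_iomb() is a hypothetical helper, not from this patch):

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb)
		return -EBUSY;			/* tag space exhausted */

	ret = build_and_post_iomb(pm8001_ha, ccb);	/* hypothetical */
	if (ret)
		pm8001_ccb_free(pm8001_ha, ccb);	/* nothing was queued */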
+
+static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
{
- pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+ struct sas_task *task = ccb->task;
+
+ pm8001_ccb_task_free(pm8001_ha, ccb);
smp_mb(); /*in order to force CPU ordering*/
- spin_unlock(&pm8001_ha->lock);
task->task_done(task);
- spin_lock(&pm8001_ha->lock);
}
+void pm8001_setds_completion(struct domain_device *dev);
+void pm8001_tmf_aborted(struct sas_task *task);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index d1d95f1a2c6a..f8b8624458f7 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -42,6 +42,7 @@
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+#include "pm80xx_tracepoints.h"
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
@@ -58,27 +59,24 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
} while ((reg_val != shift_value) && time_before(jiffies, start));
if (reg_val != shift_value) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
- " = 0x%x\n", reg_val));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MEMBASE_II_SHIFT_REGISTER = 0x%x\n",
+ reg_val);
return -1;
}
return 0;
}
-void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
- const void *destination,
+static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
+ __le32 *destination,
u32 dw_count, u32 bus_base_number)
{
u32 index, value, offset;
- u32 *destination1;
- destination1 = (u32 *)destination;
- for (index = 0; index < dw_count; index += 4, destination1++) {
+ for (index = 0; index < dw_count; index += 4, destination++) {
offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
- *destination1 = cpu_to_le32(value);
+ *destination = cpu_to_le32(value);
}
}
return;
@@ -91,7 +89,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
- u32 accum_len , reg_val, index, *temp;
+ u32 accum_len, reg_val, index, *temp;
u32 status = 1;
unsigned long start;
u8 *direct_data;
@@ -109,8 +107,8 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
}
/* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "forensic_info TYPE_NON_FATAL..............\n");
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
@@ -123,17 +121,13 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: status1 %d\n", status));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: read_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.read_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: direct_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_offset));
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: status1 %d\n", status);
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len);
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len);
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset);
}
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
@@ -145,7 +139,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
pm8001_ha->fatal_bar_loc = 0;
}
- /* Read until accum_len is retrived */
+ /* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
/* Determine length of data between previously stored transfer length
@@ -153,29 +147,24 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
*/
length_to_read =
accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
- length_to_read));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
- pm8001_ha->forensic_last_offset));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.read_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_offset));
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: accum_len 0x%x\n",
+ accum_len);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: length_to_read 0x%x\n",
+ length_to_read);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: last_offset 0x%x\n",
+ pm8001_ha->forensic_last_offset);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset);
/* If accumulated length failed to read correctly fail the attempt.*/
if (accum_len == 0xFFFFFFFF) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("Possible PCI issue 0x%x not expected\n",
- accum_len));
+ pm8001_dbg(pm8001_ha, IO,
+ "Possible PCI issue 0x%x not expected\n",
+ accum_len);
return status;
}
/* If accumulated length is zero fail the attempt */
@@ -239,8 +228,8 @@ moreData:
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO,
+ "get_fatal_spcv:return1 0x%x\n", offset);
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -262,8 +251,8 @@ moreData:
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO,
+ "get_fatal_spcv:return2 0x%x\n", offset);
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -289,8 +278,8 @@ moreData:
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return3 0x%x\n",
+ offset);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
@@ -327,9 +316,9 @@ moreData:
} while ((reg_val) && time_before(jiffies, start));
if (reg_val != 0) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
- reg_val));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+ reg_val);
/* Fail the dump if a timeout occurs */
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
@@ -351,46 +340,174 @@ moreData:
time_before(jiffies, start));
if (reg_val < 2) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
- reg_val));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+ reg_val);
/* Fail the dump if a timeout occurs */
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 0xFFFFFFFF);
- pm8001_cw32(pm8001_ha, 0,
+ return((char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf);
+ }
+ /*
+  * Reset fatal_forensic_shift_offset back to zero and reset the
+  * MEMBASE II shift register to zero.
+  */
+ pm8001_ha->fatal_forensic_shift_offset = 0; /* location in 64k region */
+ pm8001_cw32(pm8001_ha, 0,
MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
- }
- /* Read the next block of the debug data.*/
- length_to_read = pm8001_mr32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
- pm8001_ha->forensic_preserved_accumulated_transfer;
- if (length_to_read != 0x0) {
- pm8001_ha->forensic_fatal_step = 0;
- goto moreData;
- } else {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(
- pm8001_ha->forensic_info.data_buf.direct_data,
+ }
+ /* Read the next block of the debug data.*/
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 4);
- pm8001_ha->forensic_info.data_buf.read_len
- = 0xFFFFFFFF;
- pm8001_ha->forensic_info.data_buf.direct_len
- = 0;
- pm8001_ha->forensic_info.data_buf.direct_offset
- = 0;
- pm8001_ha->forensic_info.data_buf.read_len = 0;
- }
+ pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
}
}
offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
- return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
- (char *)buf;
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset);
+ return ((char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf);
+}
+
+/* pm80xx_get_non_fatal_dump - dump the non-fatal data from the DMA
+ * location written by the firmware.
+ */
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 accum_len = 0;
+ u32 total_len = 0;
+ u32 reg_val = 0;
+ u32 *temp = NULL;
+ u32 index = 0;
+ u32 output_length;
+ unsigned long start = 0;
+ char *buf_copy = buf;
+
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (++pm8001_ha->non_fatal_count == 1) {
+ if (pm8001_ha->chip_id == chip_8001) {
+ snprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ PAGE_SIZE, "Not supported for SPC controller");
+ return 0;
+ }
+ pm8001_dbg(pm8001_ha, IO, "forensic_info TYPE_NON_FATAL...\n");
+ /*
+ * Step 1: Write the host buffer parameters in the MPI Fatal and
+ * Non-Fatal Error Dump Capture Table. This is the buffer
+ * where debug data will be DMAed to.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LO_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HI_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET);
+
+ /* Optionally, set the DUMPCTRL bit to 1 if the host
+ * keeps sending active I/Os while capturing the non-fatal
+ * debug data. Otherwise, leave this bit set to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /*
+ * Step 2: Clear Accumulative Length of Debug Data Transferred
+ * [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump
+ * Capture Table to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0);
+
+ /* initialize previous accumulated length to 0 */
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
+ pm8001_ha->non_fatal_read_length = 0;
+ }
+
+ total_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_TOTAL_LEN);
+ /*
+ * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT]
+ * field and then request that the SPCv controller transfer the debug
+ * data by setting bit 7 of the Inbound Doorbell Set Register.
+ */
+ pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0);
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET,
+ SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP);
+
+ /*
+ * Step 4.1: Read back the Inbound Doorbell Set Register (by polling for
+ * 2 seconds) until register bit 7 is cleared.
+ * This step only indicates the request is accepted by the controller.
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+ do {
+ reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) &
+ SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP;
+ } while ((reg_val != 0) && time_before(jiffies, start));
+
+ /* Step 4.2: To check the completion of the transfer, poll the
+ * Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in
+ * the MPI Fatal and Non-Fatal Error Dump Capture Table.
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+ do {
+ reg_val = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+ } while ((!reg_val) && time_before(jiffies, start));
+
+ if ((reg_val == 0x00) ||
+ (reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) ||
+ (reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) {
+ pm8001_ha->non_fatal_read_length = 0;
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF);
+ pm8001_ha->non_fatal_count = 0;
+ return (buf_copy - buf);
+ } else if (reg_val ==
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) {
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2);
+ } else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) ||
+ (pm8001_ha->non_fatal_read_length >= total_len)) {
+ pm8001_ha->non_fatal_read_length = 0;
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4);
+ pm8001_ha->non_fatal_count = 0;
+ }
+ accum_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+ output_length = accum_len -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+
+ for (index = 0; index < output_length/4; index++)
+ buf_copy += snprintf(buf_copy, PAGE_SIZE,
+ "%08x ", *(temp+index));
+
+ pm8001_ha->non_fatal_read_length += output_length;
+
+ /* store current accumulated length to use in next iteration as
+ * the previous accumulated length
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer = accum_len;
+ return (buf_copy - buf);
}
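Because each successful transfer returns one chunk prefixed with a status word ("00000002 " for more data, "00000004 " for the final chunk, "ffffffff " on failure), the attribute has to be read repeatedly. A userspace sketch, assuming the attribute is exposed as non_fatal_dump under the SCSI host (the exact name and path come from pm8001_ctl.c, which is outside this hunk):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Path and attribute name are assumptions for illustration. */
		const char *path = "/sys/class/scsi_host/host0/non_fatal_dump";
		char buf[65536];

		for (;;) {
			FILE *f = fopen(path, "r");	/* each open+read = one chunk */
			size_t n;
			unsigned long marker;

			if (!f)
				return 1;
			n = fread(buf, 1, sizeof(buf) - 1, f);
			fclose(f);
			if (!n)
				break;
			buf[n] = '\0';
			fputs(buf, stdout);
			/* First "%08x " word of each chunk is the status marker. */
			marker = strtoul(buf, NULL, 16);
			if (marker == 0xFFFFFFFFUL || marker == 4)
				break;	/* error or final chunk */
		}
		return 0;
	}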
/**
@@ -451,24 +568,24 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
-
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
-
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev);
+
+ pm8001_dbg(pm8001_ha, DEV,
+ "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset);
+
+ pm8001_dbg(pm8001_ha, DEV,
+ "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version);
}
/**
@@ -590,7 +707,7 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int i;
void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
u32 offset = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(address,
@@ -608,7 +725,7 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int i;
void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
u32 offset = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(address,
@@ -628,6 +745,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
u32 offsetib, offsetob;
void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+ u32 ib_offset = pm8001_ha->ib_offset;
+ u32 ob_offset = pm8001_ha->ob_offset;
+ u32 ci_offset = pm8001_ha->ci_offset;
+ u32 pi_offset = pm8001_ha->pi_offset;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
@@ -645,26 +766,31 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
pm8001_ha->inbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[IB + i].total_len;
+ pm8001_ha->memoryMap.region[ib_offset + i].total_len;
pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
- pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
@@ -674,30 +800,31 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
- pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
- pm8001_ha->inbnd_q_tbl[i].pi_offset));
+ pm8001_dbg(pm8001_ha, DEV,
+ "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+ pm8001_ha->inbnd_q_tbl[i].pi_offset);
}
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
pm8001_ha->outbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[OB + i].total_len;
+ pm8001_ha->memoryMap.region[ob_offset + i].total_len;
pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
/* interrupt vector based on oq */
pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
- pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
@@ -707,10 +834,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
- pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
- pm8001_ha->outbnd_q_tbl[i].ci_offset));
+ pm8001_dbg(pm8001_ha, DEV,
+ "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+ pm8001_ha->outbnd_q_tbl[i].ci_offset);
}
}
@@ -741,12 +868,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
/* Update Fatal error interrupt vector */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
- ((pm8001_ha->number_of_intr - 1) << 8);
+ ((pm8001_ha->max_q_num - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Updated Fatal error interrupt vector 0x%x\n",
- pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Updated Fatal error interrupt vector 0x%x\n",
+ pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT));
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
@@ -757,9 +884,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Programming DW 0x21 in main cfg table with 0x%x\n",
- pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Programming DW 0x21 in main cfg table with 0x%x\n",
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -782,6 +909,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
/**
* update_inbnd_queue_table - update the inbound queue table to the HBA.
* @pm8001_ha: our hba card information
+ * @number: inbound queue index to update
*/
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
int number)
@@ -799,25 +927,26 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "IQ %d: Element pri size 0x%x\n",
- number,
- pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+ pm8001_dbg(pm8001_ha, DEV,
+ "IQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
- pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
- pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "CI upper base addr 0x%x CI lower base addr 0x%x\n",
- pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
- pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "CI upper base addr 0x%x CI lower base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}
/**
* update_outbnd_queue_table - update the outbound queue table to the HBA.
* @pm8001_ha: our hba card information
+ * @number: outbound queue index to update
*/
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
int number)
@@ -837,20 +966,20 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "OQ %d: Element pri size 0x%x\n",
- number,
- pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+ pm8001_dbg(pm8001_ha, DEV,
+ "OQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
- pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
- pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "PI upper base addr 0x%x PI lower base addr 0x%x\n",
- pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
- pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "PI upper base addr 0x%x PI lower base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
}
/**
@@ -873,35 +1002,48 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
value &= SPCv_MSGU_CFG_TABLE_UPDATE;
} while ((value != 0) && (--max_wait_count));
- if (!max_wait_count)
- return -1;
- /* check the MPI-State for initialization upto 100ms*/
- max_wait_count = 100 * 1000;/* 100 msec */
+ if (!max_wait_count) {
+ /* additional check */
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Inb doorbell clear not toggled[value:%x]\n",
+ value);
+ return -EBUSY;
+ }
+ /* check the MPI-State for initialization up to 100 ms */
+ max_wait_count = 5;/* 100 msec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
gst_len_mpistate =
pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
GST_GSTLEN_MPIS_OFFSET);
} while ((GST_MPI_STATE_INIT !=
(gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
if (!max_wait_count)
- return -1;
+ return -EBUSY;
/* check MPI Initialization error */
gst_len_mpistate = gst_len_mpistate >> 16;
if (0x0000 != gst_len_mpistate)
- return -1;
+ return -EBUSY;
+
+ /*
+ * As per controller datasheet, after successful MPI
+ * initialization minimum 500ms delay is required before
+ * issuing commands.
+ */
+ msleep(500);
return 0;
}
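All the init-path waits above switch from udelay(1) busy loops to msleep(FW_READY_INTERVAL) sleeps, so the counts now express intervals rather than microseconds: from "5 == 100 msec" the interval works out to 20 ms. The shared pattern, sketched:

	max_wait_count = 5;			/* 5 * FW_READY_INTERVAL = 100 ms */
	do {
		msleep(FW_READY_INTERVAL);	/* sleeps - process context only */
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
	} while ((value & SPCv_MSGU_CFG_TABLE_UPDATE) && --max_wait_count);
	if (!max_wait_count)
		return -EBUSY;

msleep() yields the CPU, which is acceptable here because these paths run in process context during probe and reset; the new kernel-doc note on check_fw_ready() makes that constraint explicit.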
/**
* check_fw_ready - The LLDD check if the FW is ready, if not, return error.
+ * This function sleeps, hence it must not be used in atomic context.
* @pm8001_ha: our hba card information
*/
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
@@ -909,83 +1051,49 @@ static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
u32 value;
u32 max_wait_count;
u32 max_wait_time;
+ u32 expected_mask;
int ret = 0;
/* reset / PCIe ready */
- max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */
+ max_wait_time = max_wait_count = 5; /* 100 milli sec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while ((value == 0xFFFFFFFF) && (--max_wait_count));
- /* check ila status */
- max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */
- do {
- udelay(1);
- value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_ILA_READY) !=
- SCRATCH_PAD_ILA_READY) && (--max_wait_count));
- if (!max_wait_count)
- ret = -1;
- else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" ila ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
- }
-
- /* check RAAE status */
- max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */
- do {
- udelay(1);
- value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_RAAE_READY) !=
- SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
- if (!max_wait_count)
- ret = -1;
- else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" raae ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
+ /* check ila, RAAE and iops status */
+ if ((pm8001_ha->chip_id != chip_8008) &&
+ (pm8001_ha->chip_id != chip_8009)) {
+ max_wait_time = max_wait_count = 180; /* 3600 milli sec */
+ expected_mask = SCRATCH_PAD_ILA_READY |
+ SCRATCH_PAD_RAAE_READY |
+ SCRATCH_PAD_IOP0_READY |
+ SCRATCH_PAD_IOP1_READY;
+ } else {
+ max_wait_time = max_wait_count = 170; /* 3400 milli sec */
+ expected_mask = SCRATCH_PAD_ILA_READY |
+ SCRATCH_PAD_RAAE_READY |
+ SCRATCH_PAD_IOP0_READY;
}
-
- /* check iop0 status */
- max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
- (--max_wait_count));
- if (!max_wait_count)
+ } while (((value & expected_mask) !=
+ expected_mask) && (--max_wait_count));
+ if (!max_wait_count) {
+ pm8001_dbg(pm8001_ha, INIT,
+ "At least one FW component failed to load within %d millisec: Scratchpad1: 0x%x\n",
+ max_wait_time * FW_READY_INTERVAL, value);
ret = -1;
- else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" iop0 ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
- }
-
- /* check iop1 status only for 16 port controllers */
- if ((pm8001_ha->chip_id != chip_8008) &&
- (pm8001_ha->chip_id != chip_8009)) {
- /* 200 milli sec */
- max_wait_time = max_wait_count = 200 * 1000;
- do {
- udelay(1);
- value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_IOP1_READY) !=
- SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
- if (!max_wait_count)
- ret = -1;
- else {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "iop1 ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
- }
+ } else {
+ pm8001_dbg(pm8001_ha, MSG,
+ "All FW components ready by %d ms\n",
+ (max_wait_time - max_wait_count) * FW_READY_INTERVAL);
}
-
return ret;
}
-static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *base_addr;
u32 value;
@@ -994,17 +1102,48 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
u32 pcilogic;
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+
+ /*
+ * lower 26 bits of SCRATCHPAD0 register describes offset within the
+ * PCIe BAR where the MPI configuration table is present
+ */
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
- PM8001_DEV_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
- offset, value));
+ pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
+ offset, value);
+ /*
+ * Upper 6 bits describe the offset within PCI config space where BAR
+ * is located.
+ */
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+ pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
+
+ /*
+ * Make sure the offset falls inside the ioremapped PCI BAR
+ */
+ if (offset > pm8001_ha->io_mem[pcibar].memsize) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Main cfg tbl offset outside %u > %u\n",
+ offset, pm8001_ha->io_mem[pcibar].memsize);
+ return -EBUSY;
+ }
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+
+ /*
+ * Validate main configuration table address: first DWord should read
+ * "PMCS"
+ */
+ value = pm8001_mr32(pm8001_ha->main_cfg_tbl_addr, 0);
+ if (memcmp(&value, "PMCS", 4) != 0) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "BAD main config signature 0x%x\n",
+ value);
+ return -EBUSY;
+ }
+ pm8001_dbg(pm8001_ha, INIT,
+ "VALID main config signature 0x%x\n", value);
pm8001_ha->general_stat_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
0xFFFFFF);
@@ -1024,33 +1163,26 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
0xFFFFFF);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GST OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("INBND OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("OBND OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("IVT OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PSPA OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("addr - main cfg %p general status %p\n",
- pm8001_ha->main_cfg_tbl_addr,
- pm8001_ha->general_stat_tbl_addr));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("addr - inbnd %p obnd %p\n",
- pm8001_ha->inbnd_q_tbl_addr,
- pm8001_ha->outbnd_q_tbl_addr));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("addr - pspa %p ivt %p\n",
- pm8001_ha->pspa_q_tbl_addr,
- pm8001_ha->ivt_tbl_addr));
+ pm8001_dbg(pm8001_ha, INIT, "GST OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x18));
+ pm8001_dbg(pm8001_ha, INIT, "INBND OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C));
+ pm8001_dbg(pm8001_ha, INIT, "OBND OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x20));
+ pm8001_dbg(pm8001_ha, INIT, "IVT OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C));
+ pm8001_dbg(pm8001_ha, INIT, "PSPA OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x90));
+ pm8001_dbg(pm8001_ha, INIT, "addr - main cfg %p general status %p\n",
+ pm8001_ha->main_cfg_tbl_addr,
+ pm8001_ha->general_stat_tbl_addr);
+ pm8001_dbg(pm8001_ha, INIT, "addr - inbnd %p obnd %p\n",
+ pm8001_ha->inbnd_q_tbl_addr,
+ pm8001_ha->outbnd_q_tbl_addr);
+ pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n",
+ pm8001_ha->pspa_q_tbl_addr,
+ pm8001_ha->ivt_tbl_addr);
+ return 0;
}
/**
@@ -1061,7 +1193,6 @@ int
pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
- struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
@@ -1070,9 +1201,8 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
if (IS_SPCV_12G(pm8001_ha->pdev))
@@ -1080,15 +1210,17 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
else
page_code = THERMAL_PAGE_CODE_8H;
- payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
- (THERMAL_ENABLE << 8) | page_code;
- payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+ payload.cfg_pg[0] =
+ cpu_to_le32((THERMAL_LOG_ENABLE << 9) |
+ (THERMAL_ENABLE << 8) | page_code);
+ payload.cfg_pg[1] =
+ cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8));
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
- payload.cfg_pg[0], payload.cfg_pg[1]));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+ payload.cfg_pg[0], payload.cfg_pg[1]);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
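/*
 * Why the new cpu_to_le32() wrappers in this hunk: the request payload
 * is consumed by the controller over DMA, so multi-byte fields must be
 * little-endian on the wire regardless of host endianness. The
 * convention, sketched with a field from this diff:
 *
 *	payload.cfg_pg[0] = cpu_to_le32(host_value);	// store wire order
 *	host_value = le32_to_cpu(payload.cfg_pg[0]);	// read back on host
 */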
@@ -1105,7 +1237,6 @@ static int
pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
- struct inbound_queue_table *circularQ;
SASProtocolTimerConfig_t SASConfigPage;
int rc;
u32 tag;
@@ -1115,63 +1246,51 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
-
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
- SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
- SASConfigPage.MST_MSI = 3 << 15;
- SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
- SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
- (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
- SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
-
- if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
- SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
-
-
- SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
- SAS_OPNRJT_RTRY_INTVL;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
- | SAS_COPNRJT_RTRY_TMO;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
- | SAS_COPNRJT_RTRY_THR;
- SASConfigPage.MAX_AIP = SAS_MAX_AIP;
-
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.pageCode "
- "0x%08x\n", SASConfigPage.pageCode));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.MST_MSI "
- " 0x%08x\n", SASConfigPage.MST_MSI));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
- " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.STP_FRM_TMO "
- " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.STP_IDLE_TMO "
- " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
- " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
- " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
- " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
- " 0x%08x\n", SASConfigPage.MAX_AIP));
+ SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE);
+ SASConfigPage.MST_MSI = cpu_to_le32(3 << 15);
+ SASConfigPage.STP_SSP_MCT_TMO =
+ cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO);
+ SASConfigPage.STP_FRM_TMO =
+ cpu_to_le32((SAS_MAX_OPEN_TIME << 24) |
+ (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER);
+ SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME);
+
+ SASConfigPage.OPNRJT_RTRY_INTVL =
+ cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR);
+ SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP);
+
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
+ le32_to_cpu(SASConfigPage.pageCode));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
+ le32_to_cpu(SASConfigPage.MST_MSI));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
+ le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
+ le32_to_cpu(SASConfigPage.STP_FRM_TMO));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
+ le32_to_cpu(SASConfigPage.STP_IDLE_TMO));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
+ le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
+ le32_to_cpu(SASConfigPage.MAX_AIP));
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1206,18 +1325,18 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
pm8001_ha->encrypt_info.status = 0;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
- "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
- scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
- pm8001_ha->encrypt_info.sec_mode,
- pm8001_ha->encrypt_info.status));
+ pm8001_dbg(pm8001_ha, INIT,
+		   "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X. Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+ scratch3_value,
+ pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status);
ret = 0;
} else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
SCRATCH_PAD3_ENC_DISABLED) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
- scratch3_value));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+ scratch3_value);
pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
pm8001_ha->encrypt_info.cipher_mode = 0;
pm8001_ha->encrypt_info.sec_mode = 0;
@@ -1237,12 +1356,12 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
- "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
- scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
- pm8001_ha->encrypt_info.sec_mode,
- pm8001_ha->encrypt_info.status));
+ pm8001_dbg(pm8001_ha, INIT,
+			   "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X. Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+ scratch3_value,
+ pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status);
} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_ENA_ERR) {
@@ -1260,24 +1379,23 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
- "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
- scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
- pm8001_ha->encrypt_info.sec_mode,
- pm8001_ha->encrypt_info.status));
+ pm8001_dbg(pm8001_ha, INIT,
+			   "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X. Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+ scratch3_value,
+ pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status);
}
return ret;
}
/**
- * pm80xx_encrypt_update - update flash with encryption informtion
+ * pm80xx_encrypt_update - update flash with encryption information
* @pm8001_ha: our hba card information.
*/
static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
{
struct kek_mgmt_req payload;
- struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_KEK_MANAGEMENT;
@@ -1285,21 +1403,21 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
memset(&payload, 0, sizeof(struct kek_mgmt_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
/* Currently only one key is used. New KEK index is 1.
* Current KEK index is 1. Store KEK to NVRAM is 1.
*/
- payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
- KEK_MGMT_SUBOP_KEYCARDUPDATE);
+ payload.new_curidx_ksop =
+ cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) |
+ KEK_MGMT_SUBOP_KEYCARDUPDATE));
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Saving Encryption info to flash. payload 0x%x\n",
- payload.new_curidx_ksop));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Saving Encryption info to flash. payload 0x%x\n",
+ le32_to_cpu(payload.new_curidx_ksop));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1308,7 +1426,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
+ * pm80xx_chip_init - the main init function that initializes the whole PM8001 chip.
* @pm8001_ha: our hba card information
*/
static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
@@ -1318,8 +1436,7 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
/* check the firmware status */
if (-1 == check_fw_ready(pm8001_ha)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Firmware is not ready!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
return -EBUSY;
}
@@ -1327,7 +1444,12 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->controller_fatal_error = false;
	/* Initialize PCI space addresses, e.g. the MPI offset */
- init_pci_device_addresses(pm8001_ha);
+ ret = init_pci_device_addresses(pm8001_ha);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL,
+			   "Failed to init pci addresses\n");
+ return ret;
+ }
init_default_table_values(pm8001_ha);
read_main_config_table(pm8001_ha);
read_general_status_table(pm8001_ha);
@@ -1337,38 +1459,39 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
	/* update main config table, inbound table and outbound table */
update_main_config_table(pm8001_ha);
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
update_inbnd_queue_table(pm8001_ha, i);
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
update_outbnd_queue_table(pm8001_ha, i);
-
+ }
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MPI initialize successful!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
} else
return -EBUSY;
+ return 0;
+}
+
+static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
/* send SAS protocol timer configuration page to FW */
- ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+ pm80xx_set_sas_protocol_timer_config(pm8001_ha);
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Checking for encryption\n"));
+ int ret;
+
+ pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Encryption error !!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "Encryption error !!\n");
if (pm8001_ha->encrypt_info.status == 0x81) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption enabled with error."
- "Saving encryption key to flash\n"));
+ pm8001_dbg(pm8001_ha, INIT,
+				   "Encryption enabled with error. Saving encryption key to flash\n");
pm80xx_encrypt_update(pm8001_ha);
}
}
}
- return 0;
}
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
@@ -1376,34 +1499,41 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
u32 max_wait_count;
u32 value;
u32 gst_len_mpistate;
- init_pci_device_addresses(pm8001_ha);
+ int ret;
+
+ ret = init_pci_device_addresses(pm8001_ha);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL,
+			   "Failed to init pci addresses\n");
+ return ret;
+ }
+
/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
	table is stopped */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
value &= SPCv_MSGU_CFG_TABLE_RESET;
} while ((value != 0) && (--max_wait_count));
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+		pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT: IBDB value = 0x%x\n", value);
return -1;
}
/* check the MPI-State for termination in progress */
/* wait until Inbound DoorBell Clear Register toggled */
- max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
+ max_wait_count = 100; /* 2 sec for spcv/ve */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
gst_len_mpistate =
pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
GST_GSTLEN_MPIS_OFFSET);
@@ -1412,9 +1542,8 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
break;
} while (--max_wait_count);
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TIME OUT MPI State = 0x%x\n",
- gst_len_mpistate & GST_MPI_STATE_MASK));
+		pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT: MPI State = 0x%x\n",
+ gst_len_mpistate & GST_MPI_STATE_MASK);
return -1;
}
@@ -1422,8 +1551,43 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
- * the FW register status to the originated status.
+ * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors are detected
+ * @pm8001_ha: our hba card information
+ *
+ * Fatal errors are recoverable only after a host reboot.
+ */
+int
+pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
+{
+ int ret = 0;
+ u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0);
+ u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_1);
+ u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
+ if (pm8001_ha->chip_id != chip_8006 &&
+ pm8001_ha->chip_id != chip_8074 &&
+ pm8001_ha->chip_id != chip_8076) {
+ return 0;
+ }
+
+ if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n",
+ scratch_pad1, scratch_pad2, scratch_pad3,
+ scratch_pad_rsvd0, scratch_pad_rsvd1);
+ ret = 1;
+ }
+
+ return ret;
+}
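/*
 * A plausible shape of the predicate used above (an assumption; the
 * real macro is defined in the pm80xx headers, not in this hunk):
 *
 *	#define MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(x) \
 *		(((x) & SCRATCH_PAD1_STATE_MASK) == SCRATCH_PAD1_STATE_FATAL)
 */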
+
+/**
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all
+ * FW register statuses are reset to their original values.
* @pm8001_ha: our hba card information
*/
@@ -1438,34 +1602,41 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (!pm8001_ha->controller_fatal_error) {
/* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) {
- regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "MPI state is not ready scratch1 :0x%x\n",
- regval));
- return -1;
+ u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+ u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "MPI state is not ready scratch: %x:%x:%x:%x\n",
+ r0, r1, r2, r3);
+ /* if things aren't ready but the bootloader is ok then
+ * try the reset anyway.
+ */
+ if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK)
+ return -1;
}
}
	/* check that the reset register is in its normal state: 0x0 */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("reset register before write : 0x%x\n", regval));
+ pm8001_dbg(pm8001_ha, INIT, "reset register before write : 0x%x\n",
+ regval);
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("reset register after write 0x%x\n", regval));
+ pm8001_dbg(pm8001_ha, INIT, "reset register after write 0x%x\n",
+ regval);
if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" soft reset successful [regval: 0x%x]\n",
- regval));
+ pm8001_dbg(pm8001_ha, MSG,
+ " soft reset successful [regval: 0x%x]\n",
+ regval);
} else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" soft reset failed [regval: 0x%x]\n",
- regval));
+ pm8001_dbg(pm8001_ha, MSG,
+ " soft reset failed [regval: 0x%x]\n",
+ regval);
		/* check whether the bootloader executed successfully or is in HDA mode */
bootloader_state =
@@ -1473,28 +1644,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
SCRATCH_PAD1_BOOTSTATE_MASK;
if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state - HDA mode SEEPROM\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state - HDA mode SEEPROM\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state - HDA mode Bootstrap Pin\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state - HDA mode Bootstrap Pin\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state - HDA mode soft reset\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state - HDA mode soft reset\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state-HDA mode critical error\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+				   "Bootloader state - HDA mode critical error\n");
}
return -EBUSY;
}
/* check the firmware status after reset */
if (-1 == check_fw_ready(pm8001_ha)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Firmware is not ready!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
/* check iButton feature support for motherboard controller */
if (pm8001_ha->pdev->subsystem_vendor !=
PCI_VENDOR_ID_ADAPTEC2 &&
@@ -1502,25 +1672,22 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
PCI_VENDOR_ID_ATTO &&
pm8001_ha->pdev->subsystem_vendor != 0) {
ibutton0 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_6);
+ MSGU_SCRATCH_PAD_RSVD_0);
ibutton1 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_7);
+ MSGU_SCRATCH_PAD_RSVD_1);
if (!ibutton0 && !ibutton1) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("iButton Feature is"
- " not Available!!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "iButton Feature is not Available!!!\n");
return -EBUSY;
}
if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("CRC Check for iButton"
- " Feature Failed!!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "CRC Check for iButton Feature Failed!!!\n");
return -EBUSY;
}
}
}
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SPCv soft reset Complete\n"));
+ pm8001_dbg(pm8001_ha, INIT, "SPCv soft reset Complete\n");
return 0;
}
@@ -1528,13 +1695,11 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 i;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset start\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");
/* do SPCv chip reset. */
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SPC soft reset Complete\n"));
+ pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
	/* Check whether this delay is required or not */
/* delay 10 usec */
@@ -1546,12 +1711,11 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
mdelay(1);
} while ((--i) != 0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset finished\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
}
/**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
@@ -1562,7 +1726,7 @@ pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
@@ -1572,17 +1736,19 @@ pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
+ * @vec: interrupt number to enable
*/
static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1590,19 +1756,23 @@ pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
/**
- * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt
+ * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
+ * @vec: interrupt number to disable
*/
static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
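/*
 * Editor's note: the 80xx MSI-X path supports up to 64 vectors, split
 * across a low and a high 32-bit doorbell mask register. A condensed
 * sketch of the pattern shared by the enable and disable paths above
 * (the helper name is hypothetical):
 */
static inline void pm80xx_odmr_write(struct pm8001_hba_info *pm8001_ha,
				     u32 reg_lo, u32 reg_hi, u8 vec)
{
	if (vec < 32)
		pm8001_cw32(pm8001_ha, 0, reg_lo, 1U << vec);
	else
		pm8001_cw32(pm8001_ha, 0, reg_hi, 1U << (vec - 32));
}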
@@ -1611,55 +1781,39 @@ pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
- int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
+ struct sas_task *task;
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
- if (!pm8001_ha_dev) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
- return;
- }
+ pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG;
+ pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
- "allocate task\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
-
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
sas_free_task(task);
return;
}
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
-
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
-
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- task_abort.tag = cpu_to_le32(ccb_tag);
+ task_abort.tag = cpu_to_le32(ccb->ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Executing abort task end\n"));
+ ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
+ pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n");
if (ret) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
}
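/*
 * pm8001_ccb_alloc() consolidates the tag-alloc/ccb-setup sequence the
 * removed lines open-coded. A sketch of its likely shape, reconstructed
 * from the fields the old code populated (an assumption; the real
 * helper lives elsewhere in the driver):
 */
static struct pm8001_ccb_info *
pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha,
		 struct pm8001_device *dev, struct sas_task *task)
{
	struct pm8001_ccb_info *ccb;
	u32 tag;

	if (pm8001_tag_alloc(pm8001_ha, &tag))
		return NULL;

	ccb = &pm8001_ha->ccb_info[tag];
	ccb->device = dev;
	ccb->ccb_tag = tag;
	ccb->task = task;
	ccb->n_elem = 0;

	return ccb;
}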
@@ -1668,56 +1822,45 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
{
struct sata_start_req sata_cmd;
int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate task !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
- sas_free_task(task);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate tag !!!\n"));
- return;
- }
-
- /* allocate domain device by ourselves as libsas
- * is not going to provide any
- */
+ /*
+ * Allocate domain device by ourselves as libsas is not going to
+ * provide any.
+ */
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Domain device cannot be allocated\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Domain device cannot be allocated\n");
return;
}
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
- ccb->n_elem = 0;
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
+ kfree(dev);
+ return;
+ }
+
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -1727,34 +1870,34 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
fis.lbal = 0x10;
fis.sector_count = 0x1;
- sata_cmd.tag = cpu_to_le32(ccb_tag);
+ sata_cmd.tag = cpu_to_le32(ccb->ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9)));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
+ res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
+ pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n");
if (res) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
kfree(dev);
}
}
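/*
 * Background on the FIS built above (standard ATA detail, not spelled
 * out in this diff): the function issues READ LOG EXT (command 0x2f in
 * the full source) for log page 10h (fis.lbal = 0x10), the NCQ Command
 * Error log, reading one sector. Fetching that log is how a host
 * clears a drive's error state after a failed NCQ command, which is
 * why it runs before the abort-all path.
 */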
/**
- * mpi_ssp_completion- process the event that FW response to the SSP request.
+ * mpi_ssp_completion - process the event that FW response to the SSP request.
* @pm8001_ha: our hba card information
* @piomb: the message contents of this outbound message.
*
 * When FW has completed an SSP request, for example an IO request, after it has
- * filled the SG data with the data, it will trigger this event represent
- * that he has finished the job,please check the coresponding buffer.
+ * filled the SG data with the data, it will trigger this event to indicate
+ * that it has finished the job; please check the corresponding buffer.
 * So we will tell the caller, who may be waiting for the result, to tell
 * the upper layer that the task has been finished.
*/
static void
-mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -1780,30 +1923,27 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
if (status && status != IO_UNDERFLOW)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+ pm8001_dbg(pm8001_ha, DEV,
+ "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t);
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive"
- ":%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
- param));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS ,param = 0x%x\n",
+ param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
@@ -1812,73 +1952,83 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
sas_ssp_task_response(pm8001_ha->dev, t, iu);
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_UNDERFLOW:
/* SSP Completion with error */
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
- param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW ,param = 0x%x\n",
+ param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
/* Force the midlayer to retry */
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
@@ -1886,8 +2036,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -1897,67 +2046,78 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (!t->uldd_task)
@@ -1966,64 +2126,65 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_DS_NON_OPERATIONAL);
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_TM_TAG_NOT_FOUND:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
}
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("scsi_status = 0x%x\n ",
- psspPayload->ssp_resp_iu.status));
+	pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n",
+ psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "task 0x%p done with io_status 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
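/*
 * pm8001_ccb_task_free_done() replaces the open-coded free + barrier +
 * completion sequence visible in the removed lines. A sketch inferred
 * from those lines (an assumption; the actual helper is defined in the
 * pm8001 headers):
 */
static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
					     struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;

	pm8001_ccb_task_free(pm8001_ha, ccb);
	mb();	/* preserve the old "force CPU ordering" guarantee */
	task->task_done(task);
}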
/*See the comments for mpi_ssp_completion */
-static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
unsigned long flags;
@@ -2040,52 +2201,47 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
- port_id, tag, event));
+ pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, tag, event);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+		pm8001_dbg(pm8001_ha, IO, "IO_OVERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
return;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
@@ -2096,8 +2252,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2107,94 +2262,86 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n");
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
return;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
@@ -2202,26 +2349,23 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "task 0x%p done with event 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
/*See the comments for mpi_ssp_completion */
static void
-mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -2241,60 +2385,43 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
- if (!tag) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("tag null\n"));
- return;
- }
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ccb null\n"));
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
pm8001_dev = t->dev->lldd_dev;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task null\n");
return;
}
if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
&& unlikely(!t || !t->lldd_task || !t->dev)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task or dev null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
- if (!ts) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ts null\n"));
- return;
- }
- if (unlikely(status))
- PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
- "status:0x%x, tag:0x%x, task::0x%p\n",
- status, tag, t));
+ if (status != IO_SUCCESS) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "IO failed device_id %u status 0x%x tag %d\n",
+ pm8001_dev->device_id, status, tag);
+ }
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
- for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
+ for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
- for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
+ for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
@@ -2317,30 +2444,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%08x%08x", temp_sata_addr_hi,
- temp_sata_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%08x%08x\n",
+ temp_sata_addr_hi,
+ temp_sata_addr_low);
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
}
}
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
- (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
- /* set new bit for abort_all */
- pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
- /* clear bit for read log */
- pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
@@ -2352,94 +2475,101 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
ts->residual = param;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
- param));
+ pm8001_dbg(pm8001_ha, IO,
+ "SAS_PROTO_RESPONSE len = %d\n",
+ param);
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("PIO read len = %d\n", len));
- } else if (t->ata_task.use_ncq) {
+ pm8001_dbg(pm8001_ha, IO,
+ "PIO read len = %d\n", len);
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("FPDMA len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
+ len);
} else {
len = sizeof(struct dev_to_host_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("other len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
+ len);
}
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = len;
memcpy(&resp->ending_fis[0], sata_resp, len);
ts->buf_valid_size = sizeof(*resp);
} else
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("response too large\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "response too large\n");
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
	/* the following cases are TODO cases */
case IO_UNDERFLOW:
/* SATA Completion with error */
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
@@ -2447,8 +2577,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2457,13 +2586,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
@@ -2473,20 +2606,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2495,62 +2634,74 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2558,19 +2709,23 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_IN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2578,46 +2733,58 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown status device_id %u status 0x%x tag %d\n",
+ pm8001_dev->device_id, status, tag);
/* not an allowed case; return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
}
}
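The unlock/retake sequences above exist because mpi_sata_completion() now runs with the outbound queue lock held, while pm8001_ccb_task_free_done() ends up in the task's completion callback, which may re-enter the driver and take the same lock. A minimal sketch of the recurring pattern, with a hypothetical helper name:

/*
 * Hedged sketch only: a hypothetical helper capturing the
 * unlock -> complete -> relock pattern used repeatedly above.
 * pm8001_ccb_task_free_done() invokes the task's ->task_done()
 * callback, which may re-enter the driver and try to take
 * circularQ->oq_lock again, so the lock must be dropped first.
 */
static void pm80xx_complete_task_unlocked(struct pm8001_hba_info *pm8001_ha,
		struct outbound_queue_table *circularQ,
		struct pm8001_ccb_info *ccb)
{
	spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags);
	pm8001_ccb_task_free_done(pm8001_ha, ccb);
	spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags);
}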
/* See the comments for mpi_ssp_completion */
-static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
@@ -2629,21 +2796,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
-
- ccb = &pm8001_ha->ccb_info[tag];
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("No CCB !!!. returning\n"));
- return;
- }
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SATA EVENT 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
@@ -2655,55 +2810,52 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
return;
}
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+
if (unlikely(!t || !t->lldd_task || !t->dev)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task or dev null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
- port_id, tag, event));
+ pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, tag, event);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OVERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- pm8001_dev->running_req--;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
@@ -2714,8 +2866,8 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2724,132 +2876,105 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_PEER_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
break;
case IO_XFER_PIO_SETUP_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n");
/* TBC: default values used for now */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_DMA_ACTIVATE_TIMEOUT:
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "IO_XFER_DMA_ACTIVATE_TIMEOUT\n");
/* TBC: default values used for now */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, IO, "Unknown status 0x%x\n", event);
/* not an allowed case; return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
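Throughout these handlers the per-device in-flight counter moves from a plain "running_req--" to atomic_dec(). A sketch of what that assumes; the wrapper below is illustrative, and the struct change itself sits outside this hunk:

/*
 * Hedged sketch: assumes struct pm8001_device now declares
 *
 *	atomic_t running_req;
 *
 * A plain running_req-- is a read-modify-write and can lose updates
 * when completions for one device arrive on different interrupt
 * vectors; atomic_dec() keeps the count consistent without a lock.
 * The wrapper name below is illustrative, not from the driver.
 */
static inline void pm8001_dev_done(struct pm8001_device *pm8001_dev)
{
	if (pm8001_dev)
		atomic_dec(&pm8001_dev->running_req);
}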
/* See the comments for mpi_ssp_completion */
@@ -2865,7 +2990,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct smp_completion_resp *psmpPayload;
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
- char *pdma_respaddr = NULL;
psmpPayload = (struct smp_completion_resp *)(piomb + 4);
status = le32_to_cpu(psmpPayload->status);
@@ -2877,94 +3001,91 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts = &t->task_status;
pm8001_dev = ccb->device;
if (status)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("smp IO status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
- PM8001_DEV_DBG(pm8001_ha,
- pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+ pm8001_dbg(pm8001_ha, DEV, "tag::0x%x status::0x%x\n", tag, status);
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("DIRECT RESPONSE Length:%d\n",
- param));
- pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
- ((u64)sg_dma_address
- (&t->smp_task.smp_resp))));
+ struct scatterlist *sg_resp = &t->smp_task.smp_resp;
+ u8 *payload;
+ void *to;
+
+ pm8001_dbg(pm8001_ha, IO,
+ "DIRECT RESPONSE Length:%d\n",
+ param);
+ to = kmap_atomic(sg_page(sg_resp));
+ payload = to + sg_resp->offset;
for (i = 0; i < param; i++) {
- *(pdma_respaddr+i) = psmpPayload->_r_a[i];
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
- i, *(pdma_respaddr+i),
- psmpPayload->_r_a[i]));
+ *(payload + i) = psmpPayload->_r_a[i];
+ pm8001_dbg(pm8001_ha, IO,
+ "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+ i, *(payload + i),
+ psmpPayload->_r_a[i]);
}
+ kunmap_atomic(to);
}
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OVERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PHY_DOWN;
break;
case IO_ERROR_HW_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
@@ -2975,8 +3096,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2985,75 +3105,68 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(\
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_RX_FRAME:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_ERROR_INTERNAL_SMP_RESOURCE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
/* not an allowed case; return failed status */
@@ -3061,25 +3174,23 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "task 0x%p done with io_status 0x%x resp 0x%x"
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb(); /* in order to force CPU ordering */
t->task_done(t);
}
}
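The direct SMP response copy above replaces phys_to_virt() on an sg_dma_address() with a kmap_atomic() of the backing page: a DMA (bus) address is not a CPU physical address once an IOMMU sits in the path, so converting it back was never reliable. A condensed sketch of the new copy; the helper name is hypothetical:

/*
 * Hedged sketch of the direct-response copy: map the scatterlist's
 * backing page and copy at its offset.  kmap_atomic()/kunmap_atomic()
 * are legal here because this runs in interrupt context and the
 * mapping is strictly local and short-lived.
 */
static void pm80xx_copy_smp_resp(struct scatterlist *sg_resp,
				 const u8 *src, u32 len)
{
	void *to = kmap_atomic(sg_page(sg_resp));

	memcpy(to + sg_resp->offset, src, len);
	kunmap_atomic(to);
}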
/**
- * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
+ * pm80xx_hw_event_ack_req - For PM8001, some events need to be acknowledged to the FW.
* @pm8001_ha: our hba card information
* @Qnum: the outbound queue message number.
* @SEA: source of event to ack
@@ -3094,17 +3205,15 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
struct hw_event_ack_req payload;
u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
- struct inbound_queue_table *circularQ;
-
memset((u8 *)&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = cpu_to_le32(1);
payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload,
+ sizeof(payload), 0);
}
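pm80xx_hw_event_ack_req() no longer resolves the inbound queue itself; it hands pm8001_mpi_build_cmd() the queue number. The shape implied by the call sites in this patch is roughly:

/*
 * Hedged sketch, inferred from the call sites only: the queue-table
 * lookup moves inside pm8001_mpi_build_cmd(), so callers pass a queue
 * index instead of a struct inbound_queue_table pointer.
 */
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, u32 q_index,
			 u32 opc, void *payload, size_t nb,
			 u32 responseQueue)
{
	struct inbound_queue_table *circularQ =
		&pm8001_ha->inbnd_q_tbl[q_index];

	/* ... build and post the IOMB on circularQ as before ... */
	return 0;
}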
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3137,7 +3246,7 @@ static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
}
/**
- * hw_event_sas_phy_up -FW tells me a SAS phy up event.
+ * hw_event_sas_phy_up - the FW reports a SAS phy up event.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3158,45 +3267,42 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
port->wide_port_phymap |= (1U << phy_id);
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "portid:%d; phyid:%d; linkrate:%d; "
- "portstate:%x; devicetype:%x\n",
- port_id, phy_id, link_rate, portstate, deviceType));
+ pm8001_dbg(pm8001_ha, MSG,
+ "portid:%d; phyid:%d; linkrate:%d; portstate:%x; devicetype:%x\n",
+ port_id, phy_id, link_rate, portstate, deviceType);
switch (deviceType) {
case SAS_PHY_UNUSED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("device type no device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "device type no device.\n");
break;
case SAS_END_DEVICE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "end device.\n");
pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
PHY_NOTIFY_ENABLE_SPINUP);
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_EDGE_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("fanout expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("unknown device type(%x)\n", deviceType));
+ pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n",
+ deviceType);
break;
}
phy->phy_type |= PORT_TYPE_SAS;
@@ -3207,7 +3313,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
phy->sas_phy.oob_mode = SAS_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, &pPayload->sas_identify,
sizeof(struct sas_identify_frame)-4);
@@ -3215,12 +3321,12 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
if (pm8001_ha->flags == PM8001F_RUN_TIME)
- msleep(200);/*delay a moment to wait disk to spinup*/
+ mdelay(200); /* delay a moment to wait for disk to spin up */
pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
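The msleep() to mdelay() swap above is deliberate: with this completion path now running in interrupt context, sleeping is illegal, so the 200 ms spin-up grace period has to busy-wait instead. A hedged illustration of the constraint, not driver code:

/*
 * Hedged illustration: in process context a sleeping wait is
 * preferred, but in hard-IRQ context only a busy-wait is legal.
 */
static void spinup_grace_period(bool in_irq_context)
{
	if (in_irq_context)
		mdelay(200);	/* busy-wait: scheduling is not allowed */
	else
		msleep(200);	/* sleep: lets other work run meanwhile */
}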
/**
- * hw_event_sata_phy_up -FW tells me a SATA phy up event.
+ * hw_event_sata_phy_up - the FW reports a SATA phy up event.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3241,13 +3347,14 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "port id %d, phy id %d link_rate %d portstate 0x%x\n",
- port_id, phy_id, link_rate, portstate));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+ port_id, phy_id, link_rate, portstate);
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
port->port_attached = 1;
@@ -3255,7 +3362,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_type |= PORT_TYPE_SATA;
phy->phy_attached = 1;
phy->sas_phy.oob_mode = SATA_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
sizeof(struct dev_to_host_fis));
@@ -3268,7 +3375,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * hw_event_phy_down -we should notify the libsas the phy is down.
+ * hw_event_phy_down - notify libsas that the phy is down.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3296,10 +3403,10 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
case PORT_VALID:
break;
case PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInvalid portID %d\n", port_id));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
+ port_id);
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
if (port_sata) {
phy->phy_type = 0;
port->port_attached = 0;
@@ -3309,19 +3416,18 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_phy_disconnected(&phy->sas_phy);
break;
case PORT_IN_RESET:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Port In Reset portID %d\n", port_id));
+ pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
+ port_id);
break;
case PORT_NOT_ESTABLISHED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Phy Down and PORT_NOT_ESTABLISHED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ " Phy Down and PORT_NOT_ESTABLISHED\n");
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n");
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
@@ -3332,17 +3438,15 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk(" Phy Down and(default) = 0x%x\n",
- portstate));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ " Phy Down and(default) = 0x%x\n",
+ portstate);
break;
}
- if (port_sata && (portstate != PORT_IN_RESET)) {
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
-
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
- }
+ if (port_sata && (portstate != PORT_IN_RESET))
+ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
+ GFP_ATOMIC);
}
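hw_event_phy_down() now calls the libsas notifiers directly instead of going through the sas_ha function pointers, and passes an explicit allocation context. Since every call here is made from interrupt context, GFP_ATOMIC is the only safe choice. A usage sketch; the wrapper name is hypothetical:

/*
 * Hedged sketch: the libsas event helpers used above take a gfp_t so
 * callers in atomic context can forbid sleeping allocations:
 *
 *	sas_notify_phy_event(struct asd_sas_phy *, enum phy_event, gfp_t);
 *	sas_notify_port_event(struct asd_sas_phy *, enum port_event, gfp_t);
 */
static void pm80xx_report_signal_loss(struct pm8001_phy *phy)
{
	/* IRQ context: no sleeping, hence GFP_ATOMIC */
	sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
}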
static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3352,26 +3456,26 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phy_id =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
- status, phy_id));
- if (status == 0) {
+ pm8001_dbg(pm8001_ha, INIT,
+ "phy start resp status:0x%x, phyid:0x%x\n",
+ status, phy_id);
+ if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL) {
- complete(phy->enable_completion);
- phy->enable_completion = NULL;
- }
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
return 0;
}
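mpi_phy_start_resp() now signals phy->enable_completion on failed starts too, not only when status == 0, so a waiter is never left hanging on a firmware error. A sketch of the waiting side this pairs with; the function name and timeout are illustrative:

/*
 * Hedged sketch of the waiter this completion pairs with.  Completing
 * on failure as well as success (as the handler above now does) is
 * what keeps this wait from always timing out when the firmware
 * rejects a PHY_START request.
 */
static int pm80xx_start_phy_and_wait(struct pm8001_hba_info *pm8001_ha,
				     int phy_id)
{
	DECLARE_COMPLETION_ONSTACK(completion);

	pm8001_ha->phy[phy_id].enable_completion = &completion;
	/* ... issue OPC_INB_PHY_START for phy_id ... */
	if (!wait_for_completion_timeout(&completion,
					 msecs_to_jiffies(300)))
		return -ETIMEDOUT;
	return 0;
}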
/**
- * mpi_thermal_hw_event -The hw event has come.
+ * mpi_thermal_hw_event - a thermal hw event has arrived.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3384,24 +3488,24 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
if (thermal_event & 0x40) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Local high temperature violated!\n"));
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Measured local high temperature %d\n",
- ((rht_lht & 0xFF00) >> 8)));
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Local high temperature violated!\n");
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Measured local high temperature %d\n",
+ ((rht_lht & 0xFF00) >> 8));
}
if (thermal_event & 0x10) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Remote high temperature violated!\n"));
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Measured remote high temperature %d\n",
- ((rht_lht & 0xFF000000) >> 24)));
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Remote high temperature violated!\n");
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Measured remote high temperature %d\n",
+ ((rht_lht & 0xFF000000) >> 24));
}
return 0;
}
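Every handler in this patch collapses the old two-macro pairing PM8001_<LEVEL>_DBG(ha, pm8001_printk(...)) into a single pm8001_dbg(ha, LEVEL, ...) call. The macro's real definition lives in a header outside this diff; a sketch of the shape such a macro might take:

/*
 * Hedged sketch only -- the driver's actual pm8001_dbg() is defined in
 * a header not shown here.  The point is that one varargs macro keyed
 * by log level replaces every PM8001_*_DBG/pm8001_printk pair above.
 */
#define pm8001_dbg(HBA, level, fmt, ...)				\
do {									\
	if (unlikely((HBA)->logging_level & PM8001_##level##_LOGGING))	\
		pr_info("pm80xx: %s %d: " fmt,				\
			__func__, __LINE__, ##__VA_ARGS__);		\
} while (0)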
/**
- * mpi_hw_event -The hw event has come.
+ * mpi_hw_event - a hw event has arrived.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3424,149 +3528,144 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_DEV_DBG(pm8001_ha,
- pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
- port_id, phy_id, eventType, status));
+ pm8001_dbg(pm8001_ha, DEV,
+ "portid:%d phyid:%d event:0x%x status:0x%x\n",
+ port_id, phy_id, eventType, status);
switch (eventType) {
case HW_EVENT_SAS_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SAS_PHY_UP\n");
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
hw_event_phy_down(pm8001_ha, piomb);
if (pm8001_ha->reset_in_progress) {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Reset in progress\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Reset in progress\n");
return 0;
}
phy->phy_attached = 0;
phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
/* The broadcast change primitive was received; tell libsas about this
event so it can revalidate the SAS domain. */
case HW_EVENT_BROADCAST_CHANGE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
port_id, phy_id, 1, 0);
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
break;
case HW_EVENT_BROADCAST_EXP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_MALFUNCTION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
break;
case HW_EVENT_BROADCAST_SES:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+ sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
- pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
- port_id, phy_id, 0, 0);
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+ if (!pm8001_ha->phy[phy_id].reset_completion) {
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ }
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_TMO;
@@ -3575,28 +3674,26 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
port_id, phy_id, 0, 0);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
if (port->wide_port_phymap & (1 << i)) {
phy = &pm8001_ha->phy[i];
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
port->wide_port_phymap &= ~(1 << i);
}
}
break;
case HW_EVENT_PORT_RECOVER:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
hw_event_port_recover(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_SUCCESS;
@@ -3605,12 +3702,11 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown event type 0x%x\n", eventType));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n",
+ eventType);
break;
}
return 0;
@@ -3630,12 +3726,15 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phyid =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("phy:0x%x status:0x%x\n",
- phyid, status));
+ pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
+ phyid, status);
if (status == PHY_STOP_SUCCESS ||
- status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED;
+ phy->sas_phy.linkrate = SAS_PHY_DISABLED;
+ }
+
return 0;
}
@@ -3652,9 +3751,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
u32 status = le32_to_cpu(pPayload->status);
u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
- status, err_qlfr_pgcd));
+ pm8001_dbg(pm8001_ha, MSG,
+ "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+ status, err_qlfr_pgcd);
return 0;
}
@@ -3667,8 +3766,7 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3681,8 +3779,7 @@ static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3694,8 +3791,7 @@ static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
*/
static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3708,28 +3804,31 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
+ u32 tag;
u8 page_code;
+ int rc = 0;
struct set_phy_profile_resp *pPayload =
(struct set_phy_profile_resp *)(piomb + 4);
u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
u32 status = le32_to_cpu(pPayload->status);
+ tag = le32_to_cpu(pPayload->tag);
page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
if (status) {
/* status is FAILED */
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("PhyProfile command failed with status "
- "0x%08X \n", status));
- return -1;
+ pm8001_dbg(pm8001_ha, FAIL,
+ "PhyProfile command failed with status 0x%08X\n",
+ status);
+ rc = -1;
} else {
if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Invalid page code 0x%X\n",
- page_code));
- return -1;
+ pm8001_dbg(pm8001_ha, FAIL, "Invalid page code 0x%X\n",
+ page_code);
+ rc = -1;
}
}
- return 0;
+ pm8001_tag_free(pm8001_ha, tag);
+ return rc;
}
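mpi_set_phy_profile_resp() is restructured so pm8001_tag_free() runs on every exit, including the two failure paths that previously returned early and leaked the tag. The lifecycle it enforces, sketched with a hypothetical request-side function:

/*
 * Hedged sketch of the tag lifecycle: a tag allocated when the
 * request is built must be freed exactly once by the response
 * handler, error paths included, or the finite tag pool drains
 * over time.  Request-side shape (details elided):
 */
static int pm80xx_send_set_phy_profile(struct pm8001_hba_info *pm8001_ha)
{
	u32 tag;
	int rc;

	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	/* ... build and post the request carrying "tag" ...
	 * mpi_set_phy_profile_resp() above frees it on completion.
	 */
	return 0;
}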
/**
@@ -3746,9 +3845,9 @@ static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
- status, kidx_new_curr_ksop, err_qlfr));
+ pm8001_dbg(pm8001_ha, MSG,
+ "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+ status, kidx_new_curr_ksop, err_qlfr);
return 0;
}
@@ -3761,8 +3860,7 @@ static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3775,8 +3873,7 @@ static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3784,281 +3881,270 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
/**
* process_one_iomb - process one outbound Queue memory block
* @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
* @piomb: IO message buffer
*/
-static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
__le32 pHeader = *(__le32 *)piomb;
u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
switch (opc) {
case OPC_OUB_ECHO:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n");
break;
case OPC_OUB_HW_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_HW_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n");
mpi_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_THERM_HW_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_THERMAL_EVENT\n");
mpi_thermal_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n");
mpi_ssp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SMP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n");
mpi_smp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_LOCAL_PHY_CNTRL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n");
pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_REGIST:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n");
pm8001_mpi_reg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEREG_DEV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unregister the device\n"));
+ pm8001_dbg(pm8001_ha, MSG, "unregister the device\n");
pm8001_mpi_dereg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEV_HANDLE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n");
break;
case OPC_OUB_SATA_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_COMP\n"));
- mpi_sata_completion(pm8001_ha, piomb);
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
+ mpi_sata_completion(pm8001_ha, circularQ, piomb);
break;
case OPC_OUB_SATA_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_EVENT\n"));
- mpi_sata_event(pm8001_ha, piomb);
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
+ mpi_sata_event(pm8001_ha, circularQ, piomb);
break;
case OPC_OUB_SSP_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
mpi_ssp_event(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_HANDLE_ARRIV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n");
/* This is for target */
break;
case OPC_OUB_SSP_RECV_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n");
/* This is for target */
break;
case OPC_OUB_FW_FLASH_UPDATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n");
pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GPIO_RESPONSE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n");
break;
case OPC_OUB_GPIO_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n");
break;
case OPC_OUB_GENERAL_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n");
pm8001_mpi_general_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SAS_DIAG_MODE_START_END:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SAS_DIAG_MODE_START_END\n");
break;
case OPC_OUB_SAS_DIAG_EXECUTE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n");
break;
case OPC_OUB_GET_TIME_STAMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n");
break;
case OPC_OUB_SAS_HW_EVENT_ACK:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n");
break;
case OPC_OUB_PORT_CONTROL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n");
break;
case OPC_OUB_SMP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n");
pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n");
pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEVICE_HANDLE_REMOVAL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n");
break;
case OPC_OUB_SET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n");
pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n");
break;
case OPC_OUB_SET_DEV_INFO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
break;
- /* spcv specifc commands */
+ /* spcv specific commands */
case OPC_OUB_PHY_START_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_PHY_START_RESP opcode:%x\n", opc);
mpi_phy_start_resp(pm8001_ha, piomb);
break;
case OPC_OUB_PHY_STOP_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc);
mpi_phy_stop_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_CONTROLLER_CONFIG:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc);
mpi_set_controller_config_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_CONTROLLER_CONFIG:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc);
mpi_get_controller_config_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_PHY_PROFILE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc);
mpi_get_phy_profile_resp(pm8001_ha, piomb);
break;
case OPC_OUB_FLASH_OP_EXT:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc);
mpi_flash_op_ext_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_PHY_PROFILE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc);
mpi_set_phy_profile_resp(pm8001_ha, piomb);
break;
case OPC_OUB_KEK_MANAGEMENT_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc);
mpi_kek_management_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEK_MANAGEMENT_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc);
mpi_dek_management_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COALESCED_COMP_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc);
ssp_coalesced_comp_resp(pm8001_ha, piomb);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown outbound Queue IOMB OPC = 0x%x\n", opc);
break;
}
}
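For reference, the pm8001_dbg() macro these hunks convert to lives in pm8001_sas.h; a minimal sketch of its assumed shape (the exact body may differ):

/* Sketch only: one macro keyed by a per-level logging bit. */
#define pm8001_dbg(HBA, level, fmt, ...)                                \
do {                                                                    \
	if (unlikely((HBA)->logging_level & PM8001_##level##_LOGGING))  \
		pm8001_info(HBA, fmt, ##__VA_ARGS__);                   \
} while (0)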
static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
{
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_1:0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_0));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_1));
}
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 uninitialized_var(bc);
+ u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
- unsigned long flags;
u32 regval;
- if (vec == (pm8001_ha->number_of_intr - 1)) {
+ /*
+ * Fatal errors are programmed to be signalled in irq vector
+ * pm8001_ha->max_q_num - 1 through
+ * pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt.
+ */
+ if (vec == (pm8001_ha->max_q_num - 1)) {
+ u32 mipsall_ready;
+
+ if (pm8001_ha->chip_id == chip_8008 ||
+ pm8001_ha->chip_id == chip_8009)
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+ else
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
- SCRATCH_PAD_MIPSALL_READY) {
+ if ((regval & mipsall_ready) != mipsall_ready) {
pm8001_ha->controller_fatal_error = true;
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "Firmware Fatal error! Regval:0x%x\n", regval));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Firmware Fatal error! Regval:0x%x\n",
+ regval);
+ pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR);
print_scratchpad_registers(pm8001_ha);
return ret;
+ } else {
+ /* read scratchpad rsvd 0 register */
+ regval = pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0);
+ switch (regval) {
+ case NON_FATAL_SPBC_LBUS_ECC_ERR:
+ case NON_FATAL_BDMA_ERR:
+ case NON_FATAL_THERM_OVERTEMP_ERR:
+ /* Clear the register */
+ pm8001_cw32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0,
+ 0x00000000);
+ break;
+ default:
+ break;
+ }
}
}
- spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+ spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags);
do {
/* spurious interrupt during setup if kexec-ing and
* driver doing a doorbell access w/ the pre-kexec oq
@@ -4069,7 +4155,8 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
- process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ process_one_iomb(pm8001_ha, circularQ,
+ (void *)(pMsg1 - 4));
/* free the message from the outbound circular buffer */
pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
circularQ, bc);
@@ -4084,7 +4171,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
break;
}
} while (1);
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags);
return ret;
}
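The switch from pm8001_ha->lock to circularQ->oq_lock assumes a companion change in pm8001_sas.h that gives each outbound queue its own lock, so the per-vector queues can be drained concurrently instead of serializing on one HBA-wide lock; a sketch of the assumed fields:

/* Assumed addition to struct outbound_queue_table (sketch). */
struct outbound_queue_table {
	/* ... existing producer/consumer index fields ... */
	spinlock_t oq_lock;		/* serializes this queue only */
	unsigned long lock_flags;	/* saved irq flags for oq_lock */
};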
@@ -4110,7 +4197,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag,
}
/**
- * pm8001_chip_smp_req - send a SMP task to FW
+ * pm80xx_chip_smp_req - send an SMP task to FW
* @pm8001_ha: our hba card information.
* @ccb: the ccb information this request used.
*/
@@ -4121,14 +4208,13 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
- struct scatterlist *sg_req, *sg_resp;
+ struct scatterlist *sg_req, *sg_resp, *smp_req;
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
- struct inbound_queue_table *circularQ;
- char *preq_dma_addr = NULL;
- __le64 tmp_addr;
u32 i, length;
+ u8 *payload;
+ u8 *to;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4154,29 +4240,27 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
}
opc = OPC_INB_SMP_REQUEST;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
length = sg_req->length;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SMP Frame Length %d\n", sg_req->length));
+ pm8001_dbg(pm8001_ha, IO, "SMP Frame Length %d\n", sg_req->length);
if (!(length - 8))
pm8001_ha->smp_exp_mode = SMP_DIRECT;
else
pm8001_ha->smp_exp_mode = SMP_INDIRECT;
- tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
- preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+ smp_req = &task->smp_task.smp_req;
+ to = kmap_atomic(sg_page(smp_req));
+ payload = to + smp_req->offset;
/* INDIRECT MODE command settings. Use DMA */
if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "SMP REQUEST INDIRECT MODE\n");
/* for SPCv indirect mode. Place the top 4 bytes of
* SMP Request header here. */
for (i = 0; i < 4; i++)
- smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+ smp_cmd.smp_req16[i] = *(payload + i);
/* exclude top 4 bytes for SMP req header */
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address
@@ -4204,28 +4288,27 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
}
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n");
for (i = 0; i < length; i++)
if (i < 16) {
- smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Byte[%d]:%x (DMA data:%x)\n",
- i, smp_cmd.smp_req16[i],
- *(preq_dma_addr)));
+ smp_cmd.smp_req16[i] = *(payload + i);
+ pm8001_dbg(pm8001_ha, IO,
+ "Byte[%d]:%x (DMA data:%x)\n",
+ i, smp_cmd.smp_req16[i],
+ *(payload));
} else {
- smp_cmd.smp_req[i] = *(preq_dma_addr+i);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Byte[%d]:%x (DMA data:%x)\n",
- i, smp_cmd.smp_req[i],
- *(preq_dma_addr)));
+ smp_cmd.smp_req[i] = *(payload + i);
+ pm8001_dbg(pm8001_ha, IO,
+ "Byte[%d]:%x (DMA data:%x)\n",
+ i, smp_cmd.smp_req[i],
+ *(payload));
}
}
-
+ kunmap_atomic(to);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
- sizeof(smp_cmd), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
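In isolation, the highmem-safe access pattern adopted above (a sketch; variable names are illustrative): map the single request page, read the payload through the kernel alias, then unmap, instead of trusting phys_to_virt() on a DMA address.

struct scatterlist *sg = &task->smp_task.smp_req;
u8 *kva = kmap_atomic(sg_page(sg));	/* map the request page */
u8 *payload = kva + sg->offset;		/* byte view of the SMP frame */
/* ... copy header/payload bytes into the IOMB ... */
kunmap_atomic(kva);			/* unmap before returning */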
@@ -4272,8 +4355,31 @@ static int check_enc_sat_cmd(struct sas_task *task)
return ret;
}
+static u32 pm80xx_chip_get_q_index(struct sas_task *task)
+{
+ struct scsi_cmnd *scmd = NULL;
+ u32 blk_tag;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(task->dev)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (!scmd)
+ return 0;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ return blk_mq_unique_tag_to_hwq(blk_tag);
+}
+
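pm80xx_chip_get_q_index() works because the block layer packs the hardware queue index into the upper bits of the unique tag; from include/linux/blk-mq.h:

#define BLK_MQ_UNIQUE_TAG_BITS 16

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;	/* upper 16 bits */
}

So the hwq blk-mq picked for the request directly selects the controller inbound queue, replacing the old device-id modulo scheme.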
/**
- * pm80xx_chip_ssp_io_req - send a SSP task to FW
+ * pm80xx_chip_ssp_io_req - send an SSP task to FW
* @pm8001_ha: our hba card information.
* @ccb: the ccb information this request used.
*/
@@ -4285,14 +4391,14 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
- int ret;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
- struct inbound_queue_table *circularQ;
u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
+
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+
/* data address domain added for spcv; set to 0 by host,
* used internally by controller
* 0 for SAS 1.1 and SAS 2.0 compatible TLR
@@ -4303,20 +4409,19 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+ ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
- circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
+ q_index = pm80xx_chip_get_q_index(task);
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Encryption enabled.Sending Encrypt SAS command 0x%x\n",
- task->ssp_task.cmd->cmnd[0]));
+ pm8001_dbg(pm8001_ha, IO,
+ "Encryption enabled.Sending Encrypt SAS command 0x%x\n",
+ task->ssp_task.cmd->cmnd[0]);
opc = OPC_INB_SSP_INI_DIF_ENC_IO;
/* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
ssp_cmd.dad_dir_m_tlr = cpu_to_le32
@@ -4326,8 +4431,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
@@ -4335,35 +4439,33 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.enc_addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x "
- "end_addr_high=0x%08x end_addr_low="
- "0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.enc_len,
- end_addr_high, end_addr_low));
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+
+ if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ dma_addr,
+ le32_to_cpu(ssp_cmd.enc_len),
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(phys_addr));
- ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ ssp_cmd.enc_esgl = cpu_to_le32(1U<<31);
}
} else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0;
@@ -4371,23 +4473,24 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
}
+
/* XTS mode. All other fields are 0 */
- ssp_cmd.key_cmode = 0x6 << 4;
+ ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
(task->ssp_task.cmd->cmnd[3] << 16) |
(task->ssp_task.cmd->cmnd[4] << 8) |
(task->ssp_task.cmd->cmnd[5]));
} else {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Sending Normal SAS command 0x%x inb q %x\n",
- task->ssp_task.cmd->cmnd[0], q_index));
+ pm8001_dbg(pm8001_ha, IO,
+ "Sending Normal SAS command 0x%x inb q %x\n",
+ task->ssp_task.cmd->cmnd[0], q_index);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high =
@@ -4395,29 +4498,26 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x "
- "end_addr_high=0x%08x end_addr_low="
- "0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.len,
- end_addr_high, end_addr_low));
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ dma_addr,
+ le32_to_cpu(ssp_cmd.len),
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high =
@@ -4431,10 +4531,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0;
}
}
- q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, sizeof(ssp_cmd), q_index);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &ssp_cmd,
+ sizeof(ssp_cmd), q_index);
}
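Both branches above apply the same 4G rule; distilled into an illustrative helper (not part of the driver):

/* A single-SGE buffer crosses a 4 GiB boundary exactly when the upper
 * 32 bits of its first and last byte addresses differ.
 */
static inline bool sge_crosses_4g(u64 dma_addr, u32 len)
{
	return upper_32_bits(dma_addr) !=
	       upper_32_bits(dma_addr + len - 1);
}

When it does, the driver falls back to an external SG list (esgl bit 31 set) so the hardware never sees a crossing buffer.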
static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
@@ -4443,37 +4542,34 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
- u32 tag = ccb->ccb_tag;
- int ret;
- u32 q_index;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ u32 tag = ccb->ccb_tag, q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
u32 dir;
- struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
- circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == DMA_NONE) {
+ q_index = pm80xx_chip_get_q_index(task);
+
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+ pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
- ATAP = 0x06; /* DMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
- } else {
- ATAP = 0x05; /* PIO*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
- }
if (task->ata_task.use_ncq &&
dev->sata_dev.class != ATA_DEV_ATAPI) {
ATAP = 0x07; /* FPDMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
+ ATAP = 0x06; /* DMA */
+ pm8001_dbg(pm8001_ha, IO, "DMA\n");
+ } else {
+ ATAP = 0x05; /* PIO */
+ pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
@@ -4493,9 +4589,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
!(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
- sata_cmd.sata_fis.command));
+ pm8001_dbg(pm8001_ha, IO,
+ "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
+ sata_cmd.sata_fis.command);
opc = OPC_INB_SATA_DIF_ENC_IO;
/* set encryption bit */
@@ -4507,39 +4603,39 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
- sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
- sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+ phys_addr = ccb->ccb_dma_handle;
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
- sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
- sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(dma_addr));
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != sata_cmd.enc_addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x "
- "end_addr_high=0x%08x end_addr_low"
- "=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.enc_len,
- end_addr_high, end_addr_low));
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ dma_addr,
+ le32_to_cpu(sata_cmd.enc_len),
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.enc_addr_low =
- lower_32_bits(phys_addr);
+ cpu_to_le32(lower_32_bits(phys_addr));
sata_cmd.enc_addr_high =
- upper_32_bits(phys_addr);
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl =
cpu_to_le32(1 << 31);
}
@@ -4550,7 +4646,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.enc_esgl = 0;
}
/* XTS mode. All other fields are 0 */
- sata_cmd.key_index_mode = 0x6 << 4;
+ sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
sata_cmd.twk_val0 =
cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
@@ -4561,9 +4658,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
(sata_cmd.sata_fis.lbam_exp));
} else {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Sending Normal SATA command 0x%x inb %x\n",
- sata_cmd.sata_fis.command, q_index));
+ pm8001_dbg(pm8001_ha, IO,
+ "Sending Normal SATA command 0x%x inb %x\n",
+ sata_cmd.sata_fis.command, q_index);
/* dad (bit 0-1) is 0 */
sata_cmd.ncqtag_atap_dir_m_dad =
cpu_to_le32(((ncg_tag & 0xff)<<16) |
@@ -4573,40 +4670,34 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
sata_cmd.addr_low = lower_32_bits(dma_addr);
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
if (end_addr_high != sata_cmd.addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x"
- "end_addr_high=0x%08x end_addr_low="
- "0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.len,
- end_addr_high, end_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ dma_addr,
+ le32_to_cpu(sata_cmd.len),
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
- sata_cmd.addr_low =
- lower_32_bits(phys_addr);
- sata_cmd.addr_high =
- upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ phys_addr = ccb->ccb_dma_handle;
+ sata_cmd.addr_low = lower_32_bits(phys_addr);
+ sata_cmd.addr_high = upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
}
} else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0;
@@ -4614,27 +4705,28 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
}
+
/* scsi cdb */
sata_cmd.atapi_scsi_cdb[0] =
cpu_to_le32(((task->ata_task.atapi_packet[0]) |
- (task->ata_task.atapi_packet[1] << 8) |
- (task->ata_task.atapi_packet[2] << 16) |
- (task->ata_task.atapi_packet[3] << 24)));
+ (task->ata_task.atapi_packet[1] << 8) |
+ (task->ata_task.atapi_packet[2] << 16) |
+ (task->ata_task.atapi_packet[3] << 24)));
sata_cmd.atapi_scsi_cdb[1] =
cpu_to_le32(((task->ata_task.atapi_packet[4]) |
- (task->ata_task.atapi_packet[5] << 8) |
- (task->ata_task.atapi_packet[6] << 16) |
- (task->ata_task.atapi_packet[7] << 24)));
+ (task->ata_task.atapi_packet[5] << 8) |
+ (task->ata_task.atapi_packet[6] << 16) |
+ (task->ata_task.atapi_packet[7] << 24)));
sata_cmd.atapi_scsi_cdb[2] =
cpu_to_le32(((task->ata_task.atapi_packet[8]) |
- (task->ata_task.atapi_packet[9] << 8) |
- (task->ata_task.atapi_packet[10] << 16) |
- (task->ata_task.atapi_packet[11] << 24)));
+ (task->ata_task.atapi_packet[9] << 8) |
+ (task->ata_task.atapi_packet[10] << 16) |
+ (task->ata_task.atapi_packet[11] << 24)));
sata_cmd.atapi_scsi_cdb[3] =
cpu_to_le32(((task->ata_task.atapi_packet[12]) |
- (task->ata_task.atapi_packet[13] << 8) |
- (task->ata_task.atapi_packet[14] << 16) |
- (task->ata_task.atapi_packet[15] << 24)));
+ (task->ata_task.atapi_packet[13] << 8) |
+ (task->ata_task.atapi_packet[14] << 16) |
+ (task->ata_task.atapi_packet[15] << 24)));
}
/* Check for read log for failed drive and return */
@@ -4649,101 +4741,96 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
spin_lock_irqsave(&task->task_state_lock, flags);
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags &
SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p resp 0x%x "
- " stat 0x%x but aborted by upper layer "
- "\n", task, ts->resp, ts->stat));
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
+ task, ts->resp,
+ ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
return 0;
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free_done(pm8001_ha, task,
- ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
+ atomic_dec(&pm8001_ha_dev->running_req);
return 0;
}
}
}
- q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, sizeof(sata_cmd), q_index);
- return ret;
+ trace_pm80xx_request_issue(pm8001_ha->id,
+ ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS,
+ ccb->ccb_tag, opc,
+ qc ? qc->tf.command : 0, /* ata opcode */
+ ccb->device ? atomic_read(&ccb->device->running_req) : 0);
+ return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &sata_cmd,
+ sizeof(sata_cmd), q_index);
}
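These call sites assume the refactored pm8001_mpi_build_cmd() that takes a queue index rather than a struct inbound_queue_table pointer; its prototype presumably becomes something like:

int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, u32 q_index,
			 u32 opc, void *payload, size_t nb,
			 u32 responseQueue);

Note that q_index is passed for both the inbound queue and the response queue, keeping submission and completion on the same vector.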
/**
* pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
* @pm8001_ha: our hba card information.
- * @num: the inbound queue number
* @phy_id: the phy id which we wanted to start up.
*/
static int
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTART;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+ pm8001_dbg(pm8001_ha, INIT, "PHY START REQ for phy_id %d\n", phy_id);
payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
/* SSC Disable and SAS Analog ST configuration */
- /**
+ /*
payload.ase_sh_lm_slr_phyid =
cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
phy_id);
Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
- **/
+ */
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
&pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/**
- * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
* @pm8001_ha: our hba card information.
- * @num: the inbound queue number
* @phy_id: the phy id which we wanted to stop.
*/
static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTOP;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
-/**
+/*
* see comments on pm8001_mpi_reg_resp.
*/
static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
@@ -4752,25 +4839,22 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
struct reg_dev_req payload;
u32 opc;
u32 stp_sspsmp_sata = 0x4;
- struct inbound_queue_table *circularQ;
u32 linkrate, phy_id;
- int rc, tag = 0xdeadbeef;
+ int rc;
struct pm8001_ccb_info *ccb;
u8 retryFlag = 0x1;
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return rc;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = tag;
- payload.tag = cpu_to_le32(tag);
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
if (flag == 1) {
stp_sspsmp_sata = 0x02; /*direct attached sata */
@@ -4778,8 +4862,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_dev->dev_type == SAS_SATA_DEV)
stp_sspsmp_sata = 0x00; /* stp*/
else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
- pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
if (parent_dev && dev_is_expander(parent_dev->dev_type))
@@ -4793,7 +4876,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
payload.phyid_portid =
- cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+ cpu_to_le32(((port->port_id) & 0xFF) |
((phy_id & 0xFF) << 8));
payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
@@ -4805,10 +4888,10 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return rc;
}
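The tag bookkeeping above leans on a new allocation helper pair; an assumed sketch (the real definitions live elsewhere in the driver):

static inline struct pm8001_ccb_info *
pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha,
		 struct pm8001_device *dev, struct sas_task *task)
{
	u32 tag;

	if (pm8001_tag_alloc(pm8001_ha, &tag))
		return NULL;	/* queue full */

	pm8001_ha->ccb_info[tag].device = dev;
	pm8001_ha->ccb_info[tag].task = task;
	pm8001_ha->ccb_info[tag].ccb_tag = tag;
	return &pm8001_ha->ccb_info[tag];
}

with pm8001_ccb_free() undoing the setup and releasing the tag on the error path.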
@@ -4816,9 +4899,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
/**
* pm80xx_chip_phy_ctl_req - support the local phy operation
* @pm8001_ha: our hba card information.
- * @num: the inbound queue number
- * @phy_id: the phy id which we wanted to operate
- * @phy_op:
+ * @phyId: the phy id which we wanted to operate
+ * @phy_op: phy operation to request
*/
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 phyId, u32 phy_op)
@@ -4826,18 +4908,23 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 tag;
int rc;
struct local_phy_ctl_req payload;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4855,48 +4942,50 @@ static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_isr - PM8001 isr handler.
+ * pm80xx_chip_isr - PM8001 isr handler.
* @pm8001_ha: our hba card information.
- * @irq: irq number.
- * @stat: stat.
+ * @vec: irq number.
*/
static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm80xx_chip_interrupt_disable(pm8001_ha, vec);
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "irq vec %d, ODMR:0x%x\n",
- vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30));
process_oq(pm8001_ha, vec);
pm80xx_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
}
-void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
- u32 operation, u32 phyid, u32 length, u32 *buf)
+static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
+ u32 operation, u32 phyid,
+ u32 length, u32 *buf)
{
- u32 tag , i, j = 0;
+ u32 tag, i, j = 0;
int rc;
struct set_phy_profile_req payload;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SET_PHY_PROFILE;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ if (rc) {
+ pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n");
+ return;
+ }
+
payload.tag = cpu_to_le32(tag);
- payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk(" phy profile command for phy %x ,length is %d\n",
- payload.ppc_phyid, length));
+ payload.ppc_phyid =
+ cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF));
+ pm8001_dbg(pm8001_ha, INIT,
+ " phy profile command for phy %x ,length is %d\n",
+ le32_to_cpu(payload.ppc_phyid), length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
- payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
+ payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
@@ -4911,7 +5000,7 @@ void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
length = length + PHY_DWORD_LENGTH;
}
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+ pm8001_dbg(pm8001_ha, INIT, "phy settings completed\n");
}
void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
@@ -4920,35 +5009,36 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
u32 tag, opc;
int rc, i;
struct set_phy_profile_req payload;
- struct inbound_queue_table *circularQ;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("Invalid tag"));
+ if (rc) {
+ pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n");
+ return;
+ }
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
opc = OPC_INB_SET_PHY_PROFILE;
payload.tag = cpu_to_le32(tag);
- payload.ppc_phyid = (((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
- | (phy & 0xFF));
+ payload.ppc_phyid =
+ cpu_to_le32(((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
+ | (phy & 0xFF));
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PHY %d settings applied", phy));
+ pm8001_dbg(pm8001_ha, INIT, "PHY %d settings applied\n", phy);
}
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
+ .chip_post_init = pm80xx_chip_post_init,
.chip_soft_rst = pm80xx_chip_soft_rst,
.chip_rst = pm80xx_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
@@ -4972,4 +5062,6 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
.set_nvmd_req = pm8001_chip_set_nvmd_req,
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
+ .fatal_errors = pm80xx_fatal_errors,
+ .hw_event_ack_req = pm80xx_hw_event_ack_req,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 701951a0f715..acf6e3005b84 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -220,8 +220,8 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
-#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */
-#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */
+#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 50) /* 30 sec, in 20 ms polls */
+#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 50) /* 15 sec, in 20 ms polls */
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
@@ -672,11 +672,6 @@ struct task_abort_req {
u32 reserved[27];
} __attribute__((packed, aligned(4)));
-/* These flags used for SSP SMP & SATA Abort */
-#define ABORT_MASK 0x3
-#define ABORT_SINGLE 0x0
-#define ABORT_ALL 0x1
-
/**
* brief the data structure of SSP SATA SMP Abort Response
* use to describe SSP SMP & SATA Abort Response ( 64 bytes)
@@ -972,7 +967,7 @@ struct dek_mgmt_req {
struct set_phy_profile_req {
__le32 tag;
__le32 ppc_phyid;
- u32 reserved[29];
+ __le32 reserved[29];
} __attribute__((packed, aligned(4)));
/**
@@ -1272,6 +1267,7 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48
#define IO_DS_INVALID 0x49
+#define IO_FATAL_ERROR 0x51
/* WARNING: the value is not contiguous from here */
#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52
#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53
@@ -1365,8 +1361,21 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MSGU_HOST_SCRATCH_PAD_3 0x60
#define MSGU_HOST_SCRATCH_PAD_4 0x64
#define MSGU_HOST_SCRATCH_PAD_5 0x68
-#define MSGU_HOST_SCRATCH_PAD_6 0x6C
-#define MSGU_HOST_SCRATCH_PAD_7 0x70
+#define MSGU_SCRATCH_PAD_RSVD_0 0x6C
+#define MSGU_SCRATCH_PAD_RSVD_1 0x70
+
+#define MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) ((x & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) (((x >> 2) & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) ((((x >> 4) & 0x7) == 0x7) || \
+ (((x >> 4) & 0x7) == 0x4))
+#define MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) (((x >> 10) & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x) (((x >> 12) & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(x) \
+ (MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x))
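A sketch of how a fatal-error probe (for instance the new pm80xx_fatal_errors dispatch hook) might consume these macros; the actual implementation is not part of this hunk:

u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);

if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) {
	pm8001_dbg(pm8001_ha, FAIL,
		   "Fatal firmware state, scratch_pad1: 0x%x\n",
		   scratch_pad1);
	/* escalate: mark the controller dead / trigger a reset */
}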
/* bit definition for ODMR register */
#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all
@@ -1391,8 +1400,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
-#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \
SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
+ SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
SCRATCH_PAD_RAAE_READY)
/* boot loader state */
@@ -1421,6 +1434,11 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
+/*state definition for Scratchpad Rsvd 0, Offset 0x6C, Non-fatal*/
+#define NON_FATAL_SPBC_LBUS_ECC_ERR 0x70000001
+#define NON_FATAL_BDMA_ERR 0xE0000001
+#define NON_FATAL_THERM_OVERTEMP_ERR 0x80000001
+
/* main configuration offset - byte offset */
#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */
#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */
@@ -1639,3 +1657,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MEMBASE_II_SHIFT_REGISTER 0x1010
#endif
+
+/*
+ * A requested sleep of 1-20 ms may actually last up to ~20 ms, hence we
+ * choose a 20 ms polling interval.
+ */
+#define FW_READY_INTERVAL 20
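The retuned doorbell timeouts above count FW_READY_INTERVAL polls rather than microseconds: (30 * 50) iterations of ~20 ms each preserve the original 30 second budget. A sketch of the implied polling loop (register and flag names follow the existing driver, but the loop body here is illustrative):

u32 max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;	/* 1500 polls */

do {
	msleep(FW_READY_INTERVAL);	/* ~20 ms per iteration */
	value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
} while ((value & SPCv_MSGU_CFG_TABLE_UPDATE) && --max_wait_count);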
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c
new file mode 100644
index 000000000000..344aface9cdb
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "pm80xx_tracepoints.h"
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h
new file mode 100644
index 000000000000..5e669a8a9344
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pm80xx
+
+#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PM80XX_H
+
+#include <linux/tracepoint.h>
+#include "pm8001_sas.h"
+
+TRACE_EVENT(pm80xx_request_issue,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_request_complete,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_mpi_build_cmd,
+ TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci),
+
+ TP_ARGS(id, opc, htag, qi, pi, ci),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, opc)
+ __field(u32, htag)
+ __field(u32, qi)
+ __field(u32, pi)
+ __field(u32, ci)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->opc = opc;
+ __entry->htag = htag;
+ __entry->qi = qi;
+ __entry->pi = pi;
+ __entry->ci = ci;
+ ),
+
+ TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u",
+ __entry->id, __entry->opc, __entry->htag, __entry->qi,
+ __entry->pi, __entry->ci)
+);
+
+#endif /* _TRACE_PM80XX_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE pm80xx_tracepoints
+
+#include <trace/define_trace.h>
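A hypothetical completion-side counterpart to the issue-side call added in pm80xx_chip_sata_req() (argument names here are illustrative):

trace_pm80xx_request_complete(pm8001_ha->id, phy_id, ccb->ccb_tag,
			      opc, ata_opcode, running_req);

Once built in, the events can typically be enabled at runtime through tracefs, e.g. by writing 1 to events/pm80xx/pm80xx_request_issue/enable under the tracing mount.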
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 7eb88fe1eb0b..836ddc476764 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -443,15 +443,14 @@ static void pmcraid_disable_interrupts(
* pmcraid_enable_interrupts - Enables specified interrupts
*
* @pinstance: pointer to per adapter instance structure
- * @intr: interrupts to enable
+ * @intrs: interrupts to enable
*
* Return Value
* None
*/
static void pmcraid_enable_interrupts(
struct pmcraid_instance *pinstance,
- u32 intrs
-)
+ u32 intrs)
{
u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
@@ -533,15 +532,13 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
pinstance->ioa_unit_check = 1;
}
+static void pmcraid_ioa_reset(struct pmcraid_cmd *);
/**
* pmcraid_bist_done - completion function for PCI BIST
- * @cmd: pointer to reset command
+ * @t: pointer to reset command
* Return Value
* none
*/
-
-static void pmcraid_ioa_reset(struct pmcraid_cmd *);
-
static void pmcraid_bist_done(struct timer_list *t)
{
struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
@@ -595,7 +592,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
/**
* pmcraid_reset_alert_done - completion routine for reset_alert
- * @cmd: pointer to command block used in reset sequence
+ * @t: pointer to command block used in reset sequence
* Return value
* None
*/
@@ -626,16 +623,16 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
}
}
+static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
/**
* pmcraid_reset_alert - alerts IOA for a possible reset
- * @cmd : command block to be used for reset sequence.
+ * @cmd: command block to be used for reset sequence.
*
* Return Value
* returns 0 if pci config-space is accessible and RESET_DOORBELL is
* successfully written to IOA. Returns non-zero in case pci_config_space
* is not accessible
*/
-static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -676,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
/**
* pmcraid_timeout_handler - Timeout handler for internally generated ops
*
- * @cmd : pointer to command structure, that got timedout
+ * @t: pointer to the command structure that timed out
*
* This function blocks host requests and initiates an adapter reset.
*
@@ -840,11 +837,11 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
scsi_dma_unmap(scsi_cmd);
pmcraid_return_cmd(cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
/**
- * pmcraid_fire_command - sends an IOA command to adapter
+ * _pmcraid_fire_command - sends an IOA command to adapter
*
* This function adds the given block into pending command list
* and returns without waiting
@@ -961,6 +958,7 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
pmcraid_timeout_handler);
}
+static void pmcraid_querycfg(struct pmcraid_cmd *);
/**
* pmcraid_get_fwversion_done - completion function for get_fwversion
*
@@ -969,8 +967,6 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
* Return Value
* none
*/
-static void pmcraid_querycfg(struct pmcraid_cmd *);
-
static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -1382,10 +1378,9 @@ static void pmcraid_netlink_release(void)
genl_unregister_family(&pmcraid_event_family);
}
-/**
+/*
* pmcraid_notify_aen - sends event msg to user space application
* @pinstance: pointer to adapter instance structure
- * @type: HCAM type
*
* Return value:
* 0 if success, error value in case of any failure.
@@ -1393,8 +1388,7 @@ static void pmcraid_netlink_release(void)
static int pmcraid_notify_aen(
struct pmcraid_instance *pinstance,
struct pmcraid_aen_msg *aen_msg,
- u32 data_size
-)
+ u32 data_size)
{
struct sk_buff *skb;
void *msg_header;
@@ -1440,7 +1434,7 @@ static int pmcraid_notify_aen(
return -EINVAL;
}
- /* send genetlink multicast message to notify appplications */
+ /* send genetlink multicast message to notify applications */
genlmsg_end(skb, msg_header);
result = genlmsg_multicast(&pmcraid_event_family, skb,
@@ -1771,6 +1765,8 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
}
}
+static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
/**
* pmcraid_process_ldn - op done function for an LDN
* @cmd: pointer to command block
@@ -1778,9 +1774,6 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
* Return value
* none
*/
-static void pmcraid_initiate_reset(struct pmcraid_instance *);
-static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
-
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -1878,14 +1871,14 @@ static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
pmcraid_cancel_ldn(cmd);
}
+static void pmcraid_reinit_buffers(struct pmcraid_instance *);
+
/**
* pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
* @pinstance: pointer to adapter instance structure
* Return Value
* 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
*/
-static void pmcraid_reinit_buffers(struct pmcraid_instance *);
-
static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
{
u32 intrs;
@@ -2024,7 +2017,7 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
le32_to_cpu(resp) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
scsi_cmd->result);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
} else if (cmd->cmd_done == pmcraid_internal_done ||
cmd->cmd_done == pmcraid_erp_done) {
cmd->cmd_done(cmd);
@@ -2687,6 +2680,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
* pmcraid_reset_device - device reset handler functions
*
* @scsi_cmd: scsi command struct
+ * @timeout: command timeout
* @modifier: reset modifier indicating the reset sequence to be performed
*
* This function issues a device reset to the affected device.
@@ -2699,8 +2693,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
static int pmcraid_reset_device(
struct scsi_cmnd *scsi_cmd,
unsigned long timeout,
- u8 modifier
-)
+ u8 modifier)
{
struct pmcraid_cmd *cmd;
struct pmcraid_instance *pinstance;
@@ -2821,7 +2814,7 @@ static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
if (rc == 0) {
scsi_dma_unmap(scsi_cmd);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
}
return rc;
@@ -2860,10 +2853,8 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_cmd *cancel_cmd;
struct pmcraid_instance *pinstance;
- struct pmcraid_resource_entry *res;
pinstance = (struct pmcraid_instance *)cmd->drv_inst;
- res = cmd->scsi_cmd->device->hostdata;
cancel_cmd = pmcraid_get_free_cmd(pinstance);
@@ -3010,7 +3001,7 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
}
/**
- * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
+ * pmcraid_eh_device_reset_handler - bus/target/device reset handler callbacks
*
* @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
*
@@ -3191,127 +3182,8 @@ static int pmcraid_build_ioadl(
}
/**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- * none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
- sgl_free_order(sglist->scatterlist, sglist->order);
- kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- * pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
- struct pmcraid_sglist *sglist;
- int sg_size;
- int order;
-
- sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
- order = (sg_size > 0) ? get_order(sg_size) : 0;
-
- /* Allocate a scatter/gather list for the DMA */
- sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
- if (sglist == NULL)
- return NULL;
-
- sglist->order = order;
- sgl_alloc_order(buflen, order, false,
- GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg);
-
- return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
- struct pmcraid_sglist *sglist,
- void __user *buffer,
- u32 len,
- int direction
-)
-{
- struct scatterlist *sg;
- void *kaddr;
- int bsize_elem;
- int i;
- int rc = 0;
-
- /* Determine the actual number of bytes per element */
- bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
- sg = sglist->scatterlist;
-
- for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, bsize_elem);
-
- kunmap(page);
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- return -EFAULT;
- }
-
- sg->length = bsize_elem;
- }
-
- if (len % bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
-
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, len % bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
- kunmap(page);
-
- sg->length = len % bsize_elem;
- }
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/**
- * pmcraid_queuecommand - Queue a mid-layer request
+ * pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
- * @done: done function
*
* This function queues a request generated by the mid-layer. Midlayer calls
* this routine within host->lock. Some of the functions called by queuecommand
@@ -3322,10 +3194,7 @@ static int pmcraid_copy_sglist(
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
*/
-static int pmcraid_queuecommand_lck(
- struct scsi_cmnd *scsi_cmd,
- void (*done) (struct scsi_cmnd *)
-)
+static int pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd)
{
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
@@ -3337,7 +3206,6 @@ static int pmcraid_queuecommand_lck(
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
- scsi_cmd->scsi_done = done;
res = scsi_cmd->device->hostdata;
scsi_cmd->result = (DID_OK << 16);
@@ -3347,7 +3215,7 @@ static int pmcraid_queuecommand_lck(
if (pinstance->ioa_state == IOA_STATE_DEAD) {
pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
scsi_cmd->result = (DID_NO_CONNECT << 16);
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
return 0;
}
@@ -3360,7 +3228,7 @@ static int pmcraid_queuecommand_lck(
*/
if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
- scsi_cmd->scsi_done(scsi_cmd);
+ scsi_done(scsi_cmd);
return 0;
}
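
The scsi_cmd->scsi_done(scsi_cmd) to scsi_done(scsi_cmd) conversions above, together with the dropped done argument in pmcraid_queuecommand_lck(), follow the mid-layer change that stopped storing the completion callback in struct scsi_cmnd. A minimal sketch of the resulting pattern; the foo_* names are placeholders, not pmcraid code:

    static int foo_queuecommand_lck(struct scsi_cmnd *scsi_cmd)
    {
            if (foo_adapter_dead(scsi_cmd->device->host)) { /* placeholder check */
                    scsi_cmd->result = DID_NO_CONNECT << 16;
                    scsi_done(scsi_cmd);    /* complete in place, no stored callback */
                    return 0;
            }
            return foo_fire_command(scsi_cmd); /* ISR path also ends in scsi_done() */
    }

    static DEF_SCSI_QCMD(foo_queuecommand)  /* emits foo_queuecommand(), runs _lck under host_lock */
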
@@ -3432,7 +3300,7 @@ static int pmcraid_queuecommand_lck(
static DEF_SCSI_QCMD(pmcraid_queuecommand)
-/**
+/*
* pmcraid_open -char node "open" entry, allowed only users with admin access
*/
static int pmcraid_chr_open(struct inode *inode, struct file *filep)
@@ -3449,7 +3317,7 @@ static int pmcraid_chr_open(struct inode *inode, struct file *filep)
return 0;
}
-/**
+/*
* pmcraid_fasync - Async notifier registration from applications
*
* This function adds the calling process to a driver global queue. When an
@@ -3468,364 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
return rc;
}
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd : pointer to struct pmcraid_cmd
- * @buflen : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = NULL;
- struct scatterlist *sg = NULL;
- struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
- struct pmcraid_ioadl_desc *ioadl;
- int i;
-
- sglist = pmcraid_alloc_sglist(buflen);
-
- if (!sglist) {
- pmcraid_err("can't allocate memory for passthrough SGls\n");
- return -ENOMEM;
- }
-
- sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg, direction);
-
- if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
- dev_err(&cmd->drv_inst->pdev->dev,
- "Failed to map passthrough buffer!\n");
- pmcraid_free_sglist(sglist);
- return -EIO;
- }
-
- cmd->sglist = sglist;
- ioarcb->request_flags0 |= NO_LINK_DESCS;
-
- ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
- /* Initialize IOADL descriptor addresses */
- for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
- ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
- ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
- ioadl[i].flags = 0;
- }
-
- /* setup the last descriptor */
- ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
- return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = cmd->sglist;
-
- if (buflen > 0) {
- dma_unmap_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg,
- direction);
- pmcraid_free_sglist(sglist);
- cmd->sglist = NULL;
- }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @cmd: ioctl code
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
- struct pmcraid_instance *pinstance,
- unsigned int ioctl_cmd,
- unsigned int buflen,
- void __user *arg
-)
-{
- struct pmcraid_passthrough_ioctl_buffer *buffer;
- struct pmcraid_ioarcb *ioarcb;
- struct pmcraid_cmd *cmd;
- struct pmcraid_cmd *cancel_cmd;
- void __user *request_buffer;
- unsigned long request_offset;
- unsigned long lock_flags;
- void __user *ioasa;
- u32 ioasc;
- int request_size;
- int buffer_size;
- u8 direction;
- int rc = 0;
-
- /* If IOA reset is in progress, wait 10 secs for reset to complete */
- if (pinstance->ioa_reset_in_progress) {
- rc = wait_event_interruptible_timeout(
- pinstance->reset_wait_q,
- !pinstance->ioa_reset_in_progress,
- msecs_to_jiffies(10000));
-
- if (!rc)
- return -ETIMEDOUT;
- else if (rc < 0)
- return -ERESTARTSYS;
- }
-
- /* If adapter is not in operational state, return error */
- if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
- pmcraid_err("IOA is not operational\n");
- return -ENOTTY;
- }
-
- buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
-
- if (!buffer) {
- pmcraid_err("no memory for passthrough buffer\n");
- return -ENOMEM;
- }
-
- request_offset =
- offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
- request_buffer = arg + request_offset;
-
- rc = copy_from_user(buffer, arg,
- sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
- ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
- if (rc) {
- pmcraid_err("ioctl: can't copy passthrough buffer\n");
- rc = -EFAULT;
- goto out_free_buffer;
- }
-
- request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
- if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
- direction = DMA_TO_DEVICE;
- } else {
- direction = DMA_FROM_DEVICE;
- }
-
- if (request_size < 0) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- /* check if we have any additional command parameters */
- if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
- > PMCRAID_ADD_CMD_PARAM_LEN) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- cmd = pmcraid_get_free_cmd(pinstance);
-
- if (!cmd) {
- pmcraid_err("free command block is not available\n");
- rc = -ENOMEM;
- goto out_free_buffer;
- }
-
- cmd->scsi_cmd = NULL;
- ioarcb = &(cmd->ioa_cb->ioarcb);
-
- /* Copy the user-provided IOARCB stuff field by field */
- ioarcb->resource_handle = buffer->ioarcb.resource_handle;
- ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
- ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
- ioarcb->request_type = buffer->ioarcb.request_type;
- ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
- ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
- memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
- if (buffer->ioarcb.add_cmd_param_length) {
- ioarcb->add_cmd_param_length =
- buffer->ioarcb.add_cmd_param_length;
- ioarcb->add_cmd_param_offset =
- buffer->ioarcb.add_cmd_param_offset;
- memcpy(ioarcb->add_data.u.add_cmd_params,
- buffer->ioarcb.add_data.u.add_cmd_params,
- le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
- }
-
- /* set hrrq number where the IOA should respond to. Note that all cmds
- * generated internally uses hrrq_id 0, exception to this is the cmd
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
-
- if (request_size) {
- rc = pmcraid_build_passthrough_ioadls(cmd,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_cmd;
- }
- }
-
- /* If data is being written into the device, copy the data from user
- * buffers
- */
- if (direction == DMA_TO_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- goto out_free_sglist;
- }
- }
-
- /* passthrough ioctl is a blocking command so, put the user to sleep
- * until timeout. Note that a timeout value of 0 means, do timeout.
- */
- cmd->cmd_done = pmcraid_internal_done;
- init_completion(&cmd->wait_for_completion);
- cmd->completion_req = 1;
-
- pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0],
- le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- _pmcraid_fire_command(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- /* NOTE ! Remove the below line once abort_task is implemented
- * in firmware. This line disables ioctl command timeout handling logic
- * similar to IO command timeout handling, making ioctl commands to wait
- * until the command completion regardless of timeout value specified in
- * ioarcb
- */
- buffer->ioarcb.cmd_timeout = 0;
-
- /* If command timeout is specified put caller to wait till that time,
- * otherwise it would be blocking wait. If command gets timed out, it
- * will be aborted.
- */
- if (buffer->ioarcb.cmd_timeout == 0) {
- wait_for_completion(&cmd->wait_for_completion);
- } else if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
- pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0]);
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- cancel_cmd = pmcraid_abort_cmd(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- if (cancel_cmd) {
- wait_for_completion(&cancel_cmd->wait_for_completion);
- ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
- pmcraid_return_cmd(cancel_cmd);
-
- /* if abort task couldn't find the command i.e it got
- * completed prior to aborting, return good completion.
- * if command got aborted successfully or there was IOA
- * reset due to abort task itself getting timedout then
- * return -ETIMEDOUT
- */
- if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
- PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
- if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
- rc = -ETIMEDOUT;
- goto out_handle_response;
- }
- }
-
- /* no command block for abort task or abort task failed to abort
- * the IOARCB, then wait for 150 more seconds and initiate reset
- * sequence after timeout
- */
- if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(150 * 1000))) {
- pmcraid_reset_bringup(cmd->drv_inst);
- rc = -ETIMEDOUT;
- }
- }
-
-out_handle_response:
- /* copy entire IOASA buffer and return IOCTL success.
- * If copying IOASA to user-buffer fails, return
- * EFAULT
- */
- if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
- sizeof(struct pmcraid_ioasa))) {
- pmcraid_err("failed to copy ioasa buffer to user\n");
- rc = -EFAULT;
- }
-
- /* If the data transfer was from device, copy the data onto user
- * buffers
- */
- else if (direction == DMA_FROM_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- rc = -EFAULT;
- }
- }
-
-out_free_sglist:
- pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
- pmcraid_return_cmd(cmd);
-
-out_free_buffer:
- kfree(buffer);
-
- return rc;
-}
-
-
-
-
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
@@ -3896,7 +3406,7 @@ static int pmcraid_check_ioctl_buffer(
return 0;
}
-/**
+/*
* pmcraid_ioctl - char node ioctl entry point
*/
static long pmcraid_chr_ioctl(
@@ -3935,20 +3445,6 @@ static long pmcraid_chr_ioctl(
switch (_IOC_TYPE(cmd)) {
- case PMCRAID_PASSTHROUGH_IOCTL:
- /* If ioctl code is to download microcode, we need to block
- * mid-layer requests.
- */
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_block_requests(pinstance->host);
-
- retval = pmcraid_ioctl_passthrough(pinstance, cmd,
- hdr->buffer_length, argp);
-
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_unblock_requests(pinstance->host);
- break;
-
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance, cmd,
@@ -3965,7 +3461,7 @@ static long pmcraid_chr_ioctl(
return retval;
}
-/**
+/*
* File operations structure for management interface
*/
static const struct file_operations pmcraid_fops = {
@@ -3983,6 +3479,7 @@ static const struct file_operations pmcraid_fops = {
/**
* pmcraid_show_log_level - Display adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -4002,6 +3499,7 @@ static ssize_t pmcraid_show_log_level(
/**
* pmcraid_store_log_level - Change the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
* @count: not used
*
@@ -4044,6 +3542,7 @@ static struct device_attribute pmcraid_log_level_attr = {
/**
* pmcraid_show_drv_version - Display driver version
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -4068,8 +3567,9 @@ static struct device_attribute pmcraid_driver_version_attr = {
};
/**
- * pmcraid_show_io_adapter_id - Display driver assigned adapter id
+ * pmcraid_show_adapter_id - Display driver assigned adapter id
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -4101,13 +3601,14 @@ static struct device_attribute pmcraid_adapter_id_attr = {
.show = pmcraid_show_adapter_id,
};
-static struct device_attribute *pmcraid_host_attrs[] = {
- &pmcraid_log_level_attr,
- &pmcraid_driver_version_attr,
- &pmcraid_adapter_id_attr,
+static struct attribute *pmcraid_host_attrs[] = {
+ &pmcraid_log_level_attr.attr,
+ &pmcraid_driver_version_attr.attr,
+ &pmcraid_adapter_id_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(pmcraid_host);
/* host template structure for pmcraid driver */
static struct scsi_host_template pmcraid_host_template = {
@@ -4130,7 +3631,7 @@ static struct scsi_host_template pmcraid_host_template = {
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
.no_write_same = 1,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
- .shost_attrs = pmcraid_host_attrs,
+ .shost_groups = pmcraid_host_groups,
.proc_name = PMCRAID_DRIVER_NAME,
};
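
The sysfs conversion above is the stock shost_attrs to shost_groups migration: the host template now takes attribute groups, so the raw struct attribute pointers are collected into an array and ATTRIBUTE_GROUPS() generates the group plus the NULL-terminated group list. A sketch, assuming foo_log_level_attr was defined as a struct device_attribute:

    static struct attribute *foo_host_attrs[] = {
            &foo_log_level_attr.attr,       /* embedded .attr of the device_attribute */
            NULL,
    };

    ATTRIBUTE_GROUPS(foo_host);             /* defines foo_host_group and foo_host_groups[] */

    static struct scsi_host_template foo_template = {
            /* ... */
            .shost_groups = foo_host_groups,
    };
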
@@ -4530,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0;
out_unwind:
- while (--i > 0)
+ while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;
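
The unwind change above fixes an off-by-one: after request_irq() fails at index i, the vectors already registered are 0..i-1, and "while (--i > 0)" never frees vector 0. The corrected idiom, sketched with placeholder names:

    for (i = 0; i < num_vectors; i++) {
            rc = request_irq(pci_irq_vector(pdev, i), foo_isr, 0,
                             "foo", &vec[i]);
            if (rc)
                    goto out_unwind;
    }
    return 0;

    out_unwind:
            while (--i >= 0)        /* ">= 0" so vector 0 is freed as well */
                    free_irq(pci_irq_vector(pdev, i), &vec[i]);
            pci_free_irq_vectors(pdev);
            return rc;
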
@@ -4591,7 +4092,7 @@ pmcraid_release_control_blocks(
/**
* pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
- * @pinstance - pointer to per adapter instance structure
+ * @pinstance: pointer to per adapter instance structure
*
* Allocates memory for command blocks using kernel slab allocator.
*
@@ -4652,7 +4153,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb =
- dma_pool_alloc(
+ dma_pool_zalloc(
pinstance->control_pool,
GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
@@ -4661,8 +4162,6 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM;
}
- memset(pinstance->cmd_list[i]->ioa_cb, 0,
- sizeof(struct pmcraid_control_block));
}
return 0;
}
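
dma_pool_zalloc() returns zeroed memory, which is what makes the explicit memset of each control block removable here. The same substitution in isolation:

    /* before: allocate, then zero by hand */
    cb = dma_pool_alloc(pool, GFP_KERNEL, &bus_addr);
    if (cb)
            memset(cb, 0, sizeof(*cb));

    /* after: one call, guaranteed zeroed on success */
    cb = dma_pool_zalloc(pool, GFP_KERNEL, &bus_addr);
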
@@ -4718,7 +4217,6 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
return -ENOMEM;
}
- memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
@@ -5092,7 +4590,7 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
pint_regs->global_interrupt_mask_reg =
mapped_pci_addr + chip_cfg->global_intr_mask;
- };
+ }
pinstance->ioa_reset_attempts = 0;
init_waitqueue_head(&pinstance->reset_wait_q);
@@ -5139,7 +4637,7 @@ static void pmcraid_shutdown(struct pci_dev *pdev)
}
-/**
+/*
* pmcraid_get_minor - returns unused minor number from minor number bitmap
*/
static unsigned short pmcraid_get_minor(void)
@@ -5151,7 +4649,7 @@ static unsigned short pmcraid_get_minor(void)
return minor;
}
-/**
+/*
* pmcraid_release_minor - releases given minor back to minor number bitmap
*/
static void pmcraid_release_minor(unsigned short minor)
@@ -5239,54 +4737,37 @@ static void pmcraid_remove(struct pci_dev *pdev)
return;
}
-#ifdef CONFIG_PM
/**
* pmcraid_suspend - driver suspend entry point for power management
- * @pdev: PCI device structure
- * @state: PCI power state to suspend routine
+ * @dev: Device structure
*
* Return Value - 0 always
*/
-static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pmcraid_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
pmcraid_kill_tasklets(pinstance);
- pci_set_drvdata(pinstance->pdev, pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
/**
* pmcraid_resume - driver resume entry point PCI power management
- * @pdev: PCI device structure
+ * @dev: Device structure
*
* Return Value - 0 in case of success. Error code in case of any failure
*/
-static int pmcraid_resume(struct pci_dev *pdev)
+static int __maybe_unused pmcraid_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
struct Scsi_Host *host = pinstance->host;
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- rc = pci_enable_device(pdev);
-
- if (rc) {
- dev_err(&pdev->dev, "resume: Enable device failed\n");
- return rc;
- }
-
- pci_set_master(pdev);
+ int rc = 0;
if (sizeof(dma_addr_t) == 4 ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
@@ -5339,18 +4820,10 @@ release_host:
scsi_host_put(host);
disable_device:
- pci_disable_device(pdev);
return rc;
}
-#else
-
-#define pmcraid_suspend NULL
-#define pmcraid_resume NULL
-
-#endif /* CONFIG_PM */
-
/**
* pmcraid_complete_ioa_reset - Called by either timer or tasklet during
* completion of the ioa reset
@@ -5838,6 +5311,8 @@ out_disable_device:
return -ENODEV;
}
+static SIMPLE_DEV_PM_OPS(pmcraid_pm_ops, pmcraid_suspend, pmcraid_resume);
+
/*
* PCI driver structure of pmcraid driver
*/
@@ -5846,8 +5321,7 @@ static struct pci_driver pmcraid_driver = {
.id_table = pmcraid_pci_table,
.probe = pmcraid_probe,
.remove = pmcraid_remove,
- .suspend = pmcraid_suspend,
- .resume = pmcraid_resume,
+ .driver.pm = &pmcraid_pm_ops,
.shutdown = pmcraid_shutdown
};
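
The suspend/resume rework is the generic dev_pm_ops conversion: the legacy pci_driver .suspend/.resume hooks become device-level callbacks, and the PCI core takes over pci_save_state(), pci_set_power_state(), pci_enable_device() and friends, which is why those calls vanish from the driver. The shape of the conversion, with hypothetical foo_* names; __maybe_unused keeps the build quiet when CONFIG_PM_SLEEP compiles the ops away:

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            struct foo_instance *inst = pci_get_drvdata(to_pci_dev(dev));

            foo_quiesce(inst);      /* driver-side quiesce only; core handles PCI state */
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            struct foo_instance *inst = pci_get_drvdata(to_pci_dev(dev));

            return foo_reinit(inst);        /* PCI state already restored by the core */
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
            /* ... */
            .driver.pm = &foo_pm_ops,
    };
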
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index a4f7eb8f50a3..9f59930e8b4f 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -47,8 +47,8 @@
/*
* MAX_CMD : maximum commands that can be outstanding with IOA
* MAX_IO_CMD : command blocks available for IO commands
- * MAX_HCAM_CMD : command blocks avaibale for HCAMS
- * MAX_INTERNAL_CMD : command blocks avaible for internal commands like reset
+ * MAX_HCAM_CMD : command blocks available for HCAMS
+ * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
*/
#define PMCRAID_MAX_CMD 1024
#define PMCRAID_MAX_IO_CMD 1020
@@ -244,7 +244,7 @@ struct pmcraid_ioarcb {
__u8 hrrq_id;
__u8 cdb[PMCRAID_MAX_CDB_LEN];
struct pmcraid_ioarcb_add_data add_data;
-} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
+};
/* well known resource handle values */
#define PMCRAID_IOA_RES_HANDLE 0xffffffff
@@ -623,7 +623,7 @@ struct pmcraid_aen_msg {
u32 hostno;
u32 length;
u8 reserved[8];
- u8 data[0];
+ u8 data[];
};
/* Controller state event message type */
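
The data[0] to data[] change is the tree-wide move from the GNU zero-length array extension to a C99 flexible array member, which lets the compiler catch sizeof misuse and out-of-bounds accesses. An illustrative struct (not from this driver) with the matching allocation idiom:

    struct foo_msg {
            u32 length;
            u8 data[];      /* C99 flexible array member; contributes 0 to sizeof */
    };

    struct foo_msg *msg;

    /* struct_size() sizes the header plus the trailing array, overflow-checked */
    msg = kzalloc(struct_size(msg, data, payload_len), GFP_KERNEL);
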
@@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header {
#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
/*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- * the transfer directions passed in ioarcb.flags0. Contents
- * of this buffer are valid only when ioarcb.data_transfer_len
- * is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
- struct pmcraid_ioctl_header ioctl_header;
- struct pmcraid_ioarcb ioarcb;
- struct pmcraid_ioasa ioasa;
- u8 request_buffer[1];
-} __attribute__ ((packed));
-
-/*
* keys to differentiate between driver handled IOCTLs and passthrough
* IOCTLs passed to IOA. driver determines the ioctl type using macro
* _IOC_TYPE
*/
#define PMCRAID_DRIVER_IOCTL 'D'
-#define PMCRAID_PASSTHROUGH_IOCTL 'F'
#define DRV_IOCTL(n, size) \
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
-#define FMW_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
-
/*
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
* This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
#define PMCRAID_IOCTL_RESET_ADAPTER \
DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
- FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
- FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index a406cc825426..c6c1bc608224 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -45,6 +45,11 @@ typedef struct {
#include "ppa.h"
+static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline ppa_struct *ppa_dev(struct Scsi_Host *host)
{
return *(ppa_struct **)&host->hostdata;
@@ -56,7 +61,7 @@ static void got_it(ppa_struct *dev)
{
dev->base = dev->dev->port->base;
if (dev->cur_cmd)
- dev->cur_cmd->SCp.phase = 1;
+ ppa_scsi_pointer(dev->cur_cmd)->phase = 1;
else
wake_up(dev->waiting);
}
@@ -511,13 +516,14 @@ static inline int ppa_send_command(struct scsi_cmnd *cmd)
* The driver appears to remain stable if we speed up the parallel port
* i/o in this function, but not elsewhere.
*/
-static int ppa_completion(struct scsi_cmnd *cmd)
+static int ppa_completion(struct scsi_cmnd *const cmd)
{
/* Return codes:
* -1 Error
* 0 Told to schedule
* 1 Finished data transfer
*/
+ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd);
ppa_struct *dev = ppa_dev(cmd->device->host);
unsigned short ppb = dev->base;
unsigned long start_jiffies = jiffies;
@@ -543,7 +549,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
if (time_after(jiffies, start_jiffies + 1))
return 0;
- if ((cmd->SCp.this_residual <= 0)) {
+ if (scsi_pointer->this_residual <= 0) {
ppa_fail(dev, DID_ERROR);
return -1; /* ERROR_RETURN */
}
@@ -572,28 +578,30 @@ static int ppa_completion(struct scsi_cmnd *cmd)
}
/* determine if we should use burst I/O */
- fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
- ? PPA_BURST_SIZE : 1;
+ fast = bulk && scsi_pointer->this_residual >= PPA_BURST_SIZE ?
+ PPA_BURST_SIZE : 1;
if (r == (unsigned char) 0xc0)
- status = ppa_out(dev, cmd->SCp.ptr, fast);
+ status = ppa_out(dev, scsi_pointer->ptr, fast);
else
- status = ppa_in(dev, cmd->SCp.ptr, fast);
+ status = ppa_in(dev, scsi_pointer->ptr, fast);
- cmd->SCp.ptr += fast;
- cmd->SCp.this_residual -= fast;
+ scsi_pointer->ptr += fast;
+ scsi_pointer->this_residual -= fast;
if (!status) {
ppa_fail(dev, DID_BUS_BUSY);
return -1; /* ERROR_RETURN */
}
- if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ if (scsi_pointer->buffer && !scsi_pointer->this_residual) {
/* if scatter/gather, advance to the next segment */
- if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- cmd->SCp.this_residual =
- cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (scsi_pointer->buffers_residual--) {
+ scsi_pointer->buffer =
+ sg_next(scsi_pointer->buffer);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr =
+ sg_virt(scsi_pointer->buffer);
}
}
 /* Now check to see if the drive is ready to communicate */
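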
@@ -658,18 +666,19 @@ static void ppa_interrupt(struct work_struct *work)
}
#endif
- if (cmd->SCp.phase > 1)
+ if (ppa_scsi_pointer(cmd)->phase > 1)
ppa_disconnect(dev);
ppa_pb_dismiss(dev);
dev->cur_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd);
unsigned short ppb = dev->base;
unsigned char l = 0, h = 0;
int retv;
@@ -680,7 +689,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (dev->failed)
return 0;
- switch (cmd->SCp.phase) {
+ switch (scsi_pointer->phase) {
case 0: /* Phase 0 - Waiting for parport */
if (time_after(jiffies, dev->jstart + HZ)) {
/*
@@ -715,17 +724,17 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 1; /* Try again in a jiffy */
}
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
}
- /* fall through */
+ fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!ppa_select(dev, scmd_id(cmd))) {
ppa_fail(dev, DID_NO_CONNECT);
return 0;
}
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -734,22 +743,23 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = 0;
- cmd->SCp.ptr = NULL;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->ptr = NULL;
}
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->phase++;
+ fallthrough;
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -761,8 +771,8 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
if (retv == 0)
return 1;
- cmd->SCp.phase++;
- /* fall through */
+ scsi_pointer->phase++;
+ fallthrough;
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
@@ -779,7 +789,6 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
(DID_OK << 16) + (h << 8) + (l & STATUS_MASK);
}
return 0; /* Finished */
- break;
default:
printk(KERN_ERR "ppa: Invalid scsi phase\n");
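
The /* fall through */ comments in the phase machine become the fallthrough pseudo-keyword, which compilers can actually check (comment-based suppression of -Wimplicit-fallthrough is unreliable, notably under clang). In miniature, with placeholder helpers:

    switch (scsi_pointer->phase) {
    case 2:
            foo_select();           /* stands in for the bus-select step */
            scsi_pointer->phase++;
            fallthrough;            /* expands to __attribute__((__fallthrough__)) */
    case 3:
            foo_send_command();
            break;
    }
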
@@ -787,8 +796,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
-static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done) (struct scsi_cmnd *))
+static int ppa_queuecommand_lck(struct scsi_cmnd *cmd)
{
ppa_struct *dev = ppa_dev(cmd->device->host);
@@ -799,9 +807,8 @@ static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
dev->failed = 0;
dev->jstart = jiffies;
dev->cur_cmd = cmd;
- cmd->scsi_done = done;
cmd->result = DID_ERROR << 16; /* default return code */
- cmd->SCp.phase = 0; /* bus free */
+ ppa_scsi_pointer(cmd)->phase = 0; /* bus free */
schedule_delayed_work(&dev->ppa_tq, 0);
@@ -842,15 +849,13 @@ static int ppa_abort(struct scsi_cmnd *cmd)
* have tied the SCSI_MESSAGE line high in the interface
*/
- switch (cmd->SCp.phase) {
+ switch (ppa_scsi_pointer(cmd)->phase) {
case 0: /* Do not have access to parport */
case 1: /* Have not connected to interface */
dev->cur_cmd = NULL; /* Forget the problem */
return SUCCESS;
- break;
default: /* SCSI command sent, can not abort */
return FAILED;
- break;
}
}
@@ -866,7 +871,7 @@ static int ppa_reset(struct scsi_cmnd *cmd)
{
ppa_struct *dev = ppa_dev(cmd->device->host);
- if (cmd->SCp.phase)
+ if (ppa_scsi_pointer(cmd)->phase)
ppa_disconnect(dev);
dev->cur_cmd = NULL; /* Forget the problem */
@@ -981,6 +986,7 @@ static struct scsi_host_template ppa_template = {
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = ppa_adjust_queue,
+ .cmd_size = sizeof(struct scsi_pointer),
};
/***************************************************************************
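
struct scsi_pointer is no longer embedded in every scsi_cmnd; a driver that still needs one reserves per-command space through .cmd_size and reaches it with scsi_cmd_priv(), exactly as the ppa_scsi_pointer() helper above does. The two pieces that must stay in sync, sketched generically:

    static struct scsi_pointer *foo_scsi_pointer(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);      /* the .cmd_size area behind the scsi_cmnd */
    }

    static struct scsi_host_template foo_template = {
            /* ... */
            .cmd_size = sizeof(struct scsi_pointer),        /* reserves the private area */
    };

The driver still initializes the fields itself; queuecommand above sets phase = 0 for each new command.
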
@@ -1151,18 +1157,6 @@ static struct parport_driver ppa_driver = {
.detach = ppa_detach,
.devmodel = true,
};
+module_parport_driver(ppa_driver);
-static int __init ppa_driver_init(void)
-{
- printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
- return parport_register_driver(&ppa_driver);
-}
-
-static void __exit ppa_driver_exit(void)
-{
- parport_unregister_driver(&ppa_driver);
-}
-
-module_init(ppa_driver_init);
-module_exit(ppa_driver_exit);
MODULE_LICENSE("GPL");
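
module_parport_driver() collapses the registration boilerplate into one line; the only behavioral change is that the version banner printed from the old init function goes away. The macro usage, sketched with placeholder hooks:

    static struct parport_driver foo_driver = {
            .name           = "foo",
            .match_port     = foo_attach,
            .detach         = foo_detach,
            .devmodel       = true,
    };
    module_parport_driver(foo_driver);      /* generates module_init()/module_exit() */
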
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index f75c0b5cd587..2b80cab70333 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -200,8 +200,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev,
return 0;
}
-static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct ps3rom_private *priv = shost_priv(cmd->device->host);
struct ps3_storage_device *dev = priv->dev;
@@ -209,7 +208,6 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
int res;
priv->curr_cmd = cmd;
- cmd->scsi_done = done;
opcode = cmd->cmnd[0];
/*
@@ -234,12 +232,10 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
}
if (res) {
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0);
cmd->result = res;
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = ILLEGAL_REQUEST;
priv->curr_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
return 0;
@@ -319,12 +315,11 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data)
goto done;
}
- scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
- cmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 0, sense_key, asc, ascq);
done:
priv->curr_cmd = NULL;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return IRQ_HANDLED;
}
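
scsi_build_sense() folds the two-step sequence of filling the sense buffer and setting the result into one helper. Side by side (desc = 0 selects fixed-format sense data):

    /* before: build the sense buffer, then set the result separately */
    scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
    cmd->result = SAM_STAT_CHECK_CONDITION;

    /* after: one call fills the buffer and sets CHECK CONDITION */
    scsi_build_sense(cmd, 0, sense_key, asc, ascq);
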
@@ -402,7 +397,7 @@ fail_free_bounce:
return error;
}
-static int ps3rom_remove(struct ps3_system_bus_device *_dev)
+static void ps3rom_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd);
@@ -412,7 +407,6 @@ static int ps3rom_remove(struct ps3_system_bus_device *_dev)
scsi_host_put(host);
ps3_system_bus_set_drvdata(&dev->sbd, NULL);
kfree(dev->bounce_buf);
- return 0;
}
static struct ps3_system_bus_driver ps3rom = {
diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig
index 7cd993be4e57..eb81a1b0374a 100644
--- a/drivers/scsi/qedf/Kconfig
+++ b/drivers/scsi/qedf/Kconfig
@@ -7,6 +7,6 @@ config QEDF
depends on LIBFCOE
select QED_LL2
select QED_FCOE
- ---help---
+ help
This driver supports FCoE offload for the QLogic FastLinQ
41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 747af96dd15c..e8bc8d9e4583 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -22,9 +22,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
@@ -115,9 +115,9 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 1ee31a5f063b..7125e484bf93 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -10,7 +10,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct e4_fcoe_task_context *context;
+ struct fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index f3f399fe10c8..c5c0bbdafc4e 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -11,9 +11,6 @@
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>
-#include <scsi/fc_encode.h>
-#include <linux/version.h>
-
/* qedf_hsi.h needs to before included any qed includes */
#include "qedf_hsi.h"
@@ -94,7 +91,6 @@ enum qedf_ioreq_event {
#define FC_GOOD 0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
struct qedf_ioreq {
@@ -144,7 +140,7 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct completion cleanup_done;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -192,6 +188,15 @@ struct qedf_ioreq {
unsigned int alloc;
};
+struct qedf_cmd_priv {
+ struct qedf_ioreq *io_req;
+};
+
+static inline struct qedf_cmd_priv *qedf_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
extern struct workqueue_struct *qedf_io_wq;
struct qedf_rport {
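
qedf_cmd_priv replaces the old habit of casting through sc_cmd->SCp.ptr with a typed per-command area; this presumes the host template elsewhere in the patch sets .cmd_size = sizeof(struct qedf_cmd_priv). Roughly how the two ends of an I/O use it:

    /* queuecommand path: stash the driver request with the command */
    qedf_priv(sc_cmd)->io_req = io_req;

    /* completion path: typed fetch instead of a cast of SCp.ptr */
    struct qedf_ioreq *io_req = qedf_priv(sc_cmd)->io_req;
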
@@ -336,6 +341,7 @@ struct qedf_ctx {
unsigned int curr_conn_id;
struct workqueue_struct *ll2_recv_wq;
struct workqueue_struct *link_update_wq;
+ struct devlink *devlink;
struct delayed_work link_update;
struct delayed_work link_recovery;
struct completion flogi_compl;
@@ -355,6 +361,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
+#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
@@ -387,7 +394,10 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
+ struct delayed_work board_disable_work;
struct delayed_work grcdump_work;
+ struct delayed_work stag_work;
u32 slow_sge_ios;
u32 fast_sge_ios;
@@ -403,6 +413,7 @@ struct qedf_ctx {
u32 flogi_cnt;
u32 flogi_failed;
+ u32 flogi_pending;
/* Used for fc statistics */
struct mutex stats_mutex;
@@ -468,7 +479,7 @@ extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
-#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
+#define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
@@ -495,12 +506,12 @@ extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
u8 cmd_type);
-extern struct device_attribute *qedf_host_attrs[];
+extern const struct attribute_group *qedf_host_groups[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
@@ -537,9 +548,17 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
+extern void qedf_board_disable_work(struct work_struct *work);
+extern void qedf_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
+#define QL45xxx 0x165C
+#define QL41xxx 0x8080
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
struct fip_vlan {
struct ethhdr eth;
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index d995f72a6759..8d8c760eee43 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -24,9 +24,8 @@ static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
return lport_priv(base_lport);
}
-static ssize_t
-qedf_fcoe_mac_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fcoe_mac_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
u32 port_id;
@@ -42,9 +41,8 @@ qedf_fcoe_mac_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
-static ssize_t
-qedf_fka_period_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fka_period_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
struct qedf_ctx *qedf = lport_priv(lport);
@@ -59,15 +57,24 @@ qedf_fka_period_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
}
-static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
-static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL);
+static DEVICE_ATTR_RO(fcoe_mac);
+static DEVICE_ATTR_RO(fka_period);
-struct device_attribute *qedf_host_attrs[] = {
- &dev_attr_fcoe_mac,
- &dev_attr_fka_period,
+static struct attribute *qedf_host_attrs[] = {
+ &dev_attr_fcoe_mac.attr,
+ &dev_attr_fka_period.attr,
NULL,
};
+static const struct attribute_group qedf_host_attr_group = {
+ .attrs = qedf_host_attrs
+};
+
+const struct attribute_group *qedf_host_groups[] = {
+ &qedf_host_attr_group,
+ NULL
+};
+
extern const struct qed_fcoe_ops *qed_ops;
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
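
The rename from qedf_fcoe_mac_show() to fcoe_mac_show() is forced by DEVICE_ATTR_RO(), which wires up a show routine named <attr>_show with read-only 0444 permissions. The pattern, with an illustrative body:

    static ssize_t fcoe_mac_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            return scnprintf(buf, PAGE_SIZE, "example\n");  /* illustrative only */
    }

    static DEVICE_ATTR_RO(fcoe_mac);        /* binds fcoe_mac_show, mode 0444 */
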
@@ -124,7 +131,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
struct qedf_ctx *qedf = NULL;
long reading;
int ret = 0;
- char msg[40];
if (off != 0)
return ret;
@@ -141,7 +147,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
return ret;
}
- memset(msg, 0, sizeof(msg));
switch (reading) {
case 0:
memset(qedf->grcdump, 0, qedf->grcdump_size);
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index e0387e495261..0d2aed82882a 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -106,11 +106,10 @@ ret:
int
qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
{
- *buf = vmalloc(len);
+ *buf = vzalloc(len);
if (!(*buf))
return -ENOMEM;
- memset(*buf, 0, len);
return 0;
}
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 2386bfb73c46..f4d81127239e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <scsi/scsi_transport.h>
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index b88bed9bb133..a3ed681c8ce3 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -14,10 +14,9 @@
static struct dentry *qedf_dbg_root;
-/**
+/*
* qedf_dbg_host_init - setup the debugfs file for the pf
- * @pf: the pf that is starting up
- **/
+ */
void
qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
const struct qedf_debugfs_ops *dops,
@@ -42,10 +41,9 @@ qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
}
}
-/**
+/*
* qedf_dbg_host_exit - clear out the pf's debugfs entries
- * @pf: the pf that is stopping
- **/
+ */
void
qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg)
{
@@ -56,9 +54,9 @@ qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg)
qedf_dbg->bdf_dentry = NULL;
}
-/**
+/*
* qedf_dbg_init - start up debugfs for the driver
- **/
+ */
void
qedf_dbg_init(char *drv_name)
{
@@ -68,9 +66,9 @@ qedf_dbg_init(char *drv_name)
qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
}
-/**
+/*
* qedf_dbg_exit - clean out the driver's debugfs entries
- **/
+ */
void
qedf_dbg_exit(void)
{
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 87e169dcebdb..1ff5bc314fc0 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -16,7 +16,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
@@ -124,7 +124,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(els_req, task, sqe);
- /* Put timer on original I/O request */
+ /* Put timer on els request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
@@ -143,10 +143,33 @@ void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req)
{
struct fcoe_cqe_midpath_info *mp_info;
+ struct qedf_rport *fcport;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
" cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
+ if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+ || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
+ || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "ELS completion xid=0x%x after flush event=0x%x",
+ els_req->xid, els_req->event);
+ return;
+ }
+
+ fcport = els_req->fcport;
+
+ /* When flush is active,
+ * let the cmds be completed from the cleanup context
+ */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+ test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Dropping ELS completion xid=0x%x as fcport is flushing",
+ els_req->xid);
+ return;
+ }
+
clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
/* Kill the ELS timer */
@@ -185,10 +208,6 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
goto out_free;
}
- if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
- rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
- cancel_delayed_work_sync(&orig_io_req->timeout_work);
-
refcount = kref_read(&orig_io_req->refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
" orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
@@ -388,14 +407,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
- if (rdata) {
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (rdata)
fc_rport_login(rdata);
- fcport->rdata = rdata;
- } else {
- mutex_unlock(&lport->disc.disc_mutex);
- fcport->rdata = NULL;
- }
+ fcport->rdata = rdata;
}
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
@@ -887,6 +902,11 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if (!rjt) {
+ QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
+ goto out_free_frame;
+ }
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Received LS_RJT for REC: er_reason=0x%x, "
"er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index bb82f0875eca..ad6a56ce72a8 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -20,7 +20,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
unsigned long flags = 0;
- int rc = -1;
+ int rc;
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb) {
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e749a2dcaad7..4750ec5789a8 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -22,12 +22,6 @@ static void qedf_cmd_timeout(struct work_struct *work)
container_of(work, struct qedf_ioreq, timeout_work.work);
struct qedf_ctx *qedf;
struct qedf_rport *fcport;
- u8 op = 0;
-
- if (io_req == NULL) {
- QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
- return;
- }
fcport = io_req->fcport;
if (io_req->fcport == NULL) {
@@ -86,14 +80,13 @@ static void qedf_cmd_timeout(struct work_struct *work)
*/
QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
io_req->xid);
+ qedf_initiate_cleanup(io_req, true);
io_req->event = QEDF_IOREQ_EV_ELS_TMO;
/* Call callback function to complete command */
if (io_req->cb_func && io_req->cb_arg) {
- op = io_req->cb_arg->op;
io_req->cb_func(io_req->cb_arg);
io_req->cb_arg = NULL;
}
- qedf_initiate_cleanup(io_req, true);
kref_put(&io_req->refcount, qedf_release_cmd);
break;
case QEDF_SEQ_CLEANUP:
@@ -487,7 +480,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
int sg_count = 0;
int bd_count = 0;
u32 sg_len;
- u64 addr, end_addr;
+ u64 addr;
int i = 0;
sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
@@ -502,10 +495,9 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
scsi_for_each_sg(sc, sg, sg_count, i) {
sg_len = (u32)sg_dma_len(sg);
addr = (u64)sg_dma_address(sg);
- end_addr = (u64)(addr + sg_len);
/*
- * Intermediate s/g element so check if start and end address
+ * Intermediate s/g element so check if start address
* is page aligned. Only required for writes and only if the
* number of scatter/gather elements is 8 or more.
*/
@@ -592,7 +584,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -610,7 +602,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -682,7 +674,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -700,7 +692,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -812,7 +804,6 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
struct qedf_io_log *io_log;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
unsigned long flags;
- uint8_t op;
spin_lock_irqsave(&qedf->io_trace_lock, flags);
@@ -821,7 +812,7 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
io_log->task_id = io_req->xid;
io_log->port_id = fcport->rdata->ids.port_id;
io_log->lun = sc_cmd->device->lun;
- io_log->op = op = sc_cmd->cmnd[0];
+ io_log->op = sc_cmd->cmnd[0];
io_log->lba[0] = sc_cmd->cmnd[2];
io_log->lba[1] = sc_cmd->cmnd[3];
io_log->lba[2] = sc_cmd->cmnd[4];
@@ -858,26 +849,23 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct e4_fcoe_task_context *task_ctx;
+ struct fcoe_task_context *task_ctx;
u16 xid;
- enum fcoe_task_type req_type = 0;
struct fcoe_wqe *sqe;
u16 sqe_idx;
/* Initialize rest of io_req fileds */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
+ qedf_priv(sc_cmd)->io_req = io_req;
io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
/* Record which cpu this request is associated with */
io_req->cpu = smp_processor_id();
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- req_type = FCOE_TASK_TYPE_READ_INITIATOR;
io_req->io_req_flags = QEDF_READ;
qedf->input_requests++;
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
- req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
io_req->io_req_flags = QEDF_WRITE;
qedf->output_requests++;
} else {
@@ -905,7 +893,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- /* Record LUN number for later use if we neeed them */
+ /* Record LUN number for later use if we need them */
io_req->lun = (int)sc_cmd->device->lun;
/* Obtain free SQE */
@@ -958,7 +946,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"Number of SG elements %d exceeds what hardware limitation of %d.\n",
num_sgs, QEDF_MAX_BDS_PER_CMD);
sc_cmd->result = DID_ERROR;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -968,7 +956,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"Returning DNC as unloading or stop io, flags 0x%lx.\n",
qedf->flags);
sc_cmd->result = DID_NO_CONNECT << 16;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -977,7 +965,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
sc_cmd);
sc_cmd->result = DID_NO_CONNECT << 16;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -987,7 +975,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
"fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
rval, rport->port_id);
sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
+ scsi_done(sc_cmd);
return 0;
}
@@ -1021,14 +1009,18 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
atomic_inc(&fcport->ios_to_queue);
if (fcport->retry_delay_timestamp) {
+ /* Take fcport->rport_lock for resetting the delay_timestamp */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (time_after(jiffies, fcport->retry_delay_timestamp)) {
fcport->retry_delay_timestamp = 0;
} else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
/* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY;
atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd;
}
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
@@ -1072,8 +1064,7 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
+ io_req->cdb_status = fcp_rsp->scsi_status_code;
if (rsp_flags &
FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
@@ -1126,14 +1117,14 @@ static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
- u16 xid;
- struct e4_fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
struct fcoe_cqe_rsp_info *fcp_rsp;
struct qedf_rport *fcport;
int refcount;
u16 scope, qualifier = 0;
u8 fw_residual_flag = 0;
+ unsigned long flags = 0;
+ u16 chk_scope = 0;
if (!io_req)
return;
@@ -1149,8 +1140,6 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- xid = io_req->xid;
- task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
sc_cmd = io_req->sc_cmd;
fcp_rsp = &cqe->cqe_info.rsp_info;
@@ -1159,9 +1148,9 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- if (!sc_cmd->SCp.ptr) {
- QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
- "another context.\n");
+ if (!qedf_priv(sc_cmd)->io_req) {
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "io_req is NULL, returned in another context.\n");
return;
}
@@ -1171,13 +1160,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- if (!sc_cmd->request) {
- QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
- "sc_cmd=%p.\n", sc_cmd);
- return;
- }
-
- if (!sc_cmd->request->q) {
+ if (!scsi_cmd_to_rq(sc_cmd)->q) {
QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
"is not valid, sc_cmd=%p.\n", sc_cmd);
return;
@@ -1267,16 +1250,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
/* Lower 14 bits */
qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
- if (qedf_retry_delay &&
- scope > 0 && qualifier > 0 &&
- qualifier <= 0x3FEF) {
- /* Check we don't go over the max */
- if (qualifier > QEDF_RETRY_DELAY_MAX)
- qualifier =
- QEDF_RETRY_DELAY_MAX;
- fcport->retry_delay_timestamp =
- jiffies + (qualifier * HZ / 10);
- }
+ if (qedf_retry_delay)
+ chk_scope = 1;
/* Record stats */
if (io_req->cdb_status ==
SAM_STAT_TASK_SET_FULL)
@@ -1287,6 +1262,36 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
+
+ if (chk_scope == 1) {
+ if ((scope == 1 || scope == 2) &&
+ (qualifier > 0 && qualifier <= 0x3FEF)) {
+ /* Check we don't go over the max */
+ if (qualifier > QEDF_RETRY_DELAY_MAX) {
+ qualifier = QEDF_RETRY_DELAY_MAX;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "qualifier = %d\n",
+ (fcp_rsp->retry_delay_timer &
+ 0x3FFF));
+ }
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Scope = %d and qualifier = %d",
+ scope, qualifier);
+ /* Take fcport->rport_lock to
+ * update the retry_delay_timestamp
+ */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ fcport->retry_delay_timestamp =
+ jiffies + (qualifier * HZ / 10);
+ spin_unlock_irqrestore(&fcport->rport_lock,
+ flags);
+
+ } else {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
+ scope, qualifier);
+ }
+ }
break;
default:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
@@ -1305,8 +1310,8 @@ out:
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ qedf_priv(sc_cmd)->io_req = NULL;
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
}
@@ -1314,7 +1319,6 @@ out:
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
int result)
{
- u16 xid;
struct scsi_cmnd *sc_cmd;
int refcount;
@@ -1336,7 +1340,6 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
*/
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
- xid = io_req->xid;
sc_cmd = io_req->sc_cmd;
if (!sc_cmd) {
@@ -1349,9 +1352,9 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
goto bad_scsi_ptr;
}
- if (!sc_cmd->SCp.ptr) {
- QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
- "another context.\n");
+ if (!qedf_priv(sc_cmd)->io_req) {
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "io_req is NULL, returned in another context.\n");
return;
}
@@ -1381,13 +1384,6 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
goto bad_scsi_ptr;
}
- if (!sc_cmd->scsi_done) {
- QEDF_ERR(&qedf->dbg_ctx,
- "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
- sc_cmd);
- goto bad_scsi_ptr;
- }
-
qedf_unmap_sg_list(qedf, io_req);
sc_cmd->result = result << 16;
@@ -1411,8 +1407,8 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
io_req->sc_cmd = NULL;
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
+ qedf_priv(sc_cmd)->io_req = NULL;
+ scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
return;
@@ -1504,9 +1500,19 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
{
int rval;
+ if (io_req == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
+ return;
+ }
+
+ if (io_req->fcport == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
+ return;
+ }
+
if (!cqe) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
- "cqe is NULL for io_req %p\n", io_req);
+ "cqe is NULL for io_req %p\n", io_req);
return;
}
@@ -1522,6 +1528,16 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
le32_to_cpu(cqe->cqe_info.err_info.rx_id));
+ /* When flush is active, let the cmds be flushed out from the cleanup context */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
+ (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
+ io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Dropping EQE for xid=0x%x as fcport is flushing",
+ io_req->xid);
+ return;
+ }
+
if (qedf->stop_io_on_error) {
qedf_stop_all_io(qedf);
return;
@@ -1546,6 +1562,8 @@ static void qedf_flush_els_req(struct qedf_ctx *qedf,
*/
els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
+ clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
/* Cancel the timer */
cancel_delayed_work_sync(&els_req->timeout_work);
@@ -1688,8 +1706,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
io_req, io_req->xid);
continue;
}
+ qedf_initiate_cleanup(io_req, false);
flush_cnt++;
qedf_flush_els_req(qedf, io_req);
+
/*
* Release the kref and go back to the top of the
* loop.
@@ -1835,7 +1855,6 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
struct fc_rport_priv *rdata;
struct qedf_ctx *qedf;
u16 xid;
- u32 r_a_tov = 0;
int rc = 0;
unsigned long flags;
struct fcoe_wqe *sqe;
@@ -1858,7 +1877,6 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
goto out;
}
- r_a_tov = rdata->r_a_tov;
lport = qedf->lport;
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
@@ -1936,14 +1954,12 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
uint32_t r_ctl;
- uint16_t xid;
int rc;
struct qedf_rport *fcport = io_req->fcport;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
"0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
- xid = io_req->xid;
r_ctl = cqe->cqe_info.abts_info.r_ctl;
/* This was added at a point when we were scheduling abts_compl &
@@ -2131,8 +2147,6 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
{
struct qedf_rport *fcport;
struct qedf_ctx *qedf;
- uint16_t xid;
- struct e4_fcoe_task_context *task;
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
@@ -2149,7 +2163,6 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
/* Sanity check qedf_rport before dereferencing any pointers */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "tgt not offloaded\n");
- rc = 1;
return SUCCESS;
}
@@ -2159,6 +2172,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
return SUCCESS;
}
+ if (io_req->cmd_type == QEDF_ELS) {
+ goto process_els;
+ }
+
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
@@ -2168,6 +2185,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
}
set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+process_els:
/* Ensure room on SQ */
if (!atomic_read(&fcport->free_sqes)) {
QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
@@ -2192,12 +2210,9 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
refcount, fcport, fcport->rdata->ids.port_id);
/* Cleanup cmds re-use the same TID as the original I/O */
- xid = io_req->xid;
io_req->cmd_type = QEDF_CLEANUP;
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
- task = qedf_get_task_mem(&qedf->tasks, xid);
-
init_completion(&io_req->cleanup_done);
spin_lock_irqsave(&fcport->rport_lock, flags);
@@ -2233,6 +2248,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
io_req->tm_flags == FCP_TMF_TGT_RESET) {
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
+ kref_put(&io_req->refcount, qedf_release_cmd);
complete(&io_req->tm_done);
}
@@ -2269,7 +2285,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
@@ -2415,8 +2431,8 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
(tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
"LUN RESET");
- if (sc_cmd->SCp.ptr) {
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ if (qedf_priv(sc_cmd)->io_req) {
+ io_req = qedf_priv(sc_cmd)->io_req;
ref_cnt = kref_read(&io_req->refcount);
QEDF_ERR(NULL,
"orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
@@ -2503,7 +2519,6 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct fcoe_cqe *cqe)
{
unsigned long flags;
- uint16_t tmp;
uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
u32 payload_len, crc;
struct fc_frame_header *fh;
@@ -2601,9 +2616,9 @@ increment_prod:
qedf->bdq_prod_idx = 0;
writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
- tmp = readw(qedf->bdq_primary_prod);
+ readw(qedf->bdq_primary_prod);
writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
- tmp = readw(qedf->bdq_secondary_prod);
+ readw(qedf->bdq_secondary_prod);
spin_unlock_irqrestore(&qedf->hba_lock, flags);
}
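
The SCp.ptr and scsi_done conversions above follow the midlayer's per-command private data pattern: setting .cmd_size in the host template makes SCSI ML allocate driver storage behind every scsi_cmnd, scsi_cmd_priv() returns that storage, and the scsi_done() helper replaces the removed scsi_done function pointer. A minimal sketch of the pattern, assuming hypothetical foo_* names in place of the driver's own:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct foo_cmd_priv {
	struct foo_ioreq *io_req;	/* driver state tied to this command */
};

static inline struct foo_cmd_priv *foo_priv(struct scsi_cmnd *cmd)
{
	/* storage the midlayer allocated directly after struct scsi_cmnd */
	return scsi_cmd_priv(cmd);
}

static void foo_complete(struct scsi_cmnd *cmd, int host_byte)
{
	cmd->result = host_byte << 16;
	foo_priv(cmd)->io_req = NULL;	/* clear the back-pointer first */
	scsi_done(cmd);			/* replaces cmd->scsi_done(cmd) */
}

/* in the host template: .cmd_size = sizeof(struct foo_cmd_priv), */
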
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 604856e72cfb..e045c6e25090 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
+#include <linux/phylink.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
@@ -28,6 +29,8 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
/*
* Driver module parameters.
@@ -38,7 +41,7 @@ MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
"remote ports (default 60)");
uint qedf_debug = QEDF_LOG_INFO;
-module_param_named(debug, qedf_debug, uint, S_IRUGO);
+module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
" mask");
@@ -102,6 +105,12 @@ module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
"during probe (0-3: 0 more verbose).");
+static bool qedf_enable_recovery = true;
+module_param_named(enable_recovery, qedf_enable_recovery,
+ bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
+ "interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
+
struct workqueue_struct *qedf_io_wq;
static struct fcoe_percpu_s qedf_global;
@@ -282,6 +291,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
/* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp);
+ qedf->flogi_pending = 0;
}
/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +317,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
+ if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ schedule_delayed_work(&qedf->stag_work, 2);
+ return NULL;
+ }
+ qedf->flogi_pending++;
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
@@ -432,6 +447,7 @@ static void qedf_link_recovery(struct work_struct *work)
static void qedf_update_link_speed(struct qedf_ctx *qedf,
struct qed_link_output *link)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
struct fc_lport *lport = qedf->lport;
lport->link_speed = FC_PORTSPEED_UNKNOWN;
@@ -466,41 +482,89 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
* Set supported link speed by querying the supported
* capabilities of the link.
*/
- if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) ||
- (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) ||
- (link->supported_caps & QED_LM_10000baseR_FEC_BIT) ||
- (link->supported_caps & QED_LM_10000baseCR_Full_BIT) ||
- (link->supported_caps & QED_LM_10000baseSR_Full_BIT) ||
- (link->supported_caps & QED_LM_10000baseLR_Full_BIT) ||
- (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) ||
- (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) {
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 10000baseT_Full);
+ phylink_set(sup_caps, 10000baseKX4_Full);
+ phylink_set(sup_caps, 10000baseR_FEC);
+ phylink_set(sup_caps, 10000baseCR_Full);
+ phylink_set(sup_caps, 10000baseSR_Full);
+ phylink_set(sup_caps, 10000baseLR_Full);
+ phylink_set(sup_caps, 10000baseLRM_Full);
+ phylink_set(sup_caps, 10000baseKR_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
- }
- if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) ||
- (link->supported_caps & QED_LM_25000baseCR_Full_BIT) ||
- (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) {
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 25000baseKR_Full);
+ phylink_set(sup_caps, 25000baseCR_Full);
+ phylink_set(sup_caps, 25000baseSR_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
- }
- if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) ||
- (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) ||
- (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) ||
- (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) {
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 40000baseLR4_Full);
+ phylink_set(sup_caps, 40000baseKR4_Full);
+ phylink_set(sup_caps, 40000baseCR4_Full);
+ phylink_set(sup_caps, 40000baseSR4_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
- }
- if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) ||
- (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) ||
- (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) {
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 50000baseKR2_Full);
+ phylink_set(sup_caps, 50000baseCR2_Full);
+ phylink_set(sup_caps, 50000baseSR2_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
- }
- if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) ||
- (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) ||
- (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) ||
- (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) {
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 100000baseKR4_Full);
+ phylink_set(sup_caps, 100000baseSR4_Full);
+ phylink_set(sup_caps, 100000baseCR4_Full);
+ phylink_set(sup_caps, 100000baseLR4_ER4_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
- }
- if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT)
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 20000baseKR2_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
- fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+
+ if (lport->host && lport->host->shost_data)
+ fc_host_supported_speeds(lport->host) =
+ lport->link_supported_speeds;
+}
+
+static void qedf_bw_update(void *dev)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ struct qed_link_output link;
+
+ /* Get the latest status of the link */
+ qed_ops->common->get_link(qedf->cdev, &link);
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore link update, driver getting unload.\n");
+ return;
+ }
+
+ if (link.link_up) {
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ qedf_update_link_speed(qedf, &link);
+ else
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore bw update, link is down.\n");
+
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+ }
}
static void qedf_link_update(void *dev, struct qed_link_output *link)
@@ -629,9 +693,12 @@ static u32 qedf_get_login_failures(void *cookie)
static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
+ .bw_update = qedf_bw_update,
+ .schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
+ .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
}
};
@@ -668,12 +735,12 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
rdata = fcport->rdata;
if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
- rc = 1;
+ rc = SUCCESS;
goto out;
}
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ io_req = qedf_priv(sc_cmd)->io_req;
if (!io_req) {
QEDF_ERR(&qedf->dbg_ctx,
"sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
@@ -806,7 +873,7 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
- struct qedf_rport *fcport = NULL;
+ struct qedf_rport *fcport;
int wait_cnt = 120;
while (wait_cnt--) {
@@ -821,7 +888,7 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
rcu_read_lock();
list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
- if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+ if (test_bit(QEDF_RPORT_SESSION_READY,
&fcport->flags)) {
if (fcport->rdata)
QEDF_ERR(&qedf->dbg_ctx,
@@ -832,9 +899,9 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
"Waiting for fcport %p.\n", fcport);
}
}
+
rcu_read_unlock();
return false;
-
}
/* Performs soft reset of qedf_ctx by simulating a link down/up */
@@ -844,12 +911,13 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qed_link_output if_link;
if (lport->vport) {
- QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
qedf = lport_priv(lport);
+ qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -918,7 +986,7 @@ static struct scsi_host_template qedf_host_template = {
.cmd_per_lun = 32,
.max_sectors = 0xffff,
.queuecommand = qedf_queuecommand,
- .shost_attrs = qedf_host_attrs,
+ .shost_groups = qedf_host_groups,
.eh_abort_handler = qedf_eh_abort,
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
@@ -928,6 +996,7 @@ static struct scsi_host_template qedf_host_template = {
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
.change_queue_depth = scsi_change_queue_depth,
+ .cmd_size = sizeof(struct qedf_cmd_priv),
};
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
@@ -982,9 +1051,8 @@ static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
return rc;
}
-/**
+/*
* qedf_xmit - qedf FCoE frame transmit function
- *
*/
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -999,7 +1067,6 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
u32 crc;
unsigned int hlen, tlen, elen;
int wlen;
- struct fc_stats *stats;
struct fc_lport *tmp_lport;
struct fc_lport *vn_port = NULL;
struct qedf_rport *fcport;
@@ -1147,10 +1214,8 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
hp->fcoe_sof = sof;
/*update tx stats */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* Get VLAN ID from skb for printing purposes */
__vlan_hwaccel_get_tag(skb, &vlan_tci);
@@ -1275,7 +1340,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
- conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+ conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
@@ -1348,6 +1413,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
*/
term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
&term_params_dma, GFP_KERNEL);
+ if (!term_params)
+ return;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
"port_id=%06x.\n", fcport->rdata->ids.port_id);
@@ -1378,7 +1445,7 @@ static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
kref_put(&rdata->kref, fc_rport_destroy);
}
-/**
+/*
* This event_callback is called after successful completion of libfc
* initiated target login. qedf can proceed with initiating the session
* establishment.
@@ -1500,6 +1567,17 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
if (port_id == FC_FID_DIR_SERV)
break;
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "No action since spp type isn't FCP\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Not FCP target so no action\n");
+ break;
+ }
+
if (!rport) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"port_id=%x - rport notcreated Yet!!\n", port_id);
@@ -1576,12 +1654,13 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
- struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
u8 buf[8];
- int i, pos;
+ int pos;
+ uint32_t i;
/*
- * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+ * fdmi_enabled needs to be set for libfc
+ * to execute FDMI registration.
*/
lport->fdmi_enabled = 1;
@@ -1597,33 +1676,50 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
for (i = 0; i < 8; i++)
pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number),
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X",
buf[7], buf[6], buf[5], buf[4],
buf[3], buf[2], buf[1], buf[0]);
} else
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number), "Unknown");
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "Unknown");
+
+ snprintf(fc_host_manufacturer(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
- snprintf(fc_host->manufacturer,
- sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+ if (qedf->pdev->device == QL45xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
+
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL45xxx FCoE Adapter");
+ }
+
+ if (qedf->pdev->device == QL41xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
+
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL41xxx FCoE Adapter");
+ }
- snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+ snprintf(fc_host_hardware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
- snprintf(fc_host->model_description, sizeof(fc_host->model_description),
- "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
- "(FCoE)");
+ snprintf(fc_host_driver_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
- snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
- "Rev %d", qedf->pdev->revision);
+ snprintf(fc_host_firmware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
+ FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+ FW_ENGINEERING_VERSION);
- snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
- "%s", QEDF_VERSION);
+ snprintf(fc_host_vendor_identifier(lport->host),
+ FC_VENDOR_IDENTIFIER, "%s", "Marvell");
- snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
- "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
}
static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1670,8 +1766,13 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
/* Set symbolic node name */
- snprintf(fc_host_symbolic_name(lport->host), 256,
- "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+ if (qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
qedf_setup_fdmi(qedf);
@@ -1729,22 +1830,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
"WWPN (0x%s) already exists.\n", buf);
- goto err1;
+ return rc;
}
if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
"because link is not up.\n");
- rc = -EIO;
- goto err1;
+ return -EIO;
}
vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
if (!vn_port) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
"for vport.\n");
- rc = -ENOMEM;
- goto err1;
+ return -ENOMEM;
}
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1763,12 +1862,13 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
+ INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port);
if (rc) {
QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
"for lport stats.\n");
- goto err2;
+ goto err;
}
fc_set_wwnn(vn_port, vport->node_name);
@@ -1781,12 +1881,13 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vn_port->host->max_lun = qedf_max_lun;
vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ vn_port->host->max_id = QEDF_MAX_SESSIONS;
rc = scsi_add_host(vn_port->host, &vport->dev);
if (rc) {
QEDF_WARN(&base_qedf->dbg_ctx,
"Error adding Scsi_Host rc=0x%x.\n", rc);
- goto err2;
+ goto err;
}
/* Set default dev_loss_tmo based on module parameter */
@@ -1820,6 +1921,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fc_vport_setlink(vn_port);
}
+ /* Set symbolic node name */
+ if (base_qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (base_qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+ /* Set supported speed */
+ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+ /* Set speed */
+ vn_port->link_speed = n_port->link_speed;
+
+ /* Set port type */
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+ /* Set maxframe size */
+ fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
vn_port);
@@ -1827,9 +1949,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
-err2:
+ return 0;
+
+err:
scsi_host_put(vn_port->host);
-err1:
return rc;
}
@@ -1870,8 +1993,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
fc_lport_free_stats(vn_port);
/* Release Scsi_Host */
- if (vn_port->host)
- scsi_host_put(vn_port->host);
+ scsi_host_put(vn_port->host);
out:
return 0;
@@ -2070,7 +2192,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -2097,7 +2219,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2588,12 +2710,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&qedf->dbg_ctx,
@@ -2904,7 +3026,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
{
u32 *list;
int i;
- int status = 0, rc;
+ int status;
u32 *pbl;
dma_addr_t page;
int num_pages;
@@ -2916,7 +3038,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
*/
if (!qedf->num_queues) {
QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
- return 1;
+ return -ENOMEM;
}
/*
@@ -2924,7 +3046,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = 1;
+ status = -EINVAL;
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
goto mem_alloc_failure;
}
@@ -2940,8 +3062,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
"qedf->global_queues=%p.\n", qedf->global_queues);
/* Allocate DMA coherent buffers for BDQ */
- rc = qedf_alloc_bdq(qedf);
- if (rc) {
+ status = qedf_alloc_bdq(qedf);
+ if (status) {
QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
goto mem_alloc_failure;
}
@@ -3161,7 +3283,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -3171,12 +3293,16 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
void *task_start, *task_end;
struct qed_slowpath_params slowpath_params;
struct qed_probe_params qed_params;
- u16 tmp;
+ u16 retry_cnt = 10;
/*
* When doing error recovery we didn't reap the lport so don't try
* to reallocate it.
*/
+retry_probe:
+ if (mode == QEDF_MODE_RECOVERY)
+ msleep(2000);
+
if (mode != QEDF_MODE_RECOVERY) {
lport = libfc_host_alloc(&qedf_host_template,
sizeof(struct qedf_ctx));
@@ -3191,6 +3317,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3205,6 +3332,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
mutex_init(&qedf->flush_mutex);
+ qedf->flogi_pending = 0;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3214,9 +3342,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3235,6 +3366,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+ INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
qedf->fipvlan_retries = qedf_fipvlan_retries;
/* Set a default prio in case DCBX doesn't converge */
if (qedf_default_prio > -1) {
@@ -3257,6 +3389,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
if (!qedf->cdev) {
+ if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Retry %d initialize hardware\n", retry_cnt);
+ retry_cnt--;
+ goto retry_probe;
+ }
QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
rc = -ENODEV;
goto err1;
@@ -3289,6 +3427,23 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+ goto err2;
+ }
+
+ if (mode != QEDF_MODE_RECOVERY) {
+ qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
+ if (IS_ERR(qedf->devlink)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
+ rc = PTR_ERR(qedf->devlink);
+ qedf->devlink = NULL;
+ goto err2;
+ }
+ }
+
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3352,9 +3507,9 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
"Writing %d to primary and secondary BDQ doorbell registers.\n",
qedf->bdq_prod_idx);
writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
- tmp = readw(qedf->bdq_primary_prod);
+ readw(qedf->bdq_primary_prod);
writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
- tmp = readw(qedf->bdq_secondary_prod);
+ readw(qedf->bdq_secondary_prod);
qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
@@ -3401,6 +3556,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
host->transportt = qedf_fc_transport_template;
host->max_lun = qedf_max_lun;
host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ host->max_id = QEDF_MAX_SESSIONS;
host->can_queue = FCOE_PARAMS_NUM_TASKS;
rc = scsi_add_host(host, &pdev->dev);
if (rc) {
@@ -3474,6 +3630,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf);
}
+ INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3521,6 +3678,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3584,7 +3745,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
else
fc_fabric_logoff(qedf->lport);
- if (qedf_wait_for_upload(qedf) == false)
+ if (!qedf_wait_for_upload(qedf))
QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
#ifdef CONFIG_DEBUG_FS
@@ -3660,6 +3821,11 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to send drv state to MFW.\n");
+ if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
+ qed_ops->common->devlink_unregister(qedf->devlink);
+ qedf->devlink = NULL;
+ }
+
qed_ops->common->slowpath_stop(qedf->cdev);
qed_ops->common->remove(qedf->cdev);
@@ -3688,6 +3854,45 @@ void qedf_wq_grcdump(struct work_struct *work)
qedf_capture_grc_dump(qedf);
}
+void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Hardware error handler scheduled, event=%d.\n",
+ err_type);
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Already in recovery, not scheduling board disable work.\n");
+ return;
+ }
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedf->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+ break;
+ case QED_HW_ERR_RAMROD_FAIL:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+
+ if (qedf_enable_recovery && qedf->devlink)
+ qed_ops->common->report_fatal_error(qedf->devlink,
+ err_type);
+
+ break;
+ default:
+ break;
+ }
+}
+
/*
* Protocol TLV handler
*/
@@ -3695,11 +3900,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
@@ -3770,11 +3989,62 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, stag_work.work);
+
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
+ qedf_ctx_soft_reset(qedf->lport);
+}
+
static void qedf_shutdown(struct pci_dev *pdev)
{
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+ schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, recovery_work.work);
+
+ if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+ return;
+
+ /*
+ * Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qed_ops->common->recovery_prolog(qedf->cdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+ __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+ /*
+ * Reset link and dcbx to down state since we will not get a link down
+ * event from the MFW but calling __qedf_remove will essentially be a
+ * link down event.
+ */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+ clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
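
The qedf_update_link_speed() rewrite above trades the open-coded QED_LM_* bit tests for ethtool link-mode bitmaps: build a scratch mask of the modes of interest with phylink_zero()/phylink_set(), then test it against the device's supported_caps bitmap with linkmode_intersects(). A minimal sketch of the idiom, assuming a hypothetical foo_any_10g() helper:

#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/phylink.h>

/* true if the device advertises any of the listed 10G link modes */
static bool foo_any_10g(const unsigned long *supported_caps)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);

	phylink_zero(sup_caps);			/* start from an empty mask */
	phylink_set(sup_caps, 10000baseT_Full);	/* ETHTOOL_LINK_MODE_*_BIT */
	phylink_set(sup_caps, 10000baseKR_Full);

	return linkmode_intersects(supported_caps, sup_caps);
}
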
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 7ab07f3b453f..2091d883a584 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -8,6 +8,6 @@ config QEDI
select QED_OOO
select QED_ISCSI
select ISCSI_BOOT_SYSFS
- ---help---
+ help
This driver supports iSCSI offload for the QLogic FastLinQ
41000 Series Converged Network Adapters.
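
The qedi.h hunk below adds new bit indices to the per-adapter flags word, the same atomic-bitops idiom the qedf recovery path above relies on so that only one recovery run owns the hardware at a time. A minimal sketch of that gating pattern, assuming hypothetical foo_* names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define FOO_IN_RECOVERY	5	/* bit index into ctx->flags */

struct foo_ctx {
	unsigned long flags;			/* one bit per adapter state */
	struct delayed_work recovery_work;
};

static void foo_recovery_handler(struct work_struct *work)
{
	struct foo_ctx *ctx =
		container_of(work, struct foo_ctx, recovery_work.work);

	/* atomic claim: a second queued run bails out immediately */
	if (test_and_set_bit(FOO_IN_RECOVERY, &ctx->flags))
		return;

	/* ... tear down and re-probe the adapter here ... */

	clear_bit(FOO_IN_RECOVERY, &ctx->flags);
}
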
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 9513fd320ffd..ce199a7a16b8 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -36,6 +36,7 @@ struct qedi_endpoint;
*/
#define QEDI_MODE_NORMAL 0
#define QEDI_MODE_RECOVERY 1
+#define QEDI_MODE_SHUTDOWN 2
#define ISCSI_WQE_SET_PTU_INVALIDATE 1
#define QEDI_MAX_ISCSI_TASK 4096
@@ -273,11 +274,17 @@ struct qedi_ctx {
spinlock_t ll2_lock; /* Light L2 lock */
spinlock_t hba_lock; /* per port lock */
struct task_struct *ll2_recv_thread;
+ unsigned long qedi_err_flags;
+#define QEDI_ERR_ATTN_CLR_EN 0
+#define QEDI_ERR_IS_RECOVERABLE 2
+#define QEDI_ERR_OVERRIDE_EN 31
unsigned long flags;
#define UIO_DEV_OPENED 1
#define QEDI_IOTHREAD_WAKE 2
#define QEDI_IN_RECOVERY 5
#define QEDI_IN_OFFLINE 6
+#define QEDI_IN_SHUTDOWN 7
+#define QEDI_BLOCK_IO 8
u8 mac[ETH_ALEN];
u32 src_ip[4];
@@ -303,6 +310,7 @@ struct qedi_ctx {
u32 max_sqes;
u8 num_queues;
u32 max_active_conns;
+ s32 msix_count;
struct iscsi_cid_queue cid_que;
struct qedi_endpoint **ep_tbl;
@@ -331,6 +339,8 @@ struct qedi_ctx {
u16 ll2_mtu;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
+ struct delayed_work board_disable_work;
spinlock_t task_idx_lock; /* To protect gbl context */
s32 last_tidx_alloc;
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 42f5afb60055..8deb2001dc2f 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -136,7 +136,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block_e4 *sb = NULL;
+ struct status_block *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_E4_PROD_INDEX_MASK);
+ STATUS_BLOCK_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 946cebc4c932..6901738324da 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,8 +14,8 @@
#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"
-static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask);
+static int send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask, struct iscsi_task *ctask);
void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
{
@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -69,9 +70,9 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
spin_unlock(&session->back_lock);
@@ -84,7 +85,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -122,6 +123,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -132,9 +134,9 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
qedi_conn->gen_pdu.resp_buf,
@@ -154,19 +156,11 @@ static void qedi_tmf_resp_work(struct work_struct *work)
struct iscsi_tm_rsp *resp_hdr_ptr;
int rval = 0;
- set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
- iscsi_block_session(session->cls_session);
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
- if (rval) {
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
- iscsi_unblock_session(session->cls_session);
+ if (rval)
goto exit_tmf_resp;
- }
-
- iscsi_unblock_session(session->cls_session);
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
spin_lock(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
@@ -174,7 +168,10 @@ static void qedi_tmf_resp_work(struct work_struct *work)
exit_tmf_resp:
kfree(resp_hdr_ptr);
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works--;
+ spin_unlock(&qedi_conn->tmf_work_lock);
}
static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
@@ -222,24 +219,33 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+ spin_lock(&qedi_conn->list_lock);
if (likely(qedi_cmd->io_cmd_in_list)) {
qedi_cmd->io_cmd_in_list = false;
list_del_init(&qedi_cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ if (qedi_conn->ep_disconnect_starting) {
+ /* Session is down so ep_disconnect will clean up */
+ spin_unlock(&qedi_conn->tmf_work_lock);
+ goto unblock_sess;
+ }
+
+ qedi_conn->fw_cleanup_works++;
+ spin_unlock(&qedi_conn->tmf_work_lock);
- if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
goto unblock_sess;
}
-
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ spin_unlock(&qedi_conn->tmf_work_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
kfree(resp_hdr_ptr);
@@ -255,7 +261,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
memset(task_ctx, '\0', sizeof(*task_ctx));
@@ -306,7 +314,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
}
static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
@@ -352,7 +359,6 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
struct iscsi_cqe_unsolicited *cqe,
int count)
{
- u16 tmp;
u16 idx = 0;
struct scsi_bd *pbl;
@@ -381,10 +387,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
qedi->bdq_prod_idx += count;
writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
- tmp = readw(qedi->bdq_primary_prod);
+ readw(qedi->bdq_primary_prod);
writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
- tmp = readw(qedi->bdq_secondary_prod);
+ readw(qedi->bdq_secondary_prod);
}
static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
@@ -461,7 +467,6 @@ static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
}
spin_unlock(&qedi_conn->list_lock);
- qedi_clear_task_idx(qedi, cmd->task_id);
}
done:
@@ -598,20 +603,13 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
goto error;
}
- if (!sc_cmd->SCp.ptr) {
+ if (!iscsi_cmd(sc_cmd)->task) {
QEDI_WARN(&qedi->dbg_ctx,
- "SCp.ptr is NULL, returned in another context.\n");
+ "NULL task pointer, returned in another context.\n");
goto error;
}
- if (!sc_cmd->request) {
- QEDI_WARN(&qedi->dbg_ctx,
- "sc_cmd->request is NULL, sc_cmd=%p.\n",
- sc_cmd);
- goto error;
- }
-
- if (!sc_cmd->request->q) {
+ if (!scsi_cmd_to_rq(sc_cmd)->q) {
QEDI_WARN(&qedi->dbg_ctx,
"request->q is NULL so request is not valid, sc_cmd=%p.\n",
sc_cmd);
@@ -666,7 +664,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
if (qedi_io_tracing)
qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
error:
@@ -723,7 +720,6 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
cqe->itid, cmd->task_id);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
@@ -732,20 +728,16 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
struct iscsi_cqe_solicited *cqe,
- struct iscsi_task *task,
struct iscsi_conn *conn)
{
struct qedi_work_map *work, *work_tmp;
u32 proto_itt = cqe->itid;
- u32 ptmp_itt = 0;
- itt_t protoitt = 0;
int found = 0;
struct qedi_cmd *qedi_cmd = NULL;
- u32 rtid = 0;
u32 iscsi_cid;
struct qedi_conn *qedi_conn;
struct qedi_cmd *dbg_cmd;
- struct iscsi_task *mtask;
+ struct iscsi_task *mtask, *task;
struct iscsi_tm *tmf_hdr = NULL;
iscsi_cid = cqe->conn_id;
@@ -771,8 +763,8 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
}
found = 1;
mtask = qedi_cmd->task;
+ task = work->ctask;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- rtid = work->rtid;
list_del_init(&work->list);
kfree(work);
@@ -781,80 +773,48 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
}
spin_unlock_bh(&qedi_conn->tmf_work_lock);
- if (found) {
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
- proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+ if (!found)
+ goto check_cleanup_reqs;
- if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_ABORT_TASK) {
- spin_lock_bh(&conn->session->back_lock);
-
- protoitt = build_itt(get_itt(tmf_hdr->rtt),
- conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
-
- spin_unlock_bh(&conn->session->back_lock);
-
- if (!task) {
- QEDI_NOTICE(&qedi->dbg_ctx,
- "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
- get_itt(tmf_hdr->rtt),
- qedi_conn->iscsi_conn_id);
- return;
- }
-
- dbg_cmd = task->dd_data;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ spin_lock_bh(&conn->session->back_lock);
+ if (iscsi_task_is_completed(task)) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id);
+ goto unlock;
+ }
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
- get_itt(tmf_hdr->rtt), get_itt(task->itt),
- dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+ dbg_cmd = task->dd_data;
- if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
- qedi_cmd->state = CLEANUP_RECV;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id,
+ qedi_conn->iscsi_conn_id);
- qedi_clear_task_idx(qedi_conn->qedi, rtid);
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(dbg_cmd->io_cmd_in_list)) {
+ dbg_cmd->io_cmd_in_list = false;
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+unlock:
+ spin_unlock_bh(&conn->session->back_lock);
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ return;
- spin_lock(&qedi_conn->list_lock);
- list_del_init(&dbg_cmd->io_cmd);
- qedi_conn->active_cmd_count--;
- spin_unlock(&qedi_conn->list_lock);
- qedi_cmd->state = CLEANUP_RECV;
- wake_up_interruptible(&qedi_conn->wait_queue);
- }
- } else if (qedi_conn->cmd_cleanup_req > 0) {
- spin_lock_bh(&conn->session->back_lock);
- qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
- protoitt = build_itt(ptmp_itt, conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
+check_cleanup_reqs:
+ if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+ qedi_conn->cmd_cleanup_req) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
- cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
- qedi_conn->iscsi_conn_id);
-
- spin_unlock_bh(&conn->session->back_lock);
- if (!task) {
- QEDI_NOTICE(&qedi->dbg_ctx,
- "task is null, itid=0x%x, cid=0x%x\n",
- cqe->itid, qedi_conn->iscsi_conn_id);
- return;
- }
- qedi_conn->cmd_cleanup_cmpl++;
- wake_up(&qedi_conn->wait_queue);
-
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
cqe->itid, qedi_conn->iscsi_conn_id);
- qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
-
- } else {
- qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
- protoitt = build_itt(ptmp_itt, conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
- QEDI_ERR(&qedi->dbg_ctx,
- "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
- protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ wake_up(&qedi_conn->wait_queue);
}
}
@@ -949,8 +909,7 @@ void qedi_fp_process_cqes(struct qedi_work *work)
goto exit_fp_process;
case ISCSI_CQE_TYPE_TASK_CLEANUP:
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
- qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
- conn);
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, conn);
goto exit_fp_process;
default:
QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
@@ -963,17 +922,11 @@ exit_fp_process:
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
- struct iscsi_db_data dbell = { 0 };
+ qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
- dbell.agg_flags = 0;
-
- dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
- dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
- dbell.params |=
- DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
-
- dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
- writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+ /* wmb - Make sure fw idx is coherent */
+ wmb();
+ writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell);
/* Make sure fw write idx is coherent, and include both memory barriers
* as a failsafe as for some architectures the call is the same but on
@@ -1010,7 +963,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -1030,9 +983,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1113,7 +1066,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1131,9 +1084,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1203,7 +1156,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
}
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1236,6 +1189,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->cmd_cleanup_req++;
qedi_iscsi_cleanup_task(ctask, true);
+ cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
QEDI_WARN(&qedi->dbg_ctx,
@@ -1254,15 +1208,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->iscsi_conn_id);
rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
- ((qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl) ||
- qedi_conn->ep),
- 5 * HZ);
+ (qedi_conn->cmd_cleanup_req ==
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+ 5 * HZ);
if (rval) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
qedi_conn->cmd_cleanup_req,
- qedi_conn->cmd_cleanup_cmpl,
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl),
qedi_conn->iscsi_conn_id);
return 0;
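Both waits in this function now use the same condition shape: either the atomic completion count catches up to cmd_cleanup_req, or QEDI_IN_RECOVERY fires, so the caller is not stuck for the full timeout once firmware teardown means the cleanups can never complete. A hedged sketch of that shape, names illustrative:

/* wait_event_interruptible_timeout() returns >0 if the condition became
 * true, 0 on timeout, and -ERESTARTSYS if interrupted by a signal.
 */
long rc = wait_event_interruptible_timeout(conn->wait_queue,
                issued == atomic_read(&completed) ||
                test_bit(IN_RECOVERY, &hba->flags),
                5 * HZ);
if (rc <= 0)
        pr_warn("cleanup timed out or was interrupted\n");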
@@ -1271,7 +1225,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
qedi_conn->cmd_cleanup_req,
- qedi_conn->cmd_cleanup_cmpl,
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl),
qedi_conn->iscsi_conn_id);
iscsi_host_for_each_session(qedi->shost,
@@ -1280,9 +1234,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
/* Enable IOs for all other sessions except current.*/
if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
- (qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl),
- 5 * HZ)) {
+ (qedi_conn->cmd_cleanup_req ==
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+ 5 * HZ)) {
iscsi_host_for_each_session(qedi->shost,
qedi_mark_device_available);
return -1;
@@ -1302,7 +1257,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_ep = qedi_conn->ep;
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1354,7 +1309,7 @@ static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
return 0;
}
-static void qedi_tmf_work(struct work_struct *work)
+static void qedi_abort_work(struct work_struct *work)
{
struct qedi_cmd *qedi_cmd =
container_of(work, struct qedi_cmd, tmf_work);
@@ -1367,18 +1322,30 @@ static void qedi_tmf_work(struct work_struct *work)
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
s16 rval = 0;
- s16 tid = 0;
mtask = qedi_cmd->task;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
- ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
- if (!ctask || !ctask->sc) {
- QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
- goto abort_ret;
+ spin_lock_bh(&conn->session->back_lock);
+ ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt);
+ if (!ctask) {
+ spin_unlock_bh(&conn->session->back_lock);
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n");
+ goto clear_cleanup;
}
+ if (iscsi_task_is_completed(ctask)) {
+ spin_unlock_bh(&conn->session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Task already completed\n");
+ /*
+ * We have to still send the TMF because libiscsi needs the
+ * response to avoid a timeout.
+ */
+ goto send_tmf;
+ }
+ spin_unlock_bh(&conn->session->back_lock);
+
cmd = (struct qedi_cmd *)ctask->dd_data;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
@@ -1388,19 +1355,21 @@ static void qedi_tmf_work(struct work_struct *work)
if (qedi_do_not_recover) {
QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
qedi_do_not_recover);
- goto abort_ret;
+ goto clear_cleanup;
}
- list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ list_work = kzalloc(sizeof(*list_work), GFP_NOIO);
if (!list_work) {
QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
- goto abort_ret;
+ goto clear_cleanup;
}
qedi_cmd->type = TYPEIO;
+ qedi_cmd->state = CLEANUP_WAIT;
list_work->qedi_cmd = qedi_cmd;
list_work->rtid = cmd->task_id;
list_work->state = QEDI_WORK_SCHEDULED;
+ list_work->ctask = ctask;
qedi_cmd->list_tmf_work = list_work;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
@@ -1423,23 +1392,13 @@ static void qedi_tmf_work(struct work_struct *work)
goto ldel_exit;
}
- tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
- qedi_conn->iscsi_conn_id);
- goto ldel_exit;
- }
-
- qedi_cmd->task_id = tid;
- qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
-
-abort_ret:
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
- return;
+send_tmf:
+ send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask);
+ goto clear_cleanup;
ldel_exit:
spin_lock_bh(&qedi_conn->tmf_work_lock);
- if (!qedi_cmd->list_tmf_work) {
+ if (qedi_cmd->list_tmf_work) {
list_del_init(&list_work->list);
qedi_cmd->list_tmf_work = NULL;
kfree(list_work);
@@ -1447,22 +1406,26 @@ ldel_exit:
spin_unlock_bh(&qedi_conn->tmf_work_lock);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+clear_cleanup:
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works--;
+ spin_unlock(&qedi_conn->tmf_work_lock);
}
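The single QEDI_CONN_FW_CLEANUP bit becomes a fw_cleanup_works count: qedi_send_iscsi_tmf() (below) increments it under tmf_work_lock before queueing this work, and the clear_cleanup tail above decrements it, so the matching drain loop added to qedi_ep_disconnect() later in this series can wait for any number of in-flight aborts. An illustrative sketch of the two halves:

/* producer side, before queue_work(): */
spin_lock(&conn->work_lock);
conn->inflight_works++;
spin_unlock(&conn->work_lock);

/* teardown side: poll until every queued work has run its decrement */
spin_lock(&conn->work_lock);
while (conn->inflight_works > 0) {
        spin_unlock(&conn->work_lock);
        msleep(1000);
        spin_lock(&conn->work_lock);
}
spin_unlock(&conn->work_lock);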
-static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask)
+static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
+ struct iscsi_task *ctask)
{
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
- struct iscsi_task *ctask;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd;
struct qedi_cmd *cmd;
@@ -1470,7 +1433,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
u32 scsi_lun[2];
s16 tid = 0;
u16 sq_idx = 0;
- int rval = 0;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
@@ -1483,9 +1445,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1503,12 +1465,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_ABORT_TASK) {
- ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
- if (!ctask || !ctask->sc) {
- QEDI_ERR(&qedi->dbg_ctx,
- "Could not get reference task\n");
- return 0;
- }
cmd = (struct qedi_cmd *)ctask->dd_data;
tmf_pdu_header.rtt =
qedi_set_itt(cmd->task_id,
@@ -1534,10 +1490,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
task_params.sqe = &ep->sq[sq_idx];
memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
- rval = init_initiator_tmf_request_task(&task_params,
- &tmf_pdu_header);
- if (rval)
- return -1;
+ init_initiator_tmf_request_task(&task_params, &tmf_pdu_header);
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1549,47 +1502,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return 0;
}
-int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask)
+int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask)
{
+ struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ struct qedi_cmd *qedi_cmd = mtask->dd_data;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_tm *tmf_hdr;
- struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
- s16 tid = 0;
+ int rc = 0;
- tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- qedi_cmd->task = mtask;
+ switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works++;
+ spin_unlock(&qedi_conn->tmf_work_lock);
- /* If abort task then schedule the work and return */
- if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_ABORT_TASK) {
- qedi_cmd->state = CLEANUP_WAIT;
- INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work);
queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
-
- } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
- tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
- qedi_conn->iscsi_conn_id);
- return -1;
- }
- qedi_cmd->task_id = tid;
-
- qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
-
- } else {
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ rc = send_iscsi_tmf(qedi_conn, mtask, NULL);
+ break;
+ default:
QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
qedi_conn->iscsi_conn_id);
- return -1;
+ return -EINVAL;
}
- return 0;
+ return rc;
}
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
@@ -1599,7 +1539,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1621,9 +1561,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1700,7 +1640,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1720,9 +1660,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -2042,7 +1982,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2065,9 +2005,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
cmd->task_id = tid;
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 52772904ef5d..642556a1ce1c 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -202,7 +202,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
u32 val;
u16 index;
u8 val_byte;
@@ -224,7 +224,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -254,7 +254,7 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
u32 remaining_recv_len, u32 expected_data_transfer_len,
u8 num_sges, bool tx_dif_conn_err_en)
{
@@ -266,12 +266,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -470,7 +470,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
}
}
-static void set_local_completion_context(struct e4_iscsi_task_context *context)
+static void set_local_completion_context(struct iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -487,7 +487,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -615,7 +615,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -657,7 +657,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -703,7 +703,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -758,7 +758,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index 10f19f0af0a3..df2d471a7b51 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -10,7 +10,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8ba7c771ce4d..72942772b198 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -22,7 +22,7 @@ extern struct iscsi_transport qedi_iscsi_transport;
extern const struct qed_iscsi_ops *qedi_ops;
extern const struct qedi_debugfs_ops qedi_debugfs_ops[];
extern const struct file_operations qedi_dbg_fops[];
-extern struct device_attribute *qedi_shost_attrs[];
+extern const struct attribute_group *qedi_shost_groups[];
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
@@ -31,8 +31,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
-int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask);
+int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask);
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8829880a54c3..31ec429104e2 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -58,7 +58,8 @@ struct scsi_host_template qedi_host_template = {
.max_sectors = 0xffff,
.dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
- .shost_attrs = qedi_shost_attrs,
+ .shost_groups = qedi_shost_groups,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
@@ -330,12 +331,22 @@ free_conn:
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
{
- iscsi_block_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+ struct qedi_conn *qedi_conn = session->leadconn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
+ spin_unlock_bh(&session->frwd_lock);
}
void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
{
- iscsi_unblock_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+ struct qedi_conn *qedi_conn = session->leadconn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
+ spin_unlock_bh(&session->frwd_lock);
}
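Rather than blocking the iSCSI session, the driver now flips a QEDI_BLOCK_IO flag under the session's forward lock and leaves enforcement to the submission path; the qedi_task_xmit() hunk below fails new commands while the flag is set. A sketch of the consumer side, names illustrative:

/* queuecommand-path gate: refuse new I/O while the HBA is blocked */
if (test_bit(IN_SHUTDOWN, &hba->flags))
        return -ENODEV;         /* adapter is going away for good */
if (test_bit(BLOCK_IO, &hba->flags))
        return -EACCES;         /* temporarily refuse new I/O */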
static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
@@ -377,6 +388,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct qedi_endpoint *qedi_ep;
struct iscsi_endpoint *ep;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -384,26 +396,37 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_ep = ep->dd_data;
if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
- return -EINVAL;
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_ep = ep;
qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
qedi_conn->fw_cid = qedi_ep->fw_cid;
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
+
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
- if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
- return -EINVAL;
spin_lock_init(&qedi_conn->tmf_work_lock);
INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
init_waitqueue_head(&qedi_conn->wait_queue);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
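As of this kernel series, iscsi_lookup_endpoint() returns a referenced endpoint, so every exit from qedi_conn_bind() has to drop that reference; hence the single put_ep unwind label replacing the early returns. The general shape, sketched with an illustrative validation step:

ep = iscsi_lookup_endpoint(transport_fd);       /* takes a reference */
if (!ep)
        return -EINVAL;
if (!endpoint_usable(ep)) {                     /* illustrative check */
        rc = -EINVAL;
        goto put_ep;
}
/* ... bind work ... */
put_ep:
iscsi_put_endpoint(ep);                         /* drop it on every path */
return rc;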
static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
@@ -477,8 +500,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
{
- struct qedi_ctx *qedi = qedi_ep->qedi;
struct qed_iscsi_params_offload *conn_info;
+ struct qedi_ctx *qedi = qedi_ep->qedi;
int rval;
int i;
@@ -555,10 +578,37 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
"Default cq index [%d], mss [%d]\n",
conn_info->default_cq, conn_info->mss);
+ /* Prepare the doorbell parameters */
+ qedi_ep->db_data.agg_flags = 0;
+ qedi_ep->db_data.params = 0;
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_MAX);
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ISCSI_SQ_PROD_CMD);
+ SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1);
+
+ /* Register doorbell with doorbell recovery mechanism */
+ rval = qedi_ops->common->db_recovery_add(qedi->cdev,
+ qedi_ep->p_doorbell,
+ &qedi_ep->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rval) {
+ kfree(conn_info);
+ return rval;
+ }
+
rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
- if (rval)
+ if (rval) {
+ /* delete doorbell from doorbell recovery mechanism */
+ rval = qedi_ops->common->db_recovery_del(qedi->cdev,
+ qedi_ep->p_doorbell,
+ &qedi_ep->db_data);
+
QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
rval, qedi_ep);
+ }
kfree(conn_info);
return rval;
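The doorbell record registered with db_recovery_add() has to be unregistered on every path that retires the connection: this hunk undoes it when offload_conn() fails, and the qedi_ep_disconnect() hunk later calls db_recovery_del() before destroy_conn(). The pairing, sketched with the qed ops this patch uses:

rc = ops->common->db_recovery_add(cdev, db_addr, &ep->db_data,
                                  DB_REC_WIDTH_32B, DB_REC_KERNEL);
if (rc)
        return rc;      /* nothing registered, nothing to undo */
/* ... connection lives ... */
ops->common->db_recovery_del(cdev, db_addr, &ep->db_data);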
@@ -581,7 +631,11 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
goto start_err;
}
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works = 0;
+ qedi_conn->ep_disconnect_starting = false;
+ spin_unlock(&qedi_conn->tmf_work_lock);
+
qedi_conn->abrt_conn = 0;
rval = iscsi_conn_start(cls_conn);
@@ -741,7 +795,7 @@ static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
rc = qedi_send_iscsi_logout(qedi_conn, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- rc = qedi_iscsi_abort_work(qedi_conn, task);
+ rc = qedi_send_iscsi_tmf(qedi_conn, task);
break;
case ISCSI_OP_TEXT:
rc = qedi_send_iscsi_text(qedi_conn, task);
@@ -771,7 +825,6 @@ static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
}
cmd->conn = conn->dd_data;
- cmd->scsi_cmd = NULL;
return qedi_iscsi_send_generic_request(task);
}
@@ -782,6 +835,16 @@ static int qedi_task_xmit(struct iscsi_task *task)
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ /* Clear now so in cleanup_task we know it didn't make it */
+ cmd->scsi_cmd = NULL;
+ cmd->task_id = U16_MAX;
+
+ if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
+ return -ENODEV;
+
+ if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags))
+ return -EACCES;
+
cmd->state = 0;
cmd->task = NULL;
cmd->use_slowpath = false;
@@ -797,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
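qedi_offload_work() is only moved up the file here (the old copy is deleted below); the behavioral change is that INIT_WORK() now runs once in qedi_ep_connect() rather than in qedi_set_path(), so the flush_work() added to qedi_ep_disconnect() always operates on an initialized work item. The lifetime rule, sketched:

INIT_WORK(&ep->offload_work, offload_fn);       /* at endpoint create */
/* ... possibly later: */
queue_work(wq, &ep->offload_work);
/* ... at disconnect; a safe no-op if the work was never queued: */
flush_work(&ep->offload_work);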
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -832,6 +926,11 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ return ERR_PTR(-ENXIO);
+ }
+
ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
if (!ep) {
QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
@@ -840,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -866,12 +966,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
}
- if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
- QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
- ret = -ENXIO;
- goto ep_conn_exit;
- }
-
ret = qedi_alloc_sq(qedi, qedi_ep);
if (ret)
goto ep_conn_exit;
@@ -972,46 +1066,50 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
struct qedi_cmd *cmd, *cmd_tmp;
+ spin_lock(&qedi_conn->list_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
io_cmd) {
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
}
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct qedi_endpoint *qedi_ep;
struct qedi_conn *qedi_conn = NULL;
- struct iscsi_conn *conn = NULL;
struct qedi_ctx *qedi;
int ret = 0;
int wait_delay;
int abrt_conn = 0;
- int count = 10;
wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
- conn = qedi_conn->cls_conn->dd_data;
- iscsi_suspend_queue(conn);
abrt_conn = qedi_conn->abrt_conn;
- while (count--) {
- if (!test_bit(QEDI_CONN_FW_CLEANUP,
- &qedi_conn->flags)) {
- break;
- }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x qedi_ep=%p waiting for %d tmfs\n",
+ qedi_ep->iscsi_cid, qedi_ep,
+ qedi_conn->fw_cleanup_works);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->ep_disconnect_starting = true;
+ while (qedi_conn->fw_cleanup_works > 0) {
+ spin_unlock(&qedi_conn->tmf_work_lock);
msleep(1000);
+ spin_lock(&qedi_conn->tmf_work_lock);
}
+ spin_unlock(&qedi_conn->tmf_work_lock);
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
if (qedi_do_not_recover) {
@@ -1061,7 +1159,20 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break;
}
+ if (!abrt_conn)
+ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
+
qedi_ep->state = EP_STATE_DISCONN_START;
+
+ if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags))
+ goto ep_release_conn;
+
+ /* Delete doorbell from doorbell recovery mechanism */
+ ret = qedi_ops->common->db_recovery_del(qedi->cdev,
+ qedi_ep->p_doorbell,
+ &qedi_ep->db_data);
+
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1155,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1214,6 +1294,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
}
iscsi_cid = (u32)path_data->handle;
+ if (iscsi_cid >= qedi->max_active_conns) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
@@ -1297,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
@@ -1365,13 +1448,24 @@ static umode_t qedi_attr_is_visible(int param_type, int param)
static void qedi_cleanup_task(struct iscsi_task *task)
{
- if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ struct qedi_cmd *cmd;
+
+ if (task->state == ISCSI_TASK_PENDING) {
QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
refcount_read(&task->refcount));
return;
}
- qedi_iscsi_unmap_sg_list(task->dd_data);
+ if (task->sc)
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+
+ cmd = task->dd_data;
+ if (cmd->task_id != U16_MAX)
+ qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
+ cmd->task_id);
+
+ cmd->task_id = U16_MAX;
+ cmd->scsi_cmd = NULL;
}
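This works because qedi_task_xmit() (earlier hunk) primes task_id with U16_MAX before anything can fail, turning it into an "was a firmware task index allocated?" sentinel that cleanup can test and clear exactly once. The pattern, with illustrative names:

#define NO_TID U16_MAX

cmd->tid = NO_TID;              /* before any allocation attempt */
/* ... */
if (cmd->tid != NO_TID) {       /* cleanup: free iff allocated */
        free_task_idx(hba, cmd->tid);
        cmd->tid = NO_TID;
}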
struct iscsi_transport qedi_iscsi_transport = {
@@ -1383,6 +1477,7 @@ struct iscsi_transport qedi_iscsi_transport = {
.destroy_session = qedi_session_destroy,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.start_conn = qedi_conn_start,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qedi_conn_destroy,
@@ -1535,7 +1630,7 @@ static const struct {
},
};
-char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+static char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
{
int i;
char *msg = NULL;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 67c3b7349271..9b9f2e44fdde 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -80,6 +80,7 @@ struct qedi_endpoint {
u32 handle;
u32 fw_cid;
void __iomem *p_doorbell;
+ struct iscsi_db_data db_data;
/* Send queue management */
struct iscsi_wqe *sq;
@@ -149,11 +150,12 @@ struct qedi_conn {
struct iscsi_cls_conn *cls_conn;
struct qedi_ctx *qedi;
struct qedi_endpoint *ep;
+ struct iscsi_endpoint *iscsi_ep;
struct list_head active_cmd_list;
spinlock_t list_lock; /* internal conn lock */
u32 active_cmd_count;
u32 cmd_cleanup_req;
- u32 cmd_cleanup_cmpl;
+ atomic_t cmd_cleanup_cmpl;
u32 iscsi_conn_id;
int itt;
@@ -168,8 +170,8 @@ struct qedi_conn {
struct list_head tmf_work_list;
wait_queue_head_t wait_queue;
spinlock_t tmf_work_lock; /* tmf work lock */
- unsigned long flags;
-#define QEDI_CONN_FW_CLEANUP 1
+ bool ep_disconnect_starting;
+ int fw_cleanup_works;
};
struct qedi_cmd {
@@ -180,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct e4_iscsi_task_context request;
+ struct iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
@@ -211,6 +213,7 @@ struct qedi_cmd {
struct qedi_work_map {
struct list_head list;
struct qedi_cmd *qedi_cmd;
+ struct iscsi_task *ctask;
int rtid;
int state;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index acb930b8c6a6..df2fe7bd26d1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -28,6 +28,10 @@
#include "qedi_gbl.h"
#include "qedi_iscsi.h"
+static uint qedi_qed_debug;
+module_param(qedi_qed_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)");
+
static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");
@@ -41,11 +45,15 @@ module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
" Enable logging of SCSI requests/completions into trace buffer. (default off).");
-uint qedi_ll2_buf_size = 0x400;
+static uint qedi_ll2_buf_size = 0x400;
module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
+static uint qedi_flags_override;
+module_param(qedi_flags_override, uint, 0644);
+MODULE_PARM_DESC(qedi_flags_override, "Disable/Enable MFW error flags bits action.");
+
const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
@@ -58,6 +66,9 @@ static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
+static void qedi_recovery_handler(struct work_struct *work);
+static void qedi_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -265,10 +276,8 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
}
udev = kzalloc(sizeof(*udev), GFP_KERNEL);
- if (!udev) {
- rc = -ENOMEM;
+ if (!udev)
goto err_udev;
- }
udev->uio_dev = -1;
@@ -342,12 +351,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys,
+ sizeof(struct status_block), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -609,7 +618,7 @@ static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
sizeof(struct qedi_endpoint *)), GFP_KERNEL);
if (!qedi->ep_tbl)
return -ENOMEM;
- port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
QEDI_LOCAL_PORT_MIN, port_id)) {
qedi_cm_free_mem(qedi);
@@ -631,7 +640,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
goto exit_setup_shost;
}
- shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = ~0;
shost->max_cmd_len = 16;
@@ -657,8 +666,6 @@ exit_setup_shost:
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
- struct qedi_uio_dev *udev;
- struct qedi_uio_ctrl *uctrl;
struct skb_work_list *work;
struct ethhdr *eh;
@@ -697,9 +704,6 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
"Allowed frame ethertype [0x%x] len [0x%x].\n",
eh->h_proto, skb->len);
- udev = qedi->udev;
- uctrl = udev->uctrl;
-
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -789,8 +793,7 @@ static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
spin_lock_bh(&qedi->ll2_lock);
list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
list_del(&work->list);
- if (work->skb)
- kfree_skb(work->skb);
+ kfree_skb(work->skb);
kfree(work);
}
spin_unlock_bh(&qedi->ll2_lock);
@@ -862,7 +865,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
- qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+ qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT;
+ qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT;
qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
@@ -920,7 +924,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
- snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
+ snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
@@ -1113,6 +1117,65 @@ exit_get_data:
return;
}
+void qedi_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+ unsigned long override_flags = qedi_flags_override;
+
+ if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags))
+ qedi->qedi_err_flags = qedi_flags_override;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "HW error handler scheduled, err=%d err_flags=0x%x\n",
+ err_type, qedi->qedi_err_flags);
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedi->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags))
+ qedi_ops->common->attn_clr_enable(qedi->cdev, true);
+
+ if (err_type == QED_HW_ERR_RAMROD_FAIL &&
+ test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags))
+ qedi_ops->common->recovery_process(qedi->cdev);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static void qedi_schedule_recovery_handler(void *dev)
+{
+ struct qedi_ctx *qedi = dev;
+
+ QEDI_ERR(&qedi->dbg_ctx, "Recovery handler scheduled.\n");
+
+ if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags))
+ return;
+
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ schedule_delayed_work(&qedi->recovery_work, 0);
+}
+
+static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -1124,12 +1187,15 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Link Down event.\n");
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
}
}
static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
+ .schedule_recovery_handler = qedi_schedule_recovery_handler,
+ .schedule_hw_err_handler = qedi_schedule_hw_err_handler,
.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
.get_generic_tlv_data = qedi_get_generic_tlv_data,
}
@@ -1140,7 +1206,6 @@ static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
{
struct qedi_work *qedi_work;
struct qedi_conn *q_conn;
- struct iscsi_conn *conn;
struct qedi_cmd *qedi_cmd;
u32 iscsi_cid;
int rc = 0;
@@ -1153,7 +1218,6 @@ static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
iscsi_cid);
return -1;
}
- conn = q_conn->cls_conn->dd_data;
switch (cqe->cqe_common.cqe_type) {
case ISCSI_CQE_TYPE_SOLICITED:
@@ -1196,7 +1260,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -1252,7 +1316,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
@@ -1286,13 +1350,13 @@ process_again:
"process already running\n");
}
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_update_sb_idx(fp->sb_info);
/* Check for more work */
rmb();
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
else
goto process_again;
@@ -1344,7 +1408,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ for (i = 0; i < qedi->msix_count; i++) {
idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -1374,7 +1438,12 @@ static int qedi_setup_int(struct qedi_ctx *qedi)
{
int rc = 0;
- rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues);
+ if (rc < 0)
+ goto exit_setup_int;
+
+ qedi->msix_count = rc;
+
rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
if (rc)
goto exit_setup_int;
@@ -1469,7 +1538,6 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
int i;
struct scsi_bd *pbl;
u64 *list;
- dma_addr_t page;
/* Alloc dma memory for BDQ buffers */
for (i = 0; i < QEDI_BDQ_NUM; i++) {
@@ -1539,11 +1607,9 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
QEDI_PAGE_SIZE;
list = (u64 *)qedi->bdq_pbl_list;
- page = qedi->bdq_pbl_list_dma;
for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
*list = qedi->bdq_pbl_dma;
list++;
- page += QEDI_PAGE_SIZE;
}
return 0;
@@ -1553,7 +1619,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
{
u32 *list;
int i;
- int status = 0, rc;
+ int status;
u32 *pbl;
dma_addr_t page;
int num_pages;
@@ -1564,14 +1630,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
*/
if (!qedi->num_queues) {
QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
- return 1;
+ return -ENOMEM;
}
/* Make sure we allocated the PBL that will contain the physical
* addresses of our queues
*/
if (!qedi->p_cpuq) {
- status = 1;
+ status = -EINVAL;
goto mem_alloc_failure;
}
@@ -1586,13 +1652,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
"qedi->global_queues=%p.\n", qedi->global_queues);
/* Allocate DMA coherent buffers for BDQ */
- rc = qedi_alloc_bdq(qedi);
- if (rc)
+ status = qedi_alloc_bdq(qedi);
+ if (status)
goto mem_alloc_failure;
/* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
- if (rc)
+ status = qedi_alloc_nvm_iscsi_cfg(qedi);
+ if (status)
goto mem_alloc_failure;
/* Allocate a CQ and an associated PBL for each MSI-X
@@ -1605,6 +1671,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
if (!qedi->global_queues[i]) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to allocation global queue %d.\n", i);
+ status = -ENOMEM;
goto mem_alloc_failure;
}
@@ -1947,7 +2014,7 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
qedi_ops->ll2->start(qedi->cdev, &params);
}
-/**
+/*
* qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting
* for gaps) for the matching absolute-pf-id of the QEDI device.
*/
@@ -2019,8 +2086,7 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n",
- SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = snprintf(buf, 3, "0\n");
@@ -2175,7 +2241,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2183,11 +2249,11 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = snprintf(buf, 3, "0\n");
@@ -2323,21 +2389,46 @@ kset_free:
return -ENOMEM;
}
+static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n",
+ __func__, state);
+
+ if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recovery already in progress.\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ qedi_ops->common->recovery_process(qedi->cdev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
int rval;
+ u16 retry = 10;
- if (qedi->tmf_thread) {
- flush_workqueue(qedi->tmf_thread);
- destroy_workqueue(qedi->tmf_thread);
- qedi->tmf_thread = NULL;
- }
+ if (mode == QEDI_MODE_NORMAL)
+ iscsi_host_remove(qedi->shost, false);
+ else if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_remove(qedi->shost, true);
+
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
+ if (qedi->tmf_thread) {
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
- if (qedi->offload_thread) {
- flush_workqueue(qedi->offload_thread);
- destroy_workqueue(qedi->offload_thread);
- qedi->offload_thread = NULL;
+ if (qedi->offload_thread) {
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
}
#ifdef CONFIG_DEBUG_FS
@@ -2349,12 +2440,17 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_sync_free_irqs(qedi);
if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
- qedi_ops->stop(qedi->cdev);
+ while (retry--) {
+ rval = qedi_ops->stop(qedi->cdev);
+ if (rval < 0)
+ msleep(1000);
+ else
+ break;
+ }
qedi_ops->ll2->stop(qedi->cdev);
}
- if (mode == QEDI_MODE_NORMAL)
- qedi_free_iscsi_pf_param(qedi);
+ qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
if (rval)
@@ -2367,15 +2463,12 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_destroy_fp(qedi);
- if (mode == QEDI_MODE_NORMAL) {
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
qedi_release_cid_que(qedi);
qedi_cm_free_mem(qedi);
qedi_free_uio(qedi->udev);
qedi_free_itt(qedi);
- iscsi_host_remove(qedi->shost);
- iscsi_host_free(qedi->shost);
-
if (qedi->ll2_recv_thread) {
kthread_stop(qedi->ll2_recv_thread);
qedi->ll2_recv_thread = NULL;
@@ -2384,14 +2477,40 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (qedi->boot_kset)
iscsi_boot_destroy_kset(qedi->boot_kset);
+
+ iscsi_host_free(qedi->shost);
}
}
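One detail worth calling out in __qedi_remove() above: qedi_ops->stop() may transiently fail (for example while a recovery is still settling), so the remove path now retries it up to ten times with a one-second pause rather than pressing on after the first error. The bounded-retry shape, sketched with illustrative names:

u16 retry = 10;
int rc;

while (retry--) {
        rc = ops->stop(cdev);           /* illustrative op */
        if (rc >= 0)
                break;                  /* stopped cleanly */
        msleep(1000);                   /* back off and try again */
}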
+static void qedi_board_disable_work(struct work_struct *work)
+{
+ struct qedi_ctx *qedi =
+ container_of(work, struct qedi_ctx,
+ board_disable_work.work);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Fan failure, Unloading firmware context.\n");
+
+ if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
+ return;
+
+ __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_shutdown(struct pci_dev *pdev)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: Shutdown qedi\n", __func__);
+ if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
+ return;
+ __qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
+}
+
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
struct qed_ll2_params params;
- u32 dp_module = 0;
u8 dp_level = 0;
bool is_vf = false;
char host_buf[16];
@@ -2400,7 +2519,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
struct qed_probe_params qed_params;
void *task_start, *task_end;
int rc;
- u16 tmp;
+ u16 retry = 10;
if (mode != QEDI_MODE_RECOVERY) {
qedi = qedi_host_alloc(pdev);
@@ -2412,18 +2531,31 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi = pci_get_drvdata(pdev);
}
+retry_probe:
+ if (mode == QEDI_MODE_RECOVERY)
+ msleep(2000);
+
memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI;
- qed_params.dp_module = dp_module;
+ qed_params.dp_module = qedi_qed_debug;
qed_params.dp_level = dp_level;
qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
if (!qedi->cdev) {
+ if (mode == QEDI_MODE_RECOVERY && retry) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Retry %d initialize hardware\n", retry);
+ retry--;
+ goto retry_probe;
+ }
+
rc = -ENODEV;
QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
goto free_host;
}
+ set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags);
+ set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags);
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
@@ -2435,14 +2567,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi->dev_info.common.num_hwfns,
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
- if (mode != QEDI_MODE_RECOVERY) {
- rc = qedi_set_iscsi_pf_param(qedi);
- if (rc) {
- rc = -ENOMEM;
- QEDI_ERR(&qedi->dbg_ctx,
- "Set iSCSI pf param fail\n");
- goto free_host;
- }
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
}
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
@@ -2501,15 +2631,15 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
"Writing %d to primary and secondary BDQ doorbell registers.\n",
qedi->bdq_prod_idx);
writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
- tmp = readw(qedi->bdq_primary_prod);
+ readw(qedi->bdq_primary_prod);
writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
- tmp = readw(qedi->bdq_secondary_prod);
+ readw(qedi->bdq_secondary_prod);
ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
qedi->mac);
- sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no);
qedi_ops->common->set_name(qedi->cdev, host_buf);
qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
@@ -2630,9 +2760,13 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start offload thread!\n");
rc = -ENODEV;
- goto free_cid_que;
+ goto free_tmf_thread;
}
+ INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
+ INIT_DELAYED_WORK(&qedi->board_disable_work,
+ qedi_board_disable_work);
+
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
atomic_set(&qedi->num_offloads, 0);
@@ -2650,6 +2784,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
return 0;
+free_tmf_thread:
+ destroy_workqueue(qedi->tmf_thread);
free_cid_que:
qedi_release_cid_que(qedi);
free_uio:
@@ -2658,7 +2794,7 @@ remove_host:
#ifdef CONFIG_DEBUG_FS
qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
- iscsi_host_remove(qedi->shost);
+ iscsi_host_remove(qedi->shost, false);
stop_iscsi_func:
qedi_ops->stop(qedi->cdev);
stop_slowpath:
@@ -2673,6 +2809,32 @@ exit_probe:
return rc;
}
+static void qedi_mark_conn_recovery(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+static void qedi_recovery_handler(struct work_struct *work)
+{
+ struct qedi_ctx *qedi =
+ container_of(work, struct qedi_ctx, recovery_work.work);
+
+ iscsi_host_for_each_session(qedi->shost, qedi_mark_conn_recovery);
+
+ /* Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qedi_ops->common->recovery_prolog(qedi->cdev);
+
+ __qedi_remove(qedi->pdev, QEDI_MODE_RECOVERY);
+ __qedi_probe(qedi->pdev, QEDI_MODE_RECOVERY);
+ clear_bit(QEDI_IN_RECOVERY, &qedi->flags);
+}
+
static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
return __qedi_probe(pdev, QEDI_MODE_NORMAL);
@@ -2692,11 +2854,17 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
+static struct pci_error_handlers qedi_err_handler = {
+ .error_detected = qedi_io_error_detected,
+};
+
static struct pci_driver qedi_pci_driver = {
.name = QEDI_MODULE_NAME,
.id_table = qedi_pci_tbl,
.probe = qedi_probe,
.remove = qedi_remove,
+ .shutdown = qedi_shutdown,
+ .err_handler = &qedi_err_handler,
};
static int __init qedi_init(void)
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
index 04ee68e6499c..b00a7e08ef53 100644
--- a/drivers/scsi/qedi/qedi_sysfs.c
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -16,9 +16,9 @@ static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
return iscsi_host_priv(shost);
}
-static ssize_t qedi_show_port_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t port_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
@@ -28,8 +28,8 @@ static ssize_t qedi_show_port_state(struct device *dev,
return sprintf(buf, "Linkdown\n");
}
-static ssize_t qedi_show_speed(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
struct qed_link_output if_link;
@@ -39,11 +39,20 @@ static ssize_t qedi_show_speed(struct device *dev,
return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
}
-static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
-static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+static DEVICE_ATTR_RO(port_state);
+static DEVICE_ATTR_RO(speed);
-struct device_attribute *qedi_shost_attrs[] = {
- &dev_attr_port_state,
- &dev_attr_speed,
+static struct attribute *qedi_shost_attrs[] = {
+ &dev_attr_port_state.attr,
+ &dev_attr_speed.attr,
+ NULL
+};
+
+static const struct attribute_group qedi_shost_attr_group = {
+ .attrs = qedi_shost_attrs
+};
+
+const struct attribute_group *qedi_shost_groups[] = {
+ &qedi_shost_attr_group,
NULL
};
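DEVICE_ATTR_RO(name) requires a show routine literally called name_show, which is why the callbacks are renamed in the same hunk. The macro expands to approximately:

/* DEVICE_ATTR_RO(speed) is roughly equivalent to: */
struct device_attribute dev_attr_speed = {
        .attr = { .name = "speed", .mode = 0444 },
        .show = speed_show,
};

The attribute pointers are then collected into an attribute_group, and the NULL-terminated group list is what the host template's shost_groups field points at.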
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3337cd341d21..1e7f4d138e06 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -477,20 +477,12 @@ __setup("qla1280=", qla1280_setup);
#endif
-/*
- * We use the scsi_pointer structure that's included with each scsi_command
- * to overlay our struct srb over it. qla1280_init() checks that a srb is not
- * bigger than a scsi_pointer.
- */
-
-#define CMD_SP(Cmnd) &Cmnd->SCp
#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
#define CMD_CDBP(Cmnd) Cmnd->cmnd
#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
#define CMD_RESULT(Cmnd) Cmnd->result
#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
-#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
#define CMD_HOST(Cmnd) Cmnd->device->host
#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
@@ -526,7 +518,7 @@ static struct pci_device_id qla1280_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
-DEFINE_MUTEX(qla1280_firmware_mutex);
+static DEFINE_MUTEX(qla1280_firmware_mutex);
struct qla_fw {
char *fwname;
@@ -535,7 +527,7 @@ struct qla_fw {
#define QL_NUM_FW_IMAGES 3
-struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
+static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
{"qlogic/1040.bin", NULL}, /* image 0 */
{"qlogic/1280.bin", NULL}, /* image 1 */
{"qlogic/12160.bin", NULL}, /* image 2 */
@@ -633,13 +625,13 @@ static int qla1280_read_nvram(struct scsi_qla_host *ha)
* to be read a word (two bytes) at a time.
*
* The net result of this would be that the word (and
- * doubleword) quantites in the firmware would be correct, but
+ * doubleword) quantities in the firmware would be correct, but
* the bytes would be pairwise reversed. Since most of the
- * firmware quantites are, in fact, bytes, we do an extra
+ * firmware quantities are, in fact, bytes, we do an extra
* le16_to_cpu() in the firmware read routine.
*
* The upshot of all this is that the bytes in the firmware
- * are in the correct places, but the 16 and 32 bit quantites
+ * are in the correct places, but the 16 and 32 bit quantities
* are still in little endian format. We fix that up below by
* doing extra reverses on them */
nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
@@ -687,18 +679,16 @@ qla1280_info(struct Scsi_Host *host)
* The mid-level driver tries to ensure that queuecommand never gets invoked
* concurrently with itself or the interrupt handler (although the
* interrupt handler may call this routine as part of request-completion
- * handling). Unfortunely, it sometimes calls the scheduler in interrupt
+ * handling). Unfortunately, it sometimes calls the scheduler in interrupt
* context which is a big NO! NO!.
**************************************************************************/
-static int
-qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
+static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
- struct srb *sp = (struct srb *)CMD_SP(cmd);
+ struct srb *sp = scsi_cmd_priv(cmd);
int status;
- cmd->scsi_done = fn;
sp->cmd = cmd;
sp->flags = 0;
sp->wait = NULL;
@@ -756,7 +746,7 @@ _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
sp->wait = NULL;
if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
status = SUCCESS;
- (*cmd->scsi_done)(cmd);
+ scsi_done(cmd);
}
return status;
}
@@ -831,7 +821,7 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
ENTER("qla1280_error_action");
ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
- sp = (struct srb *)CMD_SP(cmd);
+ sp = scsi_cmd_priv(cmd);
bus = SCSI_BUS_32(cmd);
target = SCSI_TCN_32(cmd);
lun = SCSI_LUN_32(cmd);
@@ -1241,7 +1231,7 @@ qla1280_done(struct scsi_qla_host *ha)
{
struct srb *sp;
struct list_head *done_q;
- int bus, target, lun;
+ int bus, target;
struct scsi_cmnd *cmd;
ENTER("qla1280_done");
@@ -1256,7 +1246,6 @@ qla1280_done(struct scsi_qla_host *ha)
cmd = sp->cmd;
bus = SCSI_BUS_32(cmd);
target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
switch ((CMD_RESULT(cmd) >> 16)) {
case DID_RESET:
@@ -1279,7 +1268,7 @@ qla1280_done(struct scsi_qla_host *ha)
ha->actthreads--;
if (sp->wait == NULL)
- (*(cmd)->scsi_done)(cmd);
+ scsi_done(cmd);
else
complete(sp->wait);
}
@@ -2185,13 +2174,12 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
nv->cntr_flags_1.disable_loading_risc_code;
if (IS_ISP1040(ha)) {
- uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
+ uint16_t hwrev, cfg1, cdma_conf;
hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
- ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
/* Busted fifo, says mjacob. */
if (hwrev != ISP_CFG0_1040A)
@@ -2427,7 +2415,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
int cnt;
uint16_t *optr, *iptr;
uint16_t __iomem *mptr;
- uint16_t data;
DECLARE_COMPLETION_ONSTACK(wait);
ENTER("qla1280_mailbox_command");
@@ -2462,7 +2449,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
spin_unlock_irq(ha->host->host_lock);
WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
- data = qla1280_debounce_register(&reg->istatus);
+ qla1280_debounce_register(&reg->istatus);
wait_for_completion(&wait);
del_timer_sync(&ha->mailbox_timer);
@@ -2830,7 +2817,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3057,7 +3044,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
/* Check for empty slot in outstanding command list. */
for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
- (ha->outstanding_cmds[cnt] != 0); cnt++) ;
+ ha->outstanding_cmds[cnt]; cnt++);
if (cnt >= MAX_OUTSTANDING_COMMANDS) {
status = SCSI_MLQUEUE_HOST_BUSY;
@@ -3085,7 +3072,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3604,7 +3591,6 @@ static void
qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
struct list_head *done_q)
{
- unsigned int bus, target, lun;
int sense_sz;
struct srb *sp;
struct scsi_cmnd *cmd;
@@ -3630,11 +3616,6 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
cmd = sp->cmd;
- /* Generate LU queue on cntrl, target, LUN */
- bus = SCSI_BUS_32(cmd);
- target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
-
if (comp_status || scsi_status) {
dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
"0x%x, handle = 0x%x\n", comp_status,
@@ -3673,7 +3654,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
- "l %i\n", bus, target, lun);
+ "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ SCSI_LUN_32(cmd));
if (sense_sz)
qla1280_dump_buffer(2,
(char *)cmd->sense_buffer,
@@ -3914,18 +3896,18 @@ qla1280_get_target_parameters(struct scsi_qla_host *ha,
printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
if (mb[3] != 0) {
- printk(" Sync: period %d, offset %d",
+ printk(KERN_CONT " Sync: period %d, offset %d",
(mb[3] & 0xff), (mb[3] >> 8));
if (mb[2] & BIT_13)
- printk(", Wide");
+ printk(KERN_CONT ", Wide");
if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
- printk(", DT");
+ printk(KERN_CONT ", DT");
} else
- printk(" Async");
+ printk(KERN_CONT " Async");
if (device->simple_tags)
- printk(", Tagged queuing: depth %d", device->queue_depth);
- printk("\n");
+ printk(KERN_CONT ", Tagged queuing: depth %d", device->queue_depth);
+ printk(KERN_CONT "\n");
}
@@ -3970,7 +3952,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
int i;
ha = (struct scsi_qla_host *)host->hostdata;
- sp = (struct srb *)CMD_SP(cmd);
+ sp = scsi_cmd_priv(cmd);
printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
@@ -3989,8 +3971,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
} */
printk(" tag=%d, transfersize=0x%x \n",
- cmd->tag, cmd->transfersize);
- printk(" SP=0x%p\n", CMD_SP(cmd));
+ scsi_cmd_to_rq(cmd)->tag, cmd->transfersize);
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
}
@@ -4056,7 +4037,6 @@ qla1280_setup(char *s)
{
char *cp, *ptr;
unsigned long val;
- int toke;
cp = s;
@@ -4071,7 +4051,7 @@ qla1280_setup(char *s)
} else
val = simple_strtoul(ptr, &ptr, 0);
- switch ((toke = qla1280_get_token(cp))) {
+ switch (qla1280_get_token(cp)) {
case TOKEN_NVRAM:
if (!val)
driver_setup.no_nvram = 1;
@@ -4150,6 +4130,7 @@ static struct scsi_host_template qla1280_driver_template = {
.can_queue = MAX_OUTSTANDING_COMMANDS,
.this_id = -1,
.sg_tablesize = SG_ALL,
+ .cmd_size = sizeof(struct srb),
};
@@ -4362,12 +4343,6 @@ static struct pci_driver qla1280_pci_driver = {
static int __init
qla1280_init(void)
{
- if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
- printk(KERN_WARNING
- "qla1280: struct srb too big, aborting\n");
- return -EINVAL;
- }
-
#ifdef MODULE
/*
* If we are called as a module, the qla1280 pointer may not be null
@@ -4411,15 +4386,3 @@ MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
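
Taken together, the qla1280.c hunks above retire the old scsi_pointer overlay: .cmd_size now tells the SCSI midlayer to reserve sizeof(struct srb) bytes of driver-private space behind every scsi_cmnd, scsi_cmd_priv() returns that space, and the runtime sizeof() check in qla1280_init() becomes unnecessary. A minimal sketch of the pattern under hypothetical my_* names:

#include <linux/list.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_srb {				/* per-command driver data */
	struct list_head list;
	unsigned long flags;
};

static int my_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct my_srb *sp = scsi_cmd_priv(cmd);	/* area after scsi_cmnd */

	INIT_LIST_HEAD(&sp->list);
	sp->flags = 0;
	/* ... build and issue the request ... */
	return 0;
}

static struct scsi_host_template my_template = {
	.queuecommand	= my_queuecommand,
	.cmd_size	= sizeof(struct my_srb),  /* midlayer allocates it */
};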
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index e7820b5bca38..d309e2ca14de 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -87,8 +87,7 @@
#define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */
/*
- * SCSI Request Block structure (sp) that is placed
- * on cmd->SCp location of every I/O
+ * SCSI Request Block structure (sp) that occurs after each struct scsi_cmnd.
*/
struct srb {
struct list_head list; /* (8/16) LU queue */
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 764501838e21..802c373fd6d9 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -6,7 +6,7 @@ config SCSI_QLA_FC
depends on NVME_FC || !NVME_FC
select FW_LOADER
select BTREE
- ---help---
+ help
This qla2xxx driver supports all QLogic Fibre Channel
PCI and PCIe host adapters.
@@ -37,14 +37,14 @@ config TCM_QLA2XXX
depends on LIBFC
select BTREE
default n
- ---help---
+ help
Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
if TCM_QLA2XXX
config TCM_QLA2XXX_DEBUG
bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
default n
- ---help---
+ help
Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs
This will include code to enable the SCSI command jammer
endif
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 17d5bc1cc56b..cbc1303e761e 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+ qla_edif.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d7e7043f9eab..b67ad30d56e6 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -26,7 +25,8 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+ if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
+ ha->mpi_fw_dump_reading))
return 0;
mutex_lock(&ha->optrom_mutex);
@@ -42,6 +42,10 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
MCTP_DUMP_SIZE);
+ } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
+ rval = memory_read_from_buffer(buf, count, &off,
+ ha->mpi_fw_dump,
+ ha->mpi_fw_dump_len);
} else if (ha->fw_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
@@ -79,7 +83,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_md_prep(vha);
}
ha->fw_dump_reading = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
break;
case 1:
if (ha->fw_dumped && !ha->fw_dump_reading) {
@@ -103,7 +107,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
} else {
- ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
}
break;
@@ -137,6 +140,30 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
vha->host_no);
}
break;
+ case 8:
+ if (!ha->mpi_fw_dump_reading)
+ break;
+ ql_log(ql_log_info, vha, 0x70e7,
+ "MPI firmware dump cleared on (%ld).\n", vha->host_no);
+ ha->mpi_fw_dump_reading = 0;
+ ha->mpi_fw_dumped = 0;
+ break;
+ case 9:
+ if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
+ ha->mpi_fw_dump_reading = 1;
+ ql_log(ql_log_info, vha, 0x70e8,
+ "Raw MPI firmware dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
+ case 10:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ql_log(ql_log_info, vha, 0x70e9,
+ "Issuing MPI firmware dump on host#%ld.\n",
+ vha->host_no);
+ ha->isp_ops->mpi_fw_dump(vha, 0);
+ }
+ break;
}
return count;
}
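
fw_dump above is a binary sysfs attribute: writes are small integer commands (0 and 1 drive the main firmware dump; 8, 9 and 10, added here, clear, arm and trigger the MPI dump) and reads stream whichever dump buffer has been armed. A reduced sketch of the read half, assuming a my_dump buffer and length; memory_read_from_buffer() handles the offset and count clamping:

#include <linux/string.h>
#include <linux/sysfs.h>

static void *my_dump;		/* armed dump buffer, illustrative */
static size_t my_dump_len;

static ssize_t my_dump_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	/* copies at most count bytes starting at off, never past the end */
	return memory_read_from_buffer(buf, count, &off, my_dump,
				       my_dump_len);
}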
@@ -207,10 +234,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
/* Checksum NVRAM. */
if (IS_FWI2_CAPABLE(ha)) {
- uint32_t *iter;
+ __le32 *iter = (__force __le32 *)buf;
uint32_t chksum;
- iter = (uint32_t *)buf;
chksum = 0;
for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
chksum += le32_to_cpu(*iter);
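
Retyping the iterator as __le32 above lets sparse verify the byte-order handling: the NVRAM image holds little-endian words on every host, so each word goes through le32_to_cpu() before being accumulated. A sketch of the same accumulation as a validity check, under the common (here assumed, not confirmed for this driver) convention that the stored checksum word brings the whole sum to zero:

#include <linux/types.h>
#include <asm/byteorder.h>

static bool my_nvram_csum_ok(const void *data, size_t len)
{
	const __le32 *iter = data;
	u32 sum = 0;
	size_t i;

	for (i = 0; i < len / sizeof(*iter); i++)
		sum += le32_to_cpu(iter[i]);

	/* assumption: last word is the two's complement of the rest */
	return sum == 0;
}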
@@ -529,7 +555,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha))
+ if (!IS_NOCACHE_VPD_TYPE(ha))
goto skip;
faddr = ha->flt_region_vpd << 2;
@@ -684,6 +710,12 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x706e,
"Issuing ISP reset.\n");
+ if (vha->hw->flags.port_isolated) {
+ ql_log(ql_log_info, vha, 0x706e,
+ "Port is isolated, returning.\n");
+ return -EINVAL;
+ }
+
scsi_block_requests(vha->host);
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_no_md_cap = 1;
@@ -706,13 +738,14 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
@@ -737,6 +770,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
}
+ break;
case 0x2025e:
if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
@@ -917,9 +951,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
return 0;
+ mutex_lock(&vha->hw->optrom_mutex);
if (ha->dcbx_tlv)
goto do_read;
- mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
mutex_unlock(&vha->hw->optrom_mutex);
return 0;
@@ -1022,9 +1056,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
continue;
if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
continue;
- if (iter->type == 0x27 &&
- (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
- continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1324,6 +1355,79 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t led[3] = { 0 };
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ if (ql26xx_led_config(vha, 0, led))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
+ led[0], led[1], led[2]);
+}
+
+static ssize_t
+qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t options = BIT_0;
+ uint16_t led[3] = { 0 };
+ uint16_t word[4];
+ int n;
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
+ if (n == 4) {
+ if (word[0] == 3) {
+ options |= BIT_3|BIT_2|BIT_1;
+ led[0] = word[1];
+ led[1] = word[2];
+ led[2] = word[3];
+ goto write;
+ }
+ return -EINVAL;
+ }
+
+ if (n == 2) {
+ /* check led index */
+ if (word[0] == 0) {
+ options |= BIT_2;
+ led[0] = word[1];
+ goto write;
+ }
+ if (word[0] == 1) {
+ options |= BIT_3;
+ led[1] = word[1];
+ goto write;
+ }
+ if (word[0] == 2) {
+ options |= BIT_1;
+ led[2] = word[1];
+ goto write;
+ }
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+
+write:
+ if (ql26xx_led_config(vha, options, led))
+ return -EFAULT;
+
+ return count;
+}
+
+static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1761,6 +1865,18 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
+static const struct {
+ u16 rate;
+ char *str;
+} port_speed_str[] = {
+ { PORT_SPEED_4GB, "4" },
+ { PORT_SPEED_8GB, "8" },
+ { PORT_SPEED_16GB, "16" },
+ { PORT_SPEED_32GB, "32" },
+ { PORT_SPEED_64GB, "64" },
+ { PORT_SPEED_10GB, "10" },
+};
+
static ssize_t
qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1768,7 +1884,8 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
ssize_t rval;
- char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
+ u16 i;
+ char *speed = "Unknown";
rval = qla2x00_get_data_rate(vha);
if (rval != QLA_SUCCESS) {
@@ -1777,12 +1894,40 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ql_log(ql_log_info, vha, 0x70d6,
- "port speed:%d\n", ha->link_data_rate);
+ for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
+ if (port_speed_str[i].rate != ha->link_data_rate)
+ continue;
+ speed = port_speed_str[i].str;
+ break;
+ }
- return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
}
+static ssize_t
+qla2x00_mpi_pause_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int rval = 0;
+
+ if (sscanf(buf, "%d", &rval) != 1)
+ return -EINVAL;
+
+ ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n");
+
+ rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n");
+ count = 0;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store);
+
/* ----- */
static ssize_t
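
mpi_pause above is registered write-only: S_IWUSR with a NULL show routine, so the node accepts commands but exposes nothing. The same shape in miniature, with a hypothetical store that validates its input before touching hardware (kstrtoint() shown here where the driver itself uses sscanf()):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t my_pause_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int val;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;
	/* ... poke the hardware here ... */
	return count;
}
static DEVICE_ATTR(my_pause, 0200, NULL, my_pause_store);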
@@ -1828,9 +1973,8 @@ static char *mode_to_str[] = {
};
#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
-static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
{
- int rc = 0;
enum {
NO_ACTION,
MODE_CHANGE_ACCEPT,
@@ -2103,8 +2247,6 @@ static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
vha->ql2xexchoffld, vha->u_ql2xexchoffld);
break;
}
-
- return rc;
}
static ssize_t
@@ -2250,6 +2392,26 @@ qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
}
+static ssize_t
+qla2x00_dport_diagnostics_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+ !IS_QLA28XX(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ if (!*vha->dport_data)
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
+ vha->dport_data[0], vha->dport_data[1],
+ vha->dport_data[2], vha->dport_data[3]);
+}
+static DEVICE_ATTR(dport_diagnostics, 0444,
+ qla2x00_dport_diagnostics_show, NULL);
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -2264,6 +2426,8 @@ static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
qla2x00_beacon_store);
+static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
+ qla2x00_beacon_config_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
@@ -2313,68 +2477,76 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
-
-struct device_attribute *qla2x00_host_attrs[] = {
- &dev_attr_driver_version,
- &dev_attr_fw_version,
- &dev_attr_serial_num,
- &dev_attr_isp_name,
- &dev_attr_isp_id,
- &dev_attr_model_name,
- &dev_attr_model_desc,
- &dev_attr_pci_info,
- &dev_attr_link_state,
- &dev_attr_zio,
- &dev_attr_zio_timer,
- &dev_attr_beacon,
- &dev_attr_optrom_bios_version,
- &dev_attr_optrom_efi_version,
- &dev_attr_optrom_fcode_version,
- &dev_attr_optrom_fw_version,
- &dev_attr_84xx_fw_version,
- &dev_attr_total_isp_aborts,
- &dev_attr_serdes_version,
- &dev_attr_mpi_version,
- &dev_attr_phy_version,
- &dev_attr_flash_block_size,
- &dev_attr_vlan_id,
- &dev_attr_vn_port_mac_address,
- &dev_attr_fabric_param,
- &dev_attr_fw_state,
- &dev_attr_optrom_gold_fw_version,
- &dev_attr_thermal_temp,
- &dev_attr_diag_requests,
- &dev_attr_diag_megabytes,
- &dev_attr_fw_dump_size,
- &dev_attr_allow_cna_fw_dump,
- &dev_attr_pep_version,
- &dev_attr_min_supported_speed,
- &dev_attr_max_supported_speed,
- &dev_attr_zio_threshold,
- &dev_attr_dif_bundle_statistics,
- &dev_attr_port_speed,
- &dev_attr_port_no,
- &dev_attr_fw_attr,
- NULL, /* reserve for qlini_mode */
- NULL, /* reserve for ql2xiniexchg */
- NULL, /* reserve for ql2xexchoffld */
+static struct attribute *qla2x00_host_attrs[] = {
+ &dev_attr_driver_version.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_serial_num.attr,
+ &dev_attr_isp_name.attr,
+ &dev_attr_isp_id.attr,
+ &dev_attr_model_name.attr,
+ &dev_attr_model_desc.attr,
+ &dev_attr_pci_info.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_zio.attr,
+ &dev_attr_zio_timer.attr,
+ &dev_attr_beacon.attr,
+ &dev_attr_beacon_config.attr,
+ &dev_attr_optrom_bios_version.attr,
+ &dev_attr_optrom_efi_version.attr,
+ &dev_attr_optrom_fcode_version.attr,
+ &dev_attr_optrom_fw_version.attr,
+ &dev_attr_84xx_fw_version.attr,
+ &dev_attr_total_isp_aborts.attr,
+ &dev_attr_serdes_version.attr,
+ &dev_attr_mpi_version.attr,
+ &dev_attr_phy_version.attr,
+ &dev_attr_flash_block_size.attr,
+ &dev_attr_vlan_id.attr,
+ &dev_attr_vn_port_mac_address.attr,
+ &dev_attr_fabric_param.attr,
+ &dev_attr_fw_state.attr,
+ &dev_attr_optrom_gold_fw_version.attr,
+ &dev_attr_thermal_temp.attr,
+ &dev_attr_diag_requests.attr,
+ &dev_attr_diag_megabytes.attr,
+ &dev_attr_fw_dump_size.attr,
+ &dev_attr_allow_cna_fw_dump.attr,
+ &dev_attr_pep_version.attr,
+ &dev_attr_min_supported_speed.attr,
+ &dev_attr_max_supported_speed.attr,
+ &dev_attr_zio_threshold.attr,
+ &dev_attr_dif_bundle_statistics.attr,
+ &dev_attr_port_speed.attr,
+ &dev_attr_port_no.attr,
+ &dev_attr_fw_attr.attr,
+ &dev_attr_dport_diagnostics.attr,
+ &dev_attr_mpi_pause.attr,
+ &dev_attr_qlini_mode.attr,
+ &dev_attr_ql2xiniexchg.attr,
+ &dev_attr_ql2xexchoffld.attr,
NULL,
};
-void qla_insert_tgt_attrs(void)
+static umode_t qla_host_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
- struct device_attribute **attr;
+ if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL &&
+ (attr == &dev_attr_qlini_mode.attr ||
+ attr == &dev_attr_ql2xiniexchg.attr ||
+ attr == &dev_attr_ql2xexchoffld.attr))
+ return 0;
+ return attr->mode;
+}
- /* advance to empty slot */
- for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
- continue;
+static const struct attribute_group qla2x00_host_attr_group = {
+ .is_visible = qla_host_attr_is_visible,
+ .attrs = qla2x00_host_attrs
+};
- *attr = &dev_attr_qlini_mode;
- attr++;
- *attr = &dev_attr_ql2xiniexchg;
- attr++;
- *attr = &dev_attr_ql2xexchoffld;
-}
+const struct attribute_group *qla2x00_host_groups[] = {
+ &qla2x00_host_attr_group,
+ NULL
+};
/* Host attributes. */
@@ -2523,7 +2695,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
rport->dev_loss_tmo = timeout ? timeout : 1;
+
+ if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ rport->dev_loss_tmo);
}
static void
@@ -2536,17 +2714,27 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- /* Now that the rport has been deleted, set the fcport state to
- FCS_DEVICE_DEAD */
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
+ DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
+ rport->port_state));
+
+ /*
+ * Now that the rport has been deleted, set the fcport state to
+ * FCS_DEVICE_DEAD, if the fcport is still lost.
+ */
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
/*
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
spin_lock_irqsave(host->host_lock, flags);
- fcport->rport = fcport->drport = NULL;
- *((fc_port_t **)rport->dd_data) = NULL;
+ /* Confirm port has not reappeared before clearing pointers. */
+ if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ fcport->rport = fcport->drport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ }
spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
@@ -2579,14 +2767,24 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
/*
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
+ *
+ * Attempt to cleanup only lost devices.
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- if (IS_FWI2_CAPABLE(fcport->vha->hw))
- fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
- fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
- else
+ if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
+ fcport->scan_state != QLA_FCPORT_FOUND) {
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ fcport->logout_on_delete = 1;
+
+ if (!EDIF_NEGOTIATION_PENDING(fcport)) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
+ "%s %d schedule session deletion\n", __func__,
+ __LINE__);
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
qla2x00_port_logout(fcport->vha, fcport);
+ }
}
}
@@ -2598,6 +2796,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
if (IS_QLAFX00(vha->hw))
return 0;
+ if (vha->hw->flags.port_isolated)
+ return 0;
+
qla2x00_loop_reset(vha);
return 0;
}
@@ -2612,6 +2813,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
struct link_statistics *stats;
dma_addr_t stats_dma;
struct fc_host_statistics *p = &vha->fc_host_stat;
+ struct qla_qpair *qpair;
+ int i;
+ u64 ib = 0, ob = 0, ir = 0, or = 0;
memset(p, -1, sizeof(*p));
@@ -2648,30 +2852,58 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (rval != QLA_SUCCESS)
goto done_free;
- p->link_failure_count = stats->link_fail_cnt;
- p->loss_of_sync_count = stats->loss_sync_cnt;
- p->loss_of_signal_count = stats->loss_sig_cnt;
- p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
- p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
- p->invalid_crc_count = stats->inval_crc_cnt;
+ /* --- */
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ ir += qpair->counters.input_requests;
+ or += qpair->counters.output_requests;
+ ib += qpair->counters.input_bytes;
+ ob += qpair->counters.output_bytes;
+ }
+ ir += ha->base_qpair->counters.input_requests;
+ or += ha->base_qpair->counters.output_requests;
+ ib += ha->base_qpair->counters.input_bytes;
+ ob += ha->base_qpair->counters.output_bytes;
+
+ ir += vha->qla_stats.input_requests;
+ or += vha->qla_stats.output_requests;
+ ib += vha->qla_stats.input_bytes;
+ ob += vha->qla_stats.output_bytes;
+ /* --- */
+
+ p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
+ p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
+ p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
+ p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
+ p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
+ p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
if (IS_FWI2_CAPABLE(ha)) {
- p->lip_count = stats->lip_cnt;
- p->tx_frames = stats->tx_frames;
- p->rx_frames = stats->rx_frames;
- p->dumped_frames = stats->discarded_frames;
- p->nos_count = stats->nos_rcvd;
+ p->lip_count = le32_to_cpu(stats->lip_cnt);
+ p->tx_frames = le32_to_cpu(stats->tx_frames);
+ p->rx_frames = le32_to_cpu(stats->rx_frames);
+ p->dumped_frames = le32_to_cpu(stats->discarded_frames);
+ p->nos_count = le32_to_cpu(stats->nos_rcvd);
p->error_frames =
- stats->dropped_frames + stats->discarded_frames;
- p->rx_words = vha->qla_stats.input_bytes;
- p->tx_words = vha->qla_stats.output_bytes;
+ le32_to_cpu(stats->dropped_frames) +
+ le32_to_cpu(stats->discarded_frames);
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
+ p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
+ } else {
+ p->rx_words = ib >> 2;
+ p->tx_words = ob >> 2;
+ }
}
+
p->fcp_control_requests = vha->qla_stats.control_requests;
- p->fcp_input_requests = vha->qla_stats.input_requests;
- p->fcp_output_requests = vha->qla_stats.output_requests;
- p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
- p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+ p->fcp_input_requests = ir;
+ p->fcp_output_requests = or;
+ p->fcp_input_megabytes = ib >> 20;
+ p->fcp_output_megabytes = ob >> 20;
p->seconds_since_last_reset =
- get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+ get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
do_div(p->seconds_since_last_reset, HZ);
done_free:
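
The statistics rework above totals I/O counters from three places (every extra queue pair, the base queue pair, and the legacy per-host counters) before deriving FC words (bytes >> 2) and megabytes (bytes >> 20). The aggregation step, reduced to a sketch over hypothetical counter structs:

#include <linux/types.h>

struct my_counters {
	u64 input_bytes, output_bytes;
	u64 input_requests, output_requests;
};

static void my_sum_counters(const struct my_counters *qp, int nqp,
			    const struct my_counters *base,
			    struct my_counters *total)
{
	int i;

	*total = *base;			/* base queue pair */
	for (i = 0; i < nqp; i++) {	/* extra queue pairs */
		total->input_bytes     += qp[i].input_bytes;
		total->output_bytes    += qp[i].output_bytes;
		total->input_requests  += qp[i].input_requests;
		total->output_requests += qp[i].output_requests;
	}
	/* FC words are 4 bytes wide: rx_words = total->input_bytes >> 2 */
}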
@@ -2689,13 +2921,24 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats;
dma_addr_t stats_dma;
+ int i;
+ struct qla_qpair *qpair;
memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ memset(&qpair->counters, 0, sizeof(qpair->counters));
+ }
+ memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters));
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
if (IS_FWI2_CAPABLE(ha)) {
+ int rval;
+
stats = dma_alloc_coherent(&ha->pdev->dev,
sizeof(*stats), &stats_dma, GFP_KERNEL);
if (!stats) {
@@ -2705,7 +2948,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
}
/* reset firmware statistics */
- qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x70de,
+ "Resetting ISP statistics failed: rval = %d\n",
+ rval);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
@@ -2928,11 +3175,14 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
- qla_nvme_delete(vha);
qla24xx_disable_vp(vha);
qla2x00_wait_for_sess_deletion(vha);
+ qla_nvme_delete(vha);
+ qla_enode_stop(vha);
+ qla_edb_stop(vha);
+
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
@@ -3080,11 +3330,34 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.bsg_timeout = qla24xx_bsg_timeout,
};
+static uint
+qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
+{
+ uint supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+ if (speeds & FDMI_PORT_SPEED_64GB)
+ supported_speeds |= FC_PORTSPEED_64GBIT;
+ if (speeds & FDMI_PORT_SPEED_32GB)
+ supported_speeds |= FC_PORTSPEED_32GBIT;
+ if (speeds & FDMI_PORT_SPEED_16GB)
+ supported_speeds |= FC_PORTSPEED_16GBIT;
+ if (speeds & FDMI_PORT_SPEED_8GB)
+ supported_speeds |= FC_PORTSPEED_8GBIT;
+ if (speeds & FDMI_PORT_SPEED_4GB)
+ supported_speeds |= FC_PORTSPEED_4GBIT;
+ if (speeds & FDMI_PORT_SPEED_2GB)
+ supported_speeds |= FC_PORTSPEED_2GBIT;
+ if (speeds & FDMI_PORT_SPEED_1GB)
+ supported_speeds |= FC_PORTSPEED_1GBIT;
+
+ return supported_speeds;
+}
+
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- u32 speeds = FC_PORTSPEED_UNKNOWN;
+ u32 speeds = 0, fdmi_speed = 0;
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -3094,46 +3367,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_CNA_CAPABLE(ha))
- speeds = FC_PORTSPEED_10GBIT;
- else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
- if (ha->max_supported_speed == 2) {
- if (ha->min_supported_speed <= 6)
- speeds |= FC_PORTSPEED_64GBIT;
- }
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1) {
- if (ha->min_supported_speed <= 5)
- speeds |= FC_PORTSPEED_32GBIT;
- }
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 4)
- speeds |= FC_PORTSPEED_16GBIT;
- }
- if (ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 3)
- speeds |= FC_PORTSPEED_8GBIT;
- }
- if (ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 2)
- speeds |= FC_PORTSPEED_4GBIT;
- }
- } else if (IS_QLA2031(ha))
- speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
- FC_PORTSPEED_4GBIT;
- else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
- speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
- FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
- else if (IS_QLA24XX_TYPE(ha))
- speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
- FC_PORTSPEED_1GBIT;
- else if (IS_QLA23XX(ha))
- speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
- else
- speeds = FC_PORTSPEED_1GBIT;
+ fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
+ speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);
fc_host_supported_speeds(vha->host) = speeds;
}
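
The attribute-group conversion above also retires the old trick of reserving NULL slots for qla_insert_tgt_attrs() to patch in later: the mode-dependent attributes now sit in the array unconditionally, and an is_visible callback hides them unless the driver runs in dual mode. The mechanism, sketched with hypothetical names:

#include <linux/device.h>
#include <linux/sysfs.h>

static bool my_feature_enabled;		/* illustrative gate */

static ssize_t my_mode_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", my_feature_enabled);
}
static DEVICE_ATTR(my_mode, 0444, my_mode_show, NULL);

static struct attribute *my_attrs[] = {
	&dev_attr_my_mode.attr,
	NULL
};

static umode_t my_attr_is_visible(struct kobject *kobj,
				  struct attribute *attr, int i)
{
	/* returning 0 hides the node; attr->mode keeps it as declared */
	if (!my_feature_enabled && attr == &dev_attr_my_mode.attr)
		return 0;
	return attr->mode;
}

static const struct attribute_group my_group = {
	.is_visible	= my_attr_is_visible,
	.attrs		= my_attrs,
};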
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index d7169e43f5e1..cd75b179410d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,26 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_gbl.h"
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>
+static void qla2xxx_free_fcport_work(struct work_struct *work)
+{
+ struct fc_port *fcport = container_of(work, typeof(*fcport),
+ free_work);
+
+ qla2x00_free_fcport(fcport);
+}
+
/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ ql_dbg(ql_dbg_user, sp->vha, 0x7009,
+ "%s: sp hdl %x, result=%x bsg ptr %p\n",
+ __func__, sp->handle, res, bsg_job);
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- sp->free(sp);
}
void qla2x00_bsg_sp_free(srb_t *sp)
@@ -44,17 +58,27 @@ void qla2x00_bsg_sp_free(srb_t *sp)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (sp->remap.remapped) {
+ dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+ sp->remap.rsp.dma);
+ dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+ sp->remap.req.dma);
+ } else {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
}
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
- sp->type == SRB_ELS_CMD_HST)
- qla2x00_free_fcport(sp->fcport);
+ sp->type == SRB_ELS_CMD_HST) {
+ INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
+ queue_work(ha->wq, &sp->fcport->free_work);
+ }
qla2x00_rel_sp(sp);
}
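
Note the indirection introduced above: rather than freeing the fcport inline on the srb teardown path, qla2x00_bsg_sp_free() now queues the free onto the driver workqueue, a standard way to move heavier cleanup out of a completion context. The deferral idiom in generic form:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_port {
	struct work_struct free_work;
	/* ... */
};

static void my_port_free_work(struct work_struct *work)
{
	struct my_port *p = container_of(work, struct my_port, free_work);

	kfree(p);	/* stands in for qla2x00_free_fcport() */
}

static void my_port_schedule_free(struct workqueue_struct *wq,
				  struct my_port *p)
{
	INIT_WORK(&p->free_work, my_port_free_work);
	queue_work(wq, &p->free_work);
}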
@@ -213,8 +237,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
/* validate fcp priority data */
- if (!qla24xx_fcp_prio_cfg_valid(vha,
- (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalidated, the
@@ -256,6 +279,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
+ uint32_t els_cmd = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
@@ -269,6 +293,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
+ els_cmd = bsg_request->rqst_data.h_els.command_code;
+ if (els_cmd == ELS_AUTH_ELS)
+ return qla_edif_process_els(vha, bsg_job);
}
if (!vha->flags.online) {
@@ -405,7 +432,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode != FC_BSG_RPT_ELS)
qla2x00_free_fcport(fcport);
done:
return rval;
@@ -480,7 +507,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
>> 24;
switch (loop_id) {
case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
+ loop_id = NPH_SNS;
break;
case 0xFA:
loop_id = vha->mgmt_svr_loop_id;
@@ -681,7 +708,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
* dump and reset the chip.
*/
if (ret) {
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
rval = -EINVAL;
@@ -718,7 +745,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
uint16_t response[MAILBOX_REGISTER_COUNT];
uint16_t config[4], new_config[4];
uint8_t *fw_sts_ptr;
- uint8_t *req_data = NULL;
+ void *req_data = NULL;
dma_addr_t req_data_dma;
uint32_t req_data_len;
uint8_t *rsp_data = NULL;
@@ -796,10 +823,11 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
- (ha->current_topology == ISP_CFG_F ||
- (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
- req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
- elreq.options == EXTERNAL_LOOPBACK) {
+ ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
+ get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
+ req_data_len == MAX_ELS_FRAME_PAYLOAD &&
+ elreq.options == EXTERNAL_LOOPBACK))) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
ql_dbg(ql_dbg_user, vha, 0x701e,
"BSG request type: %s.\n", type);
@@ -885,7 +913,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
* doesn't work take FCoE dump and then
* reset the chip.
*/
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
@@ -1506,10 +1534,15 @@ qla2x00_update_optrom(struct bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_reply->result = DID_OK;
+ if (rval) {
+ bsg_reply->result = -EINVAL;
+ rval = -EINVAL;
+ } else {
+ bsg_reply->result = DID_OK;
+ }
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
@@ -2026,7 +2059,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->loop_id = piocb_rqst->dataword;
+ fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
sp->type = SRB_FXIOCB_BCMD;
sp->name = "bsg_fx_mgmt";
@@ -2392,6 +2425,89 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
}
static int
+qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval;
+ struct qla_dport_diag_v2 *dd;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint16_t options;
+
+ if (!IS_DPORT_CAPABLE(vha->hw))
+ return -EPERM;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+ options = dd->options;
+
+ /* Check dport Test in progress */
+ if (options == QLA_GET_DPORT_RESULT_V2 &&
+ vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_IN_PROCESS;
+ goto dportcomplete;
+ }
+
+ /* Check chip reset in progress and start/restart requests arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2)) {
+ vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ }
+
+ /* Check chip reset in progress and get result request arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ options == QLA_GET_DPORT_RESULT_V2) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
+ goto dportcomplete;
+ }
+
+ rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+ if (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2) {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
+ } else if (options == QLA_GET_DPORT_RESULT_V2) {
+ dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
+ dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
+ }
+ } else {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_ERR;
+ }
+
+dportcomplete:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(dd);
+
+ return 0;
+}
+
+static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
@@ -2403,19 +2519,23 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
qla27xx_get_active_image(vha, &active_regions);
regions.global_image = active_regions.global;
+ if (IS_QLA27XX(ha))
+ regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
+
if (IS_QLA28XX(ha)) {
- qla27xx_get_active_image(vha, &active_regions);
+ qla28xx_get_aux_images(vha, &active_regions);
regions.board_config = active_regions.aux.board_config;
regions.vpd_nvram = active_regions.aux.vpd_nvram;
regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+ regions.nvme_params = active_regions.aux.nvme_params;
}
ql_dbg(ql_dbg_user, vha, 0x70e1,
- "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+ "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
__func__, vha->host_no, regions.global_image,
regions.board_config, regions.vpd_nvram,
- regions.npiv_config_0_1, regions.npiv_config_2_3);
+ regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
@@ -2431,10 +2551,334 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+qla2x00_manage_host_stats(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_mng_host_stats_param *req_data;
+ struct ql_vnd_mng_host_stats_resp rsp_data;
+ u32 req_data_len;
+ int ret = 0;
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
+ return -EIO;
+ }
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data,
+ req_data_len);
+
+ switch (req_data->action) {
+ case QLA_STOP:
+ ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
+ break;
+ case QLA_START:
+ ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
+ break;
+ case QLA_CLEAR:
+ ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
+ ret = -EIO;
+ break;
+ }
+
+ kfree(req_data);
+
+ /* Prepare response */
+ rsp_data.status = ret;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
+
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &rsp_data,
+ sizeof(struct ql_vnd_mng_host_stats_resp));
+
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
+
+static int
+qla2x00_get_host_stats(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_stats_param *req_data;
+ struct ql_vnd_host_stats_resp rsp_data;
+ u32 req_data_len;
+ int ret = 0;
+ u64 ini_entry_count = 0;
+ u64 entry_count = 0;
+ u64 tgt_num = 0;
+ u64 tmp_stat_type = 0;
+ u64 response_len = 0;
+ void *data;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ /* Copy stat type to work on it */
+ tmp_stat_type = req_data->stat_type;
+
+ if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
+ /* Num of tgts connected to this host */
+ tgt_num = qla2x00_get_num_tgts(vha);
+ /* unset BIT_17 */
+ tmp_stat_type &= ~(1 << 17);
+ }
+
+ /* Total ini stats */
+ ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
+
+ /* Total number of entries */
+ entry_count = ini_entry_count + tgt_num;
+
+ response_len = sizeof(struct ql_vnd_host_stats_resp) +
+ (sizeof(struct ql_vnd_stat_entry) * entry_count);
+
+ if (response_len > bsg_job->reply_payload.payload_len) {
+ rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &rsp_data,
+ sizeof(struct ql_vnd_mng_host_stats_resp));
+
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ goto host_stat_out;
+ }
+
+ data = kzalloc(response_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto host_stat_out;
+ }
+
+ ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
+ data, response_len);
+
+ rsp_data.status = EXT_STATUS_OK;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ data, response_len);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(data);
+host_stat_out:
+ kfree(req_data);
+ return ret;
+}
+
+static struct fc_rport *
+qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
+{
+ fc_port_t *fcport = NULL;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->rport->number == tgt_num)
+ return fcport->rport;
+ }
+ return NULL;
+}
+
+static int
+qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_tgt_stats_param *req_data;
+ u32 req_data_len;
+ int ret = 0;
+ u64 response_len = 0;
+ struct ql_vnd_tgt_stats_resp *data = NULL;
+ struct fc_rport *rport = NULL;
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
+ return -EIO;
+ }
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt,
+ req_data, req_data_len);
+
+ response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
+ sizeof(struct ql_vnd_stat_entry);
+
+ /* structure + size for one entry */
+ data = kzalloc(response_len, GFP_KERNEL);
+ if (!data) {
+ kfree(req_data);
+ return -ENOMEM;
+ }
+
+ if (response_len > bsg_job->reply_payload.payload_len) {
+ data->status = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, data,
+ sizeof(struct ql_vnd_tgt_stats_resp));
+
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ goto tgt_stat_out;
+ }
+
+ rport = qla2xxx_find_rport(vha, req_data->tgt_id);
+ if (!rport) {
+ ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
+ ret = EXT_STATUS_INVALID_PARAM;
+ data->status = EXT_STATUS_INVALID_PARAM;
+ goto reply;
+ }
+
+ ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
+ rport, (void *)data, response_len);
+
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+reply:
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, data,
+ response_len);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+tgt_stat_out:
+ kfree(data);
+ kfree(req_data);
+
+ return ret;
+}
+
+static int
+qla2x00_manage_host_port(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_mng_host_port_param *req_data;
+ struct ql_vnd_mng_host_port_resp rsp_data;
+ u32 req_data_len;
+ int ret = 0;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ switch (req_data->action) {
+ case QLA_ENABLE:
+ ret = qla2xxx_enable_port(vha->host);
+ break;
+ case QLA_DISABLE:
+ ret = qla2xxx_disable_port(vha->host);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
+ ret = -EIO;
+ break;
+ }
+
+ kfree(req_data);
+
+ /* Prepare response */
+ rsp_data.status = ret;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &rsp_data,
+ sizeof(struct ql_vnd_mng_host_port_resp));
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
+
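
The vendor commands added above (host statistics management and retrieval, target statistics, host port enable/disable) all follow one skeleton: validate the request payload length, copy the request out of the scatter-gather list, act on the decoded action, then copy a status structure back and complete the job. A hedged composite of that skeleton; struct my_req, struct my_rsp and my_do_action() are stand-ins, not driver API:

#include <linux/bsg-lib.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_fc.h>

struct my_req { u32 action; };
struct my_rsp { u32 status; };

static int my_do_action(u32 action)
{
	return action ? 0 : -EINVAL;	/* placeholder worker */
}

static int my_vendor_cmd(struct bsg_job *job)
{
	struct fc_bsg_reply *reply = job->reply;
	struct my_req req;
	struct my_rsp rsp = {};
	int ret;

	if (job->request_payload.payload_len != sizeof(req))
		return -EIO;

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, &req, sizeof(req));

	ret = my_do_action(req.action);

	rsp.status = ret;
	reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    &rsp, sizeof(rsp));
	reply->result = DID_OK;
	bsg_job_done(job, reply->result, reply->reply_payload_rcv_len);
	return ret;
}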
+static int
+qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
struct fc_bsg_request *bsg_request = bsg_job->request;
+ ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
+ __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
+
switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2503,9 +2947,30 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_DPORT_DIAGNOSTICS_V2:
+ return qla2x00_do_dport_diagnostics_v2(bsg_job);
+
+ case QL_VND_EDIF_MGMT:
+ return qla_edif_app_mgmt(bsg_job);
+
case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
return qla2x00_get_flash_image_status(bsg_job);
+ case QL_VND_MANAGE_HOST_STATS:
+ return qla2x00_manage_host_stats(bsg_job);
+
+ case QL_VND_GET_HOST_STATS:
+ return qla2x00_get_host_stats(bsg_job);
+
+ case QL_VND_GET_TGT_STATS:
+ return qla2x00_get_tgt_stats(bsg_job);
+
+ case QL_VND_MANAGE_HOST_PORT:
+ return qla2x00_manage_host_port(bsg_job);
+
+ case QL_VND_MBX_PASSTHRU:
+ return qla2x00_mailbox_passthru(bsg_job);
+
default:
return -ENOSYS;
}
@@ -2533,15 +2998,34 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
vha = shost_priv(host);
}
+ /* Disable port will bring down the chip, allow enable command */
+ if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
+ bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
+ goto skip_chip_chk;
+
+ if (vha->hw->flags.port_isolated) {
+ bsg_reply->result = DID_ERROR;
+ /* operation not permitted */
+ return -EPERM;
+ }
+
if (qla2x00_chip_is_down(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_request->msgcode);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
return -EBUSY;
}
- ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
+ if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -EIO;
+ }
+
+skip_chip_chk:
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+ "Entered %s msgcode=0x%x. bsg ptr %px\n",
+ __func__, bsg_request->msgcode, bsg_job);
switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
@@ -2552,7 +3036,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
ret = qla2x00_process_ct(bsg_job);
break;
case FC_BSG_HST_VENDOR:
- ret = qla2x00_process_vendor_specific(bsg_job);
+ ret = qla2x00_process_vendor_specific(vha, bsg_job);
break;
case FC_BSG_HST_ADD_RPORT:
case FC_BSG_HST_DEL_RPORT:
@@ -2561,6 +3045,10 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
break;
}
+
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+ "%s done with return %x\n", __func__, ret);
+
return ret;
}
@@ -2575,6 +3063,15 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
unsigned long flags;
struct req_que *req;
+ ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+ __func__, bsg_job);
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x9007,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
+
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2584,27 +3081,27 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
- if (sp) {
- if (((sp->type == SRB_CT_CMD) ||
- (sp->type == SRB_ELS_CMD_HST) ||
- (sp->type == SRB_FXIOCB_BCMD))
- && (sp->u.bsg_job == bsg_job)) {
- req->outstanding_cmds[cnt] = NULL;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_log(ql_log_warn, vha, 0x7089,
- "mbx abort_command "
- "failed.\n");
- bsg_reply->result = -EIO;
- } else {
- ql_dbg(ql_dbg_user, vha, 0x708a,
- "mbx abort_command "
- "success.\n");
- bsg_reply->result = 0;
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
- goto done;
+ if (sp &&
+ (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_ELS_CMD_HST ||
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
+ sp->type == SRB_FXIOCB_BCMD) &&
+ sp->u.bsg_job == bsg_job) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
+ ql_log(ql_log_warn, vha, 0x7089,
+ "mbx abort_command failed.\n");
+ bsg_reply->result = -EIO;
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "mbx abort_command success.\n");
+ bsg_reply->result = 0;
}
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ goto done;
+
}
}
}
@@ -2615,6 +3112,52 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return 0;
}
+
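
The cleanup above also swaps the old direct sp->free(sp) for kref_put(&sp->cmd_kref, qla2x00_sp_release), so the SRB is destroyed only when the last holder drops its reference. A minimal sketch of that kref idiom, using hypothetical names (my_srb, my_srb_release are illustrations, not driver symbols):

#include <linux/kref.h>
#include <linux/slab.h>

struct my_srb {
	struct kref cmd_kref;
	/* ... payload ... */
};

static void my_srb_release(struct kref *kref)
{
	struct my_srb *sp = container_of(kref, struct my_srb, cmd_kref);

	kfree(sp);	/* runs only when the last reference drops */
}

static struct my_srb *my_srb_alloc(void)
{
	struct my_srb *sp = kzalloc(sizeof(*sp), GFP_KERNEL);

	if (sp)
		kref_init(&sp->cmd_kref);	/* refcount = 1: the "INIT" reference */
	return sp;
}

/* every path that is done with sp drops its reference the same way */
static void my_srb_put(struct my_srb *sp)
{
	kref_put(&sp->cmd_kref, my_srb_release);
}
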
+int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ int ret = -EINVAL;
+ int ptsize = sizeof(struct qla_mbx_passthru);
+ struct qla_mbx_passthru *req_data = NULL;
+ uint32_t req_data_len;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+ if (req_data_len != ptsize) {
+ ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
+ return -EIO;
+ }
+ req_data = kzalloc(ptsize, GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0xf0a4,
+ "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer into req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, ptsize);
+ ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
+
+ /* Copy req_data back into the reply buffer */
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, req_data, ptsize);
+
+ bsg_reply->reply_payload_rcv_len = ptsize;
+ if (ret == QLA_SUCCESS)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ else
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
+
+ bsg_job->reply_len = sizeof(*bsg_job->reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ kfree(req_data);
+
+ return ret;
+}
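
qla2x00_mailbox_passthru() is reached from user space through the FC host's bsg node as an FC_BSG_HST_VENDOR request whose first vendor word is QL_VND_MBX_PASSTHRU, with the 196-byte struct qla_mbx_passthru carried as both the request and reply payload (the handler rejects any other payload length). A hedged sketch of such a submission; the bsg node path and the sg_io_v4 plumbing follow generic bsg conventions and are assumptions here, not something this patch defines:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define QL_VND_MBX_PASSTHRU	0x2B	/* from qla_bsg.h */

/* pt points at a struct qla_mbx_passthru image; ptlen must be 196 */
static int qla_send_mbx_passthru(const char *node, void *pt, size_t ptlen)
{
	unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)] = {};
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)rqst;
	struct fc_bsg_reply bsg_rep = {};
	struct sg_io_v4 io = {};
	int fd, ret;

	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_MBX_PASSTHRU;

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)rqst;
	io.request_len = sizeof(rqst);
	io.response = (uintptr_t)&bsg_rep;
	io.max_response_len = sizeof(bsg_rep);
	io.dout_xferp = (uintptr_t)pt;	/* mbx_in[] travels down ... */
	io.dout_xfer_len = ptlen;
	io.din_xferp = (uintptr_t)pt;	/* ... mbx_out[] comes back */
	io.din_xfer_len = ptlen;

	fd = open(node, O_RDWR);	/* e.g. "/dev/bsg/fc_host0" (assumed) */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	return ret;
}
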
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 7594fad7b5b5..d38dab0a07e8 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_BSG_H
#define __QLA_BSG_H
@@ -32,6 +31,13 @@
#define QL_VND_DPORT_DIAGNOSTICS 0x19
#define QL_VND_GET_PRIV_STATS_EX 0x1A
#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E
+#define QL_VND_EDIF_MGMT 0X1F
+#define QL_VND_MANAGE_HOST_STATS 0x23
+#define QL_VND_GET_HOST_STATS 0x24
+#define QL_VND_GET_TGT_STATS 0x25
+#define QL_VND_MANAGE_HOST_PORT 0x26
+#define QL_VND_MBX_PASSTHRU 0x2B
+#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -41,6 +47,7 @@
#define EXT_STATUS_DATA_OVERRUN 7
#define EXT_STATUS_DATA_UNDERRUN 8
#define EXT_STATUS_MAILBOX 11
+#define EXT_STATUS_BUFFER_TOO_SMALL 16
#define EXT_STATUS_NO_MEMORY 17
#define EXT_STATUS_DEVICE_OFFLINE 22
@@ -54,6 +61,9 @@
#define EXT_STATUS_TIMEOUT 30
#define EXT_STATUS_THREAD_FAILED 31
#define EXT_STATUS_DATA_CMP_FAILED 32
+#define EXT_STATUS_DPORT_DIAG_ERR 40
+#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41
+#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42
/* BSG definitions for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -151,7 +161,7 @@ struct qla84_msg_mgmt {
uint16_t rsrvd;
struct qla84_mgmt_param mgmtp;/* parameters for cmd */
uint32_t len; /* bytes in payload following this struct */
- uint8_t payload[0]; /* payload for cmd */
+ uint8_t payload[]; /* payload for cmd */
};
struct qla_bsg_a84_mgmt {
@@ -182,6 +192,12 @@ struct qla_port_param {
uint16_t speed;
} __attribute__ ((packed));
+struct qla_mbx_passthru {
+ uint16_t reserved1[2];
+ uint16_t mbx_in[32];
+ uint16_t mbx_out[32];
+ uint32_t reserved2[16];
+} __packed;
/* FRU VPD */
@@ -204,7 +220,7 @@ struct qla_image_version {
struct qla_image_version_list {
uint32_t count;
- struct qla_image_version version[0];
+ struct qla_image_version version[];
} __packed;
struct qla_status_reg {
@@ -276,6 +292,17 @@ struct qla_dport_diag {
uint8_t unused[62];
} __packed;
+#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */
+#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */
+#define QLA_START_DPORT_TEST_V2 2 /* Start test */
+struct qla_dport_diag_v2 {
+ uint16_t options;
+ uint16_t mbx1;
+ uint16_t mbx2;
+ uint8_t unused[58];
+ uint8_t buf[1024]; /* Test Result */
+} __packed;
+
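
Both additions are fixed-size structures exchanged with management tools, so the packed layout is load-bearing: struct qla_mbx_passthru is 4 + 64 + 64 + 64 = 196 bytes (and qla2x00_mailbox_passthru rejects any other request length), while struct qla_dport_diag_v2 is 2 + 2 + 2 + 58 + 1024 = 1088 bytes. A compile-time guard one could keep next to out-of-tree copies of these definitions — the assertions are illustrative, not part of the patch:

#include <linux/build_bug.h>

static_assert(sizeof(struct qla_mbx_passthru) == 196,
	      "qla_mbx_passthru is a fixed 196-byte BSG payload");
static_assert(sizeof(struct qla_dport_diag_v2) == 1088,
	      "qla_dport_diag_v2 is a fixed 1088-byte BSG payload");
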
/* D_Port options */
#define QLA_DPORT_RESULT 0x0
#define QLA_DPORT_START 0x2
@@ -287,7 +314,10 @@ struct qla_active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
- uint8_t reserved[32];
+ uint8_t nvme_params;
+ uint8_t reserved[31];
} __packed;
+#include "qla_edif_bsg.h"
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 88a56e8480f7..d7e8454304ce 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
@@ -11,14 +10,11 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0193 | 0x0146 |
- * | | | 0x015b-0x0160 |
- * | | | 0x016e |
- * | Mailbox commands | 0x1206 | 0x11a2-0x11ff |
- * | Device Discovery | 0x2134 | 0x210e-0x2116 |
- * | | | 0x211a |
+ * | Module Init and Probe | 0x0199 | |
+ * | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
+ * | Device Discovery | 0x2134 | 0x210e-0x2115 |
* | | | 0x211c-0x2128 |
- * | | | 0x212a-0x2134 |
+ * | | | 0x212c-0x2134 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
@@ -26,11 +22,7 @@
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
- * | Async Events | 0x5090 | 0x502b-0x502f |
- * | | | 0x5047 |
- * | | | 0x5084,0x5075 |
- * | | | 0x503d,0x5044 |
- * | | | 0x505f |
+ * | Async Events | 0x509c | |
* | Timer Routines | 0x6012 | |
* | User Space Interactions | 0x70e3 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
@@ -73,6 +65,8 @@
#include "qla_def.h"
#include <linux/delay.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/qla.h>
static uint32_t ql_dbg_offset = 0x800;
@@ -113,37 +107,45 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
int rval = QLA_FUNCTION_FAILED;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+
for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
- WRT_REG_WORD(&reg->mailbox9, 0);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox9, 0);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
continue;
@@ -153,15 +155,15 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat != 0x10 && stat != 0x11) {
/* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
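
The qla_pci_disconnected() checks threaded through the dump loop bail out before each mailbox handshake instead of spinning on a dead device. This patch only adds the call sites; a plausible shape for such a helper — reads from a surprise-removed PCI function return all ones, which a live host_status register can never be — is sketched below as an assumption, not the driver's actual body:

static inline bool qla_pci_disconnected(scsi_qla_host_t *vha,
					struct device_reg_24xx __iomem *reg)
{
	/* reads from a surprise-removed PCI function return all ones */
	if (rd_reg_dword(&reg->host_status) == 0xffffffff) {
		qla_pci_set_eeh_busy(vha);	/* same flagging as the timeout path */
		return true;
	}
	return false;
}
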
@@ -187,41 +189,48 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
int
-qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
- uint32_t ram_dwords, void **nxt)
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
+ uint32_t ram_dwords, void **nxt)
{
int rval = QLA_FUNCTION_FAILED;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+
for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox10, 0);
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
continue;
@@ -229,15 +238,15 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat &= 0xff;
if (stat != 0x1 && stat != 0x2 &&
stat != 0x10 && stat != 0x11) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
@@ -252,9 +261,9 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
return rval;
}
for (j = 0; j < dwords; j++) {
- ram[i + j] =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
- chunk[j] : swab32(chunk[j]);
+ ram[i + j] = (__force __be32)
+ ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]));
}
}
@@ -263,8 +272,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
static int
-qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
- uint32_t cram_size, void **nxt)
+qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
+ uint32_t cram_size, void **nxt)
{
int rval;
@@ -284,16 +293,16 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
return rval;
}
-static uint32_t *
+static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
- uint32_t count, uint32_t *buf)
+ uint32_t count, __be32 *buf)
{
- uint32_t __iomem *dmp_reg;
+ __le32 __iomem *dmp_reg;
- WRT_REG_DWORD(&reg->iobase_addr, iobase);
+ wrt_reg_dword(&reg->iobase_addr, iobase);
dmp_reg = &reg->iobase_window;
for ( ; count--; dmp_reg++)
- *buf++ = htonl(RD_REG_DWORD(dmp_reg));
+ *buf++ = htonl(rd_reg_dword(dmp_reg));
return buf;
}
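
The mechanical WRT_REG_*/RD_REG_* to wrt_reg_*/rd_reg_* rename visible throughout this file is not only cosmetic: the new helpers take sparse-annotated __le16/__le32 __iomem pointers, so feeding a big-endian buffer pointer into an MMIO access now warns at sparse time. One plausible shape for the helpers (the real definitions live in the driver headers; this sketch assumes readw()/writel() already perform the little-endian conversion, as they do on Linux):

static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
{
	return readw(addr);	/* readw returns CPU-endian data */
}

static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
{
	return readl(addr);
}

static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
{
	writew(data, addr);	/* writew stores little-endian on the bus */
}

static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
{
	writel(data, addr);
}
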
@@ -301,11 +310,11 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
/* A 100 usec delay is sufficient for the hardware to pause the RISC */
udelay(100);
- if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+ if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}
@@ -322,17 +331,17 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
* Driver can proceed with the reset sequence after waiting
* for a timeout period.
*/
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
@@ -340,19 +349,19 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
/* Wait for soft-reset to complete. */
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr); /* PCI Posting. */
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(10);
@@ -366,7 +375,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
}
static int
-qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
uint32_t ram_words, void **nxt)
{
int rval;
@@ -374,7 +383,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
uint16_t mb0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint16_t *dump = (uint16_t *)ha->gid_list;
+ __le16 *dump = (__force __le16 *)ha->gid_list;
rval = QLA_SUCCESS;
mb0 = 0;
@@ -397,11 +406,11 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
WRT_MAILBOX_REG(ha, reg, 4, words);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_INT) {
stat &= 0xff;
@@ -412,10 +421,10 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
} else if (stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
@@ -423,15 +432,15 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
/* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -439,7 +448,8 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
rval = mb0 & MBS_MASK;
for (idx = 0; idx < words; idx++)
- ram[cnt + idx] = swab16(dump[idx]);
+ ram[cnt + idx] =
+ cpu_to_be16(le16_to_cpu(dump[idx]));
} else {
rval = QLA_FUNCTION_FAILED;
}
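
The swab16() to cpu_to_be16(le16_to_cpu(...)) rewrite above is byte-for-byte identical — converting little-endian to big-endian is an unconditional swap on any host — but it threads the __le16/__be16 types through for sparse, documenting that the DMA chunk is little-endian while the dump buffer is big-endian. A quick userspace check of that equivalence, using the glibc endian/byteswap helpers in place of the kernel macros:

#include <assert.h>
#include <byteswap.h>	/* bswap_16 */
#include <endian.h>	/* le16toh, htobe16 */
#include <stdint.h>

int main(void)
{
	/* LE->BE is a byte swap regardless of host order:
	 * on LE, le16toh() is a no-op and htobe16() swaps;
	 * on BE, le16toh() swaps and htobe16() is a no-op. */
	for (uint32_t v = 0; v <= 0xffff; v++)
		assert(htobe16(le16toh((uint16_t)v)) == bswap_16((uint16_t)v));
	return 0;
}
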
@@ -451,12 +461,12 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
- uint16_t *buf)
+ __be16 *buf)
{
- uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
+ __le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
for ( ; count--; dmp_reg++)
- *buf++ = htons(RD_REG_WORD(dmp_reg));
+ *buf++ = htons(rd_reg_word(dmp_reg));
}
static inline void *
@@ -470,10 +480,10 @@ qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
}
static inline void *
-qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt;
- uint32_t *iter_reg;
+ __be32 *iter_reg;
struct qla2xxx_fce_chain *fcec = ptr;
if (!ha->fce)
@@ -497,7 +507,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -515,11 +525,11 @@ qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
-qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -537,12 +547,12 @@ qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
- uint32_t **last_chain)
+ __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -589,7 +599,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
}
static inline void *
-qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -660,7 +670,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt, que_idx;
uint8_t que_cnt;
@@ -683,13 +693,13 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
reg = ISP_QUE_REG(ha, cnt);
que_idx = cnt * 4;
mq->qregs[que_idx] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
mq->qregs[que_idx+1] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
mq->qregs[que_idx+2] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
mq->qregs[que_idx+3] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
}
return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -704,45 +714,47 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
ql_log(ql_log_warn, vha, 0xd000,
"Failed to dump firmware (%x), dump status flags (0x%lx).\n",
rval, ha->fw_dump_cap_flags);
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
} else {
ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
}
+void qla2xxx_dump_fw(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ vha->hw->isp_ops->fw_dump(vha);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
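
With hardware_locked gone from every ->fw_dump() implementation, the locking contract becomes explicit: each dump routine now opens with lockdep_assert_held(&ha->hardware_lock), and unlocked callers go through the new qla2xxx_dump_fw() wrapper. A sketch of the call-site transition, assuming a caller with vha and ha in scope:

/* before: the dump routine conditionally took hardware_lock itself */
ha->isp_ops->fw_dump(vha, 0 /* hardware_locked */);

/* after: unlocked callers use the wrapper ... */
qla2xxx_dump_fw(vha);

/* ... and callers already under hardware_lock invoke the op directly,
 * with lockdep_assert_held() catching any misuse on lockdep kernels */
ha->isp_ops->fw_dump(vha);
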
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2300_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2300_fw_dump *fw;
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd002,
"No buffer available for dump.\n");
- goto qla2300_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -750,19 +762,19 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2300_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp23;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2300(ha)) {
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -770,74 +782,74 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
rval = QLA_FUNCTION_TIMEOUT;
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.req_q_in;
- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
cnt++, dmp_reg++)
- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
cnt++, dmp_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x40);
+ wrt_reg_word(&reg->ctrl_status, 0x40);
qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x50);
+ wrt_reg_word(&reg->ctrl_status, 0x50);
qla2xxx_read_window(reg, 48, fw->dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2800);
+ wrt_reg_word(&reg->pcr, 0x2800);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2A00);
+ wrt_reg_word(&reg->pcr, 0x2A00);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2C00);
+ wrt_reg_word(&reg->pcr, 0x2C00);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2E00);
+ wrt_reg_word(&reg->pcr, 0x2E00);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset RISC. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
@@ -858,12 +870,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Get RISC SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
- sizeof(fw->risc_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->risc_ram), &nxt);
/* Get stack SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
- sizeof(fw->stack_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->stack_ram), &nxt);
/* Get data SRAM. */
if (rval == QLA_SUCCESS)
@@ -874,48 +886,31 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, nxt);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2300_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
/**
* qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2100_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt, timer;
- uint16_t risc_address;
- uint16_t mb0, mb2;
+ uint16_t risc_address = 0;
+ uint16_t mb0 = 0, mb2 = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2100_fw_dump *fw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = 0;
- mb0 = mb2 = 0;
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd004,
"No buffer available for dump.\n");
- goto qla2100_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -923,17 +918,17 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2100_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp21;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -942,61 +937,61 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2100.mailbox0;
for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
if (cnt == 8)
dmp_reg = &reg->u_end.isp2200.mailbox8;
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
}
dmp_reg = &reg->u.isp2100.unused_2[0];
- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
+ fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2100);
+ wrt_reg_word(&reg->pcr, 0x2100);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2300);
+ wrt_reg_word(&reg->pcr, 0x2300);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2500);
+ wrt_reg_word(&reg->pcr, 0x2500);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2700);
+ wrt_reg_word(&reg->pcr, 0x2700);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset the ISP. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
}
for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
@@ -1009,11 +1004,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Pause RISC. */
if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
- (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
+ (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -1023,13 +1018,13 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS) {
/* Set memory configuration and timing. */
if (IS_QLA2100(ha))
- WRT_REG_WORD(&reg->mctr, 0xf1);
+ wrt_reg_word(&reg->mctr, 0xf1);
else
- WRT_REG_WORD(&reg->mctr, 0xf2);
- RD_REG_WORD(&reg->mctr); /* PCI Posting. */
+ wrt_reg_word(&reg->mctr, 0xf2);
+ rd_reg_word(&reg->mctr); /* PCI Posting. */
/* Release RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
}
}
@@ -1039,29 +1034,29 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
}
- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
cnt++, risc_address++) {
WRT_MAILBOX_REG(ha, reg, 1, risc_address);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer != 0; timer--) {
/* Check for pending interrupts. */
- if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+ if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
mb0 = RD_MAILBOX_REG(ha, reg, 0);
mb2 = RD_MAILBOX_REG(ha, reg, 2);
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -1075,51 +1070,38 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS)
- qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
+ qla2xxx_copy_queues(ha, &fw->queue_dump[0]);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2100_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla24xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla24xx_fw_dump *fw;
void *nxt;
void *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ lockdep_assert_held(&ha->hardware_lock);
+
if (IS_P3P_TYPE(ha))
return;
- flags = 0;
ha->fw_dump_cap_flags = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
-
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd006,
"No buffer available for dump.\n");
- goto qla24xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1127,13 +1109,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla24xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1143,41 +1125,41 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1216,19 +1198,19 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1337,44 +1319,31 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla24xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla25xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla25xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd008,
"No buffer available for dump.\n");
- goto qla25xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1382,14 +1351,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla25xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1403,73 +1372,73 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1533,19 +1502,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1663,44 +1632,31 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla25xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla81xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla81xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00a,
"No buffer available for dump.\n");
- goto qla81xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1708,12 +1664,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla81xx_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp81;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1727,73 +1683,73 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1857,19 +1813,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1991,57 +1947,44 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla81xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla83xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla83xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00c,
"No buffer available for dump!!!\n");
- goto qla83xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xd00d,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
- goto qla83xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -2049,24 +1992,24 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
*/
qla24xx_pause_risc(reg, ha);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ wrt_reg_dword(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
dmp_reg = &reg->unused_4_1[0];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ wrt_reg_dword(&reg->iobase_addr, 0x6010);
dmp_reg = &reg->unused_4_1[2];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
/* select PCR and disable ecc checking and correction */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
@@ -2075,73 +2018,73 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7040, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -2237,19 +2180,19 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -2455,16 +2398,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
udelay(5);
if (!cnt) {
@@ -2505,20 +2448,32 @@ copy_queue:
qla83xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla83xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
+/* Write the debug message prefix into @pbuf. */
+static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev,
+ const scsi_qla_host_t *vha, uint msg_id)
+{
+ if (vha) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+
+ /* <module-name> [<dev-name>]-<msg-id>:<host>: */
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), msg_id, vha->host_no);
+ } else if (pdev) {
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&pdev->dev), msg_id);
+ } else {
+ /* <module-name> [<dev-name>]-<msg-id>: : */
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
+ "0000:00:00.0", msg_id);
+ }
+}
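The three branches above correspond to the prefix layouts sketched in their comments. A minimal usage sketch, assuming vha is bound to PCI function 0000:03:00.0 with host number 0 and message id 0x5038 (the values are illustrative; QL_MSGHDR is "qla2xxx"):

        char pbuf[64];

        ql_dbg_prefix(pbuf, sizeof(pbuf), NULL, vha, 0x5038);
        /* pbuf now holds "qla2xxx [0000:03:00.0]-5038:0: " */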
+
/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
@@ -2537,25 +2492,22 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
+ char pbuf[64];
+
+ ql_ktrace(1, level, pbuf, NULL, vha, id, fmt);
if (!ql_mask_match(level))
return;
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
+
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
- if (vha != NULL) {
- const struct pci_dev *pdev = vha->hw->pdev;
- /* <module-name> <pci-name> <msg-id>:<host> Message */
- pr_warn("%s [%s]-%04x:%ld: %pV",
- QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
- vha->host_no, &vaf);
- } else {
- pr_warn("%s [%s]-%04x: : %pV",
- QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
- }
+ pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
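Call sites are untouched by this refactor: the prefix is computed once, by ql_ktrace() when the trace event is enabled or by ql_dbg_prefix() otherwise, then printed ahead of the formatted body. A typical invocation, with illustrative level, id and arguments:

        ql_dbg(ql_dbg_io, vha, 0x3000, "aborting sp %p handle %u\n", sp, sp->handle);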
@@ -2580,9 +2532,13 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
+ char pbuf[128];
if (pdev == NULL)
return;
+
+ ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2591,9 +2547,10 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &va;
- /* <module-name> <dev-name>:<msg-id> Message */
- pr_warn("%s [%s]-%04x: : %pV",
- QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL,
+ id + ql_dbg_offset);
+ pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
}
@@ -2621,16 +2578,10 @@ ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- if (vha != NULL) {
- const struct pci_dev *pdev = vha->hw->pdev;
- /* <module-name> <msg-id>:<host> Message */
- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
- QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
- } else {
- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
- QL_MSGHDR, "0000:00:00.0", id);
- }
- pbuf[sizeof(pbuf) - 1] = 0;
+ ql_ktrace(0, level, pbuf, NULL, vha, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
va_start(va, fmt);
@@ -2681,10 +2632,10 @@ ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- /* <module-name> <dev-name>:<msg-id> Message */
- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
- QL_MSGHDR, dev_name(&(pdev->dev)), id);
- pbuf[sizeof(pbuf) - 1] = 0;
+ ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id);
va_start(va, fmt);
@@ -2717,7 +2668,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint16_t __iomem *mbx_reg;
+ __le16 __iomem *mbx_reg;
if (!ql_mask_match(level))
return;
@@ -2732,10 +2683,9 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
ql_dbg(level, vha, id, "Mailbox registers:\n");
for (i = 0; i < 6; i++, mbx_reg++)
ql_dbg(level, vha, id,
- "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
+ "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
}
-
void
ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
uint size)
@@ -2780,16 +2730,11 @@ ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
if (level > ql_errlev)
return;
- if (qpair != NULL) {
- const struct pci_dev *pdev = qpair->pdev;
- /* <module-name> <msg-id>:<host> Message */
- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
- QL_MSGHDR, dev_name(&(pdev->dev)), id);
- } else {
- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
- QL_MSGHDR, "0000:00:00.0", id);
- }
- pbuf[sizeof(pbuf) - 1] = 0;
+ ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id);
va_start(va, fmt);
@@ -2833,6 +2778,9 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
{
va_list va;
struct va_format vaf;
+ char pbuf[128];
+
+ ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
if (!ql_mask_match(level))
return;
@@ -2842,16 +2790,11 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
vaf.fmt = fmt;
vaf.va = &va;
- if (qpair != NULL) {
- const struct pci_dev *pdev = qpair->pdev;
- /* <module-name> <pci-name> <msg-id>:<host> Message */
- pr_warn("%s [%s]-%04x: %pV",
- QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
- &vaf);
- } else {
- pr_warn("%s [%s]-%04x: : %pV",
- QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
- }
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id + ql_dbg_offset);
+
+ pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 433e95502808..70482b55d240 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
@@ -12,205 +11,206 @@
*/
struct qla2300_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t risc_host_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t resp_dma_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[64];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf800];
- uint16_t stack_ram[0x1000];
- uint16_t data_ram[1];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 risc_host_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 resp_dma_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[64];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf800];
+ __be16 stack_ram[0x1000];
+ __be16 data_ram[1];
};
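The retyping from uint16_t to __be16 documents what the capture code already does: each register is read in CPU order from a little-endian BAR and stored big-endian in the dump. The mailbox loop earlier in this patch shows the pattern:

        /* __le16 MMIO register -> cpu-order u16 -> __be16 dump word */
        fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));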
struct qla2100_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[16];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf000];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[16];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf000];
+ u8 queue_dump[];
};
struct qla24xx_fw_dump {
- uint32_t host_status;
- uint32_t host_reg[32];
- uint32_t shadow_reg[7];
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[16];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[16];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[112];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[176];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_reg[32];
+ __be32 shadow_reg[7];
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[16];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[16];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[112];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[176];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla25xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[192];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[192];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla81xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[224];
- uint32_t fb_hdw_reg[208];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[224];
+ __be32 fb_hdw_reg[208];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla83xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[48];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[256];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t xseq_2_reg[16];
- uint32_t rseq_gp_reg[256];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t rseq_3_reg[16];
- uint32_t aseq_gp_reg[256];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t aseq_3_reg[16];
- uint32_t cmd_dma_reg[64];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[256];
- uint32_t rq0_array_reg[256];
- uint32_t rq1_array_reg[256];
- uint32_t rp0_array_reg[256];
- uint32_t rp1_array_reg[256];
- uint32_t queue_control_reg[16];
- uint32_t fb_hdw_reg[432];
- uint32_t at0_array_reg[128];
- uint32_t code_ram[0x2400];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[48];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[256];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 xseq_2_reg[16];
+ __be32 rseq_gp_reg[256];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 rseq_3_reg[16];
+ __be32 aseq_gp_reg[256];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 aseq_3_reg[16];
+ __be32 cmd_dma_reg[64];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[256];
+ __be32 rq0_array_reg[256];
+ __be32 rq1_array_reg[256];
+ __be32 rp0_array_reg[256];
+ __be32 rp1_array_reg[256];
+ __be32 queue_control_reg[16];
+ __be32 fb_hdw_reg[432];
+ __be32 at0_array_reg[128];
+ __be32 code_ram[0x2400];
+ __be32 ext_mem[1];
};
#define EFT_NUM_BUFFERS 4
@@ -223,44 +223,45 @@ struct qla83xx_fw_dump {
#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
struct qla2xxx_fce_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- uint32_t addr_l;
- uint32_t addr_h;
- uint32_t eregs[8];
+ __be32 size;
+ __be32 addr_l;
+ __be32 addr_h;
+ __be32 eregs[8];
};
/* used by exchange off load and extended login offload */
struct qla2xxx_offld_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- u64 addr;
+ __be32 size;
+ __be32 reserved;
+ __be64 addr;
};
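The new reserved word makes the old implicit padding explicit: with four 32-bit fields ahead of it, addr stays at byte offset 16, where the compiler had already placed the naturally aligned 64-bit member. A compile-time check of that assumption might look like this sketch (it would live inside some function):

        BUILD_BUG_ON(offsetof(struct qla2xxx_offld_chain, addr) != 16);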
struct qla2xxx_mq_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t count;
- uint32_t qregs[4 * QLA_MQ_SIZE];
+ __be32 count;
+ __be32 qregs[4 * QLA_MQ_SIZE];
};
struct qla2xxx_mqueue_header {
- uint32_t queue;
+ __be32 queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
#define TYPE_ATIO_QUEUE 0x3
- uint32_t number;
- uint32_t size;
+ __be32 number;
+ __be32 size;
};
struct qla2xxx_mqueue_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
};
#define DUMP_CHAIN_VARIANT 0x80000000
@@ -273,28 +274,28 @@ struct qla2xxx_mqueue_chain {
struct qla2xxx_fw_dump {
uint8_t signature[4];
- uint32_t version;
+ __be32 version;
- uint32_t fw_major_version;
- uint32_t fw_minor_version;
- uint32_t fw_subminor_version;
- uint32_t fw_attributes;
+ __be32 fw_major_version;
+ __be32 fw_minor_version;
+ __be32 fw_subminor_version;
+ __be32 fw_attributes;
- uint32_t vendor;
- uint32_t device;
- uint32_t subsystem_vendor;
- uint32_t subsystem_device;
+ __be32 vendor;
+ __be32 device;
+ __be32 subsystem_vendor;
+ __be32 subsystem_device;
- uint32_t fixed_size;
- uint32_t mem_size;
- uint32_t req_q_size;
- uint32_t rsp_q_size;
+ __be32 fixed_size;
+ __be32 mem_size;
+ __be32 req_q_size;
+ __be32 rsp_q_size;
- uint32_t eft_size;
- uint32_t eft_addr_l;
- uint32_t eft_addr_h;
+ __be32 eft_size;
+ __be32 eft_addr_l;
+ __be32 eft_addr_h;
- uint32_t header_size;
+ __be32 header_size;
union {
struct qla2100_fw_dump isp21;
@@ -307,7 +308,7 @@ struct qla2xxx_fw_dump {
};
#define QL_MSGHDR "qla2xxx"
-#define QL_DBG_DEFAULT1_MASK 0x1e400000
+#define QL_DBG_DEFAULT1_MASK 0x1e600000
#define ql_log_fatal 0 /* display fatal errors */
#define ql_log_warn 1 /* display critical errors */
@@ -366,10 +367,11 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
+#define ql_dbg_edif 0x00000400 /* edif and purex debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
-extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *,
uint32_t, void **);
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
struct qla_hw_data *);
@@ -378,5 +380,51 @@ extern int qla24xx_soft_reset(struct qla_hw_data *);
static inline int
ql_mask_match(uint level)
{
- return (level & ql2xextended_error_logging) == level;
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+
+ return level && ((level & ql2xextended_error_logging) == level);
}
+
+static inline int
+ql_mask_match_ext(uint level, int *log_tunable)
+{
+ if (*log_tunable == 1)
+ *log_tunable = QL_DBG_DEFAULT1_MASK;
+
+ return (level & *log_tunable) == level;
+}
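Both helpers treat the tunable value 1 as shorthand for the default mask and report a match only when every bit of the requested level is enabled. A worked example against the new default mask (the levels are illustrative):

        /* ql2xextended_error_logging == 0x1e600000 after expansion */
        ql_mask_match(0x02000000);      /* true: the bit is set in the mask */
        ql_mask_match(0x01000000);      /* false: the bit is clear */
        ql_mask_match(0x03000000);      /* false: one of its two bits is clear */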
+
+/* Assumes a local variable pbuf is in scope. */
+#define ql_ktrace(dbg_msg, level, pbuf, pdev, vha, id, fmt) do { \
+ struct va_format _vaf; \
+ va_list _va; \
+ u32 dbg_off = dbg_msg ? ql_dbg_offset : 0; \
+ \
+ pbuf[0] = 0; \
+ if (!trace_ql_dbg_log_enabled()) \
+ break; \
+ \
+ if (dbg_msg && !ql_mask_match_ext(level, \
+ &ql2xextended_error_logging_ktrace)) \
+ break; \
+ \
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, vha, id + dbg_off); \
+ \
+ va_start(_va, fmt); \
+ _vaf.fmt = fmt; \
+ _vaf.va = &_va; \
+ \
+ trace_ql_dbg_log(pbuf, &_vaf); \
+ \
+ va_end(_va); \
+} while (0)
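Because the macro body is wrapped in do { } while (0), the break statements act as an early exit without a goto label, and the construct still behaves as a single statement after an if. A minimal sketch of the same idiom (example_log() is hypothetical):

        #define example_log(cond, fmt, ...) do {                \
                if (!(cond))                                    \
                        break;  /* exits the do/while block */  \
                pr_info(fmt, ##__VA_ARGS__);                    \
        } while (0)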
+
+#define QLA_ENABLE_KERNEL_TRACING
+
+#ifdef QLA_ENABLE_KERNEL_TRACING
+#define QLA_TRACE_ENABLE(_tr) \
+ trace_array_set_clr_event(_tr, "qla", NULL, true)
+#else /* QLA_ENABLE_KERNEL_TRACING */
+#define QLA_TRACE_ENABLE(_tr)
+#endif /* QLA_ENABLE_KERNEL_TRACING */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ed32e9715794..802eec6407d9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_DEF_H
#define __QLA_DEF_H
@@ -34,6 +33,13 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#define QLA_DFS_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *dfs_##_debugfs_file_name
+#define QLA_DFS_ROOT_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *qla_dfs_##_debugfs_file_name
+
/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
typedef struct {
uint8_t domain;
@@ -48,6 +54,28 @@ typedef struct {
uint8_t domain;
} le_id_t;
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
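The #ifdef ladder arranges the bytes so that the 24-bit bitfield reads the same D_ID value on either host byte order. Illustratively:

        port_id_t id;

        id.b.domain = 0xab;
        id.b.area   = 0xcd;
        id.b.al_pa  = 0xef;
        /* id.b24 == 0xabcdef on both big- and little-endian hosts */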
+
#include "qla_bsg.h"
#include "qla_dsd.h"
#include "qla_nx.h"
@@ -55,7 +83,7 @@ typedef struct {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "QLogic Corporation"
+#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -119,21 +147,59 @@ typedef struct {
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
-#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
+static inline uint32_t make_handle(uint16_t x, uint16_t y)
+{
+ return ((uint32_t)x << 16) | y;
+}
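Replacing the MAKE_HANDLE() macro with an inline function gives both halves real 16-bit types and a defined promotion. The composition itself is unchanged (values illustrative):

        u32 handle = make_handle(0x0001, 0x002a);       /* == 0x0001002a */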
/*
* I/O register
*/
-#define RD_REG_BYTE(addr) readb(addr)
-#define RD_REG_WORD(addr) readw(addr)
-#define RD_REG_DWORD(addr) readl(addr)
-#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr)
-#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr)
-#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data) writeb(data, addr)
-#define WRT_REG_WORD(addr, data) writew(data, addr)
-#define WRT_REG_DWORD(addr, data) writel(data, addr)
+static inline u8 rd_reg_byte(const volatile u8 __iomem *addr)
+{
+ return readb(addr);
+}
+
+static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
+{
+ return readw(addr);
+}
+
+static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+
+static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+
+static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data)
+{
+ return writeb(data, addr);
+}
+
+static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
+{
+ return writew(data, addr);
+}
+
+static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
+{
+ return writel(data, addr);
+}
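Typing the accessors with __le16/__le32 pointers lets sparse flag byte-order and width mistakes that the old RD_REG_*()/WRT_REG_*() macros silently accepted. A sketch, assuming reg points at the ISP24xx register block:

        __le16 __iomem *mb = &reg->mailbox0;
        u16 v = rd_reg_word(mb);        /* little-endian register -> cpu-order u16 */

        wrt_reg_word(mb, v | BIT_0);    /* cpu-order value written back as __le16 */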
/*
* ISP83XX specific remote register addresses
@@ -280,6 +346,13 @@ struct name_list_extended {
u32 size;
u8 sent;
};
+
+struct els_reject {
+ struct fc_els_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
/*
* Timeout timer counts in seconds
*/
@@ -306,6 +379,8 @@ struct name_list_extended {
#define FW_MAX_EXCHANGES_CNT (32 * 1024)
#define REDUCE_EXCHANGES_CNT (8 * 1024)
+#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16)
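SET_DID_STATUS() stores a SCSI host byte in bits 16-23 of a result word, mirroring the DID_OK << 16 convention noted in the srb completion comment later in this header. A usage sketch (bsg_reply is illustrative):

        SET_DID_STATUS(bsg_reply->result, DID_OK);      /* result = DID_OK << 16 */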
+
struct req_que;
struct qla_tgt_sess;
@@ -331,32 +406,11 @@ struct srb_cmd {
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
#define SRB_WAKEUP_ON_COMP BIT_6
#define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */
+#define SRB_EDIF_CLEANUP_DELETE BIT_9
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
-
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
+#define ISP_REG16_DISCONNECT 0xFFFF
static inline le_id_t be_id_to_le(be_id_t id)
{
@@ -414,7 +468,7 @@ struct els_logo_payload {
struct els_plogi_payload {
uint8_t opcode;
uint8_t rsvd[3];
- uint8_t data[112];
+ __be32 data[112 / 4];
};
struct ct_arg {
@@ -443,6 +497,7 @@ struct srb_iocb {
#define SRB_LOGIN_SKIP_PRLI BIT_2
#define SRB_LOGIN_NVME_PRLI BIT_3
#define SRB_LOGIN_PRLI_ONLY BIT_4
+#define SRB_LOGIN_FCSP BIT_5
uint16_t data[2];
u32 iop[2];
} logio;
@@ -466,7 +521,7 @@ struct srb_iocb {
u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
- uint32_t fw_status[3];
+ __le32 fw_status[3];
__le16 comp_status;
__le16 len;
} els_plogi;
@@ -517,8 +572,8 @@ struct srb_iocb {
#define MAX_IOCB_MB_REG 28
#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
- __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
+ u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
struct completion comp;
@@ -529,7 +584,7 @@ struct srb_iocb {
} nack;
struct {
__le16 comp_status;
- uint16_t rsp_pyld_len;
+ __le16 rsp_pyld_len;
uint8_t aen_op;
void *desc;
@@ -547,6 +602,10 @@ struct srb_iocb {
u16 cmd;
u16 vp_index;
} ctrlvp;
+ struct {
+ struct edif_sa_ctl *sa_ctl;
+ struct qla_sa_update_frame sa_frame;
+ } sa_update;
} u;
struct timer_list timer;
@@ -577,6 +636,21 @@ struct srb_iocb {
#define SRB_PRLI_CMD 21
#define SRB_CTRL_VP 22
#define SRB_PRLO_CMD 23
+#define SRB_SA_UPDATE 25
+#define SRB_ELS_CMD_HST_NOLOGIN 26
+#define SRB_SA_REPLACE 27
+
+struct qla_els_pt_arg {
+ u8 els_opcode;
+ u8 vp_idx;
+ __le16 nport_handle;
+ u16 control_flags, ox_id;
+ __le32 rx_xchg_address;
+ port_id_t did, sid;
+ u32 tx_len, tx_byte_count, rx_len, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+};
enum {
TYPE_SRB,
@@ -584,6 +658,19 @@ enum {
TYPE_TGT_TMCMD, /* task management */
};
+struct iocb_resource {
+ u8 res_type;
+ u8 pad;
+ u16 iocb_cnt;
+};
+
+struct bsg_cmd {
+ struct bsg_job *bsg_job;
+ union {
+ struct qla_els_pt_arg els_arg;
+ } u;
+};
+
typedef struct srb {
/*
* Do not move cmd_type field, it needs to
@@ -591,15 +678,13 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
+ struct iocb_resource iores;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
- unsigned int abort:1;
- unsigned int aborted:1;
- unsigned int completed:1;
uint32_t handle;
uint16_t flags;
@@ -618,7 +703,21 @@ typedef struct srb {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
struct srb_cmd scmd;
+ struct bsg_cmd bsg_cmd;
} u;
+ struct {
+ bool remapped;
+ struct {
+ dma_addr_t dma;
+ void *buf;
+ uint len;
+ } req;
+ struct {
+ dma_addr_t dma;
+ void *buf;
+ uint len;
+ } rsp;
+ } remap;
/*
* Report completion status @res and call sp_put(@sp). @res is
* an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
@@ -632,6 +731,11 @@ typedef struct srb {
* code.
*/
void (*put_fn)(struct kref *kref);
+
+ /*
+ * Report completion for asynchronous commands.
+ */
+ void (*async_done)(struct srb *sp, int res);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
@@ -663,23 +767,23 @@ struct msg_echo_lb {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_2xxx {
- uint16_t flash_address; /* Flash BIOS address */
- uint16_t flash_data; /* Flash BIOS data */
- uint16_t unused_1[1]; /* Gap */
- uint16_t ctrl_status; /* Control/Status */
+ __le16 flash_address; /* Flash BIOS address */
+ __le16 flash_data; /* Flash BIOS data */
+ __le16 unused_1[1]; /* Gap */
+ __le16 ctrl_status; /* Control/Status */
#define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */
#define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */
#define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */
- uint16_t ictrl; /* Interrupt control */
+ __le16 ictrl; /* Interrupt control */
#define ICR_EN_INT BIT_15 /* ISP enable interrupts. */
#define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */
- uint16_t istatus; /* Interrupt status */
+ __le16 istatus; /* Interrupt status */
#define ISR_RISC_INT BIT_3 /* RISC interrupt */
- uint16_t semaphore; /* Semaphore */
- uint16_t nvram; /* NVRAM register. */
+ __le16 semaphore; /* Semaphore */
+ __le16 nvram; /* NVRAM register. */
#define NVR_DESELECT 0
#define NVR_BUSY BIT_15
#define NVR_WRT_ENABLE BIT_14 /* Write enable */
@@ -693,80 +797,80 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t unused_2[59]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 unused_2[59]; /* Gap */
} __attribute__((packed)) isp2100;
struct {
/* Request Queue */
- uint16_t req_q_in; /* In-Pointer */
- uint16_t req_q_out; /* Out-Pointer */
+ __le16 req_q_in; /* In-Pointer */
+ __le16 req_q_out; /* Out-Pointer */
/* Response Queue */
- uint16_t rsp_q_in; /* In-Pointer */
- uint16_t rsp_q_out; /* Out-Pointer */
+ __le16 rsp_q_in; /* In-Pointer */
+ __le16 rsp_q_out; /* Out-Pointer */
/* RISC to Host Status */
- uint32_t host_status;
+ __le32 host_status;
#define HSR_RISC_INT BIT_15 /* RISC interrupt */
#define HSR_RISC_PAUSED BIT_8 /* RISC Paused */
/* Host to Host Semaphore */
- uint16_t host_semaphore;
- uint16_t unused_3[17]; /* Gap */
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
- uint16_t fb_cmd;
- uint16_t unused_4[10]; /* Gap */
+ __le16 host_semaphore;
+ __le16 unused_3[17]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+ __le16 fb_cmd;
+ __le16 unused_4[10]; /* Gap */
} __attribute__((packed)) isp2300;
} u;
- uint16_t fpm_diag_config;
- uint16_t unused_5[0x4]; /* Gap */
- uint16_t risc_hw;
- uint16_t unused_5_1; /* Gap */
- uint16_t pcr; /* Processor Control Register. */
- uint16_t unused_6[0x5]; /* Gap */
- uint16_t mctr; /* Memory Configuration and Timing. */
- uint16_t unused_7[0x3]; /* Gap */
- uint16_t fb_cmd_2100; /* Unused on 23XX */
- uint16_t unused_8[0x3]; /* Gap */
- uint16_t hccr; /* Host command & control register. */
+ __le16 fpm_diag_config;
+ __le16 unused_5[0x4]; /* Gap */
+ __le16 risc_hw;
+ __le16 unused_5_1; /* Gap */
+ __le16 pcr; /* Processor Control Register. */
+ __le16 unused_6[0x5]; /* Gap */
+ __le16 mctr; /* Memory Configuration and Timing. */
+ __le16 unused_7[0x3]; /* Gap */
+ __le16 fb_cmd_2100; /* Unused on 23XX */
+ __le16 unused_8[0x3]; /* Gap */
+ __le16 hccr; /* Host command & control register. */
#define HCCR_HOST_INT BIT_7 /* Host interrupt bit */
#define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */
/* HCCR commands */
@@ -779,9 +883,9 @@ struct device_reg_2xxx {
#define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */
#define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */
- uint16_t unused_9[5]; /* Gap */
- uint16_t gpiod; /* GPIO Data register. */
- uint16_t gpioe; /* GPIO Enable register. */
+ __le16 unused_9[5]; /* Gap */
+ __le16 gpiod; /* GPIO Data register. */
+ __le16 gpioe; /* GPIO Enable register. */
#define GPIO_LED_MASK 0x00C0
#define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000
#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
@@ -793,95 +897,95 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t unused_10[8]; /* Gap */
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23; /* Also probe reg. */
+ __le16 unused_10[8]; /* Gap */
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23; /* Also probe reg. */
} __attribute__((packed)) isp2200;
} u_end;
};
struct device_reg_25xxmq {
- uint32_t req_q_in;
- uint32_t req_q_out;
- uint32_t rsp_q_in;
- uint32_t rsp_q_out;
- uint32_t atio_q_in;
- uint32_t atio_q_out;
+ __le32 req_q_in;
+ __le32 req_q_out;
+ __le32 rsp_q_in;
+ __le32 rsp_q_out;
+ __le32 atio_q_in;
+ __le32 atio_q_out;
};
struct device_reg_fx00 {
- uint32_t mailbox0; /* 00 */
- uint32_t mailbox1; /* 04 */
- uint32_t mailbox2; /* 08 */
- uint32_t mailbox3; /* 0C */
- uint32_t mailbox4; /* 10 */
- uint32_t mailbox5; /* 14 */
- uint32_t mailbox6; /* 18 */
- uint32_t mailbox7; /* 1C */
- uint32_t mailbox8; /* 20 */
- uint32_t mailbox9; /* 24 */
- uint32_t mailbox10; /* 28 */
- uint32_t mailbox11;
- uint32_t mailbox12;
- uint32_t mailbox13;
- uint32_t mailbox14;
- uint32_t mailbox15;
- uint32_t mailbox16;
- uint32_t mailbox17;
- uint32_t mailbox18;
- uint32_t mailbox19;
- uint32_t mailbox20;
- uint32_t mailbox21;
- uint32_t mailbox22;
- uint32_t mailbox23;
- uint32_t mailbox24;
- uint32_t mailbox25;
- uint32_t mailbox26;
- uint32_t mailbox27;
- uint32_t mailbox28;
- uint32_t mailbox29;
- uint32_t mailbox30;
- uint32_t mailbox31;
- uint32_t aenmailbox0;
- uint32_t aenmailbox1;
- uint32_t aenmailbox2;
- uint32_t aenmailbox3;
- uint32_t aenmailbox4;
- uint32_t aenmailbox5;
- uint32_t aenmailbox6;
- uint32_t aenmailbox7;
+ __le32 mailbox0; /* 00 */
+ __le32 mailbox1; /* 04 */
+ __le32 mailbox2; /* 08 */
+ __le32 mailbox3; /* 0C */
+ __le32 mailbox4; /* 10 */
+ __le32 mailbox5; /* 14 */
+ __le32 mailbox6; /* 18 */
+ __le32 mailbox7; /* 1C */
+ __le32 mailbox8; /* 20 */
+ __le32 mailbox9; /* 24 */
+ __le32 mailbox10; /* 28 */
+ __le32 mailbox11;
+ __le32 mailbox12;
+ __le32 mailbox13;
+ __le32 mailbox14;
+ __le32 mailbox15;
+ __le32 mailbox16;
+ __le32 mailbox17;
+ __le32 mailbox18;
+ __le32 mailbox19;
+ __le32 mailbox20;
+ __le32 mailbox21;
+ __le32 mailbox22;
+ __le32 mailbox23;
+ __le32 mailbox24;
+ __le32 mailbox25;
+ __le32 mailbox26;
+ __le32 mailbox27;
+ __le32 mailbox28;
+ __le32 mailbox29;
+ __le32 mailbox30;
+ __le32 mailbox31;
+ __le32 aenmailbox0;
+ __le32 aenmailbox1;
+ __le32 aenmailbox2;
+ __le32 aenmailbox3;
+ __le32 aenmailbox4;
+ __le32 aenmailbox5;
+ __le32 aenmailbox6;
+ __le32 aenmailbox7;
/* Request Queue. */
- uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
- uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
+ __le32 req_q_in; /* A0 - Request Queue In-Pointer */
+ __le32 req_q_out; /* A4 - Request Queue Out-Pointer */
/* Response Queue. */
- uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
- uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
+ __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */
+ __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */
/* Init values shadowed on FW Up Event */
- uint32_t initval0; /* B0 */
- uint32_t initval1; /* B4 */
- uint32_t initval2; /* B8 */
- uint32_t initval3; /* BC */
- uint32_t initval4; /* C0 */
- uint32_t initval5; /* C4 */
- uint32_t initval6; /* C8 */
- uint32_t initval7; /* CC */
- uint32_t fwheartbeat; /* D0 */
- uint32_t pseudoaen; /* D4 */
+ __le32 initval0; /* B0 */
+ __le32 initval1; /* B4 */
+ __le32 initval2; /* B8 */
+ __le32 initval3; /* BC */
+ __le32 initval4; /* C0 */
+ __le32 initval5; /* C4 */
+ __le32 initval6; /* C8 */
+ __le32 initval7; /* CC */
+ __le32 fwheartbeat; /* D0 */
+ __le32 pseudoaen; /* D4 */
};
@@ -921,18 +1025,18 @@ typedef union {
&(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \
&(reg)->u.isp2300.mailbox0 + (num))
#define RD_MAILBOX_REG(ha, reg, num) \
- RD_REG_WORD(MAILBOX_REG(ha, reg, num))
+ rd_reg_word(MAILBOX_REG(ha, reg, num))
#define WRT_MAILBOX_REG(ha, reg, num, data) \
- WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data)
+ wrt_reg_word(MAILBOX_REG(ha, reg, num), data)
#define FB_CMD_REG(ha, reg) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
&(reg)->fb_cmd_2100 : \
&(reg)->u.isp2300.fb_cmd)
#define RD_FB_CMD_REG(ha, reg) \
- RD_REG_WORD(FB_CMD_REG(ha, reg))
+ rd_reg_word(FB_CMD_REG(ha, reg))
#define WRT_FB_CMD_REG(ha, reg, data) \
- WRT_REG_WORD(FB_CMD_REG(ha, reg), data)
+ wrt_reg_word(FB_CMD_REG(ha, reg), data)
typedef struct {
uint32_t out_mb; /* outbound from driver */
@@ -1018,6 +1122,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBA_LIP_F8 0x8016 /* Received a LIP F8. */
#define MBA_LOOP_INIT_ERR 0x8017 /* Loop Initialization Error. */
#define MBA_FABRIC_AUTH_REQ 0x801b /* Fabric Authentication Required. */
+#define MBA_CONGN_NOTI_RECV 0x801e /* Congestion Notification Received */
#define MBA_SCSI_COMPLETION 0x8020 /* SCSI Command Complete. */
#define MBA_CTIO_COMPLETION 0x8021 /* CTIO Complete. */
#define MBA_IP_COMPLETION 0x8022 /* IP Transmit Command Complete. */
@@ -1049,6 +1154,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */
#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
#define MBA_TRANS_INSERT 0x8130 /* Transceiver Insertion */
+#define MBA_TRANS_REMOVE 0x8131 /* Transceiver Removal */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
@@ -1072,6 +1178,12 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
+
+/* AEN mailbox Port Diagnostics test */
+#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */
+#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */
+#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error. */
+
/*
* Firmware options 1, 2, 3.
*/
@@ -1134,6 +1246,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */
#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/
#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */
+#define MBC_SET_GET_FC_LED_CONFIG 0x3b /* Set/Get FC LED config */
#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */
#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */
#define MBC_GET_RESOURCE_COUNTS 0x42 /* Get Resource Counts. */
@@ -1260,10 +1373,14 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_ELS_CMD 0x5
#define RNID_TYPE_PORT_LOGIN 0x7
+#define RNID_BUFFER_CREDITS 0x8
#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
+#define ELS_CMD_MAP_SIZE 32
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1309,7 +1426,7 @@ typedef struct {
uint8_t port_id[4];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
- uint16_t execution_throttle;
+ __le16 execution_throttle;
uint16_t execution_count;
uint8_t reset_count;
uint8_t reserved_2;
@@ -1395,9 +1512,9 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 frame_payload_size;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1406,17 +1523,17 @@ typedef struct {
uint8_t login_timeout;
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t request_q_outpointer;
- uint16_t response_q_inpointer;
- uint16_t request_q_length;
- uint16_t response_q_length;
- __le64 request_q_address __packed;
- __le64 response_q_address __packed;
+ __le16 request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_length;
+ __le16 response_q_length;
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
- uint16_t lun_enables;
+ __le16 lun_enables;
uint8_t command_resource_count;
uint8_t immediate_notify_resource_count;
- uint16_t timeout;
+ __le16 timeout;
uint8_t reserved_2[2];
/*
@@ -1467,6 +1584,25 @@ typedef struct {
uint8_t reserved_3[26];
} init_cb_t;
+/* Special Features Control Block */
+struct init_sf_cb {
+ uint8_t format;
+ uint8_t reserved0;
+ /*
+ * BIT 15-14 = Reserved
+ * BIT_13 = SAN Congestion Management (1 - Enabled, 0 - Disabled)
+ * BIT_12 = Remote Write Optimization (1 - Enabled, 0 - Disabled)
+ * BIT 11-0 = Reserved
+ */
+ __le16 flags;
+ uint8_t reserved1[32];
+ uint16_t discard_OHRB_timeout_value;
+ uint16_t remote_write_opt_queue_num;
+ uint8_t reserved2[40];
+ uint8_t scm_related_parameter[16];
+ uint8_t reserved3[32];
+};
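Since flags is little-endian on the wire, the bit assignments documented above are applied with cpu_to_le16(). For example, enabling SAN Congestion Management (sf is an illustrative pointer to a zeroed block):

        sf->flags |= cpu_to_le16(BIT_13);       /* SCM enabled, per the comment above */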
+
/*
* Get Link Status mailbox command return buffer.
*/
@@ -1474,47 +1610,44 @@ typedef struct {
#define GLSO_USE_DID BIT_3
struct link_statistics {
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
- uint32_t lip_cnt;
- uint32_t link_up_cnt;
- uint32_t link_down_loop_init_tmo;
- uint32_t link_down_los;
- uint32_t link_down_loss_rcv_clk;
+ __le32 link_fail_cnt;
+ __le32 loss_sync_cnt;
+ __le32 loss_sig_cnt;
+ __le32 prim_seq_err_cnt;
+ __le32 inval_xmit_word_cnt;
+ __le32 inval_crc_cnt;
+ __le32 lip_cnt;
+ __le32 link_up_cnt;
+ __le32 link_down_loop_init_tmo;
+ __le32 link_down_los;
+ __le32 link_down_loss_rcv_clk;
uint32_t reserved0[5];
- uint32_t port_cfg_chg;
+ __le32 port_cfg_chg;
uint32_t reserved1[11];
- uint32_t rsp_q_full;
- uint32_t atio_q_full;
- uint32_t drop_ae;
- uint32_t els_proto_err;
- uint32_t reserved2;
- uint32_t tx_frames;
- uint32_t rx_frames;
- uint32_t discarded_frames;
- uint32_t dropped_frames;
+ __le32 rsp_q_full;
+ __le32 atio_q_full;
+ __le32 drop_ae;
+ __le32 els_proto_err;
+ __le32 reserved2;
+ __le32 tx_frames;
+ __le32 rx_frames;
+ __le32 discarded_frames;
+ __le32 dropped_frames;
uint32_t reserved3;
- uint32_t nos_rcvd;
+ __le32 nos_rcvd;
uint32_t reserved4[4];
- uint32_t tx_prjt;
- uint32_t rcv_exfail;
- uint32_t rcv_abts;
- uint32_t seq_frm_miss;
- uint32_t corr_err;
- uint32_t mb_rqst;
- uint32_t nport_full;
- uint32_t eofa;
+ __le32 tx_prjt;
+ __le32 rcv_exfail;
+ __le32 rcv_abts;
+ __le32 seq_frm_miss;
+ __le32 corr_err;
+ __le32 mb_rqst;
+ __le32 nport_full;
+ __le32 eofa;
uint32_t reserved5;
- uint32_t fpm_recv_word_cnt_lo;
- uint32_t fpm_recv_word_cnt_hi;
- uint32_t fpm_disc_word_cnt_lo;
- uint32_t fpm_disc_word_cnt_hi;
- uint32_t fpm_xmit_word_cnt_lo;
- uint32_t fpm_xmit_word_cnt_hi;
+ __le64 fpm_recv_word_cnt;
+ __le64 fpm_disc_word_cnt;
+ __le64 fpm_xmit_word_cnt;
uint32_t reserved6[70];
};
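Folding each lo/hi pair into a single __le64 lets callers pull the 64-bit FPM counters out with one conversion instead of stitching two words together. Illustratively, with stats pointing at a returned statistics buffer:

        u64 recv_words = le64_to_cpu(stats->fpm_recv_word_cnt);
        u64 xmit_words = le64_to_cpu(stats->fpm_xmit_word_cnt);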
@@ -1566,9 +1699,9 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 frame_payload_size;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1692,7 +1825,7 @@ typedef struct {
uint8_t reset_delay;
uint8_t port_down_retry_count;
uint8_t boot_id_number;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint8_t fcode_boot_port_name[WWN_SIZE];
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
@@ -1798,7 +1931,7 @@ struct atio {
};
typedef union {
- uint16_t extended;
+ __le16 extended;
struct {
uint8_t reserved;
uint8_t standard;
@@ -1824,18 +1957,18 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
#define CF_WRITE BIT_6
#define CF_READ BIT_5
#define CF_SIMPLE_TAG BIT_3
#define CF_ORDERED_TAG BIT_2
#define CF_HEAD_TAG BIT_1
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
union {
struct dsd32 dsd32[3];
struct dsd64 dsd64[2];
@@ -1853,11 +1986,11 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
struct dsd64 dsd[2];
@@ -1919,7 +2052,7 @@ struct crc_context {
__le16 guard_seed; /* Initial Guard Seed */
__le16 prot_opts; /* Requested Data Protection Mode */
__le16 blk_size; /* Data size in bytes */
- uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ __le16 runt_blk_guard; /* Guard value for runt block (tape
* only) */
__le32 byte_count; /* Total byte count/ total data
* transfer count */
@@ -1972,13 +2105,13 @@ typedef struct {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t scsi_status; /* SCSI status. */
- uint16_t comp_status; /* Completion status. */
- uint16_t state_flags; /* State flags. */
- uint16_t status_flags; /* Status flags. */
- uint16_t rsp_info_len; /* Response Info Length. */
- uint16_t req_sense_length; /* Request sense data length. */
- uint32_t residual_length; /* Residual transfer length. */
+ __le16 scsi_status; /* SCSI status. */
+ __le16 comp_status; /* Completion status. */
+ __le16 state_flags; /* State flags. */
+ __le16 status_flags; /* Status flags. */
+ __le16 rsp_info_len; /* Response Info Length. */
+ __le16 req_sense_length; /* Request sense data length. */
+ __le32 residual_length; /* Residual transfer length. */
uint8_t rsp_info[8]; /* FCP response information. */
uint8_t req_sense_data[32]; /* Request sense data. */
} sts_entry_t;
@@ -2035,6 +2168,12 @@ typedef struct {
#define CS_COMPLETE_CHKCOND 0x30 /* Error? */
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
failure */
+#define CS_REJECT_RECEIVED 0x4E /* Reject received */
+#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */
+#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */
+#define CS_EDIF_INV_REQ 0x66 /* invalid request */
+#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */
+#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2110,8 +2249,8 @@ typedef struct {
/* clear port changed, */
/* use sequence number. */
uint8_t reserved_1;
- uint16_t sequence_number; /* Sequence number of event */
- uint16_t lun; /* SCSI LUN */
+ __le16 sequence_number; /* Sequence number of event */
+ __le16 lun; /* SCSI LUN */
uint8_t reserved_2[48];
} mrk_entry_t;
@@ -2126,23 +2265,25 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle1; /* System handle. */
target_id_t loop_id;
- uint16_t status;
- uint16_t control_flags; /* Control flags. */
+ __le16 status;
+ __le16 control_flags; /* Control flags. */
uint16_t reserved2;
- uint16_t timeout;
- uint16_t cmd_dsd_count;
- uint16_t total_dsd_count;
+ __le16 timeout;
+ __le16 cmd_dsd_count;
+ __le16 total_dsd_count;
uint8_t type;
uint8_t r_ctl;
- uint16_t rx_id;
+ __le16 rx_id;
uint16_t reserved3;
uint32_t handle2;
- uint32_t rsp_bytecount;
- uint32_t req_bytecount;
+ __le32 rsp_bytecount;
+ __le32 req_bytecount;
struct dsd64 req_dsd;
struct dsd64 rsp_dsd;
} ms_iocb_entry_t;
+#define SCM_EDC_ACC_RECEIVED BIT_6
+#define SCM_RDF_ACC_RECEIVED BIT_7
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -2166,20 +2307,20 @@ struct mbx_entry {
uint32_t handle;
target_id_t loop_id;
- uint16_t status;
- uint16_t state_flags;
- uint16_t status_flags;
+ __le16 status;
+ __le16 state_flags;
+ __le16 status_flags;
uint32_t sys_define2[2];
- uint16_t mb0;
- uint16_t mb1;
- uint16_t mb2;
- uint16_t mb3;
- uint16_t mb6;
- uint16_t mb7;
- uint16_t mb9;
- uint16_t mb10;
+ __le16 mb0;
+ __le16 mb1;
+ __le16 mb2;
+ __le16 mb3;
+ __le16 mb6;
+ __le16 mb7;
+ __le16 mb9;
+ __le16 mb10;
uint32_t reserved_2[2];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -2201,52 +2342,53 @@ struct imm_ntfy_from_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t lun;
+ __le16 lun;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t status_modifier;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
+ __le16 status_modifier;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
#define SRR_IU_DATA_IN 0x1
#define SRR_IU_DATA_OUT 0x5
#define SRR_IU_STATUS 0x7
- uint16_t srr_ox_id;
+ __le16 srr_ox_id;
uint8_t reserved_2[28];
} isp2x;
struct {
uint32_t reserved;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_2;
- uint16_t flags;
+ __le16 flags;
+#define NOTIFY24XX_FLAGS_FCSP BIT_5
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_ox_id;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_ox_id;
union {
struct {
uint8_t node_name[8];
} plogi; /* PLOGI/ADISC/PDISC */
struct {
/* PRLI word 3 bit 0-15 */
- uint16_t wd3_lo;
+ __le16 wd3_lo;
uint8_t resv0[6];
} prli;
struct {
uint8_t port_id[3];
uint8_t resv1;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t resv2;
} req_els;
} u;
@@ -2259,7 +2401,7 @@ struct imm_ntfy_from_isp {
} isp24;
} u;
uint16_t reserved_7;
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#endif
@@ -2307,11 +2449,9 @@ struct mbx_24xx_entry {
*/
typedef enum {
FCT_UNKNOWN,
- FCT_RSCN,
- FCT_SWITCH,
- FCT_BROADCAST,
- FCT_INITIATOR,
- FCT_TARGET,
+ FCT_BROADCAST = 0x01,
+ FCT_INITIATOR = 0x02,
+ FCT_TARGET = 0x04,
FCT_NVME_INITIATOR = 0x10,
FCT_NVME_TARGET = 0x20,
FCT_NVME_DISCOVERY = 0x40,
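With explicit power-of-two values, the FCP and NVMe roles become independent flags that can be combined on a single port instead of being compared for equality. A minimal sketch of that style of test, using a hypothetical helper:

	if (fcport->port_type & (FCT_TARGET | FCT_NVME_TARGET)) {
		/* the port presents at least one target personality */
		handle_target_port(fcport);	/* hypothetical helper */
	}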
@@ -2354,6 +2494,7 @@ enum discovery_state {
DSC_LOGIN_COMPLETE,
DSC_ADISC,
DSC_DELETE_PEND,
+ DSC_LOGIN_AUTH_PEND,
};
enum login_state { /* FW control Target side */
@@ -2381,12 +2522,6 @@ typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- uint8_t node_name[WWN_SIZE];
- uint8_t port_name[WWN_SIZE];
- port_id_t d_id;
- uint16_t loop_id;
- uint16_t old_loop_id;
-
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
unsigned int free_pending:1;
@@ -2403,15 +2538,26 @@ typedef struct fc_port {
unsigned int n2n_flag:1;
unsigned int explicit_logout:1;
unsigned int prli_pend_timer:1;
+ unsigned int do_prli_nvme:1;
+
+ uint8_t nvme_flag;
+
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t d_id;
+ uint16_t loop_id;
+ uint16_t old_loop_id;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
+#define NVME_PRLI_SP_PI_CTRL BIT_9
+#define NVME_PRLI_SP_SLER BIT_8
#define NVME_PRLI_SP_CONF BIT_7
#define NVME_PRLI_SP_INITIATOR BIT_5
#define NVME_PRLI_SP_TARGET BIT_4
#define NVME_PRLI_SP_DISCOVERY BIT_3
#define NVME_PRLI_SP_FIRST_BURST BIT_0
- uint8_t nvme_flag;
+
uint32_t nvme_first_burst_size;
#define NVME_FLAG_REGISTERED 4
#define NVME_FLAG_DELETING 2
@@ -2422,6 +2568,8 @@ typedef struct fc_port {
int generation;
struct se_session *se_sess;
+ struct list_head sess_cmd_list;
+ spinlock_t sess_cmd_lock;
struct kref sess_kref;
struct qla_tgt *tgt;
unsigned long expires;
@@ -2482,6 +2630,39 @@ typedef struct fc_port {
u8 last_login_state;
u16 n2n_link_reset_cnt;
u16 n2n_chip_reset;
+
+ struct dentry *dfs_rport_dir;
+
+ u64 tgt_short_link_down_cnt;
+ u64 tgt_link_down_time;
+ u64 dev_loss_tmo;
+ /*
+ * EDIF parameters for encryption.
+ */
+ struct {
+ uint32_t enable:1; /* device is edif enabled/req'd */
+ uint32_t app_stop:2;
+ uint32_t aes_gmac:1;
+ uint32_t app_sess_online:1;
+ uint32_t tx_sa_set:1;
+ uint32_t rx_sa_set:1;
+ uint32_t tx_sa_pending:1;
+ uint32_t rx_sa_pending:1;
+ uint32_t tx_rekey_cnt;
+ uint32_t rx_rekey_cnt;
+ uint64_t tx_bytes;
+ uint64_t rx_bytes;
+ uint8_t sess_down_acked;
+ uint8_t auth_state;
+ uint16_t authok:1;
+ uint16_t rekey_cnt;
+ struct list_head edif_indx_list;
+ spinlock_t indx_list_lock;
+
+ struct list_head tx_sa_list;
+ struct list_head rx_sa_list;
+ spinlock_t sa_list_lock;
+ } edif;
} fc_port_t;
enum {
@@ -2506,24 +2687,28 @@ struct event_arg {
/*
* Fibre channel port/lun states.
*/
-#define FCS_UNCONFIGURED 1
-#define FCS_DEVICE_DEAD 2
-#define FCS_DEVICE_LOST 3
-#define FCS_ONLINE 4
+enum {
+ FCS_UNKNOWN,
+ FCS_UNCONFIGURED,
+ FCS_DEVICE_DEAD,
+ FCS_DEVICE_LOST,
+ FCS_ONLINE,
+};
extern const char *const port_state_str[5];
-static const char * const port_dstate_str[] = {
- "DELETED",
- "GNN_ID",
- "GNL",
- "LOGIN_PEND",
- "LOGIN_FAILED",
- "GPDB",
- "UPD_FCPORT",
- "LOGIN_COMPLETE",
- "ADISC",
- "DELETE_PEND"
+static const char *const port_dstate_str[] = {
+ [DSC_DELETED] = "DELETED",
+ [DSC_GNN_ID] = "GNN_ID",
+ [DSC_GNL] = "GNL",
+ [DSC_LOGIN_PEND] = "LOGIN_PEND",
+ [DSC_LOGIN_FAILED] = "LOGIN_FAILED",
+ [DSC_GPDB] = "GPDB",
+ [DSC_UPD_FCPORT] = "UPD_FCPORT",
+ [DSC_LOGIN_COMPLETE] = "LOGIN_COMPLETE",
+ [DSC_ADISC] = "ADISC",
+ [DSC_DELETE_PEND] = "DELETE_PEND",
+ [DSC_LOGIN_AUTH_PEND] = "LOGIN_AUTH_PEND",
};
/*
@@ -2535,6 +2720,8 @@ static const char * const port_dstate_str[] = {
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
#define FCF_ASYNC_ACTIVE BIT_5
+#define FCF_FCSP_DEVICE BIT_6
+#define FCF_EDIF_DELETE BIT_7
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2624,10 +2811,11 @@ static const char * const port_dstate_str[] = {
#define GFF_ID_RSP_SIZE (16 + 128)
/*
- * HBA attribute types.
+ * FDMI HBA attribute types.
*/
-#define FDMI_HBA_ATTR_COUNT 9
-#define FDMIV2_HBA_ATTR_COUNT 17
+#define FDMI1_HBA_ATTR_COUNT 10
+#define FDMI2_HBA_ATTR_COUNT 17
+
#define FDMI_HBA_NODE_NAME 0x1
#define FDMI_HBA_MANUFACTURER 0x2
#define FDMI_HBA_SERIAL_NUMBER 0x3
@@ -2639,16 +2827,17 @@ static const char * const port_dstate_str[] = {
#define FDMI_HBA_FIRMWARE_VERSION 0x9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
+
#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
-#define FDMI_HBA_VENDOR_ID 0xd
+#define FDMI_HBA_VENDOR_SPECIFIC_INFO 0xd
#define FDMI_HBA_NUM_PORTS 0xe
#define FDMI_HBA_FABRIC_NAME 0xf
#define FDMI_HBA_BOOT_BIOS_NAME 0x10
-#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
+#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
@@ -2660,55 +2849,41 @@ struct ct_fdmi_hba_attr {
uint8_t orom_version[16];
uint8_t fw_version[32];
uint8_t os_version[128];
- uint32_t max_ct_len;
- } a;
-};
-
-struct ct_fdmi_hba_attributes {
- uint32_t count;
- struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
-};
+ __be32 max_ct_len;
-struct ct_fdmiv2_hba_attr {
- uint16_t type;
- uint16_t len;
- union {
- uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[64];
- uint8_t serial_num[32];
- uint8_t model[16+1];
- uint8_t model_desc[80];
- uint8_t hw_version[16];
- uint8_t driver_version[32];
- uint8_t orom_version[16];
- uint8_t fw_version[32];
- uint8_t os_version[128];
- uint32_t max_ct_len;
uint8_t sym_name[256];
- uint32_t vendor_id;
- uint32_t num_ports;
+ __be32 vendor_specific_info;
+ __be32 num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
uint8_t vendor_identifier[8];
} a;
};
-struct ct_fdmiv2_hba_attributes {
- uint32_t count;
- struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+struct ct_fdmi1_hba_attributes {
+ __be32 count;
+ struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT];
+};
+
+struct ct_fdmi2_hba_attributes {
+ __be32 count;
+ struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT];
};
/*
- * Port attribute types.
+ * FDMI Port attribute types.
*/
-#define FDMI_PORT_ATTR_COUNT 6
-#define FDMIV2_PORT_ATTR_COUNT 16
+#define FDMI1_PORT_ATTR_COUNT 6
+#define FDMI2_PORT_ATTR_COUNT 16
+#define FDMI2_SMARTSAN_PORT_ATTR_COUNT 23
+
#define FDMI_PORT_FC4_TYPES 0x1
#define FDMI_PORT_SUPPORT_SPEED 0x2
#define FDMI_PORT_CURRENT_SPEED 0x3
#define FDMI_PORT_MAX_FRAME_SIZE 0x4
#define FDMI_PORT_OS_DEVICE_NAME 0x5
#define FDMI_PORT_HOST_NAME 0x6
+
#define FDMI_PORT_NODE_NAME 0x7
#define FDMI_PORT_NAME 0x8
#define FDMI_PORT_SYM_NAME 0x9
@@ -2718,7 +2893,15 @@ struct ct_fdmiv2_hba_attributes {
#define FDMI_PORT_FC4_TYPE 0xd
#define FDMI_PORT_STATE 0x101
#define FDMI_PORT_COUNT 0x102
-#define FDMI_PORT_ID 0x103
+#define FDMI_PORT_IDENTIFIER 0x103
+
+#define FDMI_SMARTSAN_SERVICE 0xF100
+#define FDMI_SMARTSAN_GUID 0xF101
+#define FDMI_SMARTSAN_VERSION 0xF102
+#define FDMI_SMARTSAN_PROD_NAME 0xF103
+#define FDMI_SMARTSAN_PORT_INFO 0xF104
+#define FDMI_SMARTSAN_QOS_SUPPORT 0xF105
+#define FDMI_SMARTSAN_SECURITY_SUPPORT 0xF106
#define FDMI_PORT_SPEED_1GB 0x1
#define FDMI_PORT_SPEED_2GB 0x2
@@ -2727,61 +2910,69 @@ struct ct_fdmiv2_hba_attributes {
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
#define FDMI_PORT_SPEED_32GB 0x40
-#define FDMI_PORT_SPEED_64GB 0x80
+#define FDMI_PORT_SPEED_20GB 0x80
+#define FDMI_PORT_SPEED_40GB 0x100
+#define FDMI_PORT_SPEED_128GB 0x200
+#define FDMI_PORT_SPEED_64GB 0x400
+#define FDMI_PORT_SPEED_256GB 0x800
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
#define FC_CLASS_2 0x04
#define FC_CLASS_3 0x08
#define FC_CLASS_2_3 0x0C
-struct ct_fdmiv2_port_attr {
- uint16_t type;
- uint16_t len;
+struct ct_fdmi_port_attr {
+ __be16 type;
+ __be16 len;
union {
uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
+ __be32 sup_speed;
+ __be32 cur_speed;
+ __be32 max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[256];
+
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
- uint32_t port_type;
- uint32_t port_supported_cos;
+ __be32 port_type;
+ __be32 port_supported_cos;
uint8_t fabric_name[WWN_SIZE];
uint8_t port_fc4_type[32];
- uint32_t port_state;
- uint32_t num_ports;
- uint32_t port_id;
+ __be32 port_state;
+ __be32 num_ports;
+ __be32 port_id;
+
+ uint8_t smartsan_service[24];
+ uint8_t smartsan_guid[16];
+ uint8_t smartsan_version[24];
+ uint8_t smartsan_prod_name[16];
+ __be32 smartsan_port_info;
+ __be32 smartsan_qos_support;
+ __be32 smartsan_security_support;
} a;
};
-/*
- * Port Attribute Block.
- */
-struct ct_fdmiv2_port_attributes {
- uint32_t count;
- struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
+struct ct_fdmi1_port_attributes {
+ __be32 count;
+ struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT];
};
-struct ct_fdmi_port_attr {
- uint16_t type;
- uint16_t len;
- union {
- uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
- uint8_t os_dev_name[32];
- uint8_t host_name[256];
- } a;
+struct ct_fdmi2_port_attributes {
+ __be32 count;
+ struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT];
};
-struct ct_fdmi_port_attributes {
- uint32_t count;
- struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
-};
+#define FDMI_ATTR_TYPELEN(obj) \
+ (sizeof((obj)->type) + sizeof((obj)->len))
+
+#define FDMI_ATTR_ALIGNMENT(len) \
+ (4 - ((len) & 3))
+
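A sketch of how the two helpers above combine when sizing an attribute, in the style of the driver's FDMI registration path (alen, eiter and size are assumed locals; note that FDMI_ATTR_ALIGNMENT(len) yields a full 4-byte pad word when len is already aligned):

	/* alen starts as the raw payload length of this attribute */
	alen += FDMI_ATTR_ALIGNMENT(alen);	/* pad payload to a 4-byte boundary */
	alen += FDMI_ATTR_TYPELEN(eiter);	/* add the type and length words */
	eiter->len = cpu_to_be16(alen);
	size += alen;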
+/* FDMI register call options */
+#define CALLOPT_FDMI1 0
+#define CALLOPT_FDMI2 1
+#define CALLOPT_FDMI2_SMARTSAN 2
/* FDMI definitions. */
#define GRHL_CMD 0x100
@@ -2793,10 +2984,13 @@ struct ct_fdmi_port_attributes {
#define RHBA_RSP_SIZE 16
#define RHAT_CMD 0x201
+
#define RPRT_CMD 0x210
+#define RPRT_RSP_SIZE 24
#define RPA_CMD 0x211
#define RPA_RSP_SIZE 16
+#define SMARTSAN_RPA_RSP_SIZE 24
#define DHBA_CMD 0x300
#define DHBA_REQ_SIZE (16 + 8)
@@ -2819,8 +3013,8 @@ struct ct_cmd_hdr {
/* CT command request */
struct ct_sns_req {
struct ct_cmd_hdr header;
- uint16_t command;
- uint16_t max_rsp_size;
+ __be16 command;
+ __be16 max_rsp_size;
uint8_t fragment_id;
uint8_t reserved[3];
@@ -2877,32 +3071,26 @@ struct ct_sns_req {
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
+ __be32 entry_count;
uint8_t port_name[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi2_hba_attributes attrs;
} rhba;
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
- uint8_t port_name[8];
- struct ct_fdmiv2_hba_attributes attrs;
- } rhba2;
-
- struct {
- uint8_t hba_identifier[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi1_hba_attributes attrs;
} rhat;
struct {
uint8_t port_name[8];
- struct ct_fdmi_port_attributes attrs;
+ struct ct_fdmi2_port_attributes attrs;
} rpa;
struct {
+ uint8_t hba_identifier[8];
uint8_t port_name[8];
- struct ct_fdmiv2_port_attributes attrs;
- } rpa2;
+ struct ct_fdmi2_port_attributes attrs;
+ } rprt;
struct {
uint8_t port_name[8];
@@ -2938,7 +3126,7 @@ struct ct_sns_req {
/* CT command response header */
struct ct_rsp_hdr {
struct ct_cmd_hdr header;
- uint16_t response;
+ __be16 response;
uint16_t residual;
uint8_t fragment_id;
uint8_t reason_code;
@@ -3016,7 +3204,7 @@ struct ct_sns_rsp {
struct {
uint32_t entry_count;
uint8_t port_name[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi1_hba_attributes attrs;
} ghat;
struct {
@@ -3024,14 +3212,16 @@ struct ct_sns_rsp {
} gfpn_id;
struct {
- uint16_t speeds;
- uint16_t speed;
+ __be16 speeds;
+ __be16 speed;
} gpsc;
#define GFF_FCP_SCSI_OFFSET 7
#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
+#define FC4_FF_TARGET BIT_0
+#define FC4_FF_INITIATOR BIT_1
} gff_id;
struct {
uint8_t reserved;
@@ -3115,13 +3305,13 @@ struct fab_scan {
struct sns_cmd_pkt {
union {
struct {
- uint16_t buffer_length;
- uint16_t reserved_1;
- __le64 buffer_address __packed;
- uint16_t subcommand_length;
- uint16_t reserved_2;
- uint16_t subcommand;
- uint16_t size;
+ __le16 buffer_length;
+ __le16 reserved_1;
+ __le64 buffer_address __packed;
+ __le16 subcommand_length;
+ __le16 reserved_2;
+ __le16 subcommand;
+ __le16 size;
uint32_t reserved_3;
uint8_t param[36];
} cmd;
@@ -3147,7 +3337,7 @@ struct gid_list_info {
uint8_t area;
uint8_t domain;
uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */
- uint16_t loop_id; /* ISP23XX -- 6 bytes. */
+ __le16 loop_id; /* ISP23XX -- 6 bytes. */
uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
};
@@ -3221,10 +3411,13 @@ struct isp_operations {
int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
uint32_t);
- void (*fw_dump) (struct scsi_qla_host *, int);
+ void (*fw_dump)(struct scsi_qla_host *vha);
+ void (*mpi_fw_dump)(struct scsi_qla_host *, int);
+ /* Context: task, might sleep */
int (*beacon_on) (struct scsi_qla_host *);
int (*beacon_off) (struct scsi_qla_host *);
+
void (*beacon_blink) (struct scsi_qla_host *);
void *(*read_optrom)(struct scsi_qla_host *, void *,
@@ -3235,7 +3428,10 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
int (*start_scsi_mq) (srb_t *);
+
+ /* Context: task, might sleep */
int (*abort_isp) (struct scsi_qla_host *);
+
int (*iospace_config)(struct qla_hw_data *);
int (*initialize_adapter)(struct scsi_qla_host *);
};
@@ -3250,6 +3446,7 @@ struct isp_operations {
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -3301,6 +3498,7 @@ enum qla_work_type {
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
QLA_EVT_ELS_PLOGI,
+ QLA_EVT_SA_REPLACE,
};
@@ -3359,6 +3557,11 @@ struct qla_work_evt {
u8 fc4_type;
srb_t *sp;
} gpnft;
+ struct {
+ struct edif_sa_ctl *sa_ctl;
+ fc_port_t *fcport;
+ uint16_t nport_handle;
+ } sa_update;
} u;
};
@@ -3447,6 +3650,14 @@ struct qla_tgt_counters {
uint64_t num_term_xchg_sent;
};
+struct qla_counters {
+ uint64_t input_bytes;
+ uint64_t input_requests;
+ uint64_t output_bytes;
+ uint64_t output_requests;
+
+};
+
struct qla_qpair;
/* Response queue data structure */
@@ -3454,8 +3665,8 @@ struct rsp_que {
dma_addr_t dma;
response_t *ring;
response_t *ring_ptr;
- uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */
- uint32_t __iomem *rsp_q_out;
+ __le32 __iomem *rsp_q_in; /* FWI2-capable only. */
+ __le32 __iomem *rsp_q_out;
uint16_t ring_index;
uint16_t out_ptr;
uint16_t *in_ptr; /* queue shadow in index */
@@ -3481,8 +3692,8 @@ struct req_que {
dma_addr_t dma;
request_t *ring;
request_t *ring_ptr;
- uint32_t __iomem *req_q_in; /* FWI2-capable only. */
- uint32_t __iomem *req_q_out;
+ __le32 __iomem *req_q_in; /* FWI2-capable only. */
+ __le32 __iomem *req_q_out;
uint16_t ring_index;
uint16_t in_ptr;
uint16_t *out_ptr; /* queue shadow out index */
@@ -3505,6 +3716,15 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+struct qla_fw_resources {
+ u16 iocbs_total;
+ u16 iocbs_limit;
+ u16 iocbs_qp_limit;
+ u16 iocbs_used;
+};
+
+#define QLA_IOCB_PCT_LIMIT 95
+
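A sketch of how these fields are expected to relate, assuming the limit is derived from the firmware-reported IOCB count via QLA_IOCB_PCT_LIMIT:

	/* cap usable IOCBs at 95% of what the firmware exposes */
	fwres->iocbs_total = fw_iocb_count;
	fwres->iocbs_limit = (fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;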
/*Queue pair data structure */
struct qla_qpair {
spinlock_t qp_lock;
@@ -3531,6 +3751,7 @@ struct qla_qpair {
uint32_t enable_class_2:1;
uint32_t enable_explicit_conf:1;
uint32_t use_shadow_reg:1;
+ uint32_t rcv_intr:1;
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
@@ -3546,13 +3767,20 @@ struct qla_qpair {
struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
struct qla_hw_data *hw;
struct work_struct q_work;
+ struct qla_counters counters;
+
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
- uint16_t cpuid;
+
uint16_t retry_term_cnt;
- uint32_t retry_term_exchg_addr;
+ __le32 retry_term_exchg_addr;
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
+ uint16_t cpuid;
+ struct qla_fw_resources fwres ____cacheline_aligned;
+ u32 cmd_cnt;
+ u32 cmd_completion_cnt;
+ u32 prev_completion_cnt;
};
/* Place holder for FW buffer parameters */
@@ -3562,6 +3790,134 @@ struct qlfc_fw {
uint32_t len;
};
+struct rdp_req_payload {
+ uint32_t els_request;
+ uint32_t desc_list_len;
+
+ /* NPIV descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t reserved;
+ uint8_t nport_id[3];
+ } npiv_desc;
+};
+
+struct rdp_rsp_payload {
+ struct {
+ __be32 cmd;
+ __be32 len;
+ } hdr;
+
+ /* LS Request Info descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
+ } ls_req_info_desc;
+
+ /* LS Request Info descriptor (second instance) */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
+ } ls_req_info_desc2;
+
+ /* SFP diagnostic param descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 temperature;
+ __be16 vcc;
+ __be16 tx_bias;
+ __be16 tx_power;
+ __be16 rx_power;
+ __be16 sfp_flags;
+ } sfp_diag_desc;
+
+ /* Port Speed Descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 speed_capab;
+ __be16 operating_speed;
+ } port_speed_desc;
+
+ /* Link Error Status Descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 link_fail_cnt;
+ __be32 loss_sync_cnt;
+ __be32 loss_sig_cnt;
+ __be32 prim_seq_err_cnt;
+ __be32 inval_xmit_word_cnt;
+ __be32 inval_crc_cnt;
+ uint8_t pn_port_phy_type;
+ uint8_t reserved[3];
+ } ls_err_desc;
+
+ /* Port name description with diag param */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ uint8_t WWNN[WWN_SIZE];
+ uint8_t WWPN[WWN_SIZE];
+ } port_name_diag_desc;
+
+ /* Port Name desc for Direct attached Fx_Port or Nx_Port */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ uint8_t WWNN[WWN_SIZE];
+ uint8_t WWPN[WWN_SIZE];
+ } port_name_direct_desc;
+
+ /* Buffer Credit descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 fcport_b2b;
+ __be32 attached_fcport_b2b;
+ __be32 fcport_rtt;
+ } buffer_credit_desc;
+
+ /* Optical Element Data Descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 high_alarm;
+ __be16 low_alarm;
+ __be16 high_warn;
+ __be16 low_warn;
+ __be32 element_flags;
+ } optical_elmt_desc[5];
+
+ /* Optical Product Data Descriptor */
+ struct {
+ __be32 desc_tag;
+ __be32 desc_len;
+ uint8_t vendor_name[16];
+ uint8_t part_number[16];
+ uint8_t serial_number[16];
+ uint8_t revision[4];
+ uint8_t date[8];
+ } optical_prod_desc;
+};
+
+#define RDP_DESC_LEN(obj) \
+ (sizeof(obj) - sizeof((obj).desc_tag) - sizeof((obj).desc_len))
+
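RDP_DESC_LEN() evaluates to a descriptor's length excluding its tag and length words. A hedged sketch of filling one descriptor header, assuming rsp points at a struct rdp_rsp_payload:

	rsp->port_speed_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp->port_speed_desc));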
+#define RDP_PORT_SPEED_1GB BIT_15
+#define RDP_PORT_SPEED_2GB BIT_14
+#define RDP_PORT_SPEED_4GB BIT_13
+#define RDP_PORT_SPEED_10GB BIT_12
+#define RDP_PORT_SPEED_8GB BIT_11
+#define RDP_PORT_SPEED_16GB BIT_10
+#define RDP_PORT_SPEED_32GB BIT_9
+#define RDP_PORT_SPEED_64GB BIT_8
+#define RDP_PORT_SPEED_UNKNOWN BIT_0
+
struct scsi_qlt_host {
void *target_lport_ptr;
struct mutex tgt_mutex;
@@ -3578,17 +3934,17 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
- uint32_t __iomem *atio_q_in;
- uint32_t __iomem *atio_q_out;
+ __le32 __iomem *atio_q_in;
+ __le32 __iomem *atio_q_out;
- struct qla_tgt_func_tmpl *tgt_ops;
+ const struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
- uint16_t saved_exchange_count;
- uint32_t saved_firmware_options_1;
- uint32_t saved_firmware_options_2;
- uint32_t saved_firmware_options_3;
+ __le16 saved_exchange_count;
+ __le32 saved_firmware_options_1;
+ __le32 saved_firmware_options_2;
+ __le32 saved_firmware_options_3;
uint8_t saved_firmware_options[2];
uint8_t saved_add_firmware_options[2];
@@ -3608,7 +3964,6 @@ struct qlt_hw_data {
int num_act_qpairs;
#define DEFAULT_NAQP 2
spinlock_t atio_lock ____cacheline_aligned;
- struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
@@ -3618,6 +3973,18 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+struct qla_hw_data_stat {
+ u32 num_fw_dump;
+ u32 num_mpi_reset;
+};
+
+/* states mirror those handled by pcie_do_recovery() */
+typedef enum {
+ QLA_PCI_RESUME,
+ QLA_PCI_ERR_DETECTED,
+ QLA_PCI_MMIO_ENABLED,
+ QLA_PCI_SLOT_RESET,
+} pci_error_state_t;
/*
* Qlogic host adapter specific data structure.
*/
@@ -3626,6 +3993,7 @@ struct qla_hw_data {
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
+ u8 port_name[WWN_SIZE];
volatile struct {
uint32_t mbox_int :1;
@@ -3673,17 +4041,31 @@ struct qla_hw_data {
uint32_t fw_started:1;
uint32_t fw_init_done:1;
- uint32_t detected_lr_sfp:1;
- uint32_t using_lr_setting:1;
+ uint32_t lr_detected:1;
+
uint32_t rida_fmt2:1;
uint32_t purge_mbox:1;
uint32_t n2n_bigger:1;
uint32_t secure_adapter:1;
uint32_t secure_fw:1;
+ /* Supported by Adapter */
+ uint32_t scm_supported_a:1;
+ /* Supported by Firmware */
+ uint32_t scm_supported_f:1;
+ /* Enabled in Driver */
+ uint32_t scm_enabled:1;
+ uint32_t edif_hw:1;
+ uint32_t edif_enabled:1;
+ uint32_t n2n_fw_acc_sec:1;
+ uint32_t plogi_template_valid:1;
+ uint32_t port_isolated:1;
+ uint32_t eeh_flush:2;
+#define EEH_FLUSH_RDY 1
+#define EEH_FLUSH_DONE 2
} flags;
uint16_t max_exchg;
- uint16_t long_range_distance; /* 32G & above */
+ uint16_t lr_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3714,6 +4096,7 @@ struct qla_hw_data {
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
+ unsigned long eeh_jif;
/* Multi queue data structs */
device_reg_t *mqiobase;
@@ -3896,15 +4279,28 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
((ha)->fw_attributes_ext[0] & BIT_0))
-#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define QLA_ABTS_FW_ENABLED(_ha) ((_ha)->fw_attributes_ext[0] & BIT_14)
+#define QLA_SRB_NVME_LS(_sp) ((_sp)->type == SRB_NVME_LS)
+#define QLA_SRB_NVME_CMD(_sp) ((_sp)->type == SRB_NVME_CMD)
+#define QLA_NVME_IOS(_sp) (QLA_SRB_NVME_CMD(_sp) || QLA_SRB_NVME_LS(_sp))
+#define QLA_LS_ABTS_WAIT_ENABLED(_sp) \
+ (QLA_SRB_NVME_LS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
+#define QLA_CMD_ABTS_WAIT_ENABLED(_sp) \
+ (QLA_SRB_NVME_CMD(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
+#define QLA_ABTS_WAIT_ENABLED(_sp) \
+ (QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
+
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
IS_QLA28XX(ha))
@@ -3926,6 +4322,10 @@ struct qla_hw_data {
#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_ZIO_THRESHOLD_CAPABLE(ha) \
+ ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+
/* HBA serial number */
uint8_t serial0;
uint8_t serial1;
@@ -3965,6 +4365,8 @@ struct qla_hw_data {
#define SFP_DEV_SIZE 512
#define SFP_BLOCK_SIZE 64
+#define SFP_RTDI_LEN SFP_BLOCK_SIZE
+
void *sfp_data;
dma_addr_t sfp_data_dma;
@@ -3995,6 +4397,13 @@ struct qla_hw_data {
int init_cb_size;
dma_addr_t ex_init_cb_dma;
struct ex_init_cb_81xx *ex_init_cb;
+ dma_addr_t sf_init_cb_dma;
+ struct init_sf_cb *sf_init_cb;
+
+ void *scm_fpin_els_buff;
+ uint64_t scm_fpin_els_buff_size;
+ bool scm_fpin_valid;
+ bool scm_fpin_payload_size;
void *async_pd;
dma_addr_t async_pd_dma;
@@ -4004,7 +4413,7 @@ struct qla_hw_data {
/* Extended Logins */
void *exlogin_buf;
dma_addr_t exlogin_buf_dma;
- int exlogin_size;
+ uint32_t exlogin_size;
#define ENABLE_EXCHANGE_OFFLD BIT_2
@@ -4015,7 +4424,8 @@ struct qla_hw_data {
int exchoffld_count;
/* n2n */
- struct els_plogi_payload plogi_els_payld;
+ struct fc_els_flogi plogi_els_payld;
+#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)
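+/* the 4 bytes dropped here are the leading ELS command word of fc_els_flogi */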
void *swl;
@@ -4057,6 +4467,14 @@ struct qla_hw_data {
#define FW_ATTR_H_NVME BIT_10
#define FW_ATTR_H_NVME_UPDATED BIT_14
+ /* Firmware SCM support */
+#define FW_ATTR_EXT0_SCM_SUPPORTED BIT_12
+ /* Brocade fabric attached */
+#define FW_ATTR_EXT0_SCM_BROCADE 0x00001000
+ /* Cisco fabric attached */
+#define FW_ATTR_EXT0_SCM_CISCO 0x00002000
+#define FW_ATTR_EXT0_NVME2 BIT_13
+#define FW_ATTR_EXT0_EDIF BIT_5
uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
@@ -4080,7 +4498,7 @@ struct qla_hw_data {
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
- uint16_t fw_seriallink_options24[4];
+ __le16 fw_seriallink_options24[4];
uint8_t serdes_version[3];
uint8_t mpi_version[3];
@@ -4098,7 +4516,6 @@ struct qla_hw_data {
uint32_t fw_dump_len;
u32 fw_dump_alloc_len;
bool fw_dumped;
- bool fw_dump_mpi;
unsigned long fw_dump_cap_flags;
#define RISC_PAUSE_CMPL 0
#define DMA_SHUTDOWN_CMPL 1
@@ -4109,6 +4526,10 @@ struct qla_hw_data {
#define ISP_MBX_RDY 6
#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
+ void *mpi_fw_dump;
+ u32 mpi_fw_dump_len;
+ unsigned int mpi_fw_dump_reading:1;
+ unsigned int mpi_fw_dumped:1;
int prev_minidump_failed;
dma_addr_t eft_dma;
void *eft;
@@ -4224,7 +4645,9 @@ struct qla_hw_data {
struct qla_chip_state_84xx *cs84xx;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
+ struct work_struct heartbeat_work;
struct qlfc_fw fw_buf;
+ unsigned long last_heartbeat_run_jiffies;
/* FCP_CMND priority support */
struct qla_fcp_prio_cfg *fcp_prio_cfg;
@@ -4260,7 +4683,7 @@ struct qla_hw_data {
#define NUM_DSD_CHAIN 4096
uint8_t fw_type;
- __le32 file_prd_off; /* File firmware product offset */
+ uint32_t file_prd_off; /* File firmware product offset */
uint32_t md_template_size;
void *md_tmplt_hdr;
@@ -4322,8 +4745,27 @@ struct qla_hw_data {
uint16_t last_zio_threshold;
#define DEFAULT_ZIO_THRESHOLD 5
+
+ struct qla_hw_data_stat stat;
+ pci_error_state_t pci_error_state;
+ struct dma_pool *purex_dma_pool;
+ struct btree_head32 host_map;
+
+#define EDIF_NUM_SA_INDEX 512
+#define EDIF_TX_SA_INDEX_BASE EDIF_NUM_SA_INDEX
+ void *edif_rx_sa_id_map;
+ void *edif_tx_sa_id_map;
+ spinlock_t sadb_fp_lock;
+
+ struct list_head sadb_tx_index_list;
+ struct list_head sadb_rx_index_list;
+ spinlock_t sadb_lock; /* protects list */
+ struct els_reject elsrej;
+ u8 edif_post_stop_cnt_down;
};
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
struct active_regions {
uint8_t global;
struct {
@@ -4331,6 +4773,7 @@ struct active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
+ uint8_t nvme_params;
} aux;
};
@@ -4344,6 +4787,33 @@ struct active_regions {
#define QLA_SET_DATA_RATE_NOLR 1
#define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */
+#define QLA_DEFAULT_PAYLOAD_SIZE 64
+/*
+ * This item might be allocated with a size > sizeof(struct purex_item).
+ * The "size" variable gives the size of the payload (which
+ * is variable) starting at "iocb".
+ */
+struct purex_item {
+ struct list_head list;
+ struct scsi_qla_host *vha;
+ void (*process_item)(struct scsi_qla_host *vha,
+ struct purex_item *pkt);
+ atomic_t in_use;
+ uint16_t size;
+ struct {
+ uint8_t iocb[64];
+ } iocb;
+};
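A sketch of the oversized allocation described in the comment above, assuming payload_len >= sizeof(item->iocb):

	struct purex_item *item;

	/* grow the trailing iocb[] area to hold the whole payload */
	item = kzalloc(sizeof(*item) + payload_len - sizeof(item->iocb),
	    GFP_ATOMIC);
	if (item)
		item->size = payload_len;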
+
+#include "qla_edif.h"
+
+#define SCM_FLAG_RDF_REJECT 0x00
+#define SCM_FLAG_RDF_COMPLETED 0x01
+
+#define QLA_CON_PRIMITIVE_RECEIVED 0x1
+#define QLA_CONGESTION_ARB_WARNING 0x1
+#define QLA_CONGESTION_ARB_ALARM 0x2
+
/*
* Qlogic scsi host structure
*/
@@ -4376,6 +4846,7 @@ typedef struct scsi_qla_host {
uint32_t qpairs_rsp_created:1;
uint32_t nvme_enabled:1;
uint32_t nvme_first_burst:1;
+ uint32_t nvme2_enabled:1;
} flags;
atomic_t loop_state;
@@ -4416,7 +4887,7 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
#define QPAIR_ONLINE_CHECK_NEEDED 27
-#define SET_NVME_ZIO_THRESHOLD_NEEDED 28
+#define DO_EEH_RECOVERY 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
#define IOCB_WORK_ACTIVE 31
@@ -4424,6 +4895,8 @@ typedef struct scsi_qla_host {
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
+#define PROCESS_PUREX_IOCB 63
+
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
@@ -4461,6 +4934,7 @@ typedef struct scsi_qla_host {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t fabric_node_name[WWN_SIZE];
+ uint8_t fabric_port_name[WWN_SIZE];
struct nvme_fc_local_port *nvme_local_port;
struct completion nvme_del_done;
@@ -4471,7 +4945,6 @@ typedef struct scsi_qla_host {
/* list of commands waiting on workqueue */
struct list_head qla_cmd_list;
- struct list_head qla_sess_op_cmd_list;
struct list_head unknown_atio_list;
spinlock_t cmd_list_lock;
struct delayed_work unknown_atio_work;
@@ -4531,6 +5004,14 @@ typedef struct scsi_qla_host {
uint16_t ql2xexchoffld;
uint16_t ql2xiniexchg;
+ struct dentry *dfs_rport_root;
+
+ struct purex_list {
+ struct list_head head;
+ spinlock_t lock;
+ } purex_list;
+ struct purex_item default_item;
+
struct name_list_extended gnl;
/* Count of active session/fcport */
int fcport_count;
@@ -4540,21 +5021,36 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ __le16 dport_data[4];
struct list_head gpnid_list;
struct fab_scan scan;
+ uint8_t scm_fabric_connection_flags;
unsigned int irq_offset;
+
+ u64 hw_err_cnt;
+ u64 interface_err_cnt;
+ u64 cmd_timeout_cnt;
+ u64 reset_cmd_err_cnt;
+ u64 link_down_time;
+ u64 short_link_down_cnt;
+ struct edif_dbell e_dbell;
+ struct pur_core pur_cinfo;
+
+#define DPORT_DIAG_IN_PROGRESS BIT_0
+#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
+ uint16_t dport_status;
} scsi_qla_host_t;
struct qla27xx_image_status {
uint8_t image_status_mask;
- uint16_t generation;
+ __le16 generation;
uint8_t ver_major;
uint8_t ver_minor;
uint8_t bitmap; /* 28xx only */
uint8_t reserved[2];
- uint32_t checksum;
- uint32_t signature;
+ __le32 checksum;
+ __le32 signature;
} __packed;
/* 28xx aux image status bimap values */
@@ -4562,6 +5058,7 @@ struct qla27xx_image_status {
#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1
#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2
#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3
+#define QLA28XX_AUX_IMG_NVME_PARAMS BIT_4
#define SET_VP_IDX 1
#define SET_AL_PA 2
@@ -4668,8 +5165,7 @@ struct secure_flash_update_block_pk {
} while (0)
#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
- atomic_dec(&__qpair->ref_count); \
-
+ atomic_dec(&__qpair->ref_count)
#define QLA_ENA_CONF(_ha) {\
int i;\
@@ -4715,6 +5211,9 @@ struct secure_flash_update_block_pk {
#define QLA_BUSY 0x107
#define QLA_ALREADY_REGISTERED 0x109
#define QLA_OS_TIMER_EXPIRED 0x10a
+#define QLA_ERR_NO_QPAIR 0x10b
+#define QLA_ERR_NOT_FOUND 0x10c
+#define QLA_ERR_FROM_FW 0x10d
#define NVRAM_DELAY() udelay(10)
@@ -4735,8 +5234,6 @@ struct secure_flash_update_block_pk {
#define QLA_DSDS_PER_IOCB 37
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-
#define QLA_SG_ALL 1024
enum nexus_wait_type {
@@ -4745,6 +5242,43 @@ enum nexus_wait_type {
WAIT_LUN,
};
+#define INVALID_EDIF_SA_INDEX 0xffff
+#define RX_DELETE_NO_EDIF_SA_INDEX 0xfffe
+
+#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE
+
+/* edif hash element */
+struct edif_list_entry {
+ uint16_t handle; /* nport_handle */
+ uint32_t update_sa_index;
+ uint32_t delete_sa_index;
+ uint32_t count; /* counter for filtering sa_index */
+#define EDIF_ENTRY_FLAGS_CLEANUP 0x01 /* this index is being cleaned up */
+ uint32_t flags; /* used by sadb cleanup code */
+ fc_port_t *fcport; /* needed by rx delay timer function */
+ struct timer_list timer; /* rx delay timer */
+ struct list_head next;
+};
+
+#define EDIF_TX_INDX_BASE 512
+#define EDIF_RX_INDX_BASE 0
+#define EDIF_RX_DELETE_FILTER_COUNT 3 /* delay queuing rx delete until seen this many times */
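A sketch of arming the rx-delete delay timer on one of these entries; qla2x00_sa_replace_iocb_timeout() is defined later in this patch, while the 2-second delay is an assumption:

	timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
	entry->timer.expires = jiffies + 2 * HZ;	/* assumed delay */
	add_timer(&entry->timer);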
+
+/* entry in the sa_index free pool */
+
+struct sa_index_pair {
+ uint16_t sa_index;
+ uint32_t spi;
+};
+
+/* edif sa_index data structure */
+struct edif_sa_index_entry {
+ struct sa_index_pair sa_pair[2];
+ fc_port_t *fcport;
+ uint16_t handle;
+ struct list_head next;
+};
+
/* Refer to SNIA SFF 8247 */
struct sff_8247_a0 {
u8 txid; /* transceiver id */
@@ -4822,11 +5356,14 @@ struct sff_8247_a0 {
u8 resv2[128];
};
-#define AUTO_DETECT_SFP_SUPPORT(_vha)\
- (ql2xautodetectsfp && !_vha->vp_idx && \
- (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
- IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
- IS_QLA28XX(_vha->hw)))
+/* BPM -- Buffer Plus Management support. */
+#define IS_BPM_CAPABLE(ha) \
+ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BPM_RANGE_CAPABLE(ha) \
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BPM_ENABLED(vha) \
+ (ql2xautodetectsfp && !vha->vp_idx && IS_BPM_CAPABLE(vha->hw))
#define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016
@@ -4843,6 +5380,8 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define QLA_N2N_WAIT_TIME 5 /* 2 * ra_tov(n2n) + 1 */
+
#define NVME_TYPE(fcport) \
(fcport->fc4_type & FS_FC4TYPE_NVME) \
@@ -4855,16 +5394,88 @@ struct sff_8247_a0 {
#define NVME_FCP_TARGET(fcport) \
(FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
+#define NVME_PRIORITY(ha, fcport) \
+ (NVME_FCP_TARGET(fcport) && \
+ (ha->fc4_type_priority == FC4_PRIORITY_NVME))
+
#define NVME_TARGET(ha, fcport) \
- ((NVME_FCP_TARGET(fcport) && \
- (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+ (fcport->do_prli_nvme || \
NVME_ONLY_TARGET(fcport)) \
#define PRLI_PHASE(_cls) \
((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
+enum ql_vnd_host_stat_action {
+ QLA_STOP = 0,
+ QLA_START,
+ QLA_CLEAR,
+};
+
+struct ql_vnd_mng_host_stats_param {
+ u32 stat_type;
+ enum ql_vnd_host_stat_action action;
+} __packed;
+
+struct ql_vnd_mng_host_stats_resp {
+ u32 status;
+} __packed;
+
+struct ql_vnd_stats_param {
+ u32 stat_type;
+} __packed;
+
+struct ql_vnd_tgt_stats_param {
+ s32 tgt_id;
+ u32 stat_type;
+} __packed;
+
+enum ql_vnd_host_port_action {
+ QLA_ENABLE = 0,
+ QLA_DISABLE,
+};
+
+struct ql_vnd_mng_host_port_param {
+ enum ql_vnd_host_port_action action;
+} __packed;
+
+struct ql_vnd_mng_host_port_resp {
+ u32 status;
+} __packed;
+
+struct ql_vnd_stat_entry {
+ u32 stat_type; /* Failure type */
+ u32 tgt_num; /* Target Num */
+ u64 cnt; /* Counter value */
+} __packed;
+
+struct ql_vnd_stats {
+ u64 entry_count; /* Num of entries */
+ u64 rservd;
+ struct ql_vnd_stat_entry entry[]; /* Place holder of entries */
+} __packed;
+
+struct ql_vnd_host_stats_resp {
+ u32 status;
+ struct ql_vnd_stats stats;
+} __packed;
+
+struct ql_vnd_tgt_stats_resp {
+ u32 status;
+ struct ql_vnd_stats stats;
+} __packed;
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
+
+#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
+ _fcport->disc_state == DSC_DELETED)
+
+#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \
+ "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \
+ __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \
+ _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
+ _fp->flags
+
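Illustrative use of the format helper; the debug mask, message ID and arguments are made up:

	ql_dbg(ql_dbg_disc, fcport->vha, 0x2000,
	    DBG_FCPORT_PRFMT(fcport, "relogin in %u secs", delay));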
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0a6fb359f4d5..777808af5634 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
@@ -12,6 +11,140 @@
static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
+#define QLA_DFS_RPORT_DEVLOSS_TMO 1
+
+static int
+qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
+{
+ switch (attr_id) {
+ case QLA_DFS_RPORT_DEVLOSS_TMO:
+ /* Only supported for FC-NVMe devices that are registered. */
+ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+ return -EIO;
+ *val = fp->nvme_remote_port->dev_loss_tmo;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
+{
+ switch (attr_id) {
+ case QLA_DFS_RPORT_DEVLOSS_TMO:
+ /* Only supported for FC-NVMe devices that are registered. */
+ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+ return -EIO;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
+ val);
+#else /* CONFIG_NVME_FC */
+ return -EINVAL;
+#endif /* CONFIG_NVME_FC */
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
+static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
+{ \
+ struct fc_port *fp = data; \
+ return qla_dfs_rport_get(fp, _attr_id, val); \
+} \
+static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
+{ \
+ struct fc_port *fp = data; \
+ return qla_dfs_rport_set(fp, _attr_id, val); \
+} \
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
+ qla_dfs_rport_##_attr##_get, \
+ qla_dfs_rport_##_attr##_set, "%llu\n")
+
+/*
+ * Wrapper for getting fc_port fields.
+ *
+ * _attr : Attribute name.
+ * _get_val : Accessor macro to retrieve the value.
+ */
+#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
+static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
+{ \
+ struct fc_port *fp = data; \
+ *val = _get_val; \
+ return 0; \
+} \
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
+ qla_dfs_rport_field_##_attr##_get, \
+ NULL, "%llu\n")
+
+#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
+ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)
+
+#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
+ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
+
+DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);
+
+DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
+DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
+DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
+DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
+DEFINE_QLA_DFS_RPORT_FIELD(flags);
+DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
+DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
+
+void
+qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+ char wwn[32];
+
+#define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
+ debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
+ fp, &qla_dfs_rport_field_##_attr##_fops)
+
+ if (!vha->dfs_rport_root || fp->dfs_rport_dir)
+ return;
+
+ sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
+ fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
+ if (!fp->dfs_rport_dir)
+ return;
+ if (NVME_TARGET(vha->hw, fp))
+ debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
+ fp, &qla_dfs_rport_dev_loss_tmo_fops);
+
+ QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
+ QLA_CREATE_RPORT_FIELD_ATTR(flags);
+ QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
+ QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
+ QLA_CREATE_RPORT_FIELD_ATTR(port_id);
+ QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
+}
+
+void
+qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+ if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
+ return;
+ debugfs_remove_recursive(fp->dfs_rport_dir);
+ fp->dfs_rport_dir = NULL;
+}
+
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
@@ -37,89 +170,63 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
return 0;
}
-static int
-qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
-{
- scsi_qla_host_t *vha = inode->i_private;
-
- return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
-}
-
-static const struct file_operations dfs_tgt_sess_ops = {
- .open = qla2x00_dfs_tgt_sess_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
scsi_qla_host_t *vha = s->private;
struct qla_hw_data *ha = vha->hw;
- struct gid_list_info *gid_list, *gid;
+ struct gid_list_info *gid_list;
dma_addr_t gid_list_dma;
fc_port_t fc_port;
+ char *id_iter;
int rc, i;
uint16_t entries, loop_id;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
seq_printf(s, "%s\n", vha->host_str);
- if (tgt) {
- gid_list = dma_alloc_coherent(&ha->pdev->dev,
- qla2x00_gid_list_size(ha),
- &gid_list_dma, GFP_KERNEL);
- if (!gid_list) {
- ql_dbg(ql_dbg_user, vha, 0x7018,
- "DMA allocation failed for %u\n",
- qla2x00_gid_list_size(ha));
- return 0;
- }
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x7018,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
- rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
- &entries);
- if (rc != QLA_SUCCESS)
- goto out_free_id_list;
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
- gid = gid_list;
+ id_iter = (char *)gid_list;
- seq_puts(s, "Port Name Port ID Loop ID\n");
+ seq_puts(s, "Port Name Port ID Loop ID\n");
- for (i = 0; i < entries; i++) {
- loop_id = le16_to_cpu(gid->loop_id);
- memset(&fc_port, 0, sizeof(fc_port_t));
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
- fc_port.loop_id = loop_id;
+ fc_port.loop_id = loop_id;
- rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
- seq_printf(s, "%8phC %02x%02x%02x %d\n",
- fc_port.port_name, fc_port.d_id.b.domain,
- fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
- fc_port.loop_id);
- gid = (void *)gid + ha->gid_list_info_size;
- }
-out_free_id_list:
- dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
- gid_list, gid_list_dma);
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
}
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
return 0;
}
-static int
-qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
-{
- scsi_qla_host_t *vha = inode->i_private;
-
- return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
-}
-
-static const struct file_operations dfs_tgt_port_database_ops = {
- .open = qla2x00_dfs_tgt_port_database_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
@@ -127,6 +234,8 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
struct scsi_qla_host *vha = s->private;
uint16_t mb[MAX_IOCB_MB_REG];
int rc;
+ struct qla_hw_data *ha = vha->hw;
+ u16 iocbs_used, i;
rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
if (rc != QLA_SUCCESS) {
@@ -134,11 +243,11 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
} else {
seq_puts(s, "FW Resource count\n\n");
seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
- seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
- seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
- seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
- seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
- seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
seq_printf(s, "MAX VP count[%d]\n", mb[11]);
seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
@@ -149,26 +258,24 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
mb[22]);
seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
mb[23]);
-
}
- return 0;
-}
+ if (ql2xenforce_iocb_limit) {
+ /* No lock required; this is only an estimate. */
+ iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ }
-static int
-qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
-{
- struct scsi_qla_host *vha = inode->i_private;
+ seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+ }
- return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
+ return 0;
}
-static const struct file_operations dfs_fw_resource_cnt_ops = {
- .open = qla_dfs_fw_resource_cnt_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
@@ -179,6 +286,10 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
u16 i;
+ fc_port_t *fcport = NULL;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
@@ -242,23 +353,34 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
seq_printf(s, "DIF App tag err = %d\n",
vha->qla_stats.qla_dif_stats.dif_app_tag_err);
- return 0;
-}
-static int
-qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
-{
- struct scsi_qla_host *vha = inode->i_private;
+ seq_puts(s, "\n");
+ seq_puts(s, "Initiator Error Counters\n");
+ seq_printf(s, "HW Error Count = %14lld\n",
+ vha->hw_err_cnt);
+ seq_printf(s, "Link Down Count = %14lld\n",
+ vha->short_link_down_cnt);
+ seq_printf(s, "Interface Err Count = %14lld\n",
+ vha->interface_err_cnt);
+ seq_printf(s, "Cmd Timeout Count = %14lld\n",
+ vha->cmd_timeout_cnt);
+ seq_printf(s, "Reset Count = %14lld\n",
+ vha->reset_cmd_err_cnt);
+ seq_puts(s, "\n");
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (!fcport->rport)
+ continue;
+
+ seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
+ fcport->rport->number, fcport->tgt_short_link_down_cnt);
+ }
+ seq_puts(s, "\n");
- return single_open(file, qla_dfs_tgt_counters_show, vha);
+ return 0;
}
-static const struct file_operations dfs_tgt_counters_ops = {
- .open = qla_dfs_tgt_counters_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
@@ -367,6 +489,99 @@ qla_dfs_naqp_show(struct seq_file *s, void *unused)
return 0;
}
+/*
+ * Helper macros for setting up debugfs entries.
+ * _name: The name of the debugfs entry
+ * _ctx_struct: The type of the context passed when creating the debugfs file
+ *
+ * QLA_DFS_SETUP_RD can be used when there is only a show function.
+ * - the show function takes the name qla_dfs_<sysfs-name>_show
+ *
+ * QLA_DFS_SETUP_RW can be used when there are both show and write functions.
+ * - the show function takes the name qla_dfs_<sysfs-name>_show
+ * - the write function takes the name qla_dfs_<sysfs-name>_write
+ *
+ * To have a new debugfs entry, do:
+ * 1. Create a "struct dentry *" in the appropriate structure in the format
+ * dfs_<sysfs-name>
+ * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
+ * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
+ * or QLA_DFS_ROOT_CREATE_FILE
+ * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
+ * or QLA_DFS_ROOT_REMOVE_FILE
+ *
+ * Example for creating "TEST" debugfs file:
+ * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
+ * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
+ * 3. In qla2x00_dfs_setup():
+ * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
+ * 4. In qla2x00_dfs_remove():
+ * QLA_DFS_REMOVE_FILE(ha, TEST);
+ */
+#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = qla_dfs_##_name##_write, \
+};
+
+#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
+ do { \
+ if (!qla_dfs_##_name) \
+ qla_dfs_##_name = debugfs_create_file(#_name, \
+ _perm, qla2x00_dfs_root, _ctx, \
+ &qla_dfs_##_name##_ops); \
+ } while (0)
+
+#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
+ do { \
+ if (qla_dfs_##_name) { \
+ debugfs_remove(qla_dfs_##_name); \
+ qla_dfs_##_name = NULL; \
+ } \
+ } while (0)
+
+#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \
+ do { \
+ (_struct)->dfs_##_name = debugfs_create_file(#_name, \
+ _perm, _parent, _ctx, \
+ &qla_dfs_##_name##_ops); \
+ } while (0)
+
+#define QLA_DFS_REMOVE_FILE(_struct, _name) \
+ do { \
+ if ((_struct)->dfs_##_name) { \
+ debugfs_remove((_struct)->dfs_##_name); \
+ (_struct)->dfs_##_name = NULL; \
+ } \
+ } while (0)
+
static int
qla_dfs_naqp_open(struct inode *inode, struct file *file)
{
@@ -460,23 +675,35 @@ create_dir:
create_nodes:
ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
- S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
+ S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);
ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
- ha->dfs_dir, vha, &dfs_tgt_counters_ops);
+ ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);
ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
- S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+ S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
- S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+ S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ if (!ha->tgt.dfs_naqp) {
+ ql_log(ql_log_warn, vha, 0xd011,
+ "Unable to create debugFS naqp node.\n");
+ goto out;
+ }
+ }
+ vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
+ if (!vha->dfs_rport_root) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "Unable to create debugFS rports node.\n");
+ goto out;
+ }
out:
return 0;
}
@@ -516,6 +743,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->dfs_fce = NULL;
}
+ if (vha->dfs_rport_root) {
+ debugfs_remove_recursive(vha->dfs_rport_root);
+ vha->dfs_rport_root = NULL;
+ }
+
if (ha->dfs_dir) {
debugfs_remove(ha->dfs_dir);
ha->dfs_dir = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
new file mode 100644
index 000000000000..00ccc41cef14
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -0,0 +1,3684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2021 Marvell
+ */
+#include "qla_def.h"
+#include "qla_edif.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <scsi/scsi_tcq.h>
+
+static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+ struct list_head *sa_list);
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame);
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+ uint16_t sa_index);
+static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
+
+struct edb_node {
+ struct list_head list;
+ uint32_t ntype;
+ union {
+ port_id_t plogi_did;
+ uint32_t async;
+ port_id_t els_sid;
+ struct edif_sa_update_aen sa_aen;
+ } u;
+};
+
+static struct els_sub_cmd {
+ uint16_t cmd;
+ const char *str;
+} sc_str[] = {
+ {SEND_ELS, "send ELS"},
+ {SEND_ELS_REPLY, "send ELS Reply"},
+ {PULL_ELS, "retrieve ELS"},
+};
+
+const char *sc_to_str(uint16_t cmd)
+{
+ int i;
+ struct els_sub_cmd *e;
+
+ for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
+ e = sc_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
+
+static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del_init(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ list_del_init(&node->list);
+ kfree(node);
+}
+
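+/* find the edif_list entry, if any, for the given nport handle */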
+static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
+ uint16_t handle)
+{
+ struct edif_list_entry *entry;
+ struct edif_list_entry *tentry;
+ struct list_head *indx_list = &fcport->edif.edif_indx_list;
+
+ list_for_each_entry_safe(entry, tentry, indx_list, next) {
+ if (entry->handle == handle)
+ return entry;
+ }
+ return NULL;
+}
+
+/* timeout handler: no traffic seen while an rx sa_index delete was delayed */
+static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
+{
+ struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
+ fc_port_t *fcport = edif_entry->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct edif_sa_ctl *sa_ctl;
+ uint16_t nport_handle;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3069,
+ "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
+ __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
+
+ /*
+ * if delete_sa_index is valid then no one has serviced this
+ * delayed delete
+ */
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+
+ /*
+ * delete_sa_index is invalidated when we find the new sa_index in
+ * the incoming data stream. If it is not invalidated then we are
+ * still looking for the new sa_index because there is no I/O and we
+ * need to just force the rx delete and move on. Otherwise
+ * we could get another rekey which will result in an error 66.
+ */
+ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+ uint16_t delete_sa_index = edif_entry->delete_sa_index;
+
+ edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ nport_handle = edif_entry->handle;
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ delete_sa_index, 0);
+
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
+ __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
+ nport_handle);
+
+ sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+ set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+ qla_post_sa_replace_work(fcport->vha, fcport,
+ nport_handle, sa_ctl);
+
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl not found for delete_sa_index: %d\n",
+ __func__, edif_entry->delete_sa_index);
+ }
+ } else {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ }
+}
+
+/*
+ * create a new list entry for this nport handle and
+ * add an sa_update index to the list - called for sa_update
+ */
+static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
+ uint16_t sa_index, uint16_t handle)
+{
+ struct edif_list_entry *entry;
+ unsigned long flags = 0;
+
+ /* if the entry exists, then just update the sa_index */
+ entry = qla_edif_list_find_sa_index(fcport, handle);
+ if (entry) {
+ entry->update_sa_index = sa_index;
+ entry->count = 0;
+ return 0;
+ }
+
+ /*
+ * This is the normal path - there should be no existing entry
+ * when update is called. The exception is at startup
+ * when update is called for the first two sa_indexes
+ * followed by a delete of the first sa_index
+ */
+ entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->next);
+ entry->handle = handle;
+ entry->update_sa_index = sa_index;
+ entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ entry->count = 0;
+ entry->flags = 0;
+ timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return 0;
+}
+
+/* remove an entry from the list */
+static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
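+/*
+ * Queue a QLA_EVT_SA_REPLACE work item so the delayed rx sa_index
+ * delete is handled in process context instead of the timer callback.
+ */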
+int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+ fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.sa_update.fcport = fcport;
+ e->u.sa_update.sa_ctl = sa_ctl;
+ e->u.sa_update.nport_handle = nport_handle;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
+ return qla2x00_post_work(vha, e);
+}
+
+static void
+qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
+ fcport->node_name, fcport->port_name, fcport->d_id.b24);
+
+ fcport->edif.tx_rekey_cnt = 0;
+ fcport->edif.rx_rekey_cnt = 0;
+
+ fcport->edif.tx_bytes = 0;
+ fcport->edif.rx_bytes = 0;
+}
+
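+/*
+ * Common bsg gate: fail the job when edif or the app doorbell is down,
+ * and service PULL_ELS directly by returning any pending ELS payload
+ * for this session.
+ */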
+static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ fc_port_t *fcport)
+{
+ struct extra_auth_els *p;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct qla_bsg_auth_els_request *req =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ if (!vha->hw->flags.edif_enabled) {
+ ql_dbg(ql_dbg_edif, vha, 0x9105,
+ "%s edif not enabled\n", __func__);
+ goto done;
+ }
+ if (DBELL_INACTIVE(vha)) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ goto done;
+ }
+
+ p = &req->e;
+
+ /* Get response */
+ if (p->sub_cmd == PULL_ELS) {
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ qla_pur_get_pending(vha, fcport, bsg_job);
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
+ __func__, sc_to_str(p->sub_cmd), fcport->port_name,
+ fcport->d_id.b24, rpl->rx_xchg_address,
+ rpl->r.reply_payload_rcv_len, bsg_job);
+
+ goto done;
+ }
+ return 0;
+
+done:
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return -EIO;
+}
+
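+/* look up a session on this vha by port id (d_id) */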
+fc_port_t *
+qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
+{
+ fc_port_t *f, *tf;
+
+ f = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (f->d_id.b24 == id->b24)
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * qla_edif_app_check(): check for valid application id.
+ * @vha: host adapter pointer
+ * @appid: application id
+ * Return: false = fail, true = pass
+ */
+static bool
+qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
+{
+ /* check that the app is allowed/known to the driver */
+
+ if (appid.app_vid != EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+ return false;
+ }
+
+ if (appid.version != EDIF_VERSION1) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
+ __func__, appid.version);
+ return false;
+ }
+
+ return true;
+}
+
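+/*
+ * Unlink and free an sa_ctl, decrementing the matching rekey counter;
+ * indices at or above 512 (presumably EDIF_TX_SA_INDEX_BASE) are tx
+ * keys, lower ones rx keys.
+ */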
+static void
+qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
+ int index)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+ list_del(&sa_ctl->next);
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+ if (index >= 512)
+ fcport->edif.tx_rekey_cnt--;
+ else
+ fcport->edif.rx_rekey_cnt--;
+ kfree(sa_ctl);
+}
+
+/* return an index to the freepool */
+static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
+ uint16_t sa_index)
+{
+ void *sa_id_map;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+ u16 lsa_index = sa_index;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir) {
+ sa_id_map = ha->edif_tx_sa_id_map;
+ lsa_index -= EDIF_TX_SA_INDEX_BASE;
+ } else {
+ sa_id_map = ha->edif_rx_sa_id_map;
+ }
+
+ spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+ clear_bit(lsa_index, sa_id_map);
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: index %d added to free pool\n", __func__, sa_index);
+}
+
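+/*
+ * Release both sa_pair entries of one sadb entry: free the matching
+ * sa_ctl, return each sa_index to the freepool and, on the rx side,
+ * cancel any delayed-delete timer still queued on the edif list.
+ */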
+static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
+ struct fc_port *fcport, struct edif_sa_index_entry *entry,
+ int pdir)
+{
+ struct edif_list_entry *edif_entry;
+ struct edif_sa_ctl *sa_ctl;
+ int i, dir;
+ int key_cnt = 0;
+
+ for (i = 0; i < 2; i++) {
+ if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
+ continue;
+
+ if (fcport->loop_id != entry->handle) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
+ __func__, i, entry->handle, fcport->loop_id,
+ entry->sa_pair[i].sa_index);
+ }
+
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ entry->sa_pair[i].sa_index, pdir);
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
+ }
+
+ /* Release the index */
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, entry->sa_pair[i].sa_index, entry->handle);
+
+ dir = (entry->sa_pair[i].sa_index <
+ EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ qla_edif_add_sa_index_to_freepool(fcport, dir,
+ entry->sa_pair[i].sa_index);
+
+ /* Delete timer on RX */
+ if (pdir != SAU_FLG_TX) {
+ edif_entry =
+ qla_edif_list_find_sa_index(fcport, entry->handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ /*
+ * valid delete_sa_index indicates there is a rx
+ * delayed delete queued
+ */
+ if (edif_entry->delete_sa_index !=
+ INVALID_EDIF_SA_INDEX) {
+ del_timer(&edif_entry->timer);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+ key_cnt++;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %d %s keys released\n",
+ __func__, key_cnt, pdir ? "tx" : "rx");
+}
+
+/* find and release all outstanding sadb sa_indices */
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+ struct edif_sa_index_entry *entry, *tmp;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: Starting...\n", __func__);
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ __qla2x00_release_all_sadb(vha, fcport, entry, 0);
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
+
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+}
+
+/**
+ * qla_edif_app_start - application has announced its presence
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ *
+ * Set/activate doorbell. Reset current sessions and re-login with
+ * secure flag.
+ */
+static int
+qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_start appstart;
+ struct app_start_reply appreply;
+ struct fc_port *fcport, *tf;
+
+ ql_log(ql_log_info, vha, 0x1313,
+ "EDIF application registration with driver, FC device connections will be re-established.\n");
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstart,
+ sizeof(struct app_start));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
+ __func__, appstart.app_info.app_vid, appstart.app_start_flags);
+
+ if (DBELL_INACTIVE(vha)) {
+ /* mark doorbell as active since an app is now present */
+ vha->e_dbell.db_flags |= EDB_ACTIVE;
+ } else {
+ goto out;
+ }
+
+ if (N2N_TOPO(vha->hw)) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ fcport->n2n_link_reset_cnt = 0;
+
+ if (vha->hw->flags.n2n_fw_acc_sec) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ qla_edif_sa_ctl_init(vha, fcport);
+
+ /*
+ * While authentication app was not running, remote device
+ * could still try to login with this local port. Let's
+ * clear the state and try again.
+ */
+ qla2x00_wait_for_sess_deletion(vha);
+
+ /* bounce the link to get the other guy to relogin */
+ if (!vha->hw->flags.n2n_bigger) {
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else {
+ qla2x00_wait_for_hba_online(vha);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ }
+ } else {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "FCSP - nn %8phN pn %8phN portid=%06x.\n",
+ fcport->node_name, fcport->port_name,
+ fcport->d_id.b24);
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "%s: se_sess %p / sess %p from port %8phC "
+ "loop_id %#04x s_id %06x logout %d "
+ "keep %d els_logo %d disc state %d auth state %d"
+ "stop state %d\n",
+ __func__, fcport->se_sess, fcport,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b24, fcport->logout_on_delete,
+ fcport->keep_nport_handle, fcport->send_els_logo,
+ fcport->disc_state, fcport->edif.auth_state,
+ fcport->edif.app_stop);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->login_retry = vha->hw->login_retry_count;
+
+ fcport->edif.app_stop = 0;
+ fcport->edif.app_sess_online = 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport, true);
+
+ if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
+ continue;
+
+ rval = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ qla_edif_sa_ctl_init(vha, fcport);
+ }
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* mark as active since an app is now present */
+ vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
+ __func__);
+ }
+
+out:
+ appreply.host_support_edif = vha->hw->flags.edif_enabled;
+ appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
+ appreply.edif_edb_active = vha->e_dbell.db_flags;
+ appreply.version = EDIF_VERSION1;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &appreply,
+ sizeof(struct app_start_reply));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s app start completed with 0x%x\n",
+ __func__, rval);
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_stop - app has announced it's exiting.
+ * @vha: host adapter pointer
+ * @bsg_job: user space command pointer
+ *
+ * Free any in-flight messages and clear all doorbell events
+ * queued for the application. Reject any message related to security.
+ */
+static int
+qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct app_stop appstop;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct fc_port *fcport, *tf;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstop,
+ sizeof(struct app_stop));
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
+ __func__, appstop.app_info.app_vid);
+
+ /* Call db stop and enode stop functions */
+
+ /* if we leave this running short waits are operational < 16 secs */
+ qla_enode_stop(vha); /* stop enode */
+ qla_edb_stop(vha); /* stop db */
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ if (fcport->flags & FCF_FCSP_DEVICE) {
+ ql_dbg(ql_dbg_edif, vha, 0xf084,
+ "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
+ __func__, fcport,
+ fcport->port_name, fcport->loop_id, fcport->d_id.b24,
+ fcport->logout_on_delete, fcport->keep_nport_handle,
+ fcport->send_els_logo);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->edif.app_stop = 1;
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ /* no return interface to app - it assumes we cleaned up ok */
+
+ return 0;
+}
+
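+/* report whether both tx and rx SA indexes are set for this session */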
+static int
+qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct app_plogi_reply *appplogireply)
+{
+ int ret = 0;
+
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ appplogireply->prli_status = 0;
+ ret = 1;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ appplogireply->prli_status = 1;
+ }
+ return ret;
+}
+
+/**
+ * qla_edif_app_authok - authentication by app succeeded. Driver can proceed
+ * with prli
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct auth_complete_cmd appplogiok;
+ struct app_plogi_reply appplogireply = {0};
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogiok,
+ sizeof(struct auth_complete_cmd));
+
+ /* silence unaligned access warning */
+ portid.b.domain = appplogiok.u.d_id.b.domain;
+ portid.b.area = appplogiok.u.d_id.b.area;
+ portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+
+ appplogireply.version = EDIF_VERSION1;
+ switch (appplogiok.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogiok.u.wwpn, 0);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s wwpn lookup failed: %8phC\n",
+ __func__, appplogiok.u.wwpn);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s d_id lookup failed: %x\n", __func__,
+ portid.b24);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s undefined type: %x\n", __func__,
+ appplogiok.type);
+ break;
+ }
+
+ if (!fcport) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto errstate_exit;
+ }
+
+ /*
+ * if port is online then this is a REKEY operation
+ * Only do sa update checking
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s Skipping PRLI complete based on rekey\n", __func__);
+ appplogireply.prli_status = 1;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
+ goto errstate_exit;
+ }
+
+ /* make sure in AUTH_PENDING or else reject */
+ if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC is not in auth pending state (%x)\n",
+ __func__, fcport->port_name, fcport->disc_state);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 1;
+ fcport->edif.authok = 1;
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ }
+
+ if (qla_ini_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
+ __func__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+
+errstate_exit:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &appplogireply,
+ sizeof(struct app_plogi_reply));
+
+ return 0;
+}
+
+/**
+ * qla_edif_app_authfail - authentication by app has failed. Driver is given
+ * notice to tear down current session.
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct auth_complete_cmd appplogifail;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogifail,
+ sizeof(struct auth_complete_cmd));
+
+ /* silence unaligned access warning */
+ portid.b.domain = appplogifail.u.d_id.b.domain;
+ portid.b.area = appplogifail.u.d_id.b.area;
+ portid.b.al_pa = appplogifail.u.d_id.b.al_pa;
+
+ /*
+ * TODO: edif: app has failed this plogi. Inform driver to
+ * take any action (if any).
+ */
+ switch (appplogifail.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogifail.u.wwpn, 0);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s d_id lookup failed: %x\n", __func__,
+ portid.b24);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s undefined type: %x\n", __func__,
+ appplogifail.type);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ break;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s fcport is 0x%p\n", __func__, fcport);
+
+ if (fcport) {
+ /* set/reset edif values and flags */
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
+ __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
+
+ if (qla_ini_mode_enabled(fcport->vha)) {
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ }
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
+ * [initiator|target] mode). It can request a specific session by nport id or
+ * all sessions.
+ * @vha: host adapter pointer
+ * @bsg_job: user request pointer
+ */
+static int
+qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ int32_t pcnt = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_pinfo_req app_req;
+ struct app_pinfo_reply *app_reply;
+ port_id_t tdid;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &app_req,
+ sizeof(struct app_pinfo_req));
+
+ app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
+ sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
+
+ if (!app_reply) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ } else {
+ struct fc_port *fcport = NULL, *tf;
+
+ app_reply->version = EDIF_VERSION1;
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ tdid = app_req.remote_pid;
+
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "APP request entry - portid=%06x.\n", tdid.b24);
+
+ /* Ran out of space */
+ if (pcnt >= app_req.num_ports)
+ break;
+
+ if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
+ continue;
+
+ if (!N2N_TOPO(vha->hw)) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport,
+ true);
+
+ if (!rval &&
+ !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type &
+ (FCT_TARGET | FCT_NVME_TARGET)))
+ continue;
+ }
+
+ rval = 0;
+
+ app_reply->ports[pcnt].version = EDIF_VERSION1;
+ app_reply->ports[pcnt].remote_type =
+ VND_CMD_RTYPE_UNKNOWN;
+ if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
+ app_reply->ports[pcnt].remote_type |=
+ VND_CMD_RTYPE_TARGET;
+ if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
+ app_reply->ports[pcnt].remote_type |=
+ VND_CMD_RTYPE_INITIATOR;
+
+ app_reply->ports[pcnt].remote_pid = fcport->d_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
+ fcport->node_name, fcport->port_name, pcnt,
+ fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
+
+ switch (fcport->edif.auth_state) {
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
+ fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
+ app_reply->ports[pcnt].auth_state =
+ VND_CMD_AUTH_STATE_NEEDED;
+ } else {
+ app_reply->ports[pcnt].auth_state =
+ VND_CMD_AUTH_STATE_ELS_RCVD;
+ }
+ break;
+ default:
+ app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
+ break;
+ }
+
+ memcpy(app_reply->ports[pcnt].remote_wwpn,
+ fcport->port_name, 8);
+
+ app_reply->ports[pcnt].remote_state =
+ (atomic_read(&fcport->state) ==
+ FCS_ONLINE ? 1 : 0);
+
+ pcnt++;
+
+ if (tdid.b24 != 0)
+ break;
+ }
+ app_reply->port_count = pcnt;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ app_reply,
+ sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
+
+ kfree(app_reply);
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_getstats - app would like to read various statistics info
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int32_t
+qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t size;
+
+ struct app_sinfo_req app_req;
+ struct app_stats_reply *app_reply;
+ uint32_t pcnt = 0;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &app_req,
+ sizeof(struct app_sinfo_req));
+ if (app_req.num_ports == 0) {
+ ql_dbg(ql_dbg_async, vha, 0x911d,
+ "%s app did not indicate number of ports to return\n",
+ __func__);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ }
+
+ size = sizeof(struct app_stats_reply) +
+ (sizeof(struct app_sinfo) * app_req.num_ports);
+
+ app_reply = kzalloc(size, GFP_KERNEL);
+ if (!app_reply) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ rval = -1;
+ } else {
+ struct fc_port *fcport = NULL, *tf;
+
+ app_reply->version = EDIF_VERSION1;
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (fcport->edif.enable) {
+ if (pcnt >= app_req.num_ports)
+ break;
+
+ app_reply->elem[pcnt].rekey_count =
+ fcport->edif.rekey_cnt;
+ app_reply->elem[pcnt].tx_bytes =
+ fcport->edif.tx_bytes;
+ app_reply->elem[pcnt].rx_bytes =
+ fcport->edif.rx_bytes;
+
+ memcpy(app_reply->elem[pcnt].remote_wwpn,
+ fcport->port_name, 8);
+
+ pcnt++;
+ }
+ }
+ app_reply->elem_count = pcnt;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, app_reply,
+ sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
+
+ kfree(app_reply);
+
+ return rval;
+}
+
+static int32_t
+qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_port *fcport;
+ struct aen_complete_cmd ack;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
+
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: %06x event_code %x\n",
+ __func__, ack.port_id.b24, ack.event_code);
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: unable to find fcport %06x \n",
+ __func__, ack.port_id.b24);
+ return 0;
+ }
+
+ switch (ack.event_code) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ fcport->edif.sess_down_acked = 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
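+/*
+ * Drain queued doorbell events into the bsg reply payload: one event
+ * code + data-size header plus the event data per node, until the
+ * reply buffer cannot hold another node.
+ */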
+static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ u32 sg_skip, reply_payload_len;
+ bool keep;
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell ap;
+ int dat_size = 0;
+
+ sg_skip = 0;
+ reply_payload_len = bsg_job->reply_payload.payload_len;
+
+ while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ keep = true;
+ dat_size = 0;
+ ap.event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap.port_id = dbnode->u.plogi_did;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap.port_id = dbnode->u.els_sid;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap.port_id = dbnode->u.sa_aen.port_id;
+ memcpy(&ap.event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_size += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ keep = false;
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown DB type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ break;
+ }
+ ap.event_data_size = dat_size;
+ /* 8 = sizeof(ap.event_code) + sizeof(ap.event_data_size) */
+ dat_size += 8;
+ if (keep)
+ sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &ap, dat_size, sg_skip, false);
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+
+ kfree(dbnode);
+ } else {
+ break;
+ }
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ bsg_reply->reply_payload_rcv_len = sg_skip;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ return 0;
+}
+
+static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ u32 delay)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ /* small sleep for doorbell events to accumulate */
+ if (delay)
+ msleep(delay);
+
+ qla_edif_consume_dbell(vha, bsg_job);
+
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+}
+
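+/* complete any doorbell bsg currently parked on this vha */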
+static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct bsg_job *prev_bsg_job = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (vha->e_dbell.dbell_bsg_job) {
+ prev_bsg_job = vha->e_dbell.dbell_bsg_job;
+ vha->e_dbell.dbell_bsg_job = NULL;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (prev_bsg_job)
+ __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
+}
+
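+/*
+ * Park the doorbell-read bsg until the next event arrives or the
+ * 10 second bsg_expire timer returns it; complete it right away
+ * (after a short accumulation delay) if events are already queued
+ * or the doorbell is inactive.
+ */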
+static int
+qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ unsigned long flags;
+ bool return_bsg = false;
+
+ /* flush previous dbell bsg */
+ qla_edif_dbell_bsg_done(vha);
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
+ /*
+ * when the next db event happens, bsg_job will return.
+ * Otherwise, timer will return it.
+ */
+ vha->e_dbell.dbell_bsg_job = bsg_job;
+ vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
+ } else {
+ return_bsg = true;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (return_bsg)
+ __qla_edif_dbell_bsg_done(vha, bsg_job, 1);
+
+ return 0;
+}
+
+int32_t
+qla_edif_app_mgmt(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct app_id appcheck;
+ bool done = true;
+ int32_t rval = 0;
+ uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+ u32 level = ql_dbg_edif;
+
+ /* doorbell is high traffic */
+ if (vnd_sc == QL_VND_SC_READ_DBELL)
+ level = 0;
+
+ ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
+ __func__, vnd_sc);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appcheck,
+ sizeof(struct app_id));
+
+ if (!vha->hw->flags.edif_enabled ||
+ test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ ql_dbg(level, vha, 0x911d,
+ "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
+ __func__, bsg_job, vha->dpc_flags);
+
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ if (!qla_edif_app_check(vha, appcheck)) {
+ ql_dbg(level, vha, 0x911d,
+ "%s app checked failed.\n",
+ __func__);
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ switch (vnd_sc) {
+ case QL_VND_SC_SA_UPDATE:
+ done = false;
+ rval = qla24xx_sadb_update(bsg_job);
+ break;
+ case QL_VND_SC_APP_START:
+ rval = qla_edif_app_start(vha, bsg_job);
+ break;
+ case QL_VND_SC_APP_STOP:
+ rval = qla_edif_app_stop(vha, bsg_job);
+ break;
+ case QL_VND_SC_AUTH_OK:
+ rval = qla_edif_app_authok(vha, bsg_job);
+ break;
+ case QL_VND_SC_AUTH_FAIL:
+ rval = qla_edif_app_authfail(vha, bsg_job);
+ break;
+ case QL_VND_SC_GET_FCINFO:
+ rval = qla_edif_app_getfcinfo(vha, bsg_job);
+ break;
+ case QL_VND_SC_GET_STATS:
+ rval = qla_edif_app_getstats(vha, bsg_job);
+ break;
+ case QL_VND_SC_AEN_COMPLETE:
+ rval = qla_edif_ack(vha, bsg_job);
+ break;
+ case QL_VND_SC_READ_DBELL:
+ rval = qla_edif_dbell_bsg(vha, bsg_job);
+ done = false;
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
+ __func__,
+ bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
+ rval = EXT_STATUS_INVALID_PARAM;
+ done = false;
+ break;
+ }
+
+done:
+ if (done) {
+ ql_dbg(level, vha, 0x7009,
+ "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
+
+ return rval;
+}
+
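+/*
+ * Allocate an sa_ctl for this sa_update frame and queue it on the
+ * fcport's tx or rx sa list according to direction.
+ */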
+static struct edif_sa_ctl *
+qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
+ int dir)
+{
+ struct edif_sa_ctl *sa_ctl;
+ struct qla_sa_update_frame *sap;
+ int index = sa_frame->fast_sa_index;
+ unsigned long flags = 0;
+
+ sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
+ if (!sa_ctl) {
+ /* couldn't get space */
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "unable to allocate SA CTL\n");
+ return NULL;
+ }
+
+ /*
+ * need to allocate sa_index here and save it
+ * in both sa_ctl->index and sa_frame->fast_sa_index;
+ * If alloc fails then delete sa_ctl and return NULL
+ */
+ INIT_LIST_HEAD(&sa_ctl->next);
+ sap = &sa_ctl->sa_frame;
+ *sap = *sa_frame;
+ sa_ctl->index = index;
+ sa_ctl->fcport = fcport;
+ sa_ctl->flags = 0;
+ sa_ctl->state = 0L;
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
+ __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+ if (dir == SAU_FLG_TX)
+ list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
+ else
+ list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+
+ return sa_ctl;
+}
+
+void
+qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
+{
+ struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
+ next) {
+ list_del(&sa_ctl->next);
+ kfree(sa_ctl);
+ }
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
+ next) {
+ list_del(&sa_ctl->next);
+ kfree(sa_ctl);
+ }
+
+ spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+}
+
+struct edif_sa_ctl *
+qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
+{
+ struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+ struct list_head *sa_list;
+
+ if (dir == SAU_FLG_TX)
+ sa_list = &fcport->edif.tx_sa_list;
+ else
+ sa_list = &fcport->edif.rx_sa_list;
+
+ list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
+ if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
+ sa_ctl->index == index)
+ return sa_ctl;
+ }
+ return NULL;
+}
+
+/* allocate an sa_index for the spi and add the sa_ctl to the correct list */
+static int
+qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame)
+{
+ struct edif_sa_ctl *sa_ctl = NULL;
+ int dir;
+ uint16_t sa_index;
+
+ dir = (sa_frame->flags & SAU_FLG_TX);
+
+ /* map the spi to an sa_index */
+ sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
+ if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
+ /* process rx delete */
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
+ __func__, fcport->loop_id, sa_frame->spi);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(fcport->vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+
+ /* force a return of good bsg status; */
+ return RX_DELETE_NO_EDIF_SA_INDEX;
+ } else if (sa_index == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
+ __func__, sa_frame->spi, dir);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
+ __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
+
+ /* This is a local copy of sa_frame. */
+ sa_frame->fast_sa_index = sa_index;
+ /* create the sa_ctl */
+ sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
+ if (!sa_ctl) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
+ __func__, sa_frame->spi, dir, sa_index);
+ return -1;
+ }
+
+ set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
+
+ if (dir == SAU_FLG_TX)
+ fcport->edif.tx_rekey_cnt++;
+ else
+ fcport->edif.rx_rekey_cnt++;
+
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+ "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
+ __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
+ fcport->edif.tx_rekey_cnt,
+ fcport->edif.rx_rekey_cnt, fcport->loop_id);
+
+ return 0;
+}
+
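+/* retry pacing for qla2x00_start_sp() in qla24xx_sadb_update() */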
+#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
+#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+#define EDIF_MSLEEP_INTERVAL 100
+#define EDIF_RETRY_COUNT 50
+
+int
+qla24xx_sadb_update(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = NULL;
+ srb_t *sp = NULL;
+ struct edif_list_entry *edif_entry = NULL;
+ int found = 0;
+ int rval = 0;
+ int result = 0, cnt;
+ struct qla_sa_update_frame sa_frame;
+ struct srb_iocb *iocb_cmd;
+ port_id_t portid;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
+ "%s entered, vha: 0x%p\n", __func__, vha);
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sa_frame,
+ sizeof(struct qla_sa_update_frame));
+
+ /* Check if host is online */
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ if (DBELL_INACTIVE(vha)) {
+ ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ /* silence unaligned access warning */
+ portid.b.domain = sa_frame.port_id.b.domain;
+ portid.b.area = sa_frame.port_id.b.area;
+ portid.b.al_pa = sa_frame.port_id.b.al_pa;
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
+ if (fcport) {
+ found = 1;
+ if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
+ fcport->edif.tx_bytes = 0;
+ if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
+ fcport->edif.rx_bytes = 0;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
+ sa_frame.port_id.b24);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
+ goto done;
+ }
+
+ /* make sure the nport_handle is valid */
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
+ __func__, fcport->port_name, sa_frame.spi,
+ fcport->disc_state);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
+ goto done;
+ }
+
+ /* allocate and queue an sa_ctl */
+ result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
+
+ /* failure of bsg */
+ if (result == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, skipping update.\n",
+ __func__, fcport->port_name);
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+
+ /* rx delete failure */
+ } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, skipping rx delete.\n",
+ __func__, fcport->port_name);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ goto done;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x70e1,
+ "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
+ __func__, fcport->port_name, sa_frame.fast_sa_index,
+ sa_frame.flags);
+
+ /* looking for rx index and delete */
+ if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+ (sa_frame.flags & SAU_FLG_INV)) {
+ uint16_t nport_handle = fcport->loop_id;
+ uint16_t sa_index = sa_frame.fast_sa_index;
+
+ /*
+ * make sure we have an existing rx key, otherwise just process
+ * this as a straight delete, just like TX.
+ * This is NOT a normal case; it indicates error recovery or key cleanup
+ * by the ipsec code above us.
+ */
+ edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
+ if (!edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
+ __func__, fcport->loop_id, sa_index);
+ goto force_rx_delete;
+ }
+
+ /*
+ * if we have a forced delete for rx, remove the sa_index from the edif list
+ * and proceed with normal delete. The rx delay timer should not be running
+ */
+ if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
+ __func__, fcport->loop_id, sa_index);
+ kfree(edif_entry);
+ goto force_rx_delete;
+ }
+
+ /*
+ * delayed rx delete
+ *
+ * if delete_sa_index is not invalid then there is already
+ * a delayed index in progress, return bsg bad status
+ */
+ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+ struct edif_sa_ctl *sa_ctl;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
+ __func__, edif_entry->handle, edif_entry->delete_sa_index);
+
+ /* free up the sa_ctl that was allocated with the sa_index */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
+ (sa_frame.flags & SAU_FLG_TX));
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n",
+ __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ }
+
+ /* release the sa_index */
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, sa_index, nport_handle);
+ qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
+
+ rval = -EINVAL;
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto done;
+ }
+
+ fcport->edif.rekey_cnt++;
+
+ /* configure and start the rx delay timer */
+ edif_entry->fcport = fcport;
+ edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
+ __func__, edif_entry, sa_index, nport_handle);
+
+ /*
+ * Start the timer when we queue the delayed rx delete.
+ * This is an activity timer that goes off if we have not
+ * received packets with the new sa_index
+ */
+ add_timer(&edif_entry->timer);
+
+ /*
+ * sa_delete for rx key with an active rx key including this one
+ * add the delete rx sa index to the hash so we can look for it
+ * in the rsp queue. Do this after making any changes to the
+ * edif_entry as part of the rx delete.
+ */
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
+ __func__, sa_index, nport_handle, bsg_job);
+
+ edif_entry->delete_sa_index = sa_index;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+
+ goto done;
+
+ /*
+ * rx index and update
+ * add the index to the list and continue with normal update
+ */
+ } else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+ ((sa_frame.flags & SAU_FLG_INV) == 0)) {
+ /* sa_update for rx key */
+ uint32_t nport_handle = fcport->loop_id;
+ uint16_t sa_index = sa_frame.fast_sa_index;
+ int result;
+
+ /*
+ * add the update rx sa index to the hash so we can look for it
+ * in the rsp queue and continue normally
+ */
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
+ __func__, sa_index, nport_handle);
+
+ result = qla_edif_list_add_sa_update_index(fcport, sa_index,
+ nport_handle);
+ if (result) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
+ __func__, sa_index, nport_handle);
+ }
+ }
+ if (sa_frame.flags & SAU_FLG_GMAC_MODE)
+ fcport->edif.aes_gmac = 1;
+ else
+ fcport->edif.aes_gmac = 0;
+
+force_rx_delete:
+ /*
+ * sa_update for both rx and tx keys, sa_delete for tx key
+ * immediately process the request
+ */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ sp->type = SRB_SA_UPDATE;
+ sp->name = "bsg_sa_update";
+ sp->u.bsg_job = bsg_job;
+ /* sp->free = qla2x00_bsg_sp_free; */
+ sp->free = qla2x00_rel_sp;
+ sp->done = qla2x00_bsg_job_done;
+ iocb_cmd = &sp->u.iocb_cmd;
+ iocb_cmd->u.sa_update.sa_frame = sa_frame;
+ cnt = 0;
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
+ ql_log(ql_log_warn, vha, 0x70e3,
+ "%s qla2x00_start_sp failed=%d.\n",
+ __func__, rval);
+
+ qla2x00_rel_sp(sp);
+ rval = -EIO;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: %s sent, hdl=%x, portid=%06x.\n",
+ __func__, sp->name, sp->handle, fcport->d_id.b24);
+
+ fcport->edif.rekey_cnt++;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ return 0;
+
+/*
+ * send back error status
+ */
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
+ __func__, bsg_reply->result, bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return 0;
+}
+
+static void
+qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
+{
+ node->ntype = N_UNDEF;
+ kfree(node);
+}
+
+/**
+ * qla_enode_init - initialize enode structs & lock
+ * @vha: host adapter pointer
+ *
+ * should only be called while the driver is attaching
+ */
+void
+qla_enode_init(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ char name[32];
+
+ if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
+ /* list still active - error */
+ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
+ __func__);
+ return;
+ }
+
+ /* initialize lock which protects pur_core & init list */
+ spin_lock_init(&vha->pur_cinfo.pur_lock);
+ INIT_LIST_HEAD(&vha->pur_cinfo.head);
+
+ snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
+ ha->pdev->device);
+}
+
+/**
+ * qla_enode_stop - stop and clear enode data
+ * @vha: host adapter pointer
+ *
+ * called when app notified it is exiting
+ */
+void
+qla_enode_stop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct enode *node, *q;
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s enode not active\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+ vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
+
+ /* hopefully the list is empty at this point */
+ list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
+ node->dinfo.nodecnt);
+ list_del_init(&node->list);
+ qla_enode_free(vha, node);
+ }
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+}
+
+static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
+{
+ unsigned long flags;
+ struct enode *e, *tmp;
+ struct purexevent *purex;
+ LIST_HEAD(enode_list);
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s enode not active\n", __func__);
+ return;
+ }
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+ list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
+ purex = &e->u.purexinfo;
+ if (purex->pur_info.pur_sid.b24 == portid.b24) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
+ __func__, portid.b24,
+ purex->pur_info.pur_rx_xchg_address,
+ purex->pur_info.pur_bytes_rcvd);
+
+ list_del_init(&e->list);
+ list_add_tail(&e->list, &enode_list);
+ }
+ }
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &enode_list, list) {
+ list_del_init(&e->list);
+ qla_enode_free(vha, e);
+ }
+}
+
+/*
+ * allocate enode struct and populate buffer
+ * returns: enode pointer with buffers
+ * NULL on error
+ */
+static struct enode *
+qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+ struct enode *node;
+ struct purexevent *purex;
+
+ node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
+ if (!node)
+ return NULL;
+
+ purex = &node->u.purexinfo;
+ purex->msgp = (u8 *)(node + 1);
+ purex->msgp_len = ELS_MAX_PAYLOAD;
+
+ node->ntype = ntype;
+ INIT_LIST_HEAD(&node->list);
+ return node;
+}
+
+static void
+qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
+{
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
+ "%s add enode for type=%x, cnt=%x\n",
+ __func__, ptr->ntype, ptr->dinfo.nodecnt);
+
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+ list_add_tail(&ptr->list, &vha->pur_cinfo.head);
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+}
+
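+/*
+ * Find and unlink the first queued purex enode whose ELS source port
+ * id matches p1 (p2 is currently unused for N_PUREX lookups).
+ */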
+static struct enode *
+qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
+{
+ struct enode *node_rtn = NULL;
+ struct enode *list_node, *q;
+ unsigned long flags;
+ uint32_t sid;
+ struct purexevent *purex;
+
+ /* secure the list from moving under us */
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+ list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
+
+ /* node type determines what p1 and p2 are */
+ purex = &list_node->u.purexinfo;
+ sid = p1;
+
+ if (purex->pur_info.pur_sid.b24 == sid) {
+ /* found it and its complete */
+ node_rtn = list_node;
+ list_del(&list_node->list);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ return node_rtn;
+}
+
+/**
+ * qla_pur_get_pending - read/return authentication message sent
+ * from remote port
+ * @vha: host adapter pointer
+ * @fcport: session pointer
+ * @bsg_job: user request the message is copied to.
+ */
+static int
+qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct bsg_job *bsg_job)
+{
+ struct enode *ptr;
+ struct purexevent *purex;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ bsg_job->reply_len = sizeof(*rpl);
+
+ ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
+ if (!ptr) {
+ ql_dbg(ql_dbg_edif, vha, 0x9111,
+ "%s no enode data found for %8phN sid=%06x\n",
+ __func__, fcport->port_name, fcport->d_id.b24);
+ SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
+ return -EIO;
+ }
+
+ /*
+ * enode is now off the linked list and is ours to deal with
+ */
+ purex = &ptr->u.purexinfo;
+
+ /* Copy info back to caller */
+ rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
+
+ SET_DID_STATUS(rpl->r.result, DID_OK);
+ rpl->r.reply_payload_rcv_len =
+ sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, purex->msgp,
+ purex->pur_info.pur_bytes_rcvd, 0);
+
+ /* data copy / passback completed - destroy enode */
+ qla_enode_free(vha, ptr);
+
+ return 0;
+}
+
+/* it is assumed the qpair lock is held */
+static int
+qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
+ struct qla_els_pt_arg *a)
+{
+ struct els_entry_24xx *els_iocb;
+
+ els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!els_iocb) {
+ ql_log(ql_log_warn, vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla_els_pt_iocb(vha, els_iocb, a);
+
+ ql_dbg(ql_dbg_edif, vha, 0x0183,
+ "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
+ a->ox_id, a->sid.b24, a->did.b24);
+ ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
+ vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+void
+qla_edb_init(scsi_qla_host_t *vha)
+{
+ if (DBELL_ACTIVE(vha)) {
+ /* list already init'd - error */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "edif db already initialized, cannot reinit\n");
+ return;
+ }
+
+ /* initialize lock which protects doorbell & init list */
+ spin_lock_init(&vha->e_dbell.db_lock);
+ INIT_LIST_HEAD(&vha->e_dbell.head);
+}
+
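+/* purge queued doorbell events that belong to the given port id */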
+static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
+{
+ unsigned long flags;
+ struct edb_node *e, *tmp;
+ port_id_t sid;
+ LIST_HEAD(edb_list);
+
+ if (DBELL_INACTIVE(vha)) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
+ switch (e->ntype) {
+ case VND_CMD_AUTH_STATE_NEEDED:
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ sid = e->u.plogi_did;
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ sid = e->u.els_sid;
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ /* app wants to see this */
+ continue;
+ default:
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown node type: %x\n", __func__, e->ntype);
+ sid.b24 = 0;
+ break;
+ }
+ if (sid.b24 == portid.b24) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s free doorbell event : node type = %x %p\n",
+ __func__, e->ntype, e);
+ list_del_init(&e->list);
+ list_add_tail(&e->list, &edb_list);
+ }
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &edb_list, list)
+ qla_edb_node_free(vha, e);
+}
+
+/* function called when app is stopping */
+
+void
+qla_edb_stop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *node, *q;
+
+ if (DBELL_INACTIVE(vha)) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
+ /* the list should be empty at this point */
+ list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s freeing edb_node type=%x\n",
+ __func__, node->ntype);
+ qla_edb_node_free(vha, node);
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ qla_edif_dbell_bsg_done(vha);
+}
+
+static struct edb_node *
+qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+ struct edb_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node) {
+ /* couldn't get space */
+ ql_dbg(ql_dbg_edif, vha, 0x9100,
+ "edb node unable to be allocated\n");
+ return NULL;
+ }
+
+ node->ntype = ntype;
+ INIT_LIST_HEAD(&node->list);
+ return node;
+}
+
+/* adds an already allocated edb node to the doorbell list */
+static bool
+qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
+{
+ unsigned long flags;
+
+ if (DBELL_INACTIVE(vha)) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return false;
+ }
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ list_add_tail(&ptr->list, &vha->e_dbell.head);
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return true;
+}
+
+/* adds event to doorbell list */
+void
+qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
+ uint32_t data, uint32_t data2, fc_port_t *sfcport)
+{
+ struct edb_node *edbnode;
+ fc_port_t *fcport = sfcport;
+ port_id_t id;
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif not enabled */
+ return;
+ }
+
+ if (DBELL_INACTIVE(vha)) {
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled (type=%d\n", __func__, dbtype);
+ return;
+ }
+
+ edbnode = qla_edb_node_alloc(vha, dbtype);
+ if (!edbnode) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to alloc db node\n", __func__);
+ return;
+ }
+
+ if (!fcport) {
+ id.b.domain = (data >> 16) & 0xff;
+ id.b.area = (data >> 8) & 0xff;
+ id.b.al_pa = data & 0xff;
+ ql_dbg(ql_dbg_edif, vha, 0x09222,
+ "%s: Arrived s_id: %06x\n", __func__,
+ id.b24);
+ fcport = qla2x00_find_fcport_by_pid(vha, &id);
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s can't find fcport for sid= 0x%x - ignoring\n",
+ __func__, id.b24);
+ kfree(edbnode);
+ return;
+ }
+ }
+
+ /* populate the edb node */
+ switch (dbtype) {
+ case VND_CMD_AUTH_STATE_NEEDED:
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ edbnode->u.plogi_did.b24 = fcport->d_id.b24;
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ edbnode->u.els_sid.b24 = fcport->d_id.b24;
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ edbnode->u.sa_aen.port_id = fcport->d_id;
+ edbnode->u.sa_aen.status = data;
+ edbnode->u.sa_aen.key_type = data2;
+ edbnode->u.sa_aen.version = EDIF_VERSION1;
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unknown type: %x\n", __func__, dbtype);
+ kfree(edbnode);
+ edbnode = NULL;
+ break;
+ }
+
+ if (edbnode) {
+ if (!qla_edb_node_add(vha, edbnode)) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ kfree(edbnode);
+ return;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+ qla_edif_dbell_bsg_done(vha);
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
+ }
+}
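+
+/*
+ * Illustrative note: callers that have no fcport pack the 24-bit source
+ * id into 'data' exactly as it is unpacked above, e.g.
+ *
+ *	u32 data = (s_id.b.domain << 16) | (s_id.b.area << 8) |
+ *		   s_id.b.al_pa;
+ *	qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_ELS_RCVD, data, 0, NULL);
+ *
+ * which matches the qla24xx_auth_els() caller later in this file.
+ */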
+
+void
+qla_edif_timer(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
+ if (DBELL_INACTIVE(vha) &&
+ ha->edif_post_stop_cnt_down) {
+ ha->edif_post_stop_cnt_down--;
+
+ /*
+ * Turn off the automatic 'PLOGI ACC + secure=1' feature by
+ * clearing Additional FW option[3] BIT_15; the chip reset
+ * below lets the firmware pick up the change.
+ */
+ if (ha->edif_post_stop_cnt_down == 0) {
+ ql_dbg(ql_dbg_async, vha, 0x911d,
+ "%s chip reset to turn off PLOGI ACC + secure\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ } else {
+ ha->edif_post_stop_cnt_down = 60;
+ }
+ }
+
+ if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
+ qla_edif_dbell_bsg_done(vha);
+}
+
+static void qla_noop_sp_done(srb_t *sp, int res)
+{
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+}
+
+/*
+ * Called from the work queue: build and send the sa_update iocb to
+ * delete an rx sa_index.
+ */
+int
+qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
+{
+ srb_t *sp;
+ fc_port_t *fcport = NULL;
+ struct srb_iocb *iocb_cmd = NULL;
+ int rval = QLA_SUCCESS;
+ struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
+ uint16_t nport_handle = e->u.sa_update.nport_handle;
+
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "%s: starting, sa_ctl: %p\n", __func__, sa_ctl);
+
+ if (!sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "sa_ctl allocation failed\n");
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ fcport = sa_ctl->fcport;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "SRB allocation failed\n");
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ iocb_cmd = &sp->u.iocb_cmd;
+ iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3073,
+ "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
+ fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
+ /*
+ * if this is a sadb cleanup delete, mark it so the isr can
+ * take the correct action
+ */
+ if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
+ /* mark this srb as a cleanup delete */
+ sp->flags |= SRB_EDIF_CLEANUP_DELETE;
+ ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
+ }
+
+ sp->type = SRB_SA_REPLACE;
+ sp->name = "SA_REPLACE";
+ sp->fcport = fcport;
+ sp->free = qla2x00_rel_sp;
+ sp->done = qla_noop_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+
+ if (rval != QLA_SUCCESS) {
+ goto done_free_sp;
+ }
+
+ return rval;
+done_free_sp:
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
+}
+
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+ int itr = 0;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_sa_update_frame *sa_frame =
+ &sp->u.iocb_cmd.u.sa_update.sa_frame;
+ u8 flags = 0;
+
+ switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
+ case 0:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ break;
+ case 1:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_INVALIDATE;
+ break;
+ case 2:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_TX;
+ break;
+ case 3:
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, sa_frame->fast_sa_index);
+ flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
+ break;
+ }
+
+ sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+ sa_update_iocb->entry_count = 1;
+ sa_update_iocb->sys_define = 0;
+ sa_update_iocb->entry_status = 0;
+ sa_update_iocb->handle = sp->handle;
+ sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+ sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+ sa_update_iocb->flags = flags;
+ sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
+ sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
+ sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
+
+ sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
+ if (sp->fcport->edif.aes_gmac)
+ sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
+
+ if (sa_frame->flags & SAU_FLG_KEY256) {
+ sa_update_iocb->sa_control |= SA_CNTL_KEY256;
+ for (itr = 0; itr < 32; itr++)
+ sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+ } else {
+ sa_update_iocb->sa_control |= SA_CNTL_KEY128;
+ for (itr = 0; itr < 16; itr++)
+ sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x921d,
+ "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
+ __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+ sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
+ sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
+ sp->fcport->edif.aes_gmac);
+
+ if (sa_frame->flags & SAU_FLG_TX)
+ sp->fcport->edif.tx_sa_pending = 1;
+ else
+ sp->fcport->edif.rx_sa_pending = 1;
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
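+
+/*
+ * Flag translation performed above (sa_frame SAU_* bits in, IOCB
+ * SA_FLAG_* bits out):
+ *
+ *	SAU_FLG_TX  SAU_FLG_INV  ->  IOCB flags
+ *	    0            0           0 (update rx)
+ *	    0            1           SA_FLAG_INVALIDATE (delete rx)
+ *	    1            0           SA_FLAG_TX (update tx)
+ *	    1            1           SA_FLAG_TX | SA_FLAG_INVALIDATE (delete tx)
+ */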
+
+void
+qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+ struct scsi_qla_host *vha = sp->vha;
+ struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
+ struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
+ uint16_t nport_handle = sp->fcport->loop_id;
+
+ sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+ sa_update_iocb->entry_count = 1;
+ sa_update_iocb->sys_define = 0;
+ sa_update_iocb->entry_status = 0;
+ sa_update_iocb->handle = sp->handle;
+
+ sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
+
+ sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+ sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+ /* Invalidate the index. salt, spi, control & key are ignored */
+ sa_update_iocb->flags = SA_FLAG_INVALIDATE;
+ sa_update_iocb->salt = 0;
+ sa_update_iocb->spi = 0;
+ sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
+ sa_update_iocb->sa_control = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x921d,
+ "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
+ __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+ sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
+ sa_update_iocb->sa_index, sp->handle);
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
+{
+ struct purex_entry_24xx *p = *pkt;
+ struct enode *ptr;
+ int sid;
+ u16 totlen;
+ struct purexevent *purex;
+ struct scsi_qla_host *host = NULL;
+ int rc;
+ struct fc_port *fcport;
+ struct qla_els_pt_arg a;
+ be_id_t beid;
+
+ memset(&a, 0, sizeof(a));
+
+ a.els_opcode = ELS_AUTH_ELS;
+ a.nport_handle = p->nport_handle;
+ a.rx_xchg_address = p->rx_xchg_addr;
+ a.did.b.domain = p->s_id[2];
+ a.did.b.area = p->s_id[1];
+ a.did.b.al_pa = p->s_id[0];
+ a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
+ a.tx_addr = vha->hw->elsrej.cdma;
+ a.vp_idx = vha->vp_idx;
+ a.control_flags = EPD_ELS_RJT;
+ a.ox_id = le16_to_cpu(p->ox_id);
+
+ sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
+
+ totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
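+ /*
+ * Bit 15 of status_flags is assumed to flag a truncated frame:
+ * only trunc_frame_size bytes arrived, so the ELS is rejected
+ * below rather than queued.
+ */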
+ if (le16_to_cpu(p->status_flags) & 0x8000) {
+ totlen = le16_to_cpu(p->trunc_frame_size);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ if (totlen > ELS_MAX_PAYLOAD) {
+ ql_dbg(ql_dbg_edif, vha, 0x0910d,
+ "%s WARNING: verbose ELS frame received (totlen=%x)\n",
+ __func__, totlen);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ if (!vha->hw->flags.edif_enabled) {
+ /* edif support not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
+ __func__);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ ptr = qla_enode_alloc(vha, N_PUREX);
+ if (!ptr) {
+ ql_dbg(ql_dbg_edif, vha, 0x09109,
+ "WARNING: enode alloc failed for sid=%x\n",
+ sid);
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return;
+ }
+
+ purex = &ptr->u.purexinfo;
+ purex->pur_info.pur_sid = a.did;
+ purex->pur_info.pur_bytes_rcvd = totlen;
+ purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
+ purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
+ purex->pur_info.pur_did.b.domain = p->d_id[2];
+ purex->pur_info.pur_did.b.area = p->d_id[1];
+ purex->pur_info.pur_did.b.al_pa = p->d_id[0];
+ purex->pur_info.vp_idx = p->vp_idx;
+
+ a.sid = purex->pur_info.pur_did;
+
+ rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
+ purex->msgp_len);
+ if (rc) {
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ qla_enode_free(vha, ptr);
+ return;
+ }
+ beid.al_pa = purex->pur_info.pur_did.b.al_pa;
+ beid.area = purex->pur_info.pur_did.b.area;
+ beid.domain = purex->pur_info.pur_did.b.domain;
+ host = qla_find_host_by_d_id(vha, beid);
+ if (!host) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "%s Drop ELS due to unable to find host %06x\n",
+ __func__, purex->pur_info.pur_did.b24);
+
+ qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+ qla_enode_free(vha, ptr);
+ return;
+ }
+
+ fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
+
+ if (DBELL_INACTIVE(vha)) {
+ ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
+ __func__, host->e_dbell.db_flags,
+ fcport ? fcport->d_id.b24 : 0);
+
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ return;
+ }
+
+ if (fcport && EDIF_SESSION_DOWN(fcport)) {
+ ql_dbg(ql_dbg_edif, host, 0x13b6,
+ "%s terminate exchange. Send logo to 0x%x\n",
+ __func__, a.did.b24);
+
+ a.tx_byte_count = a.tx_len = 0;
+ a.tx_addr = 0;
+ a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ /* send logo to let remote port know to tear down session */
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
+ /* add the local enode to the list */
+ qla_enode_add(host, ptr);
+
+ ql_dbg(ql_dbg_edif, host, 0x0910c,
+ "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
+ __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
+ purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);
+
+ qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
+}
+
+static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ void *sa_id_map;
+ unsigned long flags = 0;
+ u16 sa_index;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir)
+ sa_id_map = ha->edif_tx_sa_id_map;
+ else
+ sa_id_map = ha->edif_rx_sa_id_map;
+
+ spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+ sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
+ if (sa_index >= EDIF_NUM_SA_INDEX) {
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+ return INVALID_EDIF_SA_INDEX;
+ }
+ set_bit(sa_index, sa_id_map);
+ spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+
+ if (dir)
+ sa_index += EDIF_TX_SA_INDEX_BASE;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: index retrieved from free pool %d\n", __func__, sa_index);
+
+ return sa_index;
+}
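+
+/*
+ * Illustrative note: rx and tx indexes come from separate bitmaps of
+ * EDIF_NUM_SA_INDEX bits each, and a tx index is returned offset by
+ * EDIF_TX_SA_INDEX_BASE. Bit 0 of the rx map is sa_index 0 while bit 0
+ * of the tx map is sa_index EDIF_TX_SA_INDEX_BASE, which is how
+ * qla_edif_sadb_delete_sa_index() recovers the direction from the
+ * index value alone.
+ */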
+
+/* find an sadb entry for an nport_handle */
+static struct edif_sa_index_entry *
+qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+ struct list_head *sa_list)
+{
+ struct edif_sa_index_entry *entry;
+ struct edif_sa_index_entry *tentry;
+ struct list_head *indx_list = sa_list;
+
+ list_for_each_entry_safe(entry, tentry, indx_list, next) {
+ if (entry->handle == nport_handle)
+ return entry;
+ }
+ return NULL;
+}
+
+/* remove an sa_index from the nport_handle and return it to the free pool */
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+ uint16_t sa_index)
+{
+ struct edif_sa_index_entry *entry;
+ struct list_head *sa_list;
+ int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ int slot = 0;
+ int free_slot_count = 0;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: entry\n", __func__);
+
+ if (dir)
+ sa_list = &ha->sadb_tx_index_list;
+ else
+ sa_list = &ha->sadb_rx_index_list;
+
+ entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+ if (!entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: no entry found for nport_handle 0x%x\n",
+ __func__, nport_handle);
+ return -1;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ /*
+ * Each tx/rx direction has up to 2 sa_index slots: one for in-flight
+ * traffic, the other is used at re-key time.
+ */
+ for (slot = 0; slot < 2; slot++) {
+ if (entry->sa_pair[slot].sa_index == sa_index) {
+ entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
+ entry->sa_pair[slot].spi = 0;
+ free_slot_count++;
+ qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
+ } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+ free_slot_count++;
+ }
+ }
+
+ if (free_slot_count == 2) {
+ list_del(&entry->next);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_index %d removed, free_slot_count: %d\n",
+ __func__, sa_index, free_slot_count);
+
+ return 0;
+}
+
+void
+qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
+ struct sa_update_28xx *pkt)
+{
+ const char *func = "SA_UPDATE_RESPONSE_IOCB";
+ srb_t *sp;
+ struct edif_sa_ctl *sa_ctl;
+ int old_sa_deleted = 1;
+ uint16_t nport_handle;
+ struct scsi_qla_host *vha;
+
+ sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
+
+ if (!sp) {
+ ql_dbg(ql_dbg_edif, v, 0x3063,
+ "%s: no sp found for pkt\n", __func__);
+ return;
+ }
+ /* use sp->vha due to npiv */
+ vha = sp->vha;
+
+ switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
+ case 0:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 1:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
+ __func__, vha, pkt->sa_index);
+ break;
+ }
+
+ /*
+ * Dig the nport handle out of the iocb; fcport->loop_id cannot be
+ * trusted to be correct during cleanup sa_update iocbs.
+ */
+ nport_handle = sp->fcport->loop_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
+ __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
+ nport_handle, pkt->sa_index, pkt->flags, sp->handle);
+
+ /* if rx delete, remove the timer */
+ if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) {
+ struct edif_list_entry *edif_entry;
+
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: removing edif_entry %p, new sa_index: 0x%x\n",
+ __func__, edif_entry, pkt->sa_index);
+ qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
+ del_timer(&edif_entry->timer);
+
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
+ __func__, edif_entry, pkt->sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+
+ /*
+ * if this is a delete for either tx or rx, make sure it succeeded.
+ * The new_sa_info field should be 0xffff on success
+ */
+ if (pkt->flags & SA_FLAG_INVALIDATE)
+ old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
+
+ /* Process update and delete the same way */
+
+ /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
+ if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: nph 0x%x, sa_index %d removed from fw\n",
+ __func__, sp->fcport->loop_id, pkt->sa_index);
+
+ } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
+ old_sa_deleted) {
+ /*
+ * Note: we only keep track of the latest SA, so we know when we
+ * can start enabling encryption per I/O. If all SAs get deleted,
+ * let FW reject the IOCB.
+ *
+ * TODO: edif: don't set enabled here I think
+ * TODO: edif: prli complete is where it should be set
+ */
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "SA(%x)updated for s_id %02x%02x%02x\n",
+ pkt->new_sa_info,
+ pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+ sp->fcport->edif.enable = 1;
+ if (pkt->flags & SA_FLAG_TX) {
+ sp->fcport->edif.tx_sa_set = 1;
+ sp->fcport->edif.tx_sa_pending = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_TX_SA_KEY, sp->fcport);
+ } else {
+ sp->fcport->edif.rx_sa_set = 1;
+ sp->fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, sp->fcport);
+ }
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
+ __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
+ pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+
+ if (pkt->flags & SA_FLAG_TX)
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+ QL_VND_TX_SA_KEY, sp->fcport);
+ else
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+ QL_VND_RX_SA_KEY, sp->fcport);
+ }
+
+ /* for delete, release sa_ctl, sa_index */
+ if (pkt->flags & SA_FLAG_INVALIDATE) {
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
+ le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
+ (pkt->flags & SA_FLAG_TX)) != NULL) {
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+ "%s: freeing sa_ctl for index %d\n",
+ __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n",
+ __func__, sa_ctl);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, le16_to_cpu(pkt->sa_index), nport_handle);
+ qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+ le16_to_cpu(pkt->sa_index));
+ /*
+ * check for a failed sa_update and remove
+ * the sadb entry.
+ */
+ } else if (pkt->u.comp_sts) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, pkt->sa_index, nport_handle);
+ qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+ le16_to_cpu(pkt->sa_index));
+ switch (le16_to_cpu(pkt->u.comp_sts)) {
+ case CS_PORT_EDIF_UNAVAIL:
+ case CS_PORT_EDIF_LOGOUT:
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ break;
+ default:
+ break;
+ }
+ }
+
+ sp->done(sp, 0);
+}
+
+/**
+ * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Return: non-zero if a failure occurred, else zero.
+ */
+int
+qla28xx_start_scsi_edif(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index, i;
+ uint32_t handle;
+ uint16_t cnt;
+ int16_t req_cnt;
+ uint16_t tot_dsds;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_6 *cmd_pkt;
+ struct dsd64 *cur_dsd;
+ uint8_t avail_dsds = 0;
+ struct scatterlist *sg;
+ struct req_que *req = sp->qpair->req;
+ spinlock_t *lock = sp->qpair->qp_lock_ptr;
+
+ /* Setup device pointers. */
+ cmd = GET_CMD_SP(sp);
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ rd_reg_dword(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = sp->u.scmd.ct6_ctx =
+ mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /*
+ * SCSI commands bigger than 16 bytes must be a
+ * multiple of 4 bytes
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
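+ /*
+ * FCP_CMND IU layout behind the math above: the 8-byte LUN and 4
+ * control/task-attribute bytes (= 12), then the CDB, then the
+ * 4-byte FCP_DL. A standard 16-byte CDB therefore gives
+ * fcp_cmnd_len = 12 + 16 + 4 = 32, and fcp_dl below is written at
+ * cdb + 16 + additional_cdb_len.
+ */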
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /*
+ * Zero out remaining portion of packet.
+ * tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = cpu_to_le32(0);
+ goto no_dsds;
+ }
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
+ sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
+ sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
+ }
+
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+ cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
+
+ /* One DSD is available in the Command Type 6 IOCB */
+ avail_dsds = 1;
+ cur_dsd = &cmd_pkt->fcp_dsd;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ put_unaligned_le64(sle_dma, &cur_dsd->address);
+ cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
+ cur_dsd++;
+ avail_dsds--;
+ }
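+ /*
+ * IOCB count sketch: the Command Type 6 entry holds one DSD and
+ * each Continuation Type 1 entry holds five, so e.g. 11 segments
+ * need 1 + DIV_ROUND_UP(11 - 1, 5) = 3 entries, which is the
+ * req_cnt that qla24xx_calc_iocbs() computed above.
+ */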
+
+no_dsds:
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ cmd_pkt->entry_type = COMMAND_TYPE_6;
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->entry_status = 0;
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Adjust ring index. */
+ wmb();
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->u.scmd.ct6_ctx) {
+ mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
+ sp->u.scmd.ct6_ctx = NULL;
+ }
+ qla_put_iocbs(sp->qpair, &sp->iores);
+ spin_unlock_irqrestore(lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+/**********************************************
+ * edif update/delete sa_index list functions *
+ **********************************************/
+
+/* clear the edif_indx_list for this port */
+void qla_edif_list_del(fc_port_t *fcport)
+{
+ struct edif_list_entry *indx_lst;
+ struct edif_list_entry *tindx_lst;
+ struct list_head *indx_list = &fcport->edif.edif_indx_list;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
+ list_del(&indx_lst->next);
+ kfree(indx_lst);
+ }
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
+/******************
+ * SADB functions *
+ ******************/
+
+/* allocate/retrieve an sa_index for a given spi */
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+ struct qla_sa_update_frame *sa_frame)
+{
+ struct edif_sa_index_entry *entry;
+ struct list_head *sa_list;
+ uint16_t sa_index;
+ int dir = sa_frame->flags & SAU_FLG_TX;
+ int slot = 0;
+ int free_slot = -1;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+ uint16_t nport_handle = fcport->loop_id;
+
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: entry fc_port: %p, nport_handle: 0x%x\n",
+ __func__, fcport, nport_handle);
+
+ if (dir)
+ sa_list = &ha->sadb_tx_index_list;
+ else
+ sa_list = &ha->sadb_rx_index_list;
+
+ entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+ if (!entry) {
+ if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: rx delete request with no entry\n", __func__);
+ return RX_DELETE_NO_EDIF_SA_INDEX;
+ }
+
+ /* if there is no entry for this nport, add one */
+ entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
+ if (!entry)
+ return INVALID_EDIF_SA_INDEX;
+
+ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+ if (sa_index == INVALID_EDIF_SA_INDEX) {
+ kfree(entry);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ INIT_LIST_HEAD(&entry->next);
+ entry->handle = nport_handle;
+ entry->fcport = fcport;
+ entry->sa_pair[0].spi = sa_frame->spi;
+ entry->sa_pair[0].sa_index = sa_index;
+ entry->sa_pair[1].spi = 0;
+ entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ list_add_tail(&entry->next, sa_list);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
+ __func__, nport_handle, sa_frame->spi, sa_index);
+
+ return sa_index;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ /* see if we already have an entry for this spi */
+ for (slot = 0; slot < 2; slot++) {
+ if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+ free_slot = slot;
+ } else {
+ if (entry->sa_pair[slot].spi == sa_frame->spi) {
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
+ __func__, slot, entry->handle, sa_frame->spi,
+ entry->sa_pair[slot].sa_index);
+ return entry->sa_pair[slot].sa_index;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ /* both slots are used */
+ if (free_slot == -1) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
+ __func__, entry->handle, sa_frame->spi);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
+ __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
+ entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
+
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ /* there is at least one free slot, use it */
+ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+ if (sa_index == INVALID_EDIF_SA_INDEX) {
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: empty freepool!!\n", __func__);
+ return INVALID_EDIF_SA_INDEX;
+ }
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ entry->sa_pair[free_slot].spi = sa_frame->spi;
+ entry->sa_pair[free_slot].sa_index = sa_index;
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+ "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
+ __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
+
+ return sa_index;
+}
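+
+/*
+ * Two-slot lifecycle sketch: the first update for a handle creates the
+ * entry with its spi in slot 0; a re-key while that SA is live takes
+ * the free slot; deleting the old SA invalidates its slot and returns
+ * the sa_index to the free pool for the next re-key. With both slots
+ * occupied the update is refused with INVALID_EDIF_SA_INDEX.
+ */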
+
+/* release any sadb entries -- only done at teardown */
+void qla_edif_sadb_release(struct qla_hw_data *ha)
+{
+ struct edif_sa_index_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+ list_del(&entry->next);
+ kfree(entry);
+ }
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+ list_del(&entry->next);
+ kfree(entry);
+ }
+}
+
+/**************************
+ * sadb freepool functions
+ **************************/
+
+/* build the rx and tx sa_index free pools -- only done at fcport init */
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
+{
+ ha->edif_tx_sa_id_map =
+ kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+
+ if (!ha->edif_tx_sa_id_map) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+ "Unable to allocate memory for sadb tx.\n");
+ return -ENOMEM;
+ }
+
+ ha->edif_rx_sa_id_map =
+ kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+ if (!ha->edif_rx_sa_id_map) {
+ kfree(ha->edif_tx_sa_id_map);
+ ha->edif_tx_sa_id_map = NULL;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+ "Unable to allocate memory for sadb rx.\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* release the free pool - only done during fcport teardown */
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
+{
+ kfree(ha->edif_tx_sa_id_map);
+ ha->edif_tx_sa_id_map = NULL;
+ kfree(ha->edif_rx_sa_id_map);
+ ha->edif_rx_sa_id_map = NULL;
+}
+
+static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
+{
+ struct edif_list_entry *edif_entry;
+ struct edif_sa_ctl *sa_ctl;
+ uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
+ unsigned long flags = 0;
+ uint16_t nport_handle = fcport->loop_id;
+ uint16_t cached_nport_handle;
+
+ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+ edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
+ if (!edif_entry) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return; /* no pending delete for this handle */
+ }
+
+ /*
+ * check for no pending delete for this index or iocb does not
+ * match rx sa_index
+ */
+ if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
+ edif_entry->update_sa_index != sa_index) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return;
+ }
+
+ /*
+ * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
+ * transfers before queueing the RX delete
+ */
+ if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_edif, vha, 0x5033,
+ "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
+
+ delete_sa_index = edif_entry->delete_sa_index;
+ edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+ cached_nport_handle = edif_entry->handle;
+ spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+ /* sanity check on the nport handle */
+ if (nport_handle != cached_nport_handle) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
+ __func__, nport_handle, cached_nport_handle);
+ }
+
+ /* find the sa_ctl for the delete and schedule the delete */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
+ if (sa_ctl) {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
+ __func__, sa_ctl, sa_index);
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
+ delete_sa_index,
+ edif_entry->update_sa_index, nport_handle, handle);
+
+ sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+ set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+ qla_post_sa_replace_work(fcport->vha, fcport,
+ nport_handle, sa_ctl);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x3063,
+ "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
+ __func__, delete_sa_index);
+ }
+}
+
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ srb_t *sp, struct sts_entry_24xx *sts24)
+{
+ fc_port_t *fcport = sp->fcport;
+ /* sa_index used by this iocb */
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint32_t handle;
+
+ handle = (uint32_t)LSW(sts24->handle);
+
+ /* find out if this status iocb is for a scsi read */
+ if (cmd->sc_data_direction != DMA_FROM_DEVICE)
+ return;
+
+ return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
+ le16_to_cpu(sts24->edif_sa_index));
+}
+
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct ctio7_from_24xx *pkt)
+{
+ __chk_edif_rx_sa_delete_pending(vha, fcport,
+ pkt->handle, le16_to_cpu(pkt->edif_sa_index));
+}
+
+static void qla_parse_auth_els_ctl(struct srb *sp)
+{
+ struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
+ struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
+ struct fc_bsg_request *request = bsg_job->request;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ a->tx_len = a->tx_byte_count = sp->remap.req.len;
+ a->tx_addr = sp->remap.req.dma;
+ a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
+ a->rx_addr = sp->remap.rsp.dma;
+
+ if (p->e.sub_cmd == SEND_ELS_REPLY) {
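+ /*
+ * The shift below is assumed to place the LS_ACC/LS_RJT/TRM code
+ * into bits 15-13 of the ELS IOCB control-flags word, lining up
+ * with EPD_ELS_RJT / EPD_RX_XCHG used elsewhere in this file.
+ */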
+ a->control_flags = p->e.extra_control_flags << 13;
+ a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
+ if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
+ a->els_opcode = ELS_LS_ACC;
+ else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
+ a->els_opcode = ELS_LS_RJT;
+ }
+ a->did = sp->fcport->d_id;
+ a->els_opcode = request->rqst_data.h_els.command_code;
+ a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ a->vp_idx = sp->vha->vp_idx;
+}
+
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int rval = (DID_ERROR << 16), cnt;
+ port_id_t d_id;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ rpl->version = EDIF_VERSION1;
+
+ d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
+ d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
+ d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
+
+ /* find matching d_id in fcport list */
+ fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x911a,
+ "%s fcport not find online portid=%06x.\n",
+ __func__, d_id.b24);
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ return -EIO;
+ }
+
+ if (qla_bsg_check(vha, bsg_job, fcport))
+ return 0;
+
+ if (EDIF_SESS_DELETE(fcport)) {
+ ql_dbg(ql_dbg_edif, vha, 0x910d,
+ "%s ELS code %x, no loop id.\n", __func__,
+ bsg_request->rqst_data.r_els.els_code);
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ return -ENXIO;
+ }
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ rval = -EIO;
+ goto done;
+ }
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0x7001,
+ "ELS passthru not supported for ISP23xx based adapters.\n");
+ SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+ rval = -EPERM;
+ goto done;
+ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_user, vha, 0x7004,
+ "Failed get sp pid=%06x\n", fcport->d_id.b24);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done;
+ }
+
+ sp->remap.req.len = bsg_job->request_payload.payload_len;
+ sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
+ GFP_KERNEL, &sp->remap.req.dma);
+ if (!sp->remap.req.buf) {
+ ql_dbg(ql_dbg_user, vha, 0x7005,
+ "Failed allocate request dma len=%x\n",
+ bsg_job->request_payload.payload_len);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done_free_sp;
+ }
+
+ sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
+ sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
+ GFP_KERNEL, &sp->remap.rsp.dma);
+ if (!sp->remap.rsp.buf) {
+ ql_dbg(ql_dbg_user, vha, 0x7006,
+ "Failed allocate response dma len=%x\n",
+ bsg_job->reply_payload.payload_len);
+ rval = -ENOMEM;
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ goto done_free_remap_req;
+ }
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
+ sp->remap.req.len);
+ sp->remap.remapped = true;
+
+ sp->type = SRB_ELS_CMD_HST_NOLOGIN;
+ sp->name = "SPCN_BSG_HST_NOLOGIN";
+ sp->u.bsg_cmd.bsg_job = bsg_job;
+ qla_parse_auth_els_ctl(sp);
+
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ cnt = 0;
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+ fallthrough;
+ default:
+ ql_log(ql_log_warn, vha, 0x700e,
+ "%s qla2x00_start_sp failed = %d\n", __func__, rval);
+ SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+ rval = -EIO;
+ goto done_free_remap_rsp;
+ }
+ return rval;
+
+done_free_remap_rsp:
+ dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+ sp->remap.rsp.dma);
+done_free_remap_req:
+ dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+ sp->remap.req.dma);
+done_free_sp:
+ qla2x00_rel_sp(sp);
+
+done:
+ return rval;
+}
+
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
+{
+ u16 cnt = 0;
+
+ if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN send port_offline event\n",
+ __func__, sess->port_name);
+ sess->edif.app_sess_online = 0;
+ sess->edif.sess_down_acked = 0;
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
+ sess->d_id.b24, 0, sess);
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+
+ while (!READ_ONCE(sess->edif.sess_down_acked) &&
+ !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ msleep(100);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ sess->edif.sess_down_acked = 0;
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN port_offline event completed\n",
+ __func__, sess->port_name);
+ }
+}
+
+void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ return;
+
+ qla_edb_clear(vha, fcport->d_id);
+ qla_enode_clear(vha, fcport->d_id);
+}
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
new file mode 100644
index 000000000000..7cdb89ccdc6e
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2021 Marvell
+ */
+#ifndef __QLA_EDIF_H
+#define __QLA_EDIF_H
+
+struct qla_scsi_host;
+#define EDIF_APP_ID 0x73730001
+
+#define EDIF_MAX_INDEX 2048
+struct edif_sa_ctl {
+ struct list_head next;
+ uint16_t del_index;
+ uint16_t index;
+ uint16_t slot;
+ uint16_t flags;
+#define EDIF_SA_CTL_FLG_REPL BIT_0
+#define EDIF_SA_CTL_FLG_DEL BIT_1
+#define EDIF_SA_CTL_FLG_CLEANUP_DEL BIT_4
+ // Invalidate Index bit; mirrors QLA_SA_UPDATE_FLAGS_DELETE
+ unsigned long state;
+#define EDIF_SA_CTL_USED 1 /* Active Sa update */
+#define EDIF_SA_CTL_PEND 2 /* Waiting for slot */
+#define EDIF_SA_CTL_REPL 3 /* Active Replace and Delete */
+#define EDIF_SA_CTL_DEL 4 /* Delete Pending */
+ struct fc_port *fcport;
+ struct bsg_job *bsg_job;
+ struct qla_sa_update_frame sa_frame;
+};
+
+enum enode_flags_t {
+ ENODE_ACTIVE = 0x1,
+};
+
+struct pur_core {
+ enum enode_flags_t enode_flags;
+ spinlock_t pur_lock;
+ struct list_head head;
+};
+
+enum db_flags_t {
+ EDB_ACTIVE = BIT_0,
+};
+
+#define DBELL_ACTIVE(_v) (_v->e_dbell.db_flags & EDB_ACTIVE)
+#define DBELL_INACTIVE(_v) (!(_v->e_dbell.db_flags & EDB_ACTIVE))
+
+struct edif_dbell {
+ enum db_flags_t db_flags;
+ spinlock_t db_lock;
+ struct list_head head;
+ struct bsg_job *dbell_bsg_job;
+ unsigned long bsg_expire;
+};
+
+#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
+struct sa_update_28xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* IOCB System handle. */
+
+ union {
+ __le16 nport_handle; /* in: N_PORT handle. */
+ __le16 comp_sts; /* out: completion status */
+#define CS_PORT_EDIF_UNAVAIL 0x28
+#define CS_PORT_EDIF_LOGOUT 0x29
+#define CS_PORT_EDIF_SUPP_NOT_RDY 0x64
+#define CS_PORT_EDIF_INV_REQ 0x66
+ } u;
+ uint8_t vp_index;
+ uint8_t reserved_1;
+ uint8_t port_id[3];
+ uint8_t flags;
+#define SA_FLAG_INVALIDATE BIT_0
+#define SA_FLAG_TX BIT_1 // 1=tx, 0=rx
+
+ uint8_t sa_key[32]; /* 256 bit key */
+ __le32 salt;
+ __le32 spi;
+ uint8_t sa_control;
+#define SA_CNTL_ENC_FCSP (1 << 3)
+#define SA_CNTL_ENC_OPD (2 << 3)
+#define SA_CNTL_ENC_MSK (3 << 3) // mask bits 4,3
+#define SA_CNTL_AES_GMAC (1 << 2)
+#define SA_CNTL_KEY256 (2 << 0)
+#define SA_CNTL_KEY128 0
+
+ uint8_t reserved_2;
+ __le16 sa_index; // reserve: bit 11-15
+ __le16 old_sa_info;
+ __le16 new_sa_info;
+};
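+
+/*
+ * sa_control layout implied by the masks above: bits 4-3 select the
+ * encryption mode (1 = FCSP, 2 = OPD), bit 2 enables AES-GMAC and
+ * bits 1-0 select the key size (2 = 256-bit key, 0 = 128-bit key).
+ */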
+
+#define NUM_ENTRIES 256
+#define PUR_GET 1
+
+struct dinfo {
+ int nodecnt;
+ int lstate;
+};
+
+struct pur_ninfo {
+ port_id_t pur_sid;
+ port_id_t pur_did;
+ uint8_t vp_idx;
+ short pur_bytes_rcvd;
+ unsigned short pur_nphdl;
+ unsigned int pur_rx_xchg_address;
+};
+
+struct purexevent {
+ struct pur_ninfo pur_info;
+ unsigned char *msgp;
+ u32 msgp_len;
+};
+
+#define N_UNDEF 0
+#define N_PUREX 1
+struct enode {
+ struct list_head list;
+ struct dinfo dinfo;
+ uint32_t ntype;
+ union {
+ struct purexevent purexinfo;
+ } u;
+};
+
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
+#define EDIF_SESSION_DOWN(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED || \
+ !_s->edif.app_sess_online))
+
+#define EDIF_NEGOTIATION_PENDING(_fcport) \
+ (DBELL_ACTIVE(_fcport->vha) && \
+ (_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+
+#define EDIF_SESS_DELETE(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED))
+
+#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
new file mode 100644
index 000000000000..0931f4e4e127
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (C) 2018- Marvell
+ *
+ */
+#ifndef __QLA_EDIF_BSG_H
+#define __QLA_EDIF_BSG_H
+
+#define EDIF_VERSION1 1
+
+/* BSG Vendor specific commands */
+#define ELS_MAX_PAYLOAD 2112
+#ifndef WWN_SIZE
+#define WWN_SIZE 8
+#endif
+#define VND_CMD_APP_RESERVED_SIZE 28
+#define VND_CMD_PAD_SIZE 3
+enum auth_els_sub_cmd {
+ SEND_ELS = 0,
+ SEND_ELS_REPLY,
+ PULL_ELS,
+};
+
+struct extra_auth_els {
+ enum auth_els_sub_cmd sub_cmd;
+ uint32_t extra_rx_xchg_address;
+ uint8_t extra_control_flags;
+#define BSG_CTL_FLAG_INIT 0
+#define BSG_CTL_FLAG_LS_ACC 1
+#define BSG_CTL_FLAG_LS_RJT 2
+#define BSG_CTL_FLAG_TRM 3
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct qla_bsg_auth_els_request {
+ struct fc_bsg_request r;
+ struct extra_auth_els e;
+};
+
+struct qla_bsg_auth_els_reply {
+ struct fc_bsg_reply r;
+ uint32_t rx_xchg_address;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
+struct app_id {
+ int app_vid;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_start_reply {
+ uint32_t host_support_edif;
+ uint32_t edif_enode_active;
+ uint32_t edif_edb_active;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_start {
+ struct app_id app_info;
+ uint8_t app_start_flags;
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_stop {
+ struct app_id app_info;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_plogi_reply {
+ uint32_t prli_status;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo_req {
+ struct app_id app_info;
+ uint8_t num_ports;
+ port_id_t remote_pid;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo {
+ port_id_t remote_pid;
+ uint8_t remote_wwpn[WWN_SIZE];
+ uint8_t remote_type;
+#define VND_CMD_RTYPE_UNKNOWN 0
+#define VND_CMD_RTYPE_TARGET 1
+#define VND_CMD_RTYPE_INITIATOR 2
+ uint8_t remote_state;
+ uint8_t auth_state;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+/* AUTH States */
+#define VND_CMD_AUTH_STATE_UNDEF 0
+#define VND_CMD_AUTH_STATE_SESSION_SHUTDOWN 1
+#define VND_CMD_AUTH_STATE_NEEDED 2
+#define VND_CMD_AUTH_STATE_ELS_RCVD 3
+#define VND_CMD_AUTH_STATE_SAUPDATE_COMPL 4
+
+struct app_pinfo_reply {
+ uint8_t port_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ struct app_pinfo ports[];
+} __packed;
+
+struct app_sinfo_req {
+ struct app_id app_info;
+ uint8_t num_ports;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_sinfo {
+ uint8_t remote_wwpn[WWN_SIZE];
+ int64_t rekey_count;
+ uint8_t rekey_mode;
+ int64_t tx_bytes;
+ int64_t rx_bytes;
+} __packed;
+
+struct app_stats_reply {
+ uint8_t elem_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ struct app_sinfo elem[];
+} __packed;
+
+struct qla_sa_update_frame {
+ struct app_id app_info;
+ uint16_t flags;
+#define SAU_FLG_INV 0x01 /* delete key */
+#define SAU_FLG_TX 0x02 /* 1=tx, 0 = rx */
+#define SAU_FLG_FORCE_DELETE 0x08
+#define SAU_FLG_GMAC_MODE 0x20 /*
+ * GMAC mode is cleartext for the IO
+ * (i.e. NULL encryption)
+ */
+#define SAU_FLG_KEY128 0x40
+#define SAU_FLG_KEY256 0x80
+ uint16_t fast_sa_index:10,
+ reserved:6;
+ uint32_t salt;
+ uint32_t spi;
+ uint8_t sa_key[32];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t port_id;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define QL_VND_SC_UNDEF 0
+#define QL_VND_SC_SA_UPDATE 1
+#define QL_VND_SC_APP_START 2
+#define QL_VND_SC_APP_STOP 3
+#define QL_VND_SC_AUTH_OK 4
+#define QL_VND_SC_AUTH_FAIL 5
+#define QL_VND_SC_REKEY_CONFIG 6
+#define QL_VND_SC_GET_FCINFO 7
+#define QL_VND_SC_GET_STATS 8
+#define QL_VND_SC_AEN_COMPLETE 9
+#define QL_VND_SC_READ_DBELL 10
+
+/*
+ * bsg caller to provide empty buffer for doorbell events.
+ *
+ * sg_io_v4.din_xferp = empty buffer for door bell events
+ * sg_io_v4.dout_xferp = struct edif_read_dbell *buf
+ */
+struct edif_read_dbell {
+ struct app_id app_info;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
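+
+/*
+ * Hypothetical usage sketch (not part of this interface): a userspace
+ * authentication daemon could poll the doorbell roughly like
+ *
+ *	struct edif_read_dbell req = {
+ *		.app_info.app_vid = EDIF_APP_ID,
+ *		.version = EDIF_VERSION1,
+ *	};
+ *	struct edif_app_dbell evt[16];
+ *
+ * with a struct fc_bsg_request carrying FC_BSG_HST_VENDOR and
+ * vendor_cmd[0] = QL_VND_SC_READ_DBELL, sg_io_v4.dout_xferp pointing at
+ * &req and sg_io_v4.din_xferp at evt. The bsg plumbing beyond this
+ * header is the application's concern and only sketched here.
+ */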
+
+
+/* Application interface data structure for rtn data */
+#define EXT_DEF_EVENT_DATA_SIZE 64
+struct edif_app_dbell {
+ uint32_t event_code;
+ uint32_t event_data_size;
+ union {
+ port_id_t port_id;
+ uint8_t event_data[EXT_DEF_EVENT_DATA_SIZE];
+ };
+} __packed;
+
+struct edif_sa_update_aen {
+ port_id_t port_id;
+ uint32_t key_type; /* Tx (1) or RX (2) */
+ uint32_t status; /* 0 success, 1 failed, 2 timeout, 3 error */
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define QL_VND_SA_STAT_SUCCESS 0
+#define QL_VND_SA_STAT_FAILED 1
+#define QL_VND_SA_STAT_TIMEOUT 2
+#define QL_VND_SA_STAT_ERROR 3
+
+#define QL_VND_RX_SA_KEY 1
+#define QL_VND_TX_SA_KEY 2
+
+/* App requests to mark a plogi as auth'd ok or as auth failed */
+struct auth_complete_cmd {
+ struct app_id app_info;
+#define PL_TYPE_WWPN 1
+#define PL_TYPE_DID 2
+ uint32_t type;
+ union {
+ uint8_t wwpn[WWN_SIZE];
+ port_id_t d_id;
+ } u;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct aen_complete_cmd {
+ struct app_id app_info;
+ port_id_t port_id;
+ uint32_t event_code;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define RX_DELAY_DELETE_TIMEOUT 20
+
+#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN 1
+
+#endif /* QLA_EDIF_BSG_H */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d641918cdd46..f307beed9d29 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_FW_H
#define __QLA_FW_H
@@ -31,6 +30,9 @@
#define PDO_FORCE_ADISC BIT_1
#define PDO_FORCE_PLOGI BIT_0
+struct buffer_credit_24xx {
+ u32 parameter[28];
+};
#define PORT_DATABASE_24XX_SIZE 64
struct port_database_24xx {
@@ -80,10 +82,11 @@ struct port_database_24xx {
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint8_t reserved_3[4];
+ uint8_t reserved_3[2];
+ uint16_t nvme_first_burst_size;
uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */
uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */
- uint16_t nvme_first_burst_size;
+ uint8_t secure_login;
uint8_t reserved_4[14];
};
@@ -131,28 +134,28 @@ struct vp_database_24xx {
struct nvram_24xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
+ __le16 nvram_version;
uint16_t reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- __le16 frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t hard_address;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t link_down_on_nos;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 link_down_on_nos;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
/* Offset 56. */
@@ -175,7 +178,7 @@ struct nvram_24xx {
* BIT 11-13 = Output Emphasis 4G
* BIT 14-15 = Reserved
*/
- uint16_t seriallink_options[4];
+ __le16 seriallink_options[4];
uint16_t reserved_2[16];
@@ -215,25 +218,25 @@ struct nvram_24xx {
*
* BIT 16-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
+ __le16 boot_lun_number;
uint16_t reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
+ __le16 alt1_boot_lun_number;
uint16_t reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
+ __le16 alt2_boot_lun_number;
uint16_t reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
+ __le16 alt3_boot_lun_number;
uint16_t reserved_11;
/*
@@ -246,23 +249,23 @@ struct nvram_24xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
uint16_t reserved_13;
- uint16_t boot_id_number;
+ __le16 boot_id_number;
uint16_t reserved_14;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint16_t reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
uint16_t reserved_16[3];
@@ -272,13 +275,13 @@ struct nvram_24xx {
uint8_t prev_drv_ver_minor;
uint8_t prev_drv_ver_subminor;
- uint16_t prev_bios_ver_major;
- uint16_t prev_bios_ver_minor;
+ __le16 prev_bios_ver_major;
+ __le16 prev_bios_ver_minor;
- uint16_t prev_efi_ver_major;
- uint16_t prev_efi_ver_minor;
+ __le16 prev_efi_ver_major;
+ __le16 prev_efi_ver_minor;
- uint16_t prev_fw_ver_major;
+ __le16 prev_fw_ver_major;
uint8_t prev_fw_ver_minor;
uint8_t prev_fw_ver_subminor;
@@ -306,7 +309,7 @@ struct nvram_24xx {
uint16_t subsystem_vendor_id;
uint16_t subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -315,46 +318,46 @@ struct nvram_24xx {
*/
#define ICB_VERSION 1
struct init_cb_24xx {
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t hard_address;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t link_down_on_nos; /* Milliseconds. */
+ __le16 link_down_on_nos; /* Milliseconds. */
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
__le64 prio_request_q_address __packed;
- uint16_t msix;
- uint16_t msix_atio;
+ __le16 msix;
+ __le16 msix_atio;
uint8_t reserved_2[4];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
- __le64 atio_q_address __packed;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
+ __le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0 = Enable Hard Loop Id
@@ -375,7 +378,7 @@ struct init_cb_24xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -396,7 +399,7 @@ struct init_cb_24xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0 = Reserved
@@ -422,9 +425,9 @@ struct init_cb_24xx {
* BIT 30 = Enable request queue 0 out index shadowing
* BIT 31 = Reserved
*/
- uint32_t firmware_options_3;
- uint16_t qos;
- uint16_t rid;
+ __le32 firmware_options_3;
+ __le16 qos;
+ __le16 rid;
uint8_t reserved_3[20];
};
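
The bulk of this hunk retypes ICB fields from uint16_t/uint32_t to __le16/__le32, documenting that the block is DMA'd to the ISP in little-endian layout and letting sparse flag bare assignments. A minimal sketch of the resulting access pattern, with field names taken from the struct above:

static void icb_set_queues(struct init_cb_24xx *icb, u16 req_len, u16 rsp_len)
{
	icb->request_q_length = cpu_to_le16(req_len);
	icb->response_q_length = cpu_to_le16(rsp_len);
	icb->firmware_options_1 |= cpu_to_le32(BIT_14); /* Node Name Option */
}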
@@ -440,27 +443,27 @@ struct cmd_bidir {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT hanlde. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Commnad timeout. */
+ __le16 timeout; /* Command timeout. */
- uint16_t wr_dseg_count; /* Write Data segment count. */
- uint16_t rd_dseg_count; /* Read Data segment count. */
+ __le16 wr_dseg_count; /* Write Data segment count. */
+ __le16 rd_dseg_count; /* Read Data segment count. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define BD_WRAP_BACK BIT_3
#define BD_READ_DATA BIT_1
#define BD_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */
uint16_t reserved[2]; /* Reserved */
- uint32_t rd_byte_count; /* Total Byte count Read. */
- uint32_t wr_byte_count; /* Total Byte count write. */
+ __le32 rd_byte_count; /* Total Byte count Read. */
+ __le32 wr_byte_count; /* Total Byte count write. */
uint8_t port_id[3]; /* PortID of destination port.*/
uint8_t vp_index;
@@ -477,28 +480,31 @@ struct cmd_type_6 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
+#define CF_NEW_SA BIT_12
+#define CF_EN_EDIF BIT_9
+#define CF_ADDITIONAL_PARAM_BLK BIT_8
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
/* Data segment address. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -515,16 +521,16 @@ struct cmd_type_7 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
#define FW_MAX_TIMEOUT 0x1999
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint16_t reserved_1;
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t task_mgmt_flags; /* Task management flags. */
+ __le16 task_mgmt_flags; /* Task management flags. */
#define TMF_CLEAR_ACA BIT_14
#define TMF_TARGET_RESET BIT_13
#define TMF_LUN_RESET BIT_12
@@ -544,7 +550,7 @@ struct cmd_type_7 {
uint8_t crn;
uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -562,29 +568,29 @@ struct cmd_type_crc_2 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
+ __le16 crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
};
@@ -601,32 +607,33 @@ struct sts_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t ox_id; /* OX_ID used by the firmware. */
+ __le16 comp_status; /* Completion status. */
+ __le16 ox_id; /* OX_ID used by the firmware. */
- uint32_t residual_len; /* FW calc residual transfer length. */
+ __le32 residual_len; /* FW calc residual transfer length. */
union {
- uint16_t reserved_1;
- uint16_t nvme_rsp_pyld_len;
+ __le16 reserved_1;
+ __le16 nvme_rsp_pyld_len;
+ __le16 edif_sa_index; /* edif sa_index used for initiator read data */
};
- uint16_t state_flags; /* State flags. */
+ __le16 state_flags; /* State flags. */
#define SF_TRANSFERRED_DATA BIT_11
#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
- uint16_t retry_delay;
- uint16_t scsi_status; /* SCSI status. */
+ __le16 status_qualifier;
+ __le16 scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
- uint32_t rsp_residual_count; /* FCP RSP residual count. */
+ __le32 rsp_residual_count; /* FCP RSP residual count. */
- uint32_t sense_len; /* FCP SENSE length. */
+ __le32 sense_len; /* FCP SENSE length. */
union {
struct {
- uint32_t rsp_data_len; /* FCP response data length */
+ __le32 rsp_data_len; /* FCP response data length */
uint8_t data[28]; /* FCP rsp/sense information */
};
struct nvme_fc_ersp_iu nvme_ersp;
@@ -669,7 +676,7 @@ struct mrk_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint8_t modifier; /* Modifier (7-0). */
#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
@@ -698,28 +705,72 @@ struct ct_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t cmd_dsd_count;
+ __le16 cmd_dsd_count;
uint8_t vp_index;
uint8_t reserved_1;
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
uint16_t reserved_2;
- uint16_t rsp_dsd_count;
+ __le16 rsp_dsd_count;
uint8_t reserved_3[10];
- uint32_t rsp_byte_count;
- uint32_t cmd_byte_count;
+ __le32 rsp_byte_count;
+ __le32 cmd_byte_count;
struct dsd64 dsd[2];
};
+#define PURX_ELS_HEADER_SIZE 0x18
+
+/*
+ * ISP queue - PUREX IOCB entry structure definition
+ */
+#define PUREX_IOCB_TYPE 0x51 /* PUREX IOCB entry */
+struct purex_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ __le16 reserved1;
+ uint8_t vp_idx;
+ uint8_t reserved2;
+
+ __le16 status_flags;
+ __le16 nport_handle;
+
+ __le16 frame_size;
+ __le16 trunc_frame_size;
+
+ __le32 rx_xchg_addr;
+
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+
+ uint8_t f_ctl[3];
+ uint8_t type;
+
+ __le16 seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
+
+ uint8_t els_frame_payload[20];
+};
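
A hedged helper for the new PUREX entry: the FC header ids are three bytes each, and the byte order below is assumed to match the other qla2xxx IOCBs (AL_PA first); verify against the firmware spec before relying on it.

static u32 purex_sid(const struct purex_entry_24xx *p)
{
	/* assumed layout: s_id[0]=AL_PA, s_id[1]=area, s_id[2]=domain */
	return ((u32)p->s_id[2] << 16) | ((u32)p->s_id[1] << 8) | p->s_id[0];
}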
+
/*
* ISP queue - ELS Pass-Through entry structure definition.
*/
@@ -732,42 +783,53 @@ struct els_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t reserved_1;
+ __le16 comp_status; /* response only */
+ __le16 nport_handle;
- uint16_t nport_handle; /* N_PORT handle. */
-
- uint16_t tx_dsd_count;
+ __le16 tx_dsd_count;
uint8_t vp_index;
uint8_t sof_type;
#define EST_SOFI3 (1 << 4)
#define EST_SOFI2 (3 << 4)
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t rx_dsd_count;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 rx_dsd_count;
uint8_t opcode;
uint8_t reserved_2;
- uint8_t port_id[3];
+ uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
#define EPD_ELS_RJT (2 << 13)
-#define EPD_RX_XCHG (3 << 13)
+#define EPD_RX_XCHG (3 << 13) /* terminate exchange */
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
+#define ECF_SEC_LOGIN BIT_3
- __le32 rx_byte_count;
- __le32 tx_byte_count;
+ union {
+ struct {
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
- __le64 tx_address __packed; /* Data segment 0 address. */
- __le32 tx_len; /* Data segment 0 length. */
- __le64 rx_address __packed; /* Data segment 1 address. */
- __le32 rx_len; /* Data segment 1 length. */
+ __le64 tx_address __packed; /* DSD 0 address. */
+ __le32 tx_len; /* DSD 0 length. */
+
+ __le64 rx_address __packed; /* DSD 1 address. */
+ __le32 rx_len; /* DSD 1 length. */
+ };
+ struct {
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
+ };
+ };
};
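
With the new union, the same IOCB doubles as the response: when comp_status is non-zero the DSD view is stale and the firmware's error subcodes apply instead. A sketch, assuming CS_COMPLETE is 0 as elsewhere in the driver:

static bool els_iocb_failed(const struct els_entry_24xx *els,
			    u32 *sub1, u32 *sub2)
{
	if (le16_to_cpu(els->comp_status) == 0)	/* assumed CS_COMPLETE */
		return false;
	*sub1 = le32_to_cpu(els->error_subcode_1);
	*sub2 = le32_to_cpu(els->error_subcode_2);
	return true;
}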
struct els_sts_entry_24xx {
@@ -776,32 +838,33 @@ struct els_sts_entry_24xx {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t handle; /* System handle. */
+ __le32 handle; /* System handle. */
- uint16_t comp_status;
+ __le16 comp_status;
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t reserved_1;
+ __le16 reserved_1;
uint8_t vp_index;
uint8_t sof_type;
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t reserved_2;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 reserved_2;
uint8_t opcode;
uint8_t reserved_3;
- uint8_t port_id[3];
- uint8_t reserved_4;
+ uint8_t d_id[3];
+ uint8_t s_id[3];
- uint16_t reserved_5;
+ __le16 control_flags; /* Control flags. */
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
- uint16_t control_flags; /* Control flags. */
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
+ __le32 reserved_4[4];
};
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -828,17 +891,18 @@ struct logio_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
/* Modifiers. */
#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
+#define LCF_COMMON_FEAT BIT_7 /* PLOGI - Set Common Features Field */
#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */
#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */
#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */
@@ -862,7 +926,9 @@ struct logio_entry_24xx {
uint8_t rsp_size; /* Response size in 32bit words. */
- uint32_t io_parameter[11]; /* General I/O parameters. */
+ __le32 io_parameter[11]; /* General I/O parameters. */
+#define LIO_COMM_FEAT_FCSP BIT_21
+#define LIO_COMM_FEAT_CIO BIT_31
#define LSC_SCODE_NOLINK 0x01
#define LSC_SCODE_NOIOCB 0x02
#define LSC_SCODE_NOXCB 0x03
@@ -890,17 +956,17 @@ struct tsk_mgmt_entry {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint16_t reserved_1;
- uint16_t delay; /* Activity delay in seconds. */
+ __le16 delay; /* Activity delay in seconds. */
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint32_t control_flags; /* Control Flags. */
+ __le32 control_flags; /* Control Flags. */
#define TCF_NOTMCMD_TO_TARGET BIT_31
#define TCF_LUN_RESET BIT_4
#define TCF_ABORT_TASK_SET BIT_3
@@ -925,28 +991,132 @@ struct abort_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- /* or Completion status. */
+ union {
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 comp_status; /* Completion status. */
+ };
- uint16_t options; /* Options. */
+ __le16 options; /* Options. */
#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */
+#define AOF_NO_RRQ BIT_1 /* Do not send RRQ. */
+#define AOF_ABTS_TIMEOUT BIT_2 /* Disable logout on ABTS timeout. */
+#define AOF_ABTS_RTY_CNT BIT_3 /* Use driver specified retry count. */
+#define AOF_RSP_TIMEOUT BIT_4 /* Use specified response timeout. */
+
uint32_t handle_to_abort; /* System handle to abort. */
- uint16_t req_que_no;
+ __le16 req_que_no;
uint8_t reserved_1[30];
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
-
- uint8_t reserved_2[12];
+ u8 reserved_2[4];
+ union {
+ struct {
+ __le16 abts_rty_cnt;
+ __le16 rsp_timeout;
+ } drv;
+ struct {
+ u8 ba_rjt_vendorUnique;
+ u8 ba_rjt_reasonCodeExpl;
+ u8 ba_rjt_reasonCode;
+ u8 reserved_3;
+ } fw;
+ };
+ u8 reserved_4[4];
};
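
A sketch of the new driver-specified abort knobs: the firmware only consumes the drv view of the union when the matching AOF_* option bits are set (timeout units per the firmware spec; illustrative only):

static void abt_set_drv_params(struct abort_entry_24xx *abt,
			       u16 retries, u16 rsp_timeout)
{
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT | AOF_RSP_TIMEOUT);
	abt->drv.abts_rty_cnt = cpu_to_le16(retries);
	abt->drv.rsp_timeout = cpu_to_le16(rsp_timeout);
}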
+#define ABTS_RCV_TYPE 0x54
+#define ABTS_RSP_TYPE 0x55
+struct abts_entry_24xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t handle_count;
+ uint8_t entry_status;
+
+ __le32 handle; /* type 0x55 only */
+
+ __le16 comp_status; /* type 0x55 only */
+ __le16 nport_handle; /* type 0x54 only */
+
+ __le16 control_flags; /* type 0x55 only */
+ uint8_t vp_idx;
+ uint8_t sof_type; /* SOF type in upper nibble */
+
+ __le32 rx_xch_addr;
+
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+
+ uint8_t f_ctl[3];
+ uint8_t type;
+
+ __le16 seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+
+ __le16 rx_id;
+ __le16 ox_id;
+
+ __le32 param;
+
+ union {
+ struct {
+ __le32 subcode3;
+ __le32 rsvd;
+ __le32 subcode1;
+ __le32 subcode2;
+ } error;
+ struct {
+ __le16 rsrvd1;
+ uint8_t last_seq_id;
+ uint8_t seq_id_valid;
+ __le16 aborted_rx_id;
+ __le16 aborted_ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
+ } ba_acc;
+ struct {
+ uint8_t vendor_unique;
+ uint8_t explanation;
+ uint8_t reason;
+ } ba_rjt;
+ } payload;
+
+ __le32 rx_xch_addr_to_abort;
+} __packed;
+
+/* ABTS payload explanation values */
+#define BA_RJT_EXP_NO_ADDITIONAL 0
+#define BA_RJT_EXP_INV_OX_RX_ID 3
+#define BA_RJT_EXP_SEQ_ABORTED 5
+
+/* ABTS payload reason values */
+#define BA_RJT_RSN_INV_CMD_CODE 1
+#define BA_RJT_RSN_LOGICAL_ERROR 3
+#define BA_RJT_RSN_LOGICAL_BUSY 5
+#define BA_RJT_RSN_PROTOCOL_ERROR 7
+#define BA_RJT_RSN_UNABLE_TO_PERFORM 9
+#define BA_RJT_RSN_VENDOR_SPECIFIC 0xff
+
+/* FC_F values */
+#define FC_TYPE_BLD 0x000 /* Basic link data */
+#define FC_F_CTL_RSP_CNTXT 0x800000 /* Responder of exchange */
+#define FC_F_CTL_LAST_SEQ 0x100000 /* Last sequence */
+#define FC_F_CTL_END_SEQ 0x80000 /* End sequence */
+#define FC_F_CTL_SEQ_INIT 0x010000 /* Sequence initiative */
+#define FC_ROUTING_BLD 0x80 /* Basic link data frame */
+#define FC_R_CTL_BLD_BA_ACC 0x04 /* BA_ACC (basic accept) */
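
Illustrative use of the BA_RJT defines with the abts_entry_24xx payload union above, rejecting an ABTS whose OX_ID/RX_ID cannot be matched; sketch only, the surrounding IOCB setup is elided:

static void abts_fill_ba_rjt(struct abts_entry_24xx *rsp)
{
	rsp->payload.ba_rjt.vendor_unique = 0;
	rsp->payload.ba_rjt.reason = BA_RJT_RSN_UNABLE_TO_PERFORM;
	rsp->payload.ba_rjt.explanation = BA_RJT_EXP_INV_OX_RX_ID;
}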
+
/*
* ISP I/O Register Set structure definitions.
*/
struct device_reg_24xx {
- uint32_t flash_addr; /* Flash/NVRAM BIOS address. */
+ __le32 flash_addr; /* Flash/NVRAM BIOS address. */
#define FARX_DATA_FLAG BIT_31
#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA 0x7FF00000
@@ -997,9 +1167,9 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
- uint32_t flash_data; /* Flash/NVRAM BIOS data. */
+ __le32 flash_data; /* Flash/NVRAM BIOS data. */
- uint32_t ctrl_status; /* Control/Status. */
+ __le32 ctrl_status; /* Control/Status. */
#define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */
#define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */
#define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */
@@ -1025,35 +1195,35 @@ struct device_reg_24xx {
#define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */
#define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */
- uint32_t ictrl; /* Interrupt control. */
+ __le32 ictrl; /* Interrupt control. */
#define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */
- uint32_t istatus; /* Interrupt status. */
+ __le32 istatus; /* Interrupt status. */
#define ISRX_RISC_INT BIT_3 /* RISC interrupt. */
- uint32_t unused_1[2]; /* Gap. */
+ __le32 unused_1[2]; /* Gap. */
/* Request Queue. */
- uint32_t req_q_in; /* In-Pointer. */
- uint32_t req_q_out; /* Out-Pointer. */
+ __le32 req_q_in; /* In-Pointer. */
+ __le32 req_q_out; /* Out-Pointer. */
/* Response Queue. */
- uint32_t rsp_q_in; /* In-Pointer. */
- uint32_t rsp_q_out; /* Out-Pointer. */
+ __le32 rsp_q_in; /* In-Pointer. */
+ __le32 rsp_q_out; /* Out-Pointer. */
/* Priority Request Queue. */
- uint32_t preq_q_in; /* In-Pointer. */
- uint32_t preq_q_out; /* Out-Pointer. */
+ __le32 preq_q_in; /* In-Pointer. */
+ __le32 preq_q_out; /* Out-Pointer. */
- uint32_t unused_2[2]; /* Gap. */
+ __le32 unused_2[2]; /* Gap. */
/* ATIO Queue. */
- uint32_t atio_q_in; /* In-Pointer. */
- uint32_t atio_q_out; /* Out-Pointer. */
+ __le32 atio_q_in; /* In-Pointer. */
+ __le32 atio_q_out; /* Out-Pointer. */
- uint32_t host_status;
+ __le32 host_status;
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t hccr; /* Host command & control register. */
+ __le32 hccr; /* Host command & control register. */
/* HCCR statuses. */
#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
@@ -1075,7 +1245,7 @@ struct device_reg_24xx {
/* Clear RISC to PCI interrupt. */
#define HCCRX_CLR_RISC_INT 0xA0000000
- uint32_t gpiod; /* GPIO Data register. */
+ __le32 gpiod; /* GPIO Data register. */
/* LED update mask. */
#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
@@ -1094,7 +1264,7 @@ struct device_reg_24xx {
/* Data in/out. */
#define GPDX_DATA_INOUT (BIT_1|BIT_0)
- uint32_t gpioe; /* GPIO Enable register. */
+ __le32 gpioe; /* GPIO Enable register. */
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
/* Enable update mask. */
@@ -1102,56 +1272,56 @@ struct device_reg_24xx {
/* Enable. */
#define GPEX_ENABLE (BIT_1|BIT_0)
- uint32_t iobase_addr; /* I/O Bus Base Address register. */
-
- uint32_t unused_3[10]; /* Gap. */
-
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
-
- uint32_t iobase_window;
- uint32_t iobase_c4;
- uint32_t iobase_c8;
- uint32_t unused_4_1[6]; /* Gap. */
- uint32_t iobase_q;
- uint32_t unused_5[2]; /* Gap. */
- uint32_t iobase_select;
- uint32_t unused_6[2]; /* Gap. */
- uint32_t iobase_sdata;
+ __le32 iobase_addr; /* I/O Bus Base Address register. */
+
+ __le32 unused_3[10]; /* Gap. */
+
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+
+ __le32 iobase_window;
+ __le32 iobase_c4;
+ __le32 iobase_c8;
+ __le32 unused_4_1[6]; /* Gap. */
+ __le32 iobase_q;
+ __le32 unused_5[2]; /* Gap. */
+ __le32 iobase_select;
+ __le32 unused_6[2]; /* Gap. */
+ __le32 iobase_sdata;
};
/* RISC-RISC semaphore register PCI offset */
#define RISC_REGISTER_BASE_OFFSET 0x7010
-#define RISC_REGISTER_WINDOW_OFFET 0x6
+#define RISC_REGISTER_WINDOW_OFFSET 0x6
/* RISC-RISC semaphore/flag register (risc address 0x7016) */
@@ -1213,8 +1383,8 @@ struct mid_conf_entry_24xx {
struct mid_init_cb_24xx {
struct init_cb_24xx init_cb;
- uint16_t count;
- uint16_t options;
+ __le16 count;
+ __le16 options;
struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
};
@@ -1248,27 +1418,27 @@ struct vp_ctrl_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t vp_idx_failed;
+ __le16 vp_idx_failed;
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquiring ID. */
#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
- uint16_t command;
+ __le16 command;
#define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */
#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO all ports. */
- uint16_t vp_count;
+ __le16 vp_count;
uint8_t vp_idx_map[16];
- uint16_t flags;
- uint16_t id;
+ __le16 flags;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[24];
};
@@ -1284,12 +1454,12 @@ struct vp_config_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t flags;
+ __le16 flags;
#define CS_VF_BIND_VPORTS_TO_VF BIT_0
#define CS_VF_SET_QOS_OF_VPORTS BIT_1
#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
#define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */
#define CS_VCT_ERROR 0x03 /* Unknown error. */
@@ -1316,9 +1486,9 @@ struct vp_config_entry_24xx {
uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
- uint16_t id;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[2];
};
@@ -1345,7 +1515,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t resv1;
+ __le32 resv1;
uint8_t vp_acquired;
uint8_t vp_setup;
uint8_t vp_idx; /* Format 0=reserved */
@@ -1409,15 +1579,15 @@ struct vf_evfp_entry_24xx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t timeout; /* timeout */
- uint16_t adim_tagging_mode;
+ __le16 comp_status; /* Completion status. */
+ __le16 timeout; /* timeout */
+ __le16 adim_tagging_mode;
- uint16_t vfport_id;
+ __le16 vfport_id;
uint32_t exch_addr;
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t control_flags;
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 control_flags;
uint32_t io_parameter_0;
uint32_t io_parameter_1;
__le64 tx_address __packed; /* Data segment 0 address. */
@@ -1432,13 +1602,13 @@ struct vf_evfp_entry_24xx {
struct qla_fdt_layout {
uint8_t sig[4];
- uint16_t version;
- uint16_t len;
- uint16_t checksum;
+ __le16 version;
+ __le16 len;
+ __le16 checksum;
uint8_t unused1[2];
uint8_t model[16];
- uint16_t man_id;
- uint16_t id;
+ __le16 man_id;
+ __le16 id;
uint8_t flags;
uint8_t erase_cmd;
uint8_t alt_erase_cmd;
@@ -1447,15 +1617,15 @@ struct qla_fdt_layout {
uint8_t wrt_sts_reg_cmd;
uint8_t unprotect_sec_cmd;
uint8_t read_man_id_cmd;
- uint32_t block_size;
- uint32_t alt_block_size;
- uint32_t flash_size;
- uint32_t wrt_enable_data;
+ __le32 block_size;
+ __le32 alt_block_size;
+ __le32 flash_size;
+ __le32 wrt_enable_data;
uint8_t read_id_addr_len;
uint8_t wrt_disable_bits;
uint8_t read_dev_id_len;
uint8_t chip_erase_cmd;
- uint16_t read_timeout;
+ __le16 read_timeout;
uint8_t protect_sec_cmd;
uint8_t unused2[65];
};
@@ -1464,11 +1634,11 @@ struct qla_fdt_layout {
struct qla_flt_location {
uint8_t sig[4];
- uint16_t start_lo;
- uint16_t start_hi;
+ __le16 start_lo;
+ __le16 start_hi;
uint8_t version;
uint8_t unused[5];
- uint16_t checksum;
+ __le16 checksum;
};
#define FLT_REG_FW 0x01
@@ -1505,6 +1675,7 @@ struct qla_flt_location {
#define FLT_REG_VPD_SEC_27XX_1 0x52
#define FLT_REG_VPD_SEC_27XX_2 0xD8
#define FLT_REG_VPD_SEC_27XX_3 0xDA
+#define FLT_REG_NVME_PARAMS_27XX 0x21
/* 28xx */
#define FLT_REG_AUX_IMG_PRI_28XX 0x125
@@ -1521,22 +1692,24 @@ struct qla_flt_location {
#define FLT_REG_MPI_SEC_28XX 0xF0
#define FLT_REG_PEP_PRI_28XX 0xD1
#define FLT_REG_PEP_SEC_28XX 0xF1
+#define FLT_REG_NVME_PARAMS_PRI_28XX 0x14E
+#define FLT_REG_NVME_PARAMS_SEC_28XX 0x179
struct qla_flt_region {
- uint16_t code;
+ __le16 code;
uint8_t attribute;
uint8_t reserved;
- uint32_t size;
- uint32_t start;
- uint32_t end;
+ __le32 size;
+ __le32 start;
+ __le32 end;
};
struct qla_flt_header {
- uint16_t version;
- uint16_t length;
- uint16_t checksum;
- uint16_t unused;
- struct qla_flt_region region[0];
+ __le16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 unused;
+ struct qla_flt_region region[];
};
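
With region[0] replaced by a true flexible array, the regions are walked off the header length. A hedged lookup sketch; the exact length semantics (whether it includes the header) follow the driver's existing FLT parsing and are assumed here:

static const struct qla_flt_region *
flt_find_region(const struct qla_flt_header *flt, u16 code)
{
	u16 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
	u16 i;

	for (i = 0; i < cnt; i++)
		if (le16_to_cpu(flt->region[i].code) == code)
			return &flt->region[i];
	return NULL;
}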
#define FLT_REGION_SIZE 16
@@ -1547,18 +1720,18 @@ struct qla_flt_header {
struct qla_npiv_header {
uint8_t sig[2];
- uint16_t version;
- uint16_t entries;
- uint16_t unused[4];
- uint16_t checksum;
+ __le16 version;
+ __le16 entries;
+ __le16 unused[4];
+ __le16 checksum;
};
struct qla_npiv_entry {
- uint16_t flags;
- uint16_t vf_id;
+ __le16 flags;
+ __le16 vf_id;
uint8_t q_qos;
uint8_t f_qos;
- uint16_t unused1;
+ __le16 unused1;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
};
@@ -1588,7 +1761,7 @@ struct verify_chip_entry_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define VCO_DONT_UPDATE_FW BIT_0
#define VCO_FORCE_UPDATE BIT_1
#define VCO_DONT_RESET_UPDATE BIT_2
@@ -1596,18 +1769,18 @@ struct verify_chip_entry_84xx {
#define VCO_END_OF_DATA BIT_14
#define VCO_ENABLE_DSD BIT_15
- uint16_t reserved_1;
+ __le16 reserved_1;
- uint16_t data_seg_cnt;
- uint16_t reserved_2[3];
+ __le16 data_seg_cnt;
+ __le16 reserved_2[3];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_3[3];
- uint32_t fw_size;
- uint32_t fw_seq_size;
- uint32_t relative_offset;
+ __le32 reserved_3[3];
+ __le32 fw_size;
+ __le32 fw_seq_size;
+ __le32 relative_offset;
struct dsd64 dsd;
};
@@ -1620,22 +1793,22 @@ struct verify_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
+ __le16 comp_status;
#define CS_VCS_CHIP_FAILURE 0x3
#define CS_VCS_BAD_EXCHANGE 0x8
#define CS_VCS_SEQ_COMPLETEi 0x40
- uint16_t failure_code;
+ __le16 failure_code;
#define VFC_CHECKSUM_ERROR 0x1
#define VFC_INVALID_LEN 0x2
#define VFC_ALREADY_IN_PROGRESS 0x8
- uint16_t reserved_1[4];
+ __le16 reserved_1[4];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_2[6];
+ __le32 reserved_2[6];
};
#define ACCESS_CHIP_IOCB_TYPE 0x2B
@@ -1647,24 +1820,24 @@ struct access_chip_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define ACO_DUMP_MEMORY 0x0
#define ACO_LOAD_MEMORY 0x1
#define ACO_CHANGE_CONFIG_PARAM 0x2
#define ACO_REQUEST_INFO 0x3
- uint16_t reserved1;
+ __le16 reserved1;
- uint16_t dseg_count;
- uint16_t reserved2[3];
+ __le16 dseg_count;
+ __le16 reserved2[3];
- uint32_t parameter1;
- uint32_t parameter2;
- uint32_t parameter3;
+ __le32 parameter1;
+ __le32 parameter2;
+ __le32 parameter3;
- uint32_t reserved3[3];
- uint32_t total_byte_cnt;
- uint32_t reserved4;
+ __le32 reserved3[3];
+ __le32 total_byte_cnt;
+ __le32 reserved4;
struct dsd64 dsd;
};
@@ -1677,11 +1850,11 @@ struct access_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
- uint16_t failure_code;
- uint32_t residual_count;
+ __le16 comp_status;
+ __le16 failure_code;
+ __le32 residual_count;
- uint32_t reserved[12];
+ __le32 reserved[12];
};
/* 81XX Support **************************************************************/
@@ -1726,9 +1899,8 @@ struct access_chip_rsp_84xx {
/* LR Distance bit positions */
#define LR_DIST_NV_POS 2
+#define LR_DIST_NV_MASK 0xf
#define LR_DIST_FW_POS 12
-#define LR_DIST_FW_SHIFT (LR_DIST_FW_POS - LR_DIST_NV_POS)
-#define LR_DIST_FW_FIELD(x) ((x) << LR_DIST_FW_SHIFT & 0xf000)
/* FAC semaphore defines */
#define FAC_SEMAPHORE_UNLOCK 0
@@ -1737,52 +1909,52 @@ struct access_chip_rsp_84xx {
struct nvram_81xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
- uint16_t reserved_0;
+ __le16 nvram_version;
+ __le16 reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
- uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t reserved_2;
+ __le16 version;
+ __le16 reserved_1;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t reserved_3;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 reserved_3;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
- uint16_t reserved_4[4];
+ __le16 reserved_4[4];
/* Offset 64. */
uint8_t enode_mac[6];
- uint16_t reserved_5[5];
+ __le16 reserved_5[5];
/* Offset 80. */
- uint16_t reserved_6[24];
+ __le16 reserved_6[24];
/* Offset 128. */
- uint16_t ex_version;
+ __le16 ex_version;
uint8_t prio_fcf_matching_flags;
uint8_t reserved_6_1[3];
- uint16_t pri_fcf_vlan_id;
+ __le16 pri_fcf_vlan_id;
uint8_t pri_fcf_fabric_name[8];
- uint16_t reserved_6_2[7];
+ __le16 reserved_6_2[7];
uint8_t spma_mac_addr[6];
- uint16_t reserved_6_3[14];
+ __le16 reserved_6_3[14];
/* Offset 192. */
uint8_t min_supported_speed;
uint8_t reserved_7_0;
- uint16_t reserved_7[31];
+ __le16 reserved_7[31];
/*
* BIT 0 = Enable spinup delay
@@ -1815,26 +1987,26 @@ struct nvram_81xx {
* BIT 25 = Temp WWPN
* BIT 26-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
- uint16_t reserved_8;
+ __le16 boot_lun_number;
+ __le16 reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
- uint16_t reserved_9;
+ __le16 alt1_boot_lun_number;
+ __le16 reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
- uint16_t reserved_10;
+ __le16 alt2_boot_lun_number;
+ __le16 reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
- uint16_t reserved_11;
+ __le16 alt3_boot_lun_number;
+ __le16 reserved_11;
/*
* BIT 0 = Selective Login
@@ -1846,60 +2018,63 @@ struct nvram_81xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
- uint16_t reserved_13;
+ __le16 reserved_13;
- uint16_t boot_id_number;
- uint16_t reserved_14;
+ __le16 boot_id_number;
+ __le16 reserved_14;
- uint16_t max_luns_per_target;
- uint16_t reserved_15;
+ __le16 max_luns_per_target;
+ __le16 reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
- uint16_t reserved_16[3];
+ __le16 reserved_16[3];
/* Offset 352. */
uint8_t reserved_17[4];
- uint16_t reserved_18[5];
+ __le16 reserved_18[5];
uint8_t reserved_19[2];
- uint16_t reserved_20[8];
+ __le16 reserved_20[8];
/* Offset 384. */
uint8_t reserved_21[16];
- uint16_t reserved_22[3];
+ __le16 reserved_22[3];
/* Offset 406 (0x196) Enhanced Features
* BIT 0 = Extended BB credits for LR
* BIT 1 = Virtual Fabric Enable
* BIT 2-5 = Distance Support if BIT 0 is on
- * BIT 6-15 = Unused
+ * BIT 6 = Prefer FCP
+ * BIT 7 = SCM disabled when set (1)
+ * BIT 8-15 = Unused
*/
uint16_t enhanced_features;
+
uint16_t reserved_24[4];
/* Offset 416. */
- uint16_t reserved_25[32];
+ __le16 reserved_25[32];
/* Offset 480. */
uint8_t model_name[16];
/* Offset 496. */
- uint16_t feature_mask_l;
- uint16_t feature_mask_h;
- uint16_t reserved_26[2];
+ __le16 feature_mask_l;
+ __le16 feature_mask_h;
+ __le16 reserved_26[2];
- uint16_t subsystem_vendor_id;
- uint16_t subsystem_device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -1908,31 +2083,31 @@ struct nvram_81xx {
*/
#define ICB_VERSION 1
struct init_cb_81xx {
- uint16_t version;
- uint16_t reserved_1;
+ __le16 version;
+ __le16 reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t reserved_2;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t reserved_3;
+ __le16 reserved_3;
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
@@ -1940,12 +2115,12 @@ struct init_cb_81xx {
uint8_t reserved_4[8];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
__le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0-3 = Reserved
@@ -1958,7 +2133,7 @@ struct init_cb_81xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -1976,7 +2151,7 @@ struct init_cb_81xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0-3 = Reserved
@@ -1997,7 +2172,7 @@ struct init_cb_81xx {
* BIT 28 = SPMA selection bit 1
* BIT 30-31 = Reserved
*/
- uint32_t firmware_options_3;
+ __le32 firmware_options_3;
uint8_t reserved_5[8];
@@ -2075,9 +2250,9 @@ struct qla_fcp_prio_cfg {
#define FCP_PRIO_ATTR_ENABLE 0x1
#define FCP_PRIO_ATTR_PERSIST 0x2
uint8_t reserved; /* Reserved for future use */
-#define FCP_PRIO_CFG_HDR_SIZE 0x10
- struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
-#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry)
+ struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */
+ uint8_t reserved2[16];
};
#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/
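
Worked size check for the reshaped priority table: 0x10 header bytes + 1023 entries * 0x20 bytes + 16 reserved bytes = 32768, exactly FCP_PRIO_CFG_SIZE, so the struct now spans the whole flash region and FCP_PRIO_CFG_HDR_SIZE falls out of offsetof(). A compile-time assert via the kernel's static_assert() makes the invariant explicit (sketch):

static_assert(sizeof(struct qla_fcp_prio_cfg) == FCP_PRIO_CFG_SIZE,
	      "qla_fcp_prio_cfg must cover the 32K flash region");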
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2a64729a2bc5..e3256e721be1 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_GBL_H
#define __QLA_GBL_H
@@ -13,6 +12,7 @@
* Global Function Prototypes in qla_init.c source file.
*/
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
extern int qla2100_pci_config(struct scsi_qla_host *);
extern int qla2300_pci_config(struct scsi_qla_host *);
@@ -31,7 +31,7 @@ extern int qla24xx_nvram_config(struct scsi_qla_host *);
extern int qla81xx_nvram_config(struct scsi_qla_host *);
extern void qla2x00_update_fw_options(struct scsi_qla_host *);
extern void qla24xx_update_fw_options(scsi_qla_host_t *);
-extern void qla81xx_update_fw_options(scsi_qla_host_t *);
+
extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
@@ -70,8 +70,6 @@ extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
-extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -109,7 +107,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+int qla24xx_detect_sfp(scsi_qla_host_t *);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
extern void qla28xx_get_aux_images(struct scsi_qla_host *,
@@ -127,7 +125,24 @@ int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
void qla_rscn_replay(fc_port_t *fcport);
+void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
+void qla_init_iocb_limit(scsi_qla_host_t *);
+
+void qla_edif_list_del(fc_port_t *fcport);
+void qla_edif_sadb_release(struct qla_hw_data *ha);
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha);
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha);
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+ srb_t *sp, struct sts_entry_24xx *sts24);
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct ctio7_from_24xx *ctio);
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport);
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+void qla_edif_clear_appdata(struct scsi_qla_host *vha,
+ struct fc_port *fcport);
+const char *sc_to_str(uint16_t cmd);
/*
* Global Data in qla_os.c source file.
@@ -142,8 +157,11 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xrdpenable;
+extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
+extern int ql2xextended_error_logging_ktrace;
extern int ql2xiidmaenable;
extern int ql2xmqsupport;
extern int ql2xfwloadbin;
@@ -154,7 +172,6 @@ extern int ql2xasynctmfenable;
extern int ql2xgffidenable;
extern int ql2xenabledif;
extern int ql2xenablehba_err_chk;
-extern int ql2xtargetreset;
extern int ql2xdontresethba;
extern uint64_t ql2xmaxlun;
extern int ql2xmdcapmask;
@@ -169,8 +186,12 @@ extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
-extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
+extern int ql2xfulldump_on_mpifail;
+extern int ql2xsecenable;
+extern int ql2xenforce_iocb_limit;
+extern int ql2xabts_wait_nvme;
+extern u32 ql2xnvme_queues;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -213,10 +234,10 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
-extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla_eeh_work(struct work_struct *);
extern void qla2x00_sp_compl(srb_t *sp, int);
extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
@@ -226,6 +247,12 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
+ struct purex_item *pkt);
+void qla_pci_set_eeh_busy(struct scsi_qla_host *);
+void qla_schedule_eeh_work(struct scsi_qla_host *);
+struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport,
+ int index, int dir);
/*
* Global Functions in qla_mid.c source file.
@@ -249,7 +276,6 @@ extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(srb_t *sp);
-extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
@@ -270,7 +296,10 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
-
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+ struct els_entry_24xx *pkt, struct qla_els_pt_arg *a);
+cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha,
+ struct req_que *que);
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -286,7 +315,8 @@ extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern int qla2xxx_dif_start_scsi_mq(srb_t *);
-extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
+extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+ void (*done)(struct srb *, int));
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
@@ -300,6 +330,10 @@ extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
+ struct qla_work_evt *e);
+void qla2x00_sp_release(struct kref *kref);
+void qla2x00_els_dcmd2_iocb_timeout(void *data);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -354,6 +388,9 @@ extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
+qla24xx_get_port_database(scsi_qla_host_t *, u16, struct port_database_24xx *);
+
+extern int
qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
extern int
@@ -395,7 +432,8 @@ extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
@@ -452,6 +490,13 @@ extern int
qla25xx_set_driver_version(scsi_qla_host_t *, char *);
extern int
+qla25xx_set_els_cmds_supported(scsi_qla_host_t *);
+
+extern int
+qla24xx_get_buffer_credits(scsi_qla_host_t *, struct buffer_credit_24xx *,
+ dma_addr_t);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -509,6 +554,10 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+extern int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *,
+ struct qla_dport_diag_v2 *, mbx_cmd_t *);
+
int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
@@ -529,6 +578,7 @@ extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t,
uint32_t *);
extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t,
uint32_t);
+void qla_no_op_mb(struct scsi_qla_host *vha);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -552,14 +602,16 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
extern irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id);
+extern irqreturn_t
+qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
+void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
*/
-extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
@@ -617,19 +669,27 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, u8 *buf, u32 buf_len);
+
+int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
+ uint16_t *mbx_out);
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
-extern void qla2100_fw_dump(scsi_qla_host_t *, int);
-extern void qla2300_fw_dump(scsi_qla_host_t *, int);
-extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla8044_fw_dump(scsi_qla_host_t *, int);
-
-extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+void qla2xxx_dump_fw(scsi_qla_host_t *vha);
+void qla2100_fw_dump(scsi_qla_host_t *vha);
+void qla2300_fw_dump(scsi_qla_host_t *vha);
+void qla24xx_fw_dump(scsi_qla_host_t *vha);
+void qla25xx_fw_dump(scsi_qla_host_t *vha);
+void qla81xx_fw_dump(scsi_qla_host_t *vha);
+void qla82xx_fw_dump(scsi_qla_host_t *vha);
+void qla8044_fw_dump(scsi_qla_host_t *vha);
+
+void qla27xx_fwdump(scsi_qla_host_t *vha);
+extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int);
extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
extern int qla27xx_fwdt_template_valid(void *);
extern ulong qla27xx_fwdt_template_size(void *);
@@ -656,13 +716,11 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
-extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern void qla2x00_free_fcport(fc_port_t *);
-
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
@@ -672,7 +730,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
@@ -684,13 +742,14 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
void qla_scan_work_fn(struct work_struct *);
+uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *);
+uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *);
/*
* Global Function Prototypes in qla_attr.c source file.
*/
struct device_attribute;
-extern struct device_attribute *qla2x00_host_attrs[];
-extern struct device_attribute *qla2x00_host_attrs_dm[];
+extern const struct attribute_group *qla2x00_host_groups[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -704,7 +763,6 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
struct qla_fcp_prio_cfg *, uint8_t);
-void qla_insert_tgt_attrs(void);
/*
* Global Function Prototypes in qla_dfs.c source file.
*/
@@ -725,12 +783,6 @@ extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
-extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
@@ -754,7 +806,7 @@ extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
-extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
uint32_t *, int);
extern uint32_t qlafx00_fw_state_show(struct device *,
@@ -767,7 +819,6 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
extern void qlafx00_timer_routine(scsi_qla_host_t *);
extern int qlafx00_rescan_isp(scsi_qla_host_t *);
-extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
/* qla82xx related functions */
@@ -816,8 +867,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
-extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
-extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
/* ISP 8021 IDC */
extern void qla82xx_clear_drv_active(struct qla_hw_data *);
@@ -839,11 +888,12 @@ extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
-extern char *qdev_state(uint32_t);
+extern const char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
+extern int ql26xx_led_config(scsi_qla_host_t *, uint16_t, uint16_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct bsg_job *);
@@ -853,9 +903,12 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
+extern int qla24xx_sadb_update(struct bsg_job *bsg_job);
+extern int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+ fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl);
/* 83xx related functions */
-extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+void qla83xx_fw_dump(scsi_qla_host_t *vha);
/* Minidump related functions */
extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
@@ -897,6 +950,7 @@ extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
response_t *);
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
@@ -909,12 +963,61 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
-void qlt_remove_target_resources(struct qla_hw_data *);
+void qla_remove_hostmap(struct qla_hw_data *ha);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
+extern void qla24xx_process_purex_list(struct purex_list *);
+extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp);
+extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp);
+extern void qla_wait_nvme_release_cmd_kref(srb_t *sp);
+extern void qla_nvme_abort_set_option
+ (struct abort_entry_24xx *abt, srb_t *sp);
+extern void qla_nvme_abort_process_comp_status
+ (struct abort_entry_24xx *abt, srb_t *sp);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+
+/* qla_edif.c */
+fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
+void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
+ fc_port_t *fcport);
+void qla_edb_stop(scsi_qla_host_t *vha);
+int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
+void qla_enode_init(scsi_qla_host_t *vha);
+void qla_enode_stop(scsi_qla_host_t *vha);
+void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport);
+void qla_edb_init(scsi_qla_host_t *vha);
+void qla_edif_timer(scsi_qla_host_t *vha);
+int qla28xx_start_scsi_edif(srb_t *sp);
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp);
+void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct sa_update_28xx *pkt);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
+
+#define QLA2XX_HW_ERROR BIT_0
+#define QLA2XX_SHT_LNK_DWN BIT_1
+#define QLA2XX_INT_ERR BIT_2
+#define QLA2XX_CMD_TIMEOUT BIT_3
+#define QLA2XX_RESET_CMD_ERR BIT_4
+#define QLA2XX_TGT_SHT_LNK_DOWN BIT_17
+
+#define QLA2XX_MAX_LINK_DOWN_TIME 100
+
+int qla2xxx_start_stats(struct Scsi_Host *shost, u32 flags);
+int qla2xxx_stop_stats(struct Scsi_Host *shost, u32 flags);
+int qla2xxx_reset_stats(struct Scsi_Host *shost, u32 flags);
+
+int qla2xxx_get_ini_stats(struct Scsi_Host *shost, u32 flags, void *data, u64 size);
+int qla2xxx_get_tgt_stats(struct Scsi_Host *shost, u32 flags,
+ struct fc_rport *rport, void *data, u64 size);
+int qla2xxx_disable_port(struct Scsi_Host *shost);
+int qla2xxx_enable_port(struct Scsi_Host *shost);
+
+uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha);
+uint64_t qla2x00_count_set_bits(u32 num);
+
#endif /* _QLA_GBL_H */
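Taken together, the new exports above form a start/sample/stop interface around the port performance monitor: qla2xxx_start_stats() arms collection for the error classes selected by the QLA2XX_* bit masks, qla2xxx_get_ini_stats()/qla2xxx_get_tgt_stats() sample them, and qla2xxx_stop_stats() disarms. A hedged caller-side sketch; the response buffer here is a stand-in, not the driver's real type:

/* Illustrative caller sequence for the new stats exports. */
u32 flags = QLA2XX_HW_ERROR | QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT;
u64 buf[64];	/* stand-in for the vendor-specific stats buffer */

if (qla2xxx_start_stats(shost, flags) == 0) {
	qla2xxx_get_ini_stats(shost, flags, buf, sizeof(buf));
	qla2xxx_stop_stats(shost, flags);
}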
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index aaa4a5bbf2ff..64ab070b8716 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -19,6 +18,8 @@ static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
static int qla_async_rsnn_nn(scsi_qla_host_t *);
+
+
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
* @vha: HA context
@@ -175,7 +176,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
break;
case CS_TIMEOUT:
rval = QLA_FUNCTION_TIMEOUT;
- /* fall through */
+ fallthrough;
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -528,7 +529,6 @@ static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
if (!e)
goto err2;
- del_timer(&sp->u.iocb_cmd.timer);
e->u.iosb.sp = sp;
qla2x00_post_work(vha, e);
return;
@@ -555,8 +555,8 @@ err2:
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
- sp->free(sp);
-
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
@@ -591,13 +591,15 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
if (!vha->flags.online)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rft_id";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -631,14 +633,12 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
- if (vha->flags.nvme_enabled)
+ if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x.\n",
@@ -652,7 +652,8 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
}
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
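The conversions in this hunk (and the ones that follow) replace the open-coded sp->free(sp) teardown with a kref_put() paired against the initial reference taken in qla2x00_get_sp(); the /* ref: INIT */ comments mark the two ends of that pairing. A minimal sketch of the failure-path rule, with error handling trimmed:

/* SRB lifetime sketch: qla2x00_sp_release() is the kref destructor. */
srb_t *sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);	/* ref: INIT (kref = 1) */
if (!sp)
	return QLA_FUNCTION_FAILED;
/* ... build and issue the CT pass-through; on any setup failure ... */
kref_put(&sp->cmd_kref, qla2x00_sp_release);		/* drop ref: INIT */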
@@ -675,8 +676,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
- FC4_TYPE_FCP_SCSI);
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
}
static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
@@ -687,13 +687,15 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rff_id";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -726,13 +728,11 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
ct_req->req.rff_id.fc4_feature = fc4feature;
- ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x feature %x type %x.\n",
@@ -748,7 +748,8 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -778,13 +779,15 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rnid";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -822,9 +825,6 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x\n",
sp->name, sp->handle, d_id->b24);
@@ -839,24 +839,24 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
-void
+size_t
qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
{
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(ha))
- snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
- ha->mr.fw_version, qla2x00_version_str);
- else
- snprintf(snn, size,
- "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
- ha->fw_major_version, ha->fw_minor_version,
- ha->fw_subminor_version, qla2x00_version_str);
+ return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
+ ha->model_number, ha->mr.fw_version, qla2x00_version_str);
+
+ return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
+ ha->model_number, ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version, qla2x00_version_str);
}
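Returning the scnprintf() length here (instead of void) lets callers feed the symbolic-name length straight into the FDMI attribute arithmetic, as the attribute builders later in this patch do:

/* Caller-side use of the new return value (mirrors the builders below). */
alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
    sizeof(eiter->a.sym_name));
alen += FDMI_ATTR_ALIGNMENT(alen);	/* pad the value to a 4-byte boundary */
alen += FDMI_ATTR_TYPELEN(eiter);	/* add the type+length header */
eiter->len = cpu_to_be16(alen);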
/**
@@ -886,13 +886,15 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rsnn_nn";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -936,9 +938,6 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x.\n",
sp->name, sp->handle);
@@ -953,7 +952,8 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -1247,7 +1247,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
}
/**
- * qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @vha: HA context
*
* This command uses the old Execute SNS Command mailbox routine.
@@ -1479,7 +1479,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
}
/**
- * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
+ * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
* @p: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
@@ -1501,747 +1501,738 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
return &p->p.req;
}
+uint
+qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
+{
+ uint speeds = 0;
+
+ if (IS_CNA_CAPABLE(ha))
+ return FDMI_PORT_SPEED_10GB;
+ if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->max_supported_speed == 2) {
+ if (ha->min_supported_speed <= 6)
+ speeds |= FDMI_PORT_SPEED_64GB;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1) {
+ if (ha->min_supported_speed <= 5)
+ speeds |= FDMI_PORT_SPEED_32GB;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 4)
+ speeds |= FDMI_PORT_SPEED_16GB;
+ }
+ if (ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 3)
+ speeds |= FDMI_PORT_SPEED_8GB;
+ }
+ if (ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 2)
+ speeds |= FDMI_PORT_SPEED_4GB;
+ }
+ return speeds;
+ }
+ if (IS_QLA2031(ha)) {
+ if ((ha->pdev->subsystem_vendor == 0x103C) &&
+ ((ha->pdev->subsystem_device == 0x8002) ||
+ (ha->pdev->subsystem_device == 0x8086))) {
+ speeds = FDMI_PORT_SPEED_16GB;
+ } else {
+ speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB;
+ }
+ return speeds;
+ }
+ if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
+ return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
+ if (IS_QLA24XX_TYPE(ha))
+ return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB;
+ if (IS_QLA23XX(ha))
+ return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
+ return FDMI_PORT_SPEED_1GB;
+}
+
+uint
+qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
+{
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ return FDMI_PORT_SPEED_1GB;
+ case PORT_SPEED_2GB:
+ return FDMI_PORT_SPEED_2GB;
+ case PORT_SPEED_4GB:
+ return FDMI_PORT_SPEED_4GB;
+ case PORT_SPEED_8GB:
+ return FDMI_PORT_SPEED_8GB;
+ case PORT_SPEED_10GB:
+ return FDMI_PORT_SPEED_10GB;
+ case PORT_SPEED_16GB:
+ return FDMI_PORT_SPEED_16GB;
+ case PORT_SPEED_32GB:
+ return FDMI_PORT_SPEED_32GB;
+ case PORT_SPEED_64GB:
+ return FDMI_PORT_SPEED_64GB;
+ default:
+ return FDMI_PORT_SPEED_UNKNOWN;
+ }
+}
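In the ISP27xx/28xx branch above, max_supported_speed encodes the fastest rate (0 = 16Gb, 1 = 32Gb, 2 = 64Gb) and min_supported_speed the slowest, so the returned mask covers every rate between the two. A worked example with values chosen purely for illustration:

/* Illustrative: a 27xx/28xx part rated for 8Gb..32Gb operation. */
ha->max_supported_speed = 1;	/* fastest: 32Gb */
ha->min_supported_speed = 3;	/* slowest: 8Gb */
speeds = qla25xx_fdmi_port_speed_capability(ha);
/*
 * speeds == FDMI_PORT_SPEED_32GB | FDMI_PORT_SPEED_16GB |
 *	     FDMI_PORT_SPEED_8GB
 */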
+
/**
- * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * qla2x00_hba_attributes() - build the FDMI HBA attribute block
* @vha: HA context
+ * @entries: buffer in which the attribute entries are built
+ * @callopt: option selecting the standard or extended FDMI command
*
* Returns the number of bytes used by the attribute block.
*/
-static int
-qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
+static unsigned long
+qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
+ unsigned int callopt)
{
- int rval, alen;
- uint32_t size, sn;
-
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
- struct ct_fdmi_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
-
- /* Issue RHBA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba.entry_count = cpu_to_be32(1);
- memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
- size = 2 * WWN_SIZE + 4 + 4;
-
- /* Attributes */
- ct_req->req.rhba.attrs.count =
- cpu_to_be32(FDMI_HBA_ATTR_COUNT);
- entries = &ct_req->req;
+ struct new_utsname *p_sysid = utsname();
+ struct ct_fdmi_hba_attr *eiter;
+ uint16_t alen;
+ unsigned long size = 0;
/* Nodename. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x2025,
- "NodeName = %8phN.\n", eiter->a.node_name);
-
+ memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
+ alen = sizeof(eiter->a.node_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a0,
+ "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Manufacturer. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
- alen = strlen(QLA2XXX_MANUFACTURER);
- snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2026,
- "Manufacturer = %s.\n", eiter->a.manufacturer);
-
+ alen = scnprintf(
+ eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", QLA2XXX_MANUFACTURER);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a1,
+ "MANUFACTURER = %s.\n", eiter->a.manufacturer);
/* Serial number. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
- sizeof(eiter->a.serial_num));
- else {
- sn = ((ha->serial0 & 0x1f) << 16) |
+ alen = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ alen = qla2xxx_get_vpd_field(vha, "SN",
+ eiter->a.serial_num, sizeof(eiter->a.serial_num));
+ }
+ if (!alen) {
+ uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
(ha->serial2 << 8) | ha->serial1;
- snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
- "%c%05d", 'A' + sn / 100000, sn % 100000);
+ alen = scnprintf(
+ eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
}
- alen = strlen(eiter->a.serial_num);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2027,
- "Serial no. = %s.\n", eiter->a.serial_num);
-
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a2,
+ "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
/* Model name. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
- snprintf(eiter->a.model, sizeof(eiter->a.model),
- "%s", ha->model_number);
- alen = strlen(eiter->a.model);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2028,
- "Model Name = %s.\n", eiter->a.model);
-
+ alen = scnprintf(
+ eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
+ "MODEL NAME = %s.\n", eiter->a.model);
/* Model description. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
- "%s", ha->model_desc);
- alen = strlen(eiter->a.model_desc);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2029,
- "Model Desc = %s.\n", eiter->a.model_desc);
-
+ alen = scnprintf(
+ eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a4,
+ "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
/* Hardware version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- if (!IS_FWI2_CAPABLE(ha)) {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
+ alen = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (!alen) {
+ alen = qla2xxx_get_vpd_field(vha, "MN",
+ eiter->a.hw_version, sizeof(eiter->a.hw_version));
+ }
+ if (!alen) {
+ alen = qla2xxx_get_vpd_field(vha, "EC",
+ eiter->a.hw_version, sizeof(eiter->a.hw_version));
+ }
}
- alen = strlen(eiter->a.hw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202a,
- "Hardware ver = %s.\n", eiter->a.hw_version);
-
+ if (!alen) {
+ alen = scnprintf(
+ eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a5,
+ "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
/* Driver version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
- "%s", qla2x00_version_str);
- alen = strlen(eiter->a.driver_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202b,
- "Driver ver = %s.\n", eiter->a.driver_version);
-
+ alen = scnprintf(
+ eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a6,
+ "DRIVER VERSION = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.orom_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha , 0x202c,
- "Optrom vers = %s.\n", eiter->a.orom_version);
+ alen = scnprintf(
+ eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a7,
+ "OPTROM VERSION = %d.%02d.\n",
+ eiter->a.orom_version[1], eiter->a.orom_version[0]);
/* Firmware version */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
sizeof(eiter->a.fw_version));
- alen = strlen(eiter->a.fw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202d,
- "Firmware vers = %s.\n", eiter->a.fw_version);
-
- /* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(vha, size + 16);
-
- ql_dbg(ql_dbg_disc, vha, 0x202e,
- "RHBA identifier = %8phN size=%d.\n",
- ct_req->req.rhba.hba_identifier, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
- entries, size);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2030,
- "RHBA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
- ct_rsp->header.explanation_code ==
- CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x2034,
- "HBA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20ad,
- "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
- }
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2035,
- "RHBA exiting normally.\n");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a8,
+ "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
+ /* OS Name and Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+ alen = 0;
+ if (p_sysid) {
+ alen = scnprintf(
+ eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s %s",
+ p_sysid->sysname, p_sysid->release, p_sysid->machine);
+ }
+ if (!alen) {
+ alen = scnprintf(
+ eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s",
+ "Linux", fc_host_system_hostname(vha->host));
}
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a9,
+ "OS VERSION = %s.\n", eiter->a.os_version);
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
+ /* MAX CT Payload Length */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
- return rval;
+ alen = sizeof(eiter->a.max_ct_len);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20aa,
+ "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
+ /* Node Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+ alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+ sizeof(eiter->a.sym_name));
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ab,
+ "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
+ /* Vendor Specific information */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
+ eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
+ alen = sizeof(eiter->a.vendor_specific_info);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ac,
+ "VENDOR SPECIFIC INFO = 0x%x.\n",
+ be32_to_cpu(eiter->a.vendor_specific_info));
+ /* Num Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+ eiter->a.num_ports = cpu_to_be32(1);
+ alen = sizeof(eiter->a.num_ports);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ad,
+ "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
+ /* Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name,
+ sizeof(eiter->a.fabric_name));
+ alen = sizeof(eiter->a.fabric_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ae,
+ "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ /* BIOS Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+ alen = scnprintf(
+ eiter->a.bios_name, sizeof(eiter->a.bios_name),
+ "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20af,
+ "BIOS NAME = %s\n", eiter->a.bios_name);
+ /* Vendor Identifier */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
+ alen = scnprintf(
+ eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
+ "%s", "QLGC");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20b0,
+ "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
+done:
+ return size;
}
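Each entry built above is a TLV: a big-endian 16-bit type, a 16-bit length covering the whole entry, and the value padded to a 4-byte boundary. The two helper macros are defined elsewhere in the driver; they are assumed to reduce to roughly:

/* Assumed shape of the helpers used above (defined outside this patch). */
#define FDMI_ATTR_TYPELEN(obj) \
	(sizeof((obj)->type) + sizeof((obj)->len))	/* 4-byte TL header */
#define FDMI_ATTR_ALIGNMENT(len) \
	(4 - ((len) & 3))				/* pad up to 4 bytes */

Note that, like the old alen += 4 - (alen & 3) arithmetic it replaces, this pads by a full 4 bytes when the length is already aligned.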
/**
- * qla2x00_fdmi_rpa() - perform RPA registration
+ * qla2x00_port_attributes() - build the FDMI port attribute block
* @vha: HA context
+ * @entries: buffer in which the attribute entries are built
+ * @callopt: option selecting the standard or extended FDMI command
*
* Returns the number of bytes used by the attribute block.
*/
-static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+static unsigned long
+qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
+ unsigned int callopt)
{
- int rval, alen;
- uint32_t size;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
+ struct new_utsname *p_sysid = utsname();
+ char *hostname = p_sysid ?
+ p_sysid->nodename : fc_host_system_hostname(vha->host);
struct ct_fdmi_port_attr *eiter;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RPA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
- RPA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
- size = WWN_SIZE + 4;
-
- /* Attributes */
- ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
- entries = &ct_req->req;
+ uint16_t alen;
+ unsigned long size = 0;
/* FC4 types. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[0] = 0x00;
+ eiter->a.fc4_types[1] = 0x00;
eiter->a.fc4_types[2] = 0x01;
- size += 4 + 32;
-
- ql_dbg(ql_dbg_disc, vha, 0x2039,
- "FC4_TYPES=%02x %02x.\n",
- eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]);
-
+ eiter->a.fc4_types[3] = 0x00;
+ alen = sizeof(eiter->a.fc4_types);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c0,
+ "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x211f,
+ "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.fc4_types[6]);
+ }
/* Supported speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_32GB|
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB);
- else if (IS_QLA2031(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB);
- else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_1GB);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203a,
- "Supported_Speed=%x.\n", eiter->a.sup_speed);
-
+ eiter->a.sup_speed = cpu_to_be32(
+ qla25xx_fdmi_port_speed_capability(ha));
+ alen = sizeof(eiter->a.sup_speed);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c1,
+ "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
/* Current speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_1GB);
- break;
- case PORT_SPEED_2GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_2GB);
- break;
- case PORT_SPEED_4GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_4GB);
- break;
- case PORT_SPEED_8GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_8GB);
- break;
- case PORT_SPEED_10GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_10GB);
- break;
- case PORT_SPEED_16GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_16GB);
- break;
- case PORT_SPEED_32GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_32GB);
- break;
- default:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
- break;
- }
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203b,
- "Current_Speed=%x.\n", eiter->a.cur_speed);
-
+ eiter->a.cur_speed = cpu_to_be32(
+ qla25xx_fdmi_port_speed_currently(ha));
+ alen = sizeof(eiter->a.cur_speed);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c2,
+ "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = cpu_to_be16(4 + 4);
- eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203c,
- "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
-
+ eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
+ alen = sizeof(eiter->a.max_frame_size);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c3,
+ "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
/* OS device name. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
- "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
- alen = strlen(eiter->a.os_dev_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x204b,
- "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
-
+ alen = scnprintf(
+ eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c4,
+ "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
/* Hostname. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", p_sysid->nodename);
- } else {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(vha->host));
- }
- alen = strlen(eiter->a.host_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
-
- /* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(vha, size + 16);
-
- ql_dbg(ql_dbg_disc, vha, 0x203e,
- "RPA portname %016llx, size = %d.\n",
- wwn_to_u64(ct_req->req.rpa.port_name), size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
- entries, size);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2040,
- "RPA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
- ct_rsp->header.explanation_code ==
- CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20cd,
- "RPA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- }
-
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2041,
- "RPA exiting normally.\n");
- }
-
- return rval;
-}
-
-/**
- * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
- * @vha: HA context
- *
- * Returns 0 on success.
- */
-static int
-qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
-{
- int rval, alen;
- uint32_t size, sn;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
- struct ct_fdmiv2_hba_attr *eiter;
- struct qla_hw_data *ha = vha->hw;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RHBA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
- RHBA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba2.entry_count = cpu_to_be32(1);
- memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
- size = 2 * WWN_SIZE + 4 + 4;
+ if (!*hostname || !strncmp(hostname, "(none)", 6))
+ hostname = "Linux-default";
+ alen = scnprintf(
+ eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", hostname);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c5,
+ "HOSTNAME = %s.\n", eiter->a.host_name);
- /* Attributes */
- ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
- entries = &ct_req->req;
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
- /* Nodename. */
+ /* Node Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x207d,
- "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+ eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+ memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
+ alen = sizeof(eiter->a.node_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c6,
+ "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
- /* Manufacturer. */
+ /* Port Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
- snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
- eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
- alen = strlen(eiter->a.manufacturer);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a5,
- "Manufacturer = %s.\n", eiter->a.manufacturer);
+ eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+ memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
+ alen = sizeof(eiter->a.port_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c7,
+ "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
- /* Serial number. */
+ /* Port Symbolic Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
- sizeof(eiter->a.serial_num));
- else {
- sn = ((ha->serial0 & 0x1f) << 16) |
- (ha->serial2 << 8) | ha->serial1;
- snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
- "%c%05d", 'A' + sn / 100000, sn % 100000);
- }
- alen = strlen(eiter->a.serial_num);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a6,
- "Serial no. = %s.\n", eiter->a.serial_num);
+ eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+ alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+ sizeof(eiter->a.port_sym_name));
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
- /* Model name. */
+ /* Port Type */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
- snprintf(eiter->a.model, sizeof(eiter->a.model),
- "%s", ha->model_number);
- alen = strlen(eiter->a.model);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a7,
- "Model Name = %s.\n", eiter->a.model);
-
- /* Model description. */
+ eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+ eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+ alen = sizeof(eiter->a.port_type);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c9,
+ "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
+
+ /* Supported Class of Service */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
- "%s", ha->model_desc);
- alen = strlen(eiter->a.model_desc);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a8,
- "Model Desc = %s.\n", eiter->a.model_desc);
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+ eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+ alen = sizeof(eiter->a.port_supported_cos);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ca,
+ "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
- /* Hardware version. */
+ /* Port Fabric Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- if (!IS_FWI2_CAPABLE(ha)) {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- }
- alen = strlen(eiter->a.hw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a9,
- "Hardware ver = %s.\n", eiter->a.hw_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name,
+ sizeof(eiter->a.fabric_name));
+ alen = sizeof(eiter->a.fabric_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cb,
+ "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
- /* Driver version. */
+ /* FC4_type */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
- "%s", qla2x00_version_str);
- alen = strlen(eiter->a.driver_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20aa,
- "Driver ver = %s.\n", eiter->a.driver_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+ eiter->a.port_fc4_type[0] = 0x00;
+ eiter->a.port_fc4_type[1] = 0x00;
+ eiter->a.port_fc4_type[2] = 0x01;
+ eiter->a.port_fc4_type[3] = 0x00;
+ alen = sizeof(eiter->a.port_fc4_type);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cc,
+ "PORT ACTIVE FC4 TYPE = %016llx.\n",
+ *(uint64_t *)eiter->a.port_fc4_type);
- /* Option ROM version. */
+ /* Port State */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.orom_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha , 0x20ab,
- "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
- eiter->a.orom_version[0]);
+ eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+ eiter->a.port_state = cpu_to_be32(2);
+ alen = sizeof(eiter->a.port_state);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cd,
+ "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
- /* Firmware version */
+ /* Number of Ports */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
- sizeof(eiter->a.fw_version));
- alen = strlen(eiter->a.fw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ac,
- "Firmware vers = %s.\n", eiter->a.fw_version);
-
- /* OS Name and Version */
+ eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+ eiter->a.num_ports = cpu_to_be32(1);
+ alen = sizeof(eiter->a.num_ports);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ce,
+ "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
+
+ /* Port Identifier */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
- "%s %s %s",
- p_sysid->sysname, p_sysid->release, p_sysid->version);
- } else {
- snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
- "%s %s", "Linux", fc_host_system_hostname(vha->host));
- }
- alen = strlen(eiter->a.os_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ae,
- "OS Name and Version = %s.\n", eiter->a.os_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
+ eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+ alen = sizeof(eiter->a.port_id);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cf,
+ "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
+
+ if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
+ goto done;
- /* MAX CT Payload Length */
+ /* Smart SAN Service Category (Populate Smart SAN Initiator) */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
- eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
+ alen = scnprintf(
+ eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
+ "%s", "Smart SAN Initiator");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d0,
+ "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
+
+ /* Smart SAN GUID (NWWN+PWWN) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
+ memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
+ memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
+ alen = sizeof(eiter->a.smartsan_guid);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d1,
+ "Smart SAN GUID = %016llx-%016llx\n",
+ wwn_to_u64(eiter->a.smartsan_guid),
+ wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
+
+ /* Smart SAN Version (populate "Smart SAN Version 2.0") */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
+ alen = scnprintf(
+ eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
+ "%s", "Smart SAN Version 2.0");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
+
+ /* Smart SAN Product Name (Specify Adapter Model No) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
+ alen = scnprintf(eiter->a.smartsan_prod_name,
+ sizeof(eiter->a.smartsan_prod_name),
+ "ISP%04x", ha->pdev->device);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
+
+ /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
+ eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
+ alen = sizeof(eiter->a.smartsan_port_info);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d4,
+ "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
+
+ /* Smart SAN Security Support */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
+ eiter->a.smartsan_security_support = cpu_to_be32(1);
+ alen = sizeof(eiter->a.smartsan_security_support);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "SMARTSAN SECURITY SUPPORT = %d\n",
+ be32_to_cpu(eiter->a.smartsan_security_support));
- ql_dbg(ql_dbg_disc, vha, 0x20af,
- "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+done:
+ return size;
+}
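callopt gates how much of the block each builder emits: FDMI-1 callers stop after the baseline attributes, FDMI-2 adds the extended ones, and the Smart SAN entries are appended only for the Smart SAN call option when ql2xsmartsan is set. Condensed control flow, with hypothetical helper names standing in for the inline attribute runs:

/* Condensed flow of qla2x00_port_attributes(); helpers are hypothetical. */
size = emit_fdmi1_port_attrs(entries);		/* always emitted */
if (callopt == CALLOPT_FDMI1)
	return size;
size += emit_fdmi2_port_attrs(entries + size);	/* FDMI-2 extensions */
if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
	return size;
size += emit_smartsan_attrs(entries + size);	/* Smart SAN extras */
return size;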
- /* Node Sybolic Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
- qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
- sizeof(eiter->a.sym_name));
- alen = strlen(eiter->a.sym_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+/**
+ * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * @vha: HA context
+ * @callopt: option selecting which FDMI registration command to issue
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long size = 0;
+ unsigned int rval, count;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
- ql_dbg(ql_dbg_disc, vha, 0x20b0,
- "Symbolic Name = %s.\n", eiter->a.sym_name);
+ count = callopt != CALLOPT_FDMI1 ?
+ FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
- /* Vendor Id */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
- eiter->a.vendor_id = cpu_to_be32(0x1077);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ size = RHBA_RSP_SIZE;
- ql_dbg(ql_dbg_disc, vha, 0x20b1,
- "Vendor Id = %x.\n", eiter->a.vendor_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20e0,
+ "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
- /* Num Ports */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
- eiter->a.num_ports = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
- ql_dbg(ql_dbg_disc, vha, 0x20b2,
- "Port Num = %x.\n", eiter->a.num_ports);
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
+ ct_rsp = &ha->ct_sns->p.rsp;
- /* Fabric Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
- memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
+ /* Prepare FDMI command entries */
+ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
+ sizeof(ct_req->req.rhba.hba_identifier));
+ size = sizeof(ct_req->req.rhba.hba_identifier);
- ql_dbg(ql_dbg_disc, vha, 0x20b3,
- "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ ct_req->req.rhba.entry_count = cpu_to_be32(1);
+ size += sizeof(ct_req->req.rhba.entry_count);
- /* BIOS Version */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
- snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
- "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.bios_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+ memcpy(ct_req->req.rhba.port_name, vha->port_name,
+ sizeof(ct_req->req.rhba.port_name));
+ size += sizeof(ct_req->req.rhba.port_name);
- ql_dbg(ql_dbg_disc, vha, 0x20b4,
- "BIOS Name = %s\n", eiter->a.bios_name);
+ /* Attribute count */
+ ct_req->req.rhba.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rhba.attrs.count);
- /* Vendor Identifier */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
- snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
- "%s", "QLGC");
- alen = strlen(eiter->a.vendor_identifier);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+ /* Attribute block */
+ entries = &ct_req->req.rhba.attrs.entry;
- ql_dbg(ql_dbg_disc, vha, 0x201b,
- "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
+ size += qla2x00_hba_attributes(vha, entries, callopt);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x20b5,
- "RHBA identifier = %016llx.\n",
- wwn_to_u64(ct_req->req.rhba2.hba_identifier));
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+ ql_dbg(ql_dbg_disc, vha, 0x20e1,
+ "RHBA %016llx %016llx.\n",
+ wwn_to_u64(ct_req->req.rhba.hba_identifier),
+ wwn_to_u64(ct_req->req.rhba.port_name));
+
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x20b7,
- "RHBA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "RHBA iocb failed (%d).\n", rval);
+ return rval;
+ }
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
+ if (rval) {
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20b8,
- "HBA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2016,
- "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
+ ql_dbg(ql_dbg_disc, vha, 0x20e4,
+ "RHBA already registered.\n");
+ return QLA_ALREADY_REGISTERED;
}
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20b9,
- "RHBA FDMI V2 exiting normally.\n");
+
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
+ ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
return rval;
}
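For orientation, a minimal sketch of the RHBA request layout that the memcpy/size accounting above assumes (field names are taken from the hunk itself; the authoritative definition lives in the driver's headers and may differ):

struct ct_fdmi_rhba_req_sketch {
	uint8_t hba_identifier[8];	/* WWPN registered as the HBA identifier */
	__be32  entry_count;		/* number of port entries; 1 here */
	uint8_t port_name[8];		/* WWPN of the port being registered */
	struct {
		__be32  count;		/* attribute count */
		uint8_t entry[];	/* packed FDMI attribute TLVs */
	} attrs;
};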
-/**
- * qla2x00_fdmi_dhba() -
- * @vha: HA context
- *
- * Returns 0 on success.
- */
+
static int
qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
{
@@ -2250,22 +2241,17 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
-
/* Issue RPA */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
-
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
-
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
-
ql_dbg(ql_dbg_disc, vha, 0x2036,
"DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
-
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
@@ -2280,337 +2266,178 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2038,
"DHBA exiting normally.\n");
}
-
return rval;
}
/**
- * qla2x00_fdmiv2_rpa() -
+ * qla2x00_fdmi_rprt() - perform RPRT registration
* @vha: HA context
+ * @callopt: Option to issue extended or standard FDMI
+ * command parameter
*
* Returns 0 on success.
*/
static int
-qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
+qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
{
- int rval, alen;
- uint32_t size;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
struct qla_hw_data *ha = vha->hw;
+ ulong size = 0;
+ uint rval, count;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
void *entries;
- struct ct_fdmiv2_port_attr *eiter;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RPA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
-
+ count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
+ FDMI2_SMARTSAN_PORT_ATTR_COUNT :
+ callopt != CALLOPT_FDMI1 ?
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
+
+ size = RPRT_RSP_SIZE;
+ ql_dbg(ql_dbg_disc, vha, 0x20e8,
+ "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
- size = WWN_SIZE + 4;
-
- /* Attributes */
- ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
- entries = &ct_req->req;
-
- /* FC4 types. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = cpu_to_be16(4 + 32);
- eiter->a.fc4_types[2] = 0x01;
- size += 4 + 32;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ba,
- "FC4_TYPES=%02x %02x.\n",
- eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]);
-
- if (vha->flags.nvme_enabled) {
- eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
- ql_dbg(ql_dbg_disc, vha, 0x211f,
- "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
- eiter->a.fc4_types[6]);
- }
-
- /* Supported speed. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_32GB|
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB);
- else if (IS_QLA2031(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB);
- else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_1GB);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20bb,
- "Supported Port Speed = %x.\n", eiter->a.sup_speed);
-
- /* Current speed. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
- break;
- case PORT_SPEED_2GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
- break;
- case PORT_SPEED_4GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
- break;
- case PORT_SPEED_8GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
- break;
- case PORT_SPEED_10GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
- break;
- case PORT_SPEED_16GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
- break;
- case PORT_SPEED_32GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
- break;
- default:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
- break;
+ /* Prepare FDMI command entries */
+ memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
+ sizeof(ct_req->req.rprt.hba_identifier));
+ size += sizeof(ct_req->req.rprt.hba_identifier);
+ memcpy(ct_req->req.rprt.port_name, vha->port_name,
+ sizeof(ct_req->req.rprt.port_name));
+ size += sizeof(ct_req->req.rprt.port_name);
+ /* Attribute count */
+ ct_req->req.rprt.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rprt.attrs.count);
+ /* Attribute block */
+ entries = ct_req->req.rprt.attrs.entry;
+ size += qla2x00_port_attributes(vha, entries, callopt);
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+ ql_dbg(ql_dbg_disc, vha, 0x20e9,
+ "RPRT %016llx %016llx.\n",
+ wwn_to_u64(ct_req->req.rprt.hba_identifier),
+ wwn_to_u64(ct_req->req.rprt.port_name));
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
+ entries, size);
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "RPRT iocb failed (%d).\n", rval);
+ return rval;
}
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x2017,
- "Current_Speed = %x.\n", eiter->a.cur_speed);
-
- /* Max frame size. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = cpu_to_be16(4 + 4);
- eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20bc,
- "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
-
- /* OS device name. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- alen = strlen(QLA2XXX_DRIVER_NAME);
- snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
- "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20be,
- "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
+ if (rval) {
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "RPRT already registered.\n");
+ return QLA_ALREADY_REGISTERED;
+ }
- /* Hostname. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", p_sysid->nodename);
- } else {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(vha->host));
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
- alen = strlen(eiter->a.host_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x201a,
- "HostName=%s.\n", eiter->a.host_name);
-
- /* Node Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c0,
- "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
-
- /* Port Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_NAME);
- memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c1,
- "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
-
- /* Port Symbolic Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
- qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
- sizeof(eiter->a.port_sym_name));
- alen = strlen(eiter->a.port_sym_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c2,
- "port symbolic name = %s\n", eiter->a.port_sym_name);
-
- /* Port Type */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
- eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c3,
- "Port Type = %x.\n", eiter->a.port_type);
-
- /* Class of Service */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
- eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c4,
- "Supported COS = %08x\n", eiter->a.port_supported_cos);
-
- /* Port Fabric Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
- memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
+ ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
+ return rval;
+}
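The nested conditional expression that picks the attribute count appears in both qla2x00_fdmi_rprt() and qla2x00_fdmi_rpa(). It unpacks to the equivalent helper below (a sketch only; no such helper exists in the driver):

static uint qla2x00_fdmi_port_attr_count(uint callopt)
{
	/* SmartSAN registration carries extra port attributes. */
	if (callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan)
		return FDMI2_SMARTSAN_PORT_ATTR_COUNT;
	/* Any other FDMI-2 variant. */
	if (callopt != CALLOPT_FDMI1)
		return FDMI2_PORT_ATTR_COUNT;
	/* Plain FDMI-1. */
	return FDMI1_PORT_ATTR_COUNT;
}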
- ql_dbg(ql_dbg_disc, vha, 0x20c5,
- "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+/**
+ * qla2x00_fdmi_rpa() - perform RPA registration
+ * @vha: HA context
+ * @callopt: Option to issue FDMI registration
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ulong size = 0;
+ uint rval, count;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
- /* FC4_type */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
- eiter->a.port_fc4_type[0] = 0;
- eiter->a.port_fc4_type[1] = 0;
- eiter->a.port_fc4_type[2] = 1;
- eiter->a.port_fc4_type[3] = 0;
- eiter->len = cpu_to_be16(4 + 32);
- size += 4 + 32;
+ count =
+ callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
+ FDMI2_SMARTSAN_PORT_ATTR_COUNT :
+ callopt != CALLOPT_FDMI1 ?
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
- ql_dbg(ql_dbg_disc, vha, 0x20c6,
- "Port Active FC4 Type = %02x %02x.\n",
- eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+ size =
+ callopt != CALLOPT_FDMI1 ?
+ SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
- if (vha->flags.nvme_enabled) {
- eiter->a.port_fc4_type[4] = 0;
- eiter->a.port_fc4_type[5] = 0;
- eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
- ql_dbg(ql_dbg_disc, vha, 0x2120,
- "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
- eiter->a.port_fc4_type[6]);
- }
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
- /* Port State */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_STATE);
- eiter->a.port_state = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
- ql_dbg(ql_dbg_disc, vha, 0x20c7,
- "Port State = %x.\n", eiter->a.port_state);
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
+ ct_rsp = &ha->ct_sns->p.rsp;
- /* Number of Ports */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
- eiter->a.num_ports = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Prepare FDMI command entries. */
+ memcpy(ct_req->req.rpa.port_name, vha->port_name,
+ sizeof(ct_req->req.rpa.port_name));
+ size += sizeof(ct_req->req.rpa.port_name);
- ql_dbg(ql_dbg_disc, vha, 0x20c8,
- "Number of ports = %x.\n", eiter->a.num_ports);
+ /* Attribute count */
+ ct_req->req.rpa.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rpa.attrs.count);
- /* Port Id */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_ID);
- eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Attribute block */
+ entries = ct_req->req.rpa.attrs.entry;
- ql_dbg(ql_dbg_disc, vha, 0x201c,
- "Port Id = %x.\n", eiter->a.port_id);
+ size += qla2x00_port_attributes(vha, entries, callopt);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x2018,
- "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
+ ql_dbg(ql_dbg_disc, vha, 0x20f1,
+ "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
+
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x20cb,
- "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f3,
+ "RPA iocb failed (%d).\n", rval);
+ return rval;
+ }
+
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
+ if (rval) {
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20ce,
- "RPA FDMI v2 already registered\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2020,
- "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
+ ql_dbg(ql_dbg_disc, vha, 0x20f4,
+ "RPA already registered.\n");
+ return QLA_ALREADY_REGISTERED;
}
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cc,
- "RPA FDMI V2 exiting normally.\n");
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f5,
+ "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
+ ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
return rval;
}
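All three registration builders finish with qla2x00_update_ms_fdmi_iocb(vha, size + 16). The constant plausibly covers the CT IU preamble that precedes the payload; this is an assumption, the real sizing lives in the driver's CT definitions:

/* Assumed accounting behind "size + 16" (sketch, not authoritative):
 *   16 bytes   CT IU preamble written by qla2x00_prep_ct_fdmi_req()
 * + size       identifiers + attribute count + packed attributes
 * = total MS IOCB request length
 */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);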
@@ -2623,18 +2450,31 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
int
qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
- int rval = QLA_FUNCTION_FAILED;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
- return QLA_FUNCTION_FAILED;
+ return rval;
rval = qla2x00_mgmt_svr_login(vha);
if (rval)
return rval;
- rval = qla2x00_fdmiv2_rhba(vha);
+ /* For npiv/vport send rprt only */
+ if (vha->vp_idx) {
+ if (ql2xsmartsan)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
+ if (rval || !ql2xsmartsan)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
+ if (rval)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
+
+ return rval;
+ }
+
+ /* Try fdmi2 first, if fails then try fdmi1 */
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
goto try_fdmi;
@@ -2643,18 +2483,22 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
if (rval)
goto try_fdmi;
- rval = qla2x00_fdmiv2_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
if (rval)
goto try_fdmi;
}
- rval = qla2x00_fdmiv2_rpa(vha);
+
+ if (ql2xsmartsan)
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
+ if (rval || !ql2xsmartsan)
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
if (rval)
goto try_fdmi;
- goto out;
+ return rval;
try_fdmi:
- rval = qla2x00_fdmi_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
return rval;
@@ -2663,12 +2507,13 @@ try_fdmi:
if (rval)
return rval;
- rval = qla2x00_fdmi_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
if (rval)
return rval;
}
- rval = qla2x00_fdmi_rpa(vha);
-out:
+
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
+
return rval;
}
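The fallback ladder above condenses to the following order (a summary sketch of the control flow implemented by the function, not code from the patch):

/*
 * Physical port:  RHBA(FDMI2), then RPA(SmartSAN when ql2xsmartsan,
 *                 else FDMI2); any failure drops to the FDMI1 ladder:
 *                 RHBA(FDMI1) followed by RPA(FDMI1).
 * NPIV vport:     RPRT(SmartSAN when ql2xsmartsan) -> RPRT(FDMI2) ->
 *                 RPRT(FDMI1); no RHBA/RPA is sent.
 * QLA_ALREADY_REGISTERED from an RHBA triggers a DHBA (deregister)
 * and one re-issue of RHBA before falling back a level.
 */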
@@ -2893,7 +2738,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 Type as UNKNOWN so the default is to
* Process this port */
- list[i].fc4_type = FC4_TYPE_UNKNOWN;
+ list[i].fc4_type = 0;
/* Do not attempt GFF_ID if we are not FWI_2 capable */
if (!IS_FWI2_CAPABLE(ha))
@@ -2979,6 +2824,10 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (fcport->disc_state == DSC_DELETE_PEND)
return;
+ /* We will figure out what happened after AUTH completes */
+ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+ return;
+
if (ea->sp->gen2 != fcport->login_gen) {
/* target side must have changed it. */
ql_dbg(ql_dbg_disc, vha, 0x20d3,
@@ -3041,7 +2890,8 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
qla24xx_handle_gpsc_event(vha, &ea);
done:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -3053,6 +2903,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -3061,8 +2912,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gpsc_sp_done);
/* CT_IU preamble */
ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
@@ -3080,9 +2931,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla24xx_async_gpsc_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x205e,
"Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
sp->name, fcport->port_name, sp->handle,
@@ -3095,7 +2943,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -3144,7 +2993,8 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
break;
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
@@ -3243,7 +3093,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post new sess\n",
__func__, __LINE__, ea->port_name);
qla24xx_post_newsess_work(vha, &ea->id,
- ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ ea->port_name, NULL, NULL, 0);
}
}
}
@@ -3283,13 +3133,15 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
if (res) {
if (res == QLA_FUNCTION_TIMEOUT) {
qla24xx_post_gpnid_work(sp->vha, &ea.id);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
} else if (sp->gen1) {
/* There was another RSCN for this Nport ID */
qla24xx_post_gpnid_work(sp->vha, &ea.id);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
@@ -3310,7 +3162,8 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
@@ -3330,6 +3183,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
if (!vha->flags.online)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
@@ -3338,14 +3192,16 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->name = "gpnid";
sp->u.iocb_cmd.u.ctarg.id = *id;
sp->gen1 = 0;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnid_sp_done);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry(tsp, &vha->gpnid_list, elem) {
if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
tsp->gen1++;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
goto done;
}
}
@@ -3386,9 +3242,6 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_gpnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x2067,
"Async-%s hdl=%x ID %3phC.\n", sp->name,
sp->handle, &ct_req->req.port_id.port_id);
@@ -3418,25 +3271,18 @@ done_free_sp:
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- qla24xx_post_gnl_work(vha, fcport);
-}
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
- struct event_arg ea;
uint8_t fc4_scsi_feat;
uint8_t fc4_nvme_feat;
@@ -3444,10 +3290,10 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
"Async done-%s res %x ID %x. %8phC\n",
sp->name, res, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
- ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ sp->rc = res;
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
@@ -3468,68 +3314,129 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
}
}
- memset(&ea, 0, sizeof(ea));
- ea.sp = sp;
- ea.fcport = sp->fcport;
- ea.rc = res;
+ if (sp->flags & SRB_WAKEUP_ON_COMP) {
+ complete(sp->comp);
+ } else {
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
- qla24xx_handle_gffid_event(vha, &ea);
- sp->free(sp);
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /* we should not be here */
+ dump_stack();
+ }
}
/* Get FC4 Feature with Nport ID. */
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
srb_t *sp;
+ DECLARE_COMPLETION_ONSTACK(comp);
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ /* this routine has no handling for the no-wait case */
+ if (!vha->flags.online || !wait)
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gffid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gffid_sp_done);
+ sp->comp = &comp;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
+
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
/* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
- GFF_ID_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla24xx_async_gffid_sp_done;
-
- ql_dbg(ql_dbg_disc, vha, 0x2132,
- "Async-%s hdl=%x %8phC.\n", sp->name,
- sp->handle, fcport->port_name);
-
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
goto done_free_sp;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "Async-%s hdl=%x portid %06x\n",
+ sp->name, sp->handle, fcport->d_id.b24);
+ }
+
+ wait_for_completion(sp->comp);
+ rval = sp->rc;
- return rval;
done_free_sp:
- sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
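The synchronous conversion of qla24xx_async_gffid() follows a standard wait-on-completion shape; a condensed sketch with names from the hunk and error handling elided:

DECLARE_COMPLETION_ONSTACK(comp);

sp->comp = &comp;
sp->flags = SRB_WAKEUP_ON_COMP;	/* done() will complete(), not free */

rval = qla2x00_start_sp(sp);
if (rval == QLA_SUCCESS) {
	/* qla24xx_async_gffid_sp_done() fires complete(sp->comp) */
	wait_for_completion(sp->comp);
	rval = sp->rc;		/* result stashed by the done() callback */
}
/* caller then frees the CT DMA buffers and drops the INIT ref */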
@@ -3596,7 +3503,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
fcport->scan_state = QLA_FCPORT_SCAN;
- fcport->logout_on_delete = 0;
+ if (fcport->loop_id == FC_NO_LOOP_ID)
+ fcport->logout_on_delete = 0;
+ else
+ fcport->logout_on_delete = 1;
}
}
goto login_logout;
@@ -3647,7 +3557,17 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->fc4_type = rp->fc4type;
found = true;
+
+ if (fcport->scan_needed) {
+ if (NVME_PRIORITY(vha->hw, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
/*
* If device was not a fabric device before.
*/
@@ -3655,7 +3575,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
qla2x00_clear_loop_id(fcport);
fcport->flags |= FCF_FABRIC_DEVICE;
} else if (fcport->d_id.b24 != rp->id.b24 ||
- fcport->scan_needed) {
+ (fcport->scan_needed &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_NVME_INITIATOR)) {
qlt_schedule_sess_for_deletion(fcport);
}
fcport->d_id.b24 = rp->id.b24;
@@ -3689,19 +3611,32 @@ login_logout:
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ bool do_delete = false;
+
+ if (fcport->scan_needed &&
+ fcport->disc_state == DSC_LOGIN_PEND) {
+ /* The cable was disconnected after we sent
+ * a login. Delete the session to prevent a timeout.
+ */
+ fcport->logout_on_delete = 1;
+ do_delete = true;
+ }
+
fcport->scan_needed = 0;
- if ((qla_dual_mode_enabled(vha) ||
- qla_ini_mode_enabled(vha)) &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
+ if (((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) ||
+ do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
- fcport->logout_on_delete = 0;
+ continue;
- ql_dbg(ql_dbg_disc, vha, 0x20f0,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- fcport->port_name);
+ ql_log(ql_log_warn, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ fcport->tgt_link_down_time = 0;
qlt_schedule_sess_for_deletion(fcport);
continue;
}
@@ -3887,12 +3822,23 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
- del_timer(&sp->u.iocb_cmd.timer);
sp->rc = res;
if (res) {
unsigned long flags;
const char *name = sp->name;
+ if (res == QLA_OS_TIMER_EXPIRED) {
+ /* The switch is ignoring all commands.
+ * This might be zone-disable behavior.
+ * It means we hit the overall timeout
+ * (22s GPNFT + 44s abort window).
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: Switch Zone check please .\n",
+ name);
+ qla2x00_mark_all_devices_lost(vha);
+ }
+
/*
* We are in an Interrupt context, queue up this
* sp for GNNFT_DONE work. This will allow all
@@ -4000,9 +3946,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnft_gnnft_sp_done);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
@@ -4018,8 +3963,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
@@ -4046,8 +3989,8 @@ done_free_sp:
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
@@ -4099,9 +4042,12 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
- if (sp)
- sp->free(sp); /* should not happen */
+ if (sp) {
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ }
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp) {
spin_lock_irqsave(&vha->work_lock, flags);
@@ -4146,6 +4092,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
+ /* ref: INIT */
qla2x00_rel_sp(sp);
return rval;
}
@@ -4165,9 +4112,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->name = "gpnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnft_gnnft_sp_done);
rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
@@ -4182,8 +4128,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
@@ -4211,7 +4155,8 @@ done_free_sp:
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
@@ -4275,7 +4220,8 @@ static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
qla24xx_handle_gnnid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -4288,6 +4234,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
@@ -4297,9 +4244,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->name = "gnnid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gnnid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
@@ -4318,8 +4264,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gnnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
@@ -4331,7 +4275,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
@@ -4405,7 +4350,8 @@ static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
qla24xx_handle_gfpnid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -4417,6 +4363,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
@@ -4425,9 +4372,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->name = "gfpnid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gfpnid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
@@ -4446,8 +4392,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gfpnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
@@ -4460,7 +4404,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9e6b56527b25..e12db95de688 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_gbl.h"
@@ -35,7 +34,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
-static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
@@ -49,10 +47,20 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
+ scsi_qla_host_t *vha = sp->vha;
WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
+
+ /* ref: TMR */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
+ if (vha && qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9008,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
}
void qla2x00_sp_free(srb_t *sp)
@@ -63,6 +71,16 @@ void qla2x00_sp_free(srb_t *sp)
qla2x00_rel_sp(sp);
}
+void qla2xxx_rel_done_warning(srb_t *sp, int res)
+{
+ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
+}
+
+void qla2xxx_rel_free_warning(srb_t *sp)
+{
+ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
+}
+
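Taken together with the recurring ref: INIT and ref: TMR annotations, these stubs imply the srb lifetime convention below (inferred from the hunks, not stated in the patch):

/*
 * sp = qla2x00_get_sp(vha, fcport, ...);       holds the "INIT" ref
 * ...
 * kref_put(&sp->cmd_kref, qla2x00_sp_release); drops "INIT"
 *
 * The timer path holds a separate "TMR" reference, dropped in
 * qla2x00_sp_timeout(), so a firing timeout cannot free the srb
 * underneath the completion path. After release, sp->done/sp->free
 * presumably point at qla2xxx_rel_done_warning()/_rel_free_warning(),
 * turning a use-after-release into a WARN_ONCE rather than silent
 * memory corruption.
 */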
/* Asynchronous Login/Logout Routines -------------------------------------- */
unsigned long
@@ -117,22 +135,31 @@ static void qla24xx_abort_iocb_timeout(void *data)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- if (sp->cmd_sp)
+ if (sp->cmd_sp) {
+ /*
+ * This done() call is expected to release the
+ * original command's ref: INIT
+ */
sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+ }
- abt->u.abt.comp_status = CS_TIMEOUT;
+ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
struct srb_iocb *abt = &sp->u.iocb_cmd;
+ srb_t *orig_sp = sp->cmd_sp;
+
+ if (orig_sp)
+ qla_wait_nvme_release_cmd_kref(orig_sp);
- del_timer(&sp->u.iocb_cmd.timer);
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&abt->u.abt.comp);
else
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
@@ -141,12 +168,15 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
+ /* ref: INIT for ABTS command */
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
GFP_ATOMIC);
if (!sp)
- return rval;
+ return QLA_MEMORY_ALLOC_FAILED;
+ QLA_VHA_MARK_BUSY(vha, bail);
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
@@ -155,31 +185,31 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
/* FW can send 2 x ABTS's timeout/20s */
- qla2x00_init_timer(sp, 42);
+ qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
+ sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
- sp->done = qla24xx_abort_sp_done;
-
ql_dbg(ql_dbg_async, vha, 0x507c,
"Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
cmd_sp->type);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
if (wait) {
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
- sp->free(sp);
+ QLA_SUCCESS : QLA_ERR_FROM_FW;
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
return rval;
@@ -274,26 +304,13 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res)
ea.iop[0] = lio->u.logio.iop[0];
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
+ if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_plogi_done_event(vha, &ea);
}
- sp->free(sp);
-}
-
-static inline bool
-fcport_is_smaller(fc_port_t *fcport)
-{
- if (wwn_to_u64(fcport->port_name) <
- wwn_to_u64(fcport->vha->port_name))
- return true;
- else
- return false;
-}
-
-static inline bool
-fcport_is_bigger(fc_port_t *fcport)
-{
- return !fcport_is_smaller(fcport);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
@@ -312,6 +329,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
}
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -324,27 +342,33 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_login_sp_done);
lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_login_sp_done;
- if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
- else
- lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ } else {
+ if (vha->hw->flags.edif_enabled &&
+ DBELL_ACTIVE(vha)) {
+ lio->u.logio.flags |=
+ (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
+ } else {
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ }
+ }
if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+ rval = qla2x00_start_sp(sp);
+
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b24, fcport->login_retry,
+ lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");
- rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -354,7 +378,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -365,36 +390,33 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
sp->fcport->login_gen++;
- qlt_logo_completion_handler(sp->fcport, res);
- sp->free(sp);
+ qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_logout_sp_done;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_logout_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->port_name);
+ fcport->port_name, fcport->explicit_logout);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -402,7 +424,8 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
return rval;
@@ -428,29 +451,26 @@ static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_PRLO_CMD;
sp->name = "prlo";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_prlo_sp_done;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_prlo_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
@@ -464,7 +484,8 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
@@ -547,10 +568,12 @@ static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
ea.iop[1] = lio->u.logio.iop[1];
ea.fcport = sp->fcport;
ea.sp = sp;
+ if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_adisc_event(vha, &ea);
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
@@ -561,26 +584,34 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
+ if (IS_SESSION_DELETED(fcport)) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
+ }
+
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
fcport->flags |= FCF_ASYNC_SENT;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_ADISC_CMD;
sp->name = "adisc";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_adisc_sp_done);
- sp->done = qla2x00_async_adisc_sp_done;
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
+ lio = &sp->u.iocb_cmd;
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ }
ql_dbg(ql_dbg_disc, vha, 0x206f,
"Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
@@ -593,7 +624,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_post_async_adisc_work(vha, fcport, data);
@@ -679,11 +711,11 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, ea->rc,
fcport->login_gen, fcport->last_login_gen,
- fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
@@ -705,6 +737,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e0,
"%s %8phC login gen changed\n",
__func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
}
@@ -796,7 +829,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
default:
switch (current_login_state) {
case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ ql_dbg(ql_dbg_disc,
vha, 0x20e4, "%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
@@ -808,6 +841,13 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla2x00_post_async_adisc_work(vha, fcport,
data);
break;
+ case DSC_LS_PLOGI_COMP:
+ if (vha->hw->flags.edif_enabled) {
+ /* check to see if App support Secure */
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ break;
+ }
+ fallthrough;
case DSC_LS_PORT_UNAVAIL:
default:
if (fcport->loop_id == FC_NO_LOOP_ID) {
@@ -835,6 +875,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
*/
qla2x00_set_fcport_disc_state(fcport,
DSC_DELETED);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
break;
case DSC_LS_PRLI_COMP:
if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -847,6 +888,12 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
data);
break;
case DSC_LS_PLOGI_COMP:
+ if (vha->hw->flags.edif_enabled &&
+ DBELL_ACTIVE(vha)) {
+ /* check to see if App support secure or not */
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ break;
+ }
if (fcport_is_bigger(fcport)) {
/* local adapter is smaller */
if (fcport->loop_id != FC_NO_LOOP_ID)
@@ -857,7 +904,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport);
break;
}
- /* fall through */
+ fallthrough;
default:
if (fcport_is_smaller(fcport)) {
/* local adapter is bigger */
@@ -944,6 +991,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
break;
+ case ISP_CFG_NL:
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
default:
break;
}
@@ -967,8 +1017,6 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
- if (res == QLA_FUNCTION_TIMEOUT)
- return;
sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
@@ -992,7 +1040,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
ql_dbg(ql_dbg_disc, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
- __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ __func__, &wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
(loop_id & 0x7fff));
}
@@ -1006,8 +1054,8 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
- list_del_init(&fcport->gnl_entry);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del_init(&fcport->gnl_entry);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
@@ -1043,7 +1091,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
__func__, __LINE__, (u8 *)&wwn, id.b24);
wwnn = wwn_to_u64(e->node_name);
qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
- (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ (u8 *)&wwnn, NULL, 0);
}
}
@@ -1061,13 +1109,13 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *mbx;
int rval = QLA_FUNCTION_FAILED;
unsigned long flags;
u16 *mb;
@@ -1092,6 +1140,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -1100,10 +1149,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gnl_sp_done);
mb = sp->u.iocb_cmd.u.mbx.out_mb;
mb[0] = MBC_PORT_NODE_NAME_LIST;
@@ -1115,8 +1162,6 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
mb[8] = vha->gnl.size;
mb[9] = vha->vp_idx;
- sp->done = qla24xx_async_gnl_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x20da,
"Async-%s - OUT WWPN %8phC hndl %x\n",
sp->name, fcport->port_name, sp->handle);
@@ -1128,7 +1173,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
return rval;
@@ -1174,13 +1220,16 @@ done:
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
-static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
+ if (vha->host->active_mode == MODE_TARGET)
+ return QLA_FUNCTION_FAILED;
+
e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
if (!e)
return QLA_FUNCTION_FAILED;
@@ -1197,7 +1246,7 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x2129,
- "%s %8phC res %d \n", __func__,
+ "%s %8phC res %x\n", __func__,
sp->fcport->port_name, res);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -1210,11 +1259,15 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
ea.iop[0] = lio->u.logio.iop[0];
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
+ if (res == QLA_OS_TIMER_EXPIRED)
+ ea.data[0] = QLA_OS_TIMER_EXPIRED;
+ else if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_prli_done_event(vha, &ea);
}
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
@@ -1247,21 +1300,20 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_PRLI_CMD;
sp->name = "prli";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_prli_sp_done);
lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x211b,
- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
+ fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
+ NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -1273,7 +1325,8 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
@@ -1302,14 +1355,21 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
- fcport->loop_id == FC_NO_LOOP_ID) {
+ if (IS_SESSION_DELETED(fcport)) {
ql_log(ql_log_warn, vha, 0xffff,
- "%s: %8phC - not sending command.\n",
- __func__, fcport->port_name);
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
+ if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC online %d flags %x - not sending command.\n",
+ __func__, fcport->port_name, vha->flags.online, fcport->flags);
+ goto done;
+ }
+
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -1321,10 +1381,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gpdb_sp_done);
pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
@@ -1343,11 +1401,10 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
+ mbx = &sp->u.iocb_cmd;
mbx->u.mbx.in = (void *)pd;
mbx->u.mbx.in_dma = pd_dma;
- sp->done = qla24xx_async_gpdb_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x20dc,
"Async-%s %8phC hndl %x opt %x\n",
sp->name, fcport->port_name, sp->handle, opt);
@@ -1361,7 +1418,7 @@ done_free_sp:
if (pd)
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -1400,6 +1457,56 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
+static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rc = 0;
+
+ if (pd->secure_login) {
+ ql_dbg(ql_dbg_disc, vha, 0x104d,
+ "Secure Login established on %8phC\n",
+ fcport->port_name);
+ fcport->flags |= FCF_FCSP_DEVICE;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x104d,
+ "non-Secure Login %8phC",
+ fcport->port_name);
+ fcport->flags &= ~FCF_FCSP_DEVICE;
+ }
+ if (vha->hw->flags.edif_enabled) {
+ if (fcport->flags & FCF_FCSP_DEVICE) {
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
+ /* Start edif prli timer & ring doorbell for app */
+ fcport->edif.rx_sa_set = 0;
+ fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = 0;
+ fcport->edif.tx_sa_pending = 0;
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ fcport->d_id.b24);
+
+ if (DBELL_ACTIVE(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ef,
+ "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->edif.app_sess_online = 1;
+
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
+ fcport->d_id.b24, 0, fcport);
+ }
+
+ rc = 1;
+ } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
@@ -1413,12 +1520,15 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+ "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
fcport->port_name, fcport->disc_state, pd->current_login_state,
fcport->fc4_type, ea->rc);
- if (fcport->disc_state == DSC_DELETE_PEND)
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
+ __func__, __LINE__, fcport->port_name);
return;
+ }
if (NVME_TARGET(vha->hw, fcport))
ls = pd->current_login_state >> 4;
@@ -1435,6 +1545,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
} else if (ea->sp->gen1 != fcport->rscn_gen) {
qla_rscn_replay(fcport);
qlt_schedule_sess_for_deletion(fcport);
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+ __func__, __LINE__, fcport->port_name, ls);
return;
}
@@ -1442,8 +1554,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
break;
- case PDS_PLOGI_PENDING:
case PDS_PLOGI_COMPLETE:
+ if (qla_chk_secure_login(vha, fcport, pd)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+ __func__, __LINE__, fcport->port_name, ls);
+ return;
+ }
+ fallthrough;
+ case PDS_PLOGI_PENDING:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
/* Set discovery state back to GNL to Relogin attempt */
@@ -1452,6 +1570,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+ __func__, __LINE__, fcport->port_name, ls);
return;
case PDS_LOGO_PENDING:
case PDS_PORT_UNAVAILABLE:
@@ -1469,6 +1589,11 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
u8 login = 0;
int rc;
+ ql_dbg(ql_dbg_disc, vha, 0x307b,
+ "%s %8phC DS %d LS %d lid %d retries=%d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
+
if (qla_tgt_mode_enabled(vha))
return;
@@ -1520,13 +1645,15 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
u16 sec;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->login_gen, fcport->loop_id, fcport->scan_state);
+ fcport->login_gen, fcport->loop_id, fcport->scan_state,
+ fcport->fc4_type);
- if (fcport->scan_state != QLA_FCPORT_FOUND)
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_DELETE_PEND)
return 0;
if ((fcport->loop_id != FC_NO_LOOP_ID) &&
@@ -1547,7 +1674,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
return 0;
- if (fcport->flags & FCF_ASYNC_SENT) {
+ if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
}
@@ -1644,8 +1771,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_PEND:
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ if (vha->hw->flags.edif_enabled)
+ break;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post %s PRLI\n",
+ __func__, __LINE__, fcport->port_name,
+ NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
qla24xx_post_prli_work(vha, fcport);
+ }
break;
case DSC_UPD_FCPORT:
@@ -1695,10 +1830,76 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport;
unsigned long flags;
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (fcport) {
- fcport->scan_needed = 1;
- fcport->rscn_gen++;
+ switch (ea->id.b.rsvd_1) {
+ case RSCN_PORT_ADDR:
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (fcport) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2115,
+ "Delaying session delete for FCP2 portid=%06x %8phC ",
+ fcport->d_id.b24, fcport->port_name);
+ return;
+ }
+
+ if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
+ /*
+ * On an ipsec start by the remote port, the
+ * target port may use an RSCN to trigger the
+ * initiator to relogin. If the driver is
+ * already in the process of a relogin, ignore
+ * the RSCN and allow the current relogin to
+ * continue. This reduces thrashing of the
+ * connection.
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ /*
+ * If state = online, then set scan_needed=1 to do relogin.
+ * Otherwise we're already in the middle of a relogin
+ */
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ } else {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ }
+ break;
+ case RSCN_AREA_ADDR:
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+ continue;
+
+ if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ }
+ break;
+ case RSCN_DOM_ADDR:
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+ continue;
+
+ if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ }
+ break;
+ case RSCN_FAB_ADDR:
+ default:
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+ continue;
+
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ break;
}
spin_lock_irqsave(&vha->work_lock, flags);
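The new switch keys on the RSCN address format carried in ea->id.b.rsvd_1: a port-address RSCN matches one N_Port ID, an area RSCN compares the top 16 bits (mask 0xffff00), a domain RSCN the top 8 (mask 0xff0000), and a fabric-wide RSCN marks every port for rescan. A compact model of that matching:

/* Userspace model of the RSCN scope matching added above. */
#include <stdbool.h>
#include <stdio.h>

enum rscn_scope { RSCN_PORT, RSCN_AREA, RSCN_DOMAIN, RSCN_FABRIC };

static bool rscn_matches(enum rscn_scope scope, unsigned int rscn_id,
			 unsigned int port_id)
{
	switch (scope) {
	case RSCN_PORT:
		return rscn_id == port_id;
	case RSCN_AREA:
		return (rscn_id & 0xffff00) == (port_id & 0xffff00);
	case RSCN_DOMAIN:
		return (rscn_id & 0xff0000) == (port_id & 0xff0000);
	case RSCN_FABRIC:
	default:
		return true;	/* fabric-wide: every port rescans */
	}
}

int main(void)
{
	/* area RSCN for 0x0102xx hits 0x010203 but not 0x010303 */
	printf("%d %d\n",
	       rscn_matches(RSCN_AREA, 0x010200, 0x010203),
	       rscn_matches(RSCN_AREA, 0x010200, 0x010303));
	return 0;
}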
@@ -1740,6 +1941,13 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
struct event_arg *ea)
{
+ if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
+ vha->hw->flags.edif_enabled) {
+ /* check to see if App support Secure */
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ return;
+ }
+
/* for pure Target Mode, PRLI will not be initiated */
if (vha->host->active_mode == MODE_TARGET)
return;
@@ -1791,7 +1999,7 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
- tmf->u.tmf.comp_status = CS_TIMEOUT;
+ tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
tmf->u.tmf.data = QLA_FUNCTION_FAILED;
complete(&tmf->u.tmf.comp);
}
@@ -1812,23 +2020,24 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- tm_iocb = &sp->u.iocb_cmd;
+ QLA_VHA_MARK_BUSY(vha, bail);
sp->type = SRB_TM_CMD;
sp->name = "tmf";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
+ qla2x00_tmf_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
- tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ tm_iocb = &sp->u.iocb_cmd;
init_completion(&tm_iocb->u.tmf.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
-
tm_iocb->u.tmf.flags = flags;
tm_iocb->u.tmf.lun = lun;
- tm_iocb->u.tmf.data = tag;
- sp->done = qla2x00_tmf_sp_done;
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
@@ -1858,7 +2067,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
}
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
@@ -1884,7 +2094,7 @@ qla24xx_async_abort_command(srb_t *sp)
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
- return QLA_FUNCTION_FAILED;
+ return QLA_ERR_NOT_FOUND;
}
if (sp->type == SRB_FXIOCB_DCMD)
return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
@@ -1896,6 +2106,7 @@ qla24xx_async_abort_command(srb_t *sp)
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
+ struct srb *sp;
WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
ea->data[0]);
@@ -1916,33 +2127,58 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
- if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
- (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
- break;
- }
+ sp = ea->sp;
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC priority %s, fc4type %x prev try %s\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
+ "FCP" : "NVMe", ea->fcport->fc4_type,
+ (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
+ "NVME" : "FCP");
- /*
- * Retry PRLI with other FC-4 type if failure occurred on dual
- * FCP/NVMe port
- */
if (NVME_FCP_TARGET(ea->fcport)) {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post %s prli\n",
- __func__, __LINE__, ea->fcport->port_name,
- (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
- "NVMe" : "FCP");
- if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
- ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
+ ea->fcport->do_prli_nvme = 0;
else
- ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
+ ea->fcport->do_prli_nvme = 1;
+ } else {
+ ea->fcport->do_prli_nvme = 0;
}
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->keep_nport_handle = 0;
- ea->fcport->logout_on_delete = 1;
- qlt_schedule_sess_for_deletion(ea->fcport);
+ if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt ==
+ vha->hw->login_retry_count &&
+ ea->fcport->flags & FCF_FCSP_DEVICE) {
+ /* remote authentication app just started */
+ ea->fcport->n2n_link_reset_cnt = 0;
+ }
+
+ if (ea->fcport->n2n_link_reset_cnt <
+ vha->hw->login_retry_count) {
+ ea->fcport->n2n_link_reset_cnt++;
+ vha->relogin_jif = jiffies + 2 * HZ;
+ /*
+ * PRLI failed. Reset link to kick start
+ * state machine
+ */
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_log(ql_log_warn, vha, 0x2119,
+ "%s %d %8phC Unable to reconnect\n",
+ __func__, __LINE__,
+ ea->fcport->port_name);
+ }
+ } else {
+ /*
+ * switch connect. login failed. Take connection down
+ * and allow relogin to retrigger
+ */
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
}
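For N2N topology the default PRLI-failure path above now retries by resetting the link, bounded by the login retry budget, and restarts the count when a secure (FC-SP) peer's authentication application has just come up. A userspace model of that bounded retry:

/* Model of the bounded N2N retry; the caller would set N2N_LINK_RESET
 * and wake the DPC thread on a true return. */
#include <stdbool.h>
#include <stdio.h>

struct n2n_port {
	unsigned int link_reset_cnt;
	bool fcsp_device;	/* authentication (FC-SP) capable peer */
};

static bool n2n_retry_link_reset(struct n2n_port *p, unsigned int budget)
{
	/* remote authentication app just started: grant a fresh budget */
	if (p->link_reset_cnt == budget && p->fcsp_device)
		p->link_reset_cnt = 0;

	if (p->link_reset_cnt < budget) {
		p->link_reset_cnt++;
		return true;	/* reset link to kick-start the state machine */
	}
	return false;		/* give up: "Unable to reconnect" */
}

int main(void)
{
	struct n2n_port p = { .link_reset_cnt = 0, .fcsp_device = false };
	unsigned int tries = 0, budget = 3;

	while (n2n_retry_link_reset(&p, budget))
		tries++;
	printf("retried %u times\n", tries);	/* prints 3 */
	return 0;
}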
@@ -2003,38 +2239,45 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (NVME_TARGET(vha->hw, ea->fcport)) {
- ql_dbg(ql_dbg_disc, vha, 0x2117,
- "%s %d %8phC post prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_prli_work(vha, ea->fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id, ea->fcport->d_id.b24);
-
+ if (vha->hw->flags.edif_enabled) {
set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
- ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ } else {
+ if (NVME_TARGET(vha->hw, fcport)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, fcport->d_id.b24);
+
+ set_bit(fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ fcport->logout_on_delete = 1;
+ fcport->send_els_logo = 0;
+ fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ }
}
break;
case MBS_COMMAND_ERROR:
ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
- if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, ea->fcport, 1);
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
case MBS_LOOP_ID_USED:
/* data[1] = IO PARAM 1 = nport ID */
@@ -2219,10 +2462,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Check for secure flash support */
if (IS_QLA28XX(ha)) {
- if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
- ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
+ if (rd_reg_word(&reg->mailbox12) & BIT_0)
ha->flags.secure_adapter = 1;
- }
+ ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
+ (ha->flags.secure_adapter) ? "Yes" : "No");
}
@@ -2270,6 +2513,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0078,
"Verifying loaded RISC code...\n");
+ /* If smartsan enabled then require fdmi and rdp enabled */
+ if (ql2xsmartsan) {
+ ql2xfdmienable = 1;
+ ql2xrdpenable = 1;
+ }
+
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
rval = ha->isp_ops->chip_diag(vha);
if (rval)
@@ -2351,7 +2600,7 @@ qla2100_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2393,17 +2642,17 @@ qla2300_pci_config(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status);
/* Get the fb rev level */
ha->fb_rev = RD_FB_CMD_REG(ha, reg);
@@ -2412,13 +2661,13 @@ qla2300_pci_config(scsi_qla_host_t *vha)
pci_clear_mwi(ha->pdev);
/* Deselect FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x0);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x0);
+ rd_reg_word(&reg->ctrl_status);
/* Release RISC module. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
break;
udelay(10);
@@ -2433,7 +2682,7 @@ qla2300_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2477,7 +2726,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2581,36 +2830,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
if (!IS_QLA2100(ha)) {
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) &
+ if ((rd_reg_word(&reg->hccr) &
HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* FPM Soft Reset. */
- WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x100);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
/* Toggle Fpm Reset. */
if (!IS_QLA2200(ha)) {
- WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x0);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
}
/* Select frame buffer registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x10);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset frame buffer FIFOs. */
if (IS_QLA2200(ha)) {
@@ -2628,23 +2877,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
}
/* Select RISC module registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/* Wait for RISC to recover from reset. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -2655,7 +2904,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*/
udelay(20);
for (cnt = 30000; cnt; cnt--) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
udelay(100);
@@ -2664,13 +2913,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
udelay(10);
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
@@ -2688,8 +2937,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
/* Disable RISC pause on FPM parity error. */
if (!IS_QLA2100(ha)) {
- WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2714,6 +2963,49 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
return qla81xx_write_mpi_register(vha, mb);
}
+static int
+qla_chk_risc_recovery(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ __le16 __iomem *mbptr = &reg->mailbox0;
+ int i;
+ u16 mb[32];
+ int rc = QLA_SUCCESS;
+
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return rc;
+
+ /* this check is only valid after RISC reset */
+ mb[0] = rd_reg_word(mbptr);
+ mbptr++;
+ if (mb[0] == 0xf) {
+ rc = QLA_FUNCTION_FAILED;
+
+ for (i = 1; i < 32; i++) {
+ mb[i] = rd_reg_word(mbptr);
+ mbptr++;
+ }
+
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
+ mb[15]);
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
+ mb[23]);
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
+ mb[31]);
+ }
+ return rc;
+}
+
/**
* qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
* @vha: HA context
@@ -2730,36 +3022,37 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
uint16_t wd;
static int abts_cnt; /* ISP abort retry counts */
int rval = QLA_SUCCESS;
+ int print = 1;
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset RISC. */
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
"HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status),
- (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status),
+ (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2773,26 +3066,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
"HCCR: 0x%x, MailBox0 Status 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
/* Wait for soft-reset to complete. */
- RD_REG_DWORD(&reg->ctrl_status);
+ rd_reg_dword(&reg->ctrl_status);
for (cnt = 0; cnt < 60; cnt++) {
barrier();
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(5);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
"HCCR: 0x%x, Soft Reset status: 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -2811,31 +3104,40 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
}
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ mdelay(10);
+ rd_reg_dword(&reg->hccr);
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
- rval == QLA_SUCCESS; cnt--) {
+ wd = rd_reg_word(&reg->mailbox0);
+ for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
barrier();
- if (cnt)
- udelay(5);
- else
+ if (cnt) {
+ mdelay(1);
+ if (print && qla_chk_risc_recovery(vha))
+ print = 0;
+
+ wd = rd_reg_word(&reg->mailbox0);
+ } else {
rval = QLA_FUNCTION_TIMEOUT;
+
+ ql_log(ql_log_warn, vha, 0x015e,
+ "RISC reset timeout\n");
+ }
}
+
if (rval == QLA_SUCCESS)
set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
"Host Risc 0x%x, mailbox0 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_WORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
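The mailbox0 wait after releasing RISC reset changes from sixty 5 microsecond polls to up to three hundred 1 millisecond polls, with qla_chk_risc_recovery() consulted once for a diagnostic dump. The reworked loop has this shape (poll_ms() and read_mb0() are stand-ins for mdelay(1) and the register read):

/* Shape of the reworked bounded wait; helpers are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_mb0 = 5;	/* pretend firmware handshake */

static unsigned short read_mb0(void) { return fake_mb0 ? fake_mb0-- : 0; }
static void poll_ms(void) { /* mdelay(1) in the driver */ }
static bool risc_recovery_failed(void) { return false; }

int main(void)
{
	int cnt, rval = 0, print = 1;
	unsigned short wd = read_mb0();

	for (cnt = 300; wd != 0 && rval == 0; cnt--) {
		if (cnt) {
			poll_ms();
			if (print && risc_recovery_failed())
				print = 0;	/* dump mailboxes only once */
			wd = read_mb0();
		} else {
			rval = -1;		/* QLA_FUNCTION_TIMEOUT */
			fprintf(stderr, "RISC reset timeout\n");
		}
	}
	printf("rval=%d wd=%u\n", rval, wd);
	return 0;
}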
@@ -2854,9 +3156,8 @@ qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
-
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
static void
@@ -2864,8 +3165,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
static void
@@ -2881,7 +3182,7 @@ qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
vha->hw->pdev->subsystem_device != 0x0240)
return;
- WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
udelay(100);
attempt:
@@ -2983,7 +3284,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/*
* We need to have a delay here since the card will not respond while
@@ -2993,7 +3294,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
data = qla2x00_debounce_register(&reg->ctrl_status);
for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
udelay(5);
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
barrier();
}
@@ -3004,8 +3305,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
"Reset register cleared by chip reset.\n");
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
/* Workaround for QLA2312 PCI parity error */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -3219,6 +3520,14 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
+ if (ha->fw_dump) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
+ return;
+ }
+
+ ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -3229,7 +3538,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA83XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -3241,8 +3550,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
- !IS_QLA28XX(ha))
+ if (!IS_QLA83XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
* Allocate maximum buffer size for all queues - Q0.
@@ -3283,6 +3591,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
j, fwdt->dump_size);
dump_size += fwdt->dump_size;
}
+ /* Add space for spare MPI fw dump. */
+ dump_size += ha->fwdt[1].dump_size;
} else {
req_q_size = req->length * sizeof(request_t);
rsp_q_size = rsp->length * sizeof(response_t);
@@ -3322,8 +3632,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
"Re-Allocated (%d KB) and save firmware dump.\n",
dump_size / 1024);
} else {
- if (ha->fw_dump)
- vfree(ha->fw_dump);
+ vfree(ha->fw_dump);
ha->fw_dump = fw_dump;
ha->fw_dump_len = ha->fw_dump_alloc_len =
@@ -3333,6 +3642,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
dump_size / 1024);
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->mpi_fw_dump = (char *)fw_dump +
+ ha->fwdt[1].dump_size;
mutex_unlock(&ha->optrom_mutex);
return;
}
@@ -3544,53 +3855,100 @@ static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
}
-/*
- * Return Code:
- * QLA_SUCCESS: no action
- * QLA_INTERFACE_ERROR: SFP is not there.
- * QLA_FUNCTION_FAILED: detected New SFP
+/**
+ * qla24xx_detect_sfp()
+ *
+ * @vha: adapter state pointer.
+ *
+ * @return
+ * 0 -- Configure firmware to use short-range settings -- normal
+ * buffer-to-buffer credits.
+ *
+ * 1 -- Configure firmware to use long-range settings -- extra
+ * buffer-to-buffer credits should be allocated with
+ * ha->lr_distance containing distance settings from NVRAM or SFP
+ * (if supported).
*/
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
- int rc = QLA_SUCCESS;
+ int rc, used_nvram;
struct sff_8247_a0 *a;
struct qla_hw_data *ha = vha->hw;
-
- if (!AUTO_DETECT_SFP_SUPPORT(vha))
+ struct nvram_81xx *nv = ha->nvram;
+#define LR_DISTANCE_UNKNOWN 2
+ static const char * const types[] = { "Short", "Long" };
+ static const char * const lengths[] = { "(10km)", "(5km)", "" };
+ u8 ll = 0;
+
+ /* Seed with NVRAM settings. */
+ used_nvram = 0;
+ ha->flags.lr_detected = 0;
+ if (IS_BPM_RANGE_CAPABLE(ha) &&
+ (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
+ used_nvram = 1;
+ ha->flags.lr_detected = 1;
+ ha->lr_distance =
+ (nv->enhanced_features >> LR_DIST_NV_POS)
+ & LR_DIST_NV_MASK;
+ }
+
+ if (!IS_BPM_ENABLED(vha))
goto out;
-
+ /* Determine SR/LR capabilities of SFP/Transceiver. */
rc = qla2x00_read_sfp_dev(vha, NULL, 0);
if (rc)
goto out;
+ used_nvram = 0;
a = (struct sff_8247_a0 *)vha->hw->sfp_data;
qla2xxx_print_sfp_info(vha);
- if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
- /* long range */
- ha->flags.detected_lr_sfp = 1;
+ ha->flags.lr_detected = 0;
+ ll = a->fc_ll_cc7;
+ if (ll & FC_LL_VL || ll & FC_LL_L) {
+ /* Long range, track length. */
+ ha->flags.lr_detected = 1;
if (a->length_km > 5 || a->length_100m > 50)
- ha->long_range_distance = LR_DISTANCE_10K;
+ ha->lr_distance = LR_DISTANCE_10K;
else
- ha->long_range_distance = LR_DISTANCE_5K;
-
- if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
- ql_dbg(ql_dbg_async, vha, 0x507b,
- "Detected Long Range SFP.\n");
- } else {
- /* short range */
- ha->flags.detected_lr_sfp = 0;
- if (ha->flags.using_lr_setting)
- ql_dbg(ql_dbg_async, vha, 0x5084,
- "Detected Short Range SFP.\n");
+ ha->lr_distance = LR_DISTANCE_5K;
}
- if (!vha->flags.init_done)
- rc = QLA_SUCCESS;
out:
- return rc;
+ ql_dbg(ql_dbg_async, vha, 0x507b,
+ "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
+ types[ha->flags.lr_detected],
+ ha->flags.lr_detected ? lengths[ha->lr_distance] :
+ lengths[LR_DISTANCE_UNKNOWN],
+ used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
+ return ha->flags.lr_detected;
+}
+
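The rewritten detection first seeds long-range state from the NVRAM enhanced_features bits and then, when an SFP is readable, classifies the module from its SFF-8472 link-length bytes: anything advertising more than 5 km (length_km > 5 or length_100m > 50) is treated as a 10 km part. A small model of that classification (the enum values below are placeholders, not the driver's):

/* Model of the SFP long-range classification above. */
#include <stdio.h>

enum { LR_5K = 0, LR_10K = 1 };		/* placeholder values */

static int classify_lr(unsigned char length_km, unsigned char length_100m)
{
	if (length_km > 5 || length_100m > 50)	/* > 5 km either way */
		return LR_10K;
	return LR_5K;
}

int main(void)
{
	printf("%d\n", classify_lr(10, 0));	/* 10 km module -> LR_10K */
	printf("%d\n", classify_lr(0, 30));	/*  3 km module -> LR_5K  */
	return 0;
}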
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u16 i, num_qps;
+ u32 limit;
+ struct qla_hw_data *ha = vha->hw;
+
+ num_qps = ha->num_qpairs + 1;
+ limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ ha->base_qpair->fwres.iocbs_limit = limit;
+ ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+ ha->base_qpair->fwres.iocbs_used = 0;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i]) {
+ ha->queue_pair_map[i]->fwres.iocbs_total =
+ ha->orig_fw_iocb_count;
+ ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+ ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+ limit / num_qps;
+ ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+ }
+ }
}
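qla_init_iocb_limit() caps firmware IOCB consumption at QLA_IOCB_PCT_LIMIT percent of the firmware-reported total and gives each queue pair (including the base queue pair, hence num_qpairs + 1) an equal slice of that cap. Worked numbers, assuming a total of 2048 IOCBs and an 85 percent limit (the constant's value is an assumption for illustration):

/* Worked example of the budget split in qla_init_iocb_limit(). */
#include <stdio.h>

int main(void)
{
	unsigned int total = 2048;		/* orig_fw_iocb_count */
	unsigned int pct = 85;			/* QLA_IOCB_PCT_LIMIT (assumed) */
	unsigned int num_qps = 4 + 1;		/* qpairs + base qpair */
	unsigned int limit = total * pct / 100;	/* 1740 */

	printf("global limit %u, per-qp limit %u\n", limit, limit / num_qps);
	return 0;
}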
/**
@@ -3608,6 +3966,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
uint16_t fw_major_version;
+ int done_once = 0;
if (IS_P3P_TYPE(ha)) {
rval = ha->isp_ops->load_risc(vha, &srisc_address);
@@ -3621,13 +3980,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
qla81xx_mpi_sync(vha);
+execute_fw_with_lr:
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
@@ -3649,11 +4009,17 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
- qla24xx_detect_sfp(vha);
+ /* Enable BPM support? */
+ if (!done_once++ && qla24xx_detect_sfp(vha)) {
+ ql_dbg(ql_dbg_init, vha, 0x00ca,
+ "Re-starting firmware -- BPM.\n");
+ /* Best-effort - re-init. */
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->chip_diag(vha);
+ goto execute_fw_with_lr;
+ }
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) &&
- (ha->zio_mode == QLA_ZIO_MODE_6))
+ if (IS_ZIO_THRESHOLD_CAPABLE(ha))
qla27xx_set_zio_threshold(vha,
ha->last_zio_threshold);
@@ -3684,6 +4050,7 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha);
+ qla_init_iocb_limit(vha);
/*
* Allocate the array of outstanding commands
@@ -3708,6 +4075,11 @@ enable_82xx_npiv:
"ISP Firmware failed checksum.\n");
goto failed;
}
+
+ /* Enable PUREX PASSTHRU */
+ if (ql2xrdpenable || ha->flags.scm_supported_f ||
+ ha->flags.edif_enabled)
+ qla25xx_set_els_cmds_supported(vha);
} else
goto failed;
@@ -3716,11 +4088,11 @@ enable_82xx_npiv:
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2300(ha))
/* SRAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
else
/* SRAM, Instruction RAM and GP RAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3739,8 +4111,7 @@ enable_82xx_npiv:
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
@@ -3891,7 +4262,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
}
/* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio &&
+ if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
(IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha))
@@ -3910,15 +4281,36 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
qla_dual_mode_enabled(vha))
ha->fw_options[2] |= BIT_4;
else
- ha->fw_options[2] &= ~BIT_4;
+ ha->fw_options[2] &= ~(BIT_4);
/* Reserve 1/2 of emergency exchanges for ELS.*/
if (qla2xuseresexchforels)
ha->fw_options[2] |= BIT_8;
else
ha->fw_options[2] &= ~BIT_8;
+
+ /*
+ * N2N: set Secure=1 for PLOGI ACC and
+ * fw shall not send PRLI after PLOGI Acc
+ */
+ if (ha->flags.edif_enabled &&
+ DBELL_ACTIVE(vha)) {
+ ha->fw_options[3] |= BIT_15;
+ ha->flags.n2n_fw_acc_sec = 1;
+ } else {
+ ha->fw_options[3] &= ~BIT_15;
+ ha->flags.n2n_fw_acc_sec = 0;
+ }
}
+ if (ql2xrdpenable || ha->flags.scm_supported_f ||
+ ha->flags.edif_enabled)
+ ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
+
+ /* Enable Async 8130/8131 events -- transceiver insertion/removal */
+ if (IS_BPM_RANGE_CAPABLE(ha))
+ ha->fw_options[3] |= BIT_10;
+
ql_dbg(ql_dbg_init, vha, 0x00e8,
"%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
__func__, ha->fw_options[1], ha->fw_options[2],
@@ -3957,11 +4349,11 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
- RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
+ rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
}
void
@@ -4023,15 +4415,15 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
}
icb->firmware_options_2 |= cpu_to_le32(BIT_23);
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
} else {
- WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp24.req_q_in, 0);
+ wrt_reg_dword(&reg->isp24.req_q_out, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
}
qlt_24xx_config_rings(vha);
@@ -4041,11 +4433,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
ql_dbg(ql_dbg_init, vha, 0x00fd,
"Speed set by user : %s Gbps \n",
qla2x00_get_link_speed_str(ha, ha->set_data_rate));
- icb->firmware_options_3 = (ha->set_data_rate << 13);
+ icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
}
/* PCI posting */
- RD_REG_DWORD(&ioreg->hccr);
+ rd_reg_word(&ioreg->hccr);
}
/**
@@ -4076,7 +4468,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req || !test_bit(que, ha->req_qid_map))
continue;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
*req->out_ptr = 0;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
@@ -4093,7 +4485,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rsp = ha->rsp_q_map[que];
if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
/* Initialize response queue entries */
if (IS_QLAFX00(ha))
@@ -4111,8 +4503,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
-
if (IS_QLAFX00(ha)) {
rval = qlafx00_init_firmware(vha, ha->init_cb_size);
goto next_check;
@@ -4121,6 +4511,12 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
+ le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
+ le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
+ le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
+
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -4132,16 +4528,24 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->cur_fw_xcb_count);
ha->flags.dport_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_7) != 0;
ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
(ha->flags.dport_enabled) ? "enabled" : "disabled");
/* FA-WWPN Status */
ha->flags.fawwpn_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
+ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
}
+ /* ELS pass through payload is limited by frame size. */
+ if (ha->flags.edif_enabled)
+ mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
@@ -4176,8 +4580,6 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
if (IS_QLAFX00(vha->hw))
return qlafx00_fw_ready(vha);
- rval = QLA_SUCCESS;
-
/* Time to wait for loop down */
if (IS_P3P_TYPE(ha))
min_wait = 30;
@@ -4353,11 +4755,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
/* initialize */
ha->min_external_loopid = SNS_FIRST_LOOP_ID;
ha->operating_mode = LOOP;
- ha->switch_cap = 0;
switch (topo) {
case 0:
ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
+ ha->switch_cap = 0;
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -4371,6 +4773,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
case 2:
ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
+ ha->switch_cap = 0;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_N;
strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -4387,6 +4790,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
default:
ql_dbg(ql_dbg_disc, vha, 0x200f,
"HBA in unknown topology %x, using NL.\n", topo);
+ ha->switch_cap = 0;
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -4399,7 +4803,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!(topo == 2 && ha->flags.n2n_bigger))
+ if (vha->hw->flags.edif_enabled) {
+ if (topo != 2)
+ qlt_update_host_map(vha, id);
+ } else if (!(topo == 2 && ha->flags.n2n_bigger))
qlt_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4516,7 +4923,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
ha->nvram_size = sizeof(*nv);
ha->nvram_base = 0;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
- if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+ if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
@@ -4551,18 +4958,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->special_options[1] = BIT_7;
} else if (IS_QLA2200(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = 1024;
+ nv->frame_payload_size = cpu_to_le16(1024);
} else if (IS_QLA2100(ha)) {
nv->firmware_options[0] = BIT_3 | BIT_1;
nv->firmware_options[1] = BIT_5;
- nv->frame_payload_size = 1024;
+ nv->frame_payload_size = cpu_to_le16(1024);
}
nv->max_iocb_allocation = cpu_to_le16(256);
@@ -4873,6 +5280,9 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->login_retry = vha->hw->login_retry_count;
fcport->chip_reset = vha->hw->base_qpair->chip_reset;
fcport->logout_on_delete = 1;
+ fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ fcport->tgt_short_link_down_cnt = 0;
+ fcport->dev_loss_tmo = 0;
if (!fcport->ct_desc.ct_sns) {
ql_log(ql_log_warn, vha, 0xd049,
@@ -4887,6 +5297,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->sess_cmd_list);
+ spin_lock_init(&fcport->sess_cmd_lock);
+
+ spin_lock_init(&fcport->edif.sa_list_lock);
+ INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
+ INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
+
+ spin_lock_init(&fcport->edif.indx_list_lock);
+ INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
+
return fcport;
}
@@ -4900,11 +5320,39 @@ qla2x00_free_fcport(fc_port_t *fcport)
fcport->ct_desc.ct_sns = NULL;
}
+
+ qla_edif_flush_sa_ctl_lists(fcport);
list_del(&fcport->list);
qla2x00_clear_loop_id(fcport);
+
+ qla_edif_list_del(fcport);
+
kfree(fcport);
}
+static void qla_get_login_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ u32 *bp, sz;
+ __be32 *q;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ ha->init_cb, sz);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ return;
+ }
+ q = (__be32 *)&ha->plogi_els_payld.fl_csp;
+
+ bp = (uint32_t *)ha->init_cb;
+ cpu_to_be32_array(q, bp, sz / 4);
+ ha->flags.plogi_template_valid = 1;
+}
+
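qla_get_login_template() replaces the old open-coded word swap (removed further down in this patch) with cpu_to_be32_array(), converting the firmware's PLOGI template to the big-endian layout Fibre Channel uses on the wire. A model of that conversion; the unconditional swap below assumes a little-endian host, whereas the kernel helper is a no-op on big-endian machines:

/* Model of the word-wise big-endian conversion (little-endian host). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t to_be32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static void to_be32_array(uint32_t *dst, const uint32_t *src, size_t words)
{
	for (size_t i = 0; i < words; i++)
		dst[i] = to_be32(src[i]);	/* FC payloads are big-endian */
}

int main(void)
{
	uint32_t cpu_words[2] = { 0x11223344, 0xaabbccdd };
	uint32_t wire_words[2];

	to_be32_array(wire_words, cpu_words, 2);
	printf("%08x %08x\n", wire_words[0], wire_words[1]);
	return 0;
}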
/*
* qla2x00_configure_loop
* Updates Fibre Channel Device Database with what is actually on loop.
@@ -4948,6 +5396,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
clear_bit(RSCN_UPDATE, &vha->dpc_flags);
qla2x00_get_data_rate(vha);
+ qla_get_login_template(vha);
/* Determine what we need to do */
if ((ha->current_topology == ISP_CFG_FL ||
@@ -4997,6 +5446,14 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
ha->flags.fw_init_done = 1;
/*
+ * use link up to wake up app to get ready for
+ * authentication.
+ */
+ if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
+ ha->link_data_rate);
+
+ /*
* Process any ATIO queue entries that came in
* while we weren't online.
*/
@@ -5015,7 +5472,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"%s *** FAILED ***.\n", __func__);
} else {
ql_dbg(ql_dbg_disc, vha, 0x206b,
- "%s: exiting normally.\n", __func__);
+ "%s: exiting normally. local port wwpn %8phN id %06x)\n",
+ __func__, vha->port_name, vha->d_id.b24);
}
/* Restore state if a resync event occurred during processing */
@@ -5030,6 +5488,50 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ fc_port_t *fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
+
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->n2n_flag) {
+ qla24xx_fcport_handle_login(vha, fcport);
+ return QLA_SUCCESS;
+ }
+ }
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5047,7 +5549,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
int found_devs;
int found;
fc_port_t *fcport, *new_fcport;
-
uint16_t index;
uint16_t entries;
struct gid_list_info *gid;
@@ -5057,47 +5558,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
unsigned long flags;
/* Initiate N2N login. */
- if (N2N_TOPO(ha)) {
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, i, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha,
- ha->init_cb_dma, (void *)ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- bp = (uint32_t *)ha->init_cb;
- for (i = 0; i < sz/4 ; i++, bp++)
- *bp = cpu_to_be32(*bp);
-
- memcpy(&ha->plogi_els_payld.data,
- (void *)ha->init_cb,
- sizeof(ha->plogi_els_payld.data));
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
- goto skip_login;
- }
- }
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->n2n_flag) {
- qla24xx_fcport_handle_login(vha, fcport);
- return QLA_SUCCESS;
- }
- }
-skip_login:
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- }
- }
+ if (N2N_TOPO(ha))
+ return qla2x00_configure_n2n_loop(vha);
found_devs = 0;
new_fcport = NULL;
@@ -5121,6 +5583,19 @@ skip_login:
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5209,6 +5684,13 @@ skip_login:
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
fcport->scan_state = QLA_FCPORT_FOUND;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x2135,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
found++;
break;
}
@@ -5357,6 +5839,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+ fcport->dev_loss_tmo = rport->dev_loss_tmo;
rport->supported_classes = fcport->supported_classes;
@@ -5372,13 +5855,14 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (fcport->port_type & FCT_NVME_DISCOVERY)
rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
+ fc_remote_port_rolechg(rport, rport_ids.roles);
+
ql_dbg(ql_dbg_disc, vha, 0x20ee,
- "%s %8phN. rport %p is %s mode\n",
- __func__, fcport->port_name, rport,
+ "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
+ __func__, fcport->port_name, vha->host_no,
+ rport->scsi_target_id, rport,
(fcport->port_type == FCT_TARGET) ? "tgt" :
((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
-
- fc_remote_port_rolechg(rport, rport_ids.roles);
}
/*
@@ -5415,6 +5899,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->logout_on_delete = 1;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
+ if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
+ fcport->tgt_short_link_down_cnt++;
+ fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ }
+
switch (vha->hw->current_topology) {
case ISP_CFG_N:
case ISP_CFG_NL:
@@ -5426,12 +5915,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
- if (NVME_TARGET(vha->hw, fcport)) {
- qla_nvme_register_remote(vha, fcport);
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- return;
- }
+ qla2x00_dfs_create_rport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
@@ -5454,6 +5938,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ if (NVME_TARGET(vha->hw, fcport))
+ qla_nvme_register_remote(vha, fcport);
+
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
@@ -5487,6 +5974,10 @@ void qla_register_fcport_fn(struct work_struct *work)
qla2x00_update_fcport(fcport->vha, fcport);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
+ "%s rscn gen %d/%d next DS %d\n", __func__,
+ rscn_gen, fcport->rscn_gen, fcport->next_disc_state);
+
if (rscn_gen != fcport->rscn_gen) {
/* RSCN(s) came in while registration */
switch (fcport->next_disc_state) {
@@ -5541,24 +6032,22 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
+ rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_disc, vha, 0x20ff,
+ "Failed to get Fabric Port Name\n");
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
rval = qla2x00_send_change_request(vha, 0x3, 0);
if (rval != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x121,
- "Failed to enable receiving of RSCN requests: 0x%x.\n",
- rval);
+ "Failed to enable receiving of RSCN requests: 0x%x.\n",
+ rval);
}
-
do {
qla2x00_mgmt_svr_login(vha);
- /* FDMI support. */
- if (ql2xfdmienable &&
- test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
- qla2x00_fdmi_register(vha);
-
/* Ensure we are logged into the SNS. */
loop_id = NPH_SNS_LID(ha);
rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
@@ -5570,6 +6059,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return rval;
}
+
+ /* FDMI support. */
+ if (ql2xfdmienable &&
+ test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+ qla2x00_fdmi_register(vha);
+
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
@@ -5812,7 +6307,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
(!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
- new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
+ new_fcport->fc4_type != 0))
continue;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -5843,6 +6338,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
+ if (fcport->login_retry == 0)
+ fcport->login_retry =
+ vha->hw->login_retry_count;
/*
* If device was not a fabric device before.
*/
@@ -5880,7 +6378,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (NVME_TARGET(vha->hw, fcport)) {
+ if (found && NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
vha->fcport_count--;
@@ -6249,13 +6747,13 @@ void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
fc_port_t *fcport;
- struct scsi_qla_host *vha;
+ struct scsi_qla_host *vha, *tvp;
struct qla_hw_data *ha = base_vha->hw;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
/* Go with deferred removal of rport references. */
- list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+ list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
atomic_inc(&vha->vref_count);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->drport &&
@@ -6364,29 +6862,6 @@ __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-static const char *
-qla83xx_dev_state_to_string(uint32_t dev_state)
-{
- switch (dev_state) {
- case QLA8XXX_DEV_COLD:
- return "COLD/RE-INIT";
- case QLA8XXX_DEV_INITIALIZING:
- return "INITIALIZING";
- case QLA8XXX_DEV_READY:
- return "READY";
- case QLA8XXX_DEV_NEED_RESET:
- return "NEED RESET";
- case QLA8XXX_DEV_NEED_QUIESCENT:
- return "NEED QUIESCENT";
- case QLA8XXX_DEV_FAILED:
- return "FAILED";
- case QLA8XXX_DEV_QUIESCENT:
- return "QUIESCENT";
- default:
- return "Unknown";
- }
-}
-
/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
@@ -6440,9 +6915,8 @@ qla83xx_initiating_reset(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
} else {
- const char *state = qla83xx_dev_state_to_string(dev_state);
-
- ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+ ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
+ qdev_state(dev_state));
/* SV: XXX: Is timeout required here? */
/* Wait for IDC state change READY -> NEED_RESET */
@@ -6600,7 +7074,8 @@ void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
+ unsigned long flags;
ql_dbg(ql_dbg_dpc, vha, 0x401d,
"Quiescing I/O - ha=%p.\n", ha);
@@ -6609,8 +7084,18 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha);
- list_for_each_entry(vp, &ha->vp_list, list)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_mark_all_devices_lost(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -6625,7 +7110,7 @@ void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
unsigned long flags;
fc_port_t *fcport;
u16 i;
@@ -6656,14 +7141,20 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
ha->flags.fw_init_done = 0;
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
+ ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
+ ha->base_qpair->prev_completion_cnt = 0;
for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
+ if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
+ ha->queue_pair_map[i]->cmd_cnt =
+ ha->queue_pair_map[i]->cmd_completion_cnt = 0;
+ ha->base_qpair->prev_completion_cnt = 0;
+ }
}
/* purge MBox commands */
@@ -6689,7 +7180,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -6711,7 +7202,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
fcport->scan_state = 0;
}
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -6723,22 +7214,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!ha->flags.eeh_busy) {
- /* Make sure for ISP 82XX IO DMA is complete */
- if (IS_P3P_TYPE(ha)) {
- qla82xx_chip_reset_cleanup(vha);
- ql_log(ql_log_info, vha, 0x00b4,
- "Done chip reset cleanup.\n");
-
- /* Done waiting for pending commands.
- * Reset the online flag.
- */
- vha->flags.online = 0;
- }
+ /* Make sure for ISP 82XX IO DMA is complete */
+ if (IS_P3P_TYPE(ha)) {
+ qla82xx_chip_reset_cleanup(vha);
+ ql_log(ql_log_info, vha, 0x00b4,
+ "Done chip reset cleanup.\n");
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ /* Done waiting for pending commands. Reset online flag */
+ vha->flags.online = 0;
}
+
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
/* memory barrier */
wmb();
}
@@ -6759,13 +7246,25 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
int rval;
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
struct req_que *req = ha->req_q_map[0];
unsigned long flags;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
+
+ if (vha->hw->flags.port_isolated)
+ return status;
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "ISP Abort - ISP reg disconnect, exiting.\n");
+ return status;
+ }
+
if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
ha->flags.chip_reset_done = 1;
vha->flags.online = 1;
@@ -6795,7 +7294,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return 0;
break;
case QLA2XXX_INI_MODE_DUAL:
- if (!qla_dual_mode_enabled(vha))
+ if (!qla_dual_mode_enabled(vha) &&
+ !qla_ini_mode_enabled(vha))
return 0;
break;
case QLA2XXX_INI_MODE_ENABLED:
@@ -6805,8 +7305,18 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ha->isp_ops->get_flash_version(vha, req->ring);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
+ return status;
+ }
ha->isp_ops->nvram_config(vha);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
+ return status;
+ }
if (!qla2x00_restart_isp(vha)) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
@@ -6887,11 +7397,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
}
+ if (vha->hw->flags.port_isolated) {
+ qla2x00_abort_isp_cleanup(vha);
+ return status;
+ }
+
if (!status) {
ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
qla2x00_configure_hba(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -6932,36 +7447,41 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
- int status = 0;
+ int status;
struct qla_hw_data *ha = vha->hw;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
vha->flags.online = 0;
status = ha->isp_ops->chip_diag(vha);
- if (!status)
- status = qla2x00_setup_chip(vha);
+ if (status)
+ return status;
+ status = qla2x00_setup_chip(vha);
+ if (status)
+ return status;
}
- if (!status && !(status = qla2x00_init_rings(vha))) {
- clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
- ha->flags.chip_reset_done = 1;
+ status = qla2x00_init_rings(vha);
+ if (status)
+ return status;
- /* Initialize the queues in use */
- qla25xx_init_queues(ha);
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ ha->flags.chip_reset_done = 1;
- status = qla2x00_fw_ready(vha);
- if (!status) {
- /* Issue a marker after FW becomes ready. */
- qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- }
+ /* Initialize the queues in use */
+ qla25xx_init_queues(ha);
+ status = qla2x00_fw_ready(vha);
+ if (status) {
/* if no cable then assume it's good */
- if ((vha->device_flags & DFLG_NO_CABLE))
- status = 0;
+ return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
}
- return (status);
+
+ /* Issue a marker after FW becomes ready. */
+ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+
+ return 0;
}
static int
@@ -7025,10 +7545,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -7040,25 +7560,24 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- int rval = QLA_SUCCESS;
if (IS_P3P_TYPE(ha))
- return rval;
+ return QLA_SUCCESS;
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
- return rval;
+ return QLA_SUCCESS;
}
/* On sparc systems, obtain port and node WWN from firmware
@@ -7090,7 +7609,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_24xx *icb;
struct nvram_24xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -7118,7 +7637,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -7146,7 +7665,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->hard_address = cpu_to_le16(124);
@@ -7314,7 +7833,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Enable ZIO. */
if (!vha->flags.init_done) {
@@ -7382,7 +7901,7 @@ qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
- uint32_t *p = (void *)image_status;
+ __le32 *p = (__force __le32 *)image_status;
uint n = sizeof(*image_status) / sizeof(*p);
uint32_t sum = 0;
@@ -7414,6 +7933,9 @@ qla28xx_component_status(
active_regions->aux.npiv_config_2_3 =
qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+
+ active_regions->aux.nvme_params =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}
static int
@@ -7445,7 +7967,7 @@ qla28xx_get_aux_images(
goto check_sec_image;
}
- qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
ha->flt_region_aux_img_status_pri,
sizeof(pri_aux_image_status) >> 2);
qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
@@ -7478,7 +8000,7 @@ check_sec_image:
goto check_valid_image;
}
- qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
ha->flt_region_aux_img_status_sec,
sizeof(sec_aux_image_status) >> 2);
qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
@@ -7522,11 +8044,12 @@ check_valid_image:
}
ql_dbg(ql_dbg_init, vha, 0x018f,
- "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+ "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
active_regions->aux.board_config,
active_regions->aux.vpd_nvram,
active_regions->aux.npiv_config_0_1,
- active_regions->aux.npiv_config_2_3);
+ active_regions->aux.npiv_config_2_3,
+ active_regions->aux.nvme_params);
}
void
@@ -7543,7 +8066,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_sec_image;
}
- if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+ if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
QLA_SUCCESS) {
WARN_ON_ONCE(true);
@@ -7650,7 +8173,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_dbg(ql_dbg_init, vha, 0x008b,
"FW: Loading firmware from flash (%x).\n", faddr);
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c,
@@ -7663,18 +8186,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
ql_dbg(ql_dbg_init, vha, 0x008d,
"-> Loading segment %u...\n", j);
qla24xx_read_flash_data(vha, dcode, faddr, 10);
- risc_addr = be32_to_cpu(dcode[2]);
- risc_size = be32_to_cpu(dcode[3]);
+ risc_addr = be32_to_cpu((__force __be32)dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[3]);
if (!*srisc_addr) {
*srisc_addr = risc_addr;
- risc_attr = be32_to_cpu(dcode[9]);
+ risc_attr = be32_to_cpu((__force __be32)dcode[9]);
}
dlen = ha->fw_transfer_size >> 2;
@@ -7709,14 +8232,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
templates = (risc_attr & BIT_9) ? 2 : 1;
ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
for (j = 0; j < templates; j++, fwdt++) {
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 7);
- risc_size = be32_to_cpu(dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[2]);
ql_dbg(ql_dbg_init, vha, 0x0161,
"-> fwdt%u template array at %#x (%#x dwords)\n",
j, faddr, risc_size);
@@ -7770,8 +8292,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_SUCCESS;
failed:
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -7785,7 +8306,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
int i, fragment;
- uint16_t *wcode, *fwcode;
+ uint16_t *wcode;
+ __be16 *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob;
struct qla_hw_data *ha = vha->hw;
@@ -7805,7 +8327,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wcode = (uint16_t *)req->ring;
*srisc_addr = 0;
- fwcode = (uint16_t *)blob->fw->data;
+ fwcode = (__force __be16 *)blob->fw->data;
fwclen = 0;
/* Validate firmware image by checking version. */
@@ -7853,7 +8375,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++)
- wcode[i] = swab16(fwcode[i]);
+ wcode[i] = swab16((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
@@ -7890,7 +8412,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ulong i;
uint j;
struct fw_blob *blob;
- uint32_t *fwcode;
+ __be32 *fwcode;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct fwdt *fwdt = ha->fwdt;
@@ -7906,8 +8428,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- fwcode = (void *)blob->fw->data;
- dcode = fwcode;
+ fwcode = (__force __be32 *)blob->fw->data;
+ dcode = (__force uint32_t *)fwcode;
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%zd).\n",
@@ -7918,7 +8440,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
@@ -7944,7 +8466,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dlen);
for (i = 0; i < dlen; i++)
- dcode[i] = swab32(fwcode[i]);
+ dcode[i] = swab32((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) {
@@ -7966,8 +8488,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
templates = (risc_attr & BIT_9) ? 2 : 1;
ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
for (j = 0; j < templates; j++, fwdt++) {
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -7998,7 +8519,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode = fwdt->template;
for (i = 0; i < risc_size; i++)
- dcode[i] = fwcode[i];
+ dcode[i] = (__force u32)fwcode[i];
if (!qla27xx_fwdt_template_valid(dcode)) {
ql_log(ql_log_warn, vha, 0x0175,
@@ -8027,8 +8548,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_SUCCESS;
failed:
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -8269,7 +8789,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_81xx *icb;
struct nvram_81xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -8316,7 +8836,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
"primary" : "secondary");
ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -8343,7 +8863,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->port_name[0] = 0x21;
@@ -8387,7 +8907,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
if (IS_T10_PI_CAPABLE(ha))
- nv->frame_payload_size &= ~7;
+ nv->frame_payload_size &= cpu_to_le16(~7);
qlt_81xx_config_nvram_stage1(vha, nv);
@@ -8449,6 +8969,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
icb->node_name[0] &= 0xF0;
}
+ if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+ if ((nv->enhanced_features & BIT_7) == 0)
+ ha->flags.scm_supported_a = 1;
+ }
+
/* Set host adapter parameters. */
ha->flags.disable_risc_code_load = 0;
ha->flags.enable_lip_reset = 0;
@@ -8550,10 +9075,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_0);
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Determine NVMe/FCP priority for target ports */
ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
@@ -8570,7 +9095,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
{
int status, rval;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
unsigned long flags;
status = qla2x00_init_rings(vha);
@@ -8642,7 +9167,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
"qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -8663,61 +9188,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
return status;
}
-void
-qla81xx_update_fw_options(scsi_qla_host_t *vha)
-{
- struct qla_hw_data *ha = vha->hw;
-
- /* Hold status IOCBs until ABTS response received. */
- if (ql2xfwholdabts)
- ha->fw_options[3] |= BIT_12;
-
- /* Set Retry FLOGI in case of P2P connection */
- if (ha->operating_mode == P2P) {
- ha->fw_options[2] |= BIT_3;
- ql_dbg(ql_dbg_disc, vha, 0x2103,
- "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
- __func__, ha->fw_options[2]);
- }
-
- /* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio) {
- if (qla_tgt_mode_enabled(vha) ||
- qla_dual_mode_enabled(vha))
- ha->fw_options[2] |= BIT_11;
- else
- ha->fw_options[2] &= ~BIT_11;
- }
-
- if (qla_tgt_mode_enabled(vha) ||
- qla_dual_mode_enabled(vha)) {
- /* FW auto send SCSI status during */
- ha->fw_options[1] |= BIT_8;
- ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
-
- /* FW perform Exchange validation */
- ha->fw_options[2] |= BIT_4;
- } else {
- ha->fw_options[1] &= ~BIT_8;
- ha->fw_options[10] &= 0x00ff;
-
- ha->fw_options[2] &= ~BIT_4;
- }
-
- if (ql2xetsenable) {
- /* Enable ETS Burst. */
- memset(ha->fw_options, 0, sizeof(ha->fw_options));
- ha->fw_options[2] |= BIT_9;
- }
-
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
-
- qla2x00_set_fw_options(vha, ha->fw_options);
-}
-
/*
* qla24xx_get_fcp_prio
* Gets the fcp cmd priority value for the logged in port.
@@ -8992,7 +9462,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(qpair, smp_processor_id());
+ qla_cpu_update(qpair, raw_smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
@@ -9074,3 +9544,215 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
fail:
return ret;
}
+
+uint64_t
+qla2x00_count_set_bits(uint32_t num)
+{
+ /* Brian Kernighan's Algorithm */
+ u64 count = 0;
+
+ while (num) {
+ num &= (num - 1);
+ count++;
+ }
+ return count;
+}
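
Each num &= (num - 1) step clears the lowest set bit, so the loop above runs once per set bit rather than once per bit position. A minimal user-space sketch of the same idea (illustrative only, not driver code):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t count_set_bits(uint32_t num)
	{
		uint64_t count = 0;

		while (num) {
			num &= (num - 1);	/* clear the lowest set bit */
			count++;
		}
		return count;
	}

	int main(void)
	{
		/* 0xb = 0b1011 has three set bits */
		printf("%llu\n", (unsigned long long)count_set_bits(0xb));
		return 0;
	}
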
+
+uint64_t
+qla2x00_get_num_tgts(scsi_qla_host_t *vha)
+{
+ fc_port_t *f, *tf;
+ u64 count = 0;
+
+ f = NULL;
+ tf = NULL;
+
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (f->port_type != FCT_TARGET)
+ continue;
+ count++;
+ }
+ return count;
+}
+
+int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = NULL;
+ unsigned long int_flags;
+
+ if (flags & QLA2XX_HW_ERROR)
+ vha->hw_err_cnt = 0;
+ if (flags & QLA2XX_SHT_LNK_DWN)
+ vha->short_link_down_cnt = 0;
+ if (flags & QLA2XX_INT_ERR)
+ vha->interface_err_cnt = 0;
+ if (flags & QLA2XX_CMD_TIMEOUT)
+ vha->cmd_timeout_cnt = 0;
+ if (flags & QLA2XX_RESET_CMD_ERR)
+ vha->reset_cmd_err_cnt = 0;
+ if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->tgt_short_link_down_cnt = 0;
+ fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
+ }
+ vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ return 0;
+}
+
+int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
+{
+ return qla2xxx_reset_stats(host, flags);
+}
+
+int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
+{
+ return qla2xxx_reset_stats(host, flags);
+}
+
+int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
+ void *data, u64 size)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
+ struct ql_vnd_stats *rsp_data = &resp->stats;
+ u64 ini_entry_count = 0;
+ u64 i = 0;
+ u64 entry_count = 0;
+ u64 num_tgt = 0;
+ u32 tmp_stat_type = 0;
+ fc_port_t *fcport = NULL;
+ unsigned long int_flags;
+
+ /* Copy stat type to work on it */
+ tmp_stat_type = flags;
+
+ if (tmp_stat_type & BIT_17) {
+ num_tgt = qla2x00_get_num_tgts(vha);
+ /* unset BIT_17 */
+ tmp_stat_type &= ~(1 << 17);
+ }
+ ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
+
+ entry_count = ini_entry_count + num_tgt;
+
+ rsp_data->entry_count = entry_count;
+
+ i = 0;
+ if (flags & QLA2XX_HW_ERROR) {
+ rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->hw_err_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_SHT_LNK_DWN) {
+ rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->short_link_down_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_INT_ERR) {
+ rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->interface_err_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_CMD_TIMEOUT) {
+ rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_RESET_CMD_ERR) {
+ rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
+ i++;
+ }
+
+ /* i continues from the previous loop, as target
+ * entries come after the initiator entries
+ */
+ if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+ if (!fcport->rport)
+ continue;
+ rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
+ rsp_data->entry[i].tgt_num = fcport->rport->number;
+ rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
+ i++;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
+ }
+ resp->status = EXT_STATUS_OK;
+
+ return 0;
+}
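
The entry count above is one response entry per set flag bit, except BIT_17, which expands into one entry per discovered target. A standalone sketch of that math (flag and target values here are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t flags = (1u << 0) | (1u << 2) | (1u << 17); /* example */
		uint64_t num_tgt = 4;		/* assumed target count */
		uint32_t t = flags & ~(1u << 17);
		uint64_t count = 0;

		while (t) {			/* popcount, as in the driver */
			t &= t - 1;
			count++;
		}
		if (flags & (1u << 17))
			count += num_tgt;

		printf("entries = %llu\n", (unsigned long long)count); /* 6 */
		return 0;
	}
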
+
+int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
+ struct fc_rport *rport, void *data, u64 size)
+{
+ struct ql_vnd_tgt_stats_resp *tgt_data = data;
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+ tgt_data->status = 0;
+ tgt_data->stats.entry_count = 1;
+ tgt_data->stats.entry[0].stat_type = flags;
+ tgt_data->stats.entry[0].tgt_num = rport->number;
+ tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
+
+ return 0;
+}
+
+int qla2xxx_disable_port(struct Scsi_Host *host)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+
+ vha->hw->flags.port_isolated = 1;
+
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9006,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
+ if (vha->flags.online) {
+ qla2x00_abort_isp_cleanup(vha);
+ qla2x00_wait_for_sess_deletion(vha);
+ }
+
+ return 0;
+}
+
+int qla2xxx_enable_port(struct Scsi_Host *host)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9001,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
+ vha->hw->flags.port_isolated = 0;
+ /* Set the flag to 1, so that isp_abort can proceed */
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 364b3db8b2dc..db17f7f410cd 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_target.h"
@@ -11,7 +10,7 @@
* Continuation Type 1 IOCBs to allocate.
*
* @vha: HA context
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
@@ -40,16 +39,16 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
* register value.
*/
static __inline__ uint16_t
-qla2x00_debounce_register(volatile uint16_t __iomem *addr)
+qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
volatile uint16_t first;
volatile uint16_t second;
do {
- first = RD_REG_WORD(addr);
+ first = rd_reg_word(addr);
barrier();
cpu_relax();
- second = RD_REG_WORD(addr);
+ second = rd_reg_word(addr);
} while (first != second);
return (first);
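
The debounce loop re-reads the register until two consecutive reads agree, riding out a value caught mid-update. A user-space model with a faked register in place of rd_reg_word() (the read values are invented for the demo):

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t fake_reads[] = { 0x1234, 0x12ff, 0x12ff };
	static unsigned idx;

	static uint16_t read_reg(void)	/* stands in for rd_reg_word() */
	{
		uint16_t v = fake_reads[idx];

		if (idx + 1 < sizeof(fake_reads) / sizeof(fake_reads[0]))
			idx++;
		return v;
	}

	int main(void)
	{
		uint16_t first, second;

		do {
			first = read_reg();
			second = read_reg();
		} while (first != second);

		printf("stable value: 0x%04x\n", first);	/* 0x12ff */
		return 0;
	}
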
@@ -185,6 +184,8 @@ static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
sp->vha = vha;
sp->qpair = qpair;
sp->cmd_type = TYPE_SRB;
+ /* ref: INIT - normal flow */
+ kref_init(&sp->cmd_kref);
INIT_LIST_HEAD(&sp->elem);
}
@@ -207,10 +208,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
return sp;
}
+void qla2xxx_rel_done_warning(srb_t *sp, int res);
+void qla2xxx_rel_free_warning(srb_t *sp);
+
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
sp->qpair = NULL;
+ sp->done = qla2xxx_rel_done_warning;
+ sp->free = qla2xxx_rel_free_warning;
mempool_free(sp, qpair->srb_mempool);
QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
@@ -266,11 +272,41 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
}
static inline void
-qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
- if (retry_delay)
- fcport->retry_delay_timestamp = jiffies +
- (retry_delay * HZ / 10);
+ u8 scope;
+ u16 qual;
+#define SQ_SCOPE_MASK 0xc000 /* SAM-6 rev5 5.3.2 */
+#define SQ_SCOPE_SHIFT 14
+#define SQ_QUAL_MASK 0x3fff
+
+#define SQ_MAX_WAIT_SEC 60 /* Max I/O hold off time in seconds. */
+#define SQ_MAX_WAIT_TIME (SQ_MAX_WAIT_SEC * 10) /* in 100ms units. */
+
+ if (!sts_qual) /* Common case. */
+ return;
+
+ scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
+ /* Handle only scope 1 or 2, which is for I-T nexus. */
+ if (scope != 1 && scope != 2)
+ return;
+
+ /* Skip processing if the retry delay timer is already in effect. */
+ if (fcport->retry_delay_timestamp &&
+ time_before(jiffies, fcport->retry_delay_timestamp))
+ return;
+
+ qual = sts_qual & SQ_QUAL_MASK;
+ if (qual < 1 || qual > 0x3fef)
+ return;
+ qual = min(qual, (u16)SQ_MAX_WAIT_TIME);
+
+ /* qual is expressed in 100ms increments. */
+ fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);
+
+ ql_log(ql_log_warn, fcport->vha, 0x5101,
+ "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
+ fcport->port_name, sts_qual, qual * 100);
}
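
The rewritten helper decodes the FCP status qualifier per the SAM-6 reference cited above: the top two bits carry the scope and the low 14 bits the delay in 100 ms units. A worked user-space decode (the input value is invented):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t sts_qual = 0x4005;	/* example wire value */
		uint8_t scope = (sts_qual & 0xc000) >> 14;
		uint16_t qual = sts_qual & 0x3fff;

		/* scope 1 or 2 targets the I-T nexus, as in the driver */
		if (scope == 1 || scope == 2)
			printf("hold I/O for %u ms\n", qual * 100); /* 500 ms */
		return 0;
	}
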
static inline bool
@@ -329,7 +365,7 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
} else
req->ring_ptr++;
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
}
static inline int
@@ -343,3 +379,120 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}
+
+enum {
+ RESOURCE_NONE,
+ RESOURCE_INI,
+};
+
+static inline int
+qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+ u16 iocbs_used, i;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ if (!ql2xenforce_iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return 0;
+ }
+
+ if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ return 0;
+ } else {
+ /* no need to acquire qpair lock. It's just a rough calculation */
+ iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ }
+
+ if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ return 0;
+ } else {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+}
+
+static inline void
+qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+ switch (iores->res_type) {
+ case RESOURCE_NONE:
+ break;
+ default:
+ if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ qp->fwres.iocbs_used -= iores->iocb_cnt;
+ } else {
+ /* should not happen */
+ qp->fwres.iocbs_used = 0;
+ }
+ break;
+ }
+ iores->res_type = RESOURCE_NONE;
+}
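
qla_get_iocbs() checks a per-queue-pair soft limit first and only falls back to the adapter-wide limit, summed across all queue pairs, when that fails. A user-space model of the two-tier check (struct layout and limit values are illustrative, not driver values):

	#include <stdbool.h>
	#include <stdio.h>

	struct qp { unsigned used, qp_limit; };

	static bool reserve(struct qp *qp, unsigned total_used,
			    unsigned hw_limit, unsigned want)
	{
		if (qp->used + want < qp->qp_limit) {	/* fast path */
			qp->used += want;
			return true;
		}
		if (total_used + want < hw_limit) {	/* global fallback */
			qp->used += want;
			return true;
		}
		return false;				/* -ENOSPC in the driver */
	}

	int main(void)
	{
		struct qp q = { .used = 90, .qp_limit = 100 };

		/* over the qp limit but under the hw limit: succeeds */
		printf("%d\n", reserve(&q, 90, 400, 20));
		return 0;
	}
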
+
+#define ISP_REG_DISCONNECT 0xffffffffU
+/**************************************************************************
+ * qla2x00_isp_reg_stat
+ *
+ * Description:
+ * Read the host status register of ISP before aborting the command.
+ *
+ * Input:
+ * ha = pointer to host adapter structure.
+ *
+ * Returns:
+ * Non-zero (true) if a register disconnect is detected,
+ * zero (false) otherwise.
+ **************************************************************************/
+static inline
+uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
+{
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+
+ if (IS_P3P_TYPE(ha))
+ return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ else
+ return ((rd_reg_dword(&reg->host_status)) ==
+ ISP_REG_DISCONNECT);
+}
+
+static inline
+bool qla_pci_disconnected(struct scsi_qla_host *vha,
+ struct device_reg_24xx __iomem *reg)
+{
+ uint32_t stat;
+ bool ret = false;
+
+ stat = rd_reg_dword(&reg->host_status);
+ if (stat == 0xffffffff) {
+ ql_log(ql_log_info, vha, 0x8041,
+ "detected PCI disconnect.\n");
+ qla_schedule_eeh_work(vha);
+ ret = true;
+ }
+ return ret;
+}
+
+static inline bool
+fcport_is_smaller(fc_port_t *fcport)
+{
+ if (wwn_to_u64(fcport->port_name) <
+ wwn_to_u64(fcport->vha->port_name))
+ return true;
+ else
+ return false;
+}
+
+static inline bool
+fcport_is_bigger(fc_port_t *fcport)
+{
+ return !fcport_is_smaller(fcport);
+}
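
fcport_is_smaller() reduces to an unsigned compare of the two 8-byte WWPNs. A self-contained sketch of the comparison, with wwn_to_u64() re-modelled here as a plain big-endian load (the WWPN values are made up):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t wwn_to_u64(const uint8_t wwn[8])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | wwn[i];	/* big-endian load */
		return v;
	}

	int main(void)
	{
		uint8_t a[8] = { 0x21, 0, 0, 0x24, 0xff, 0, 0, 0x01 };
		uint8_t b[8] = { 0x21, 0, 0, 0x24, 0xff, 0, 0, 0x02 };

		printf("a smaller than b: %d\n", wwn_to_u64(a) < wwn_to_u64(b));
		return 0;
	}
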
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 47bf60a9490a..42ce4e1fe744 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -44,7 +43,7 @@ qla2x00_get_cmd_direction(srb_t *sp)
* qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
* Continuation Type 0 IOCBs to allocate.
*
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
@@ -66,7 +65,7 @@ qla2x00_calc_iocbs_32(uint16_t dsds)
* qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
@@ -119,7 +118,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
*
* Returns a pointer to the continuation type 1 IOCB packet.
*/
-static inline cont_a64_entry_t *
+cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
@@ -146,7 +145,6 @@ inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- uint8_t guard = scsi_host_get_guard(cmd->device->host);
/* We always use DIFF Bundling for best performance */
*fw_prot_opts = 0;
@@ -167,7 +165,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- if (guard & SHOST_DIX_GUARD_IP)
+ if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
else
*fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -177,6 +175,9 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
}
+ if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
+ *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
return scsi_prot_sg_count(cmd);
}
@@ -376,7 +377,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+ cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -428,8 +429,8 @@ qla2x00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -472,27 +473,27 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/* Set chip new ring index. */
if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
} else if (IS_QLA83XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
- WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+ wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->isp24.req_q_in);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
}
/**
- * qla2x00_marker() - Send a marker IOCB to the firmware.
+ * __qla2x00_marker() - Send a marker IOCB to the firmware.
* @vha: HA context
* @qpair: queue pair pointer
* @loop_id: loop ID
@@ -530,7 +531,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
- mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
+ mrk24->handle = make_handle(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16((uint16_t)lun);
@@ -594,6 +595,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct ct6_dsd *ctx;
+ struct qla_qpair *qpair = sp->qpair;
cmd = GET_CMD_SP(sp);
@@ -612,12 +614,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
+ qpair->counters.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
+ qpair->counters.input_requests++;
}
cur_seg = scsi_sglist(cmd);
@@ -661,7 +663,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cur_dsd->address = 0;
cur_dsd->length = 0;
cur_dsd++;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
return 0;
}
@@ -669,7 +671,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
* qla24xx_calc_dsd_lists() - Determine number of DSD list required
* for Command Type 6.
*
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
*
* Returns the number of dsd list needed to store @dsds.
*/
@@ -704,6 +706,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct qla_qpair *qpair = sp->qpair;
cmd = GET_CMD_SP(sp);
@@ -721,12 +724,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
+ qpair->counters.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
+ qpair->counters.input_requests++;
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -755,8 +758,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
struct fw_dif_context {
- uint32_t ref_tag;
- uint16_t app_tag;
+ __le32 ref_tag;
+ __le16 app_tag;
uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
};
@@ -771,74 +774,19 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- switch (scsi_get_prot_type(cmd)) {
- case SCSI_PROT_DIF_TYPE0:
- /*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
- */
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
+ pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
+ if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
+ qla2x00_hba_err_chk_enabled(sp)) {
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
pkt->ref_tag_mask[2] = 0xff;
pkt->ref_tag_mask[3] = 0xff;
- break;
-
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
- case SCSI_PROT_DIF_TYPE2:
- pkt->app_tag = cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
-
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
-
- /* enable ALL bytes of the ref tag */
- pkt->ref_tag_mask[0] = 0xff;
- pkt->ref_tag_mask[1] = 0xff;
- pkt->ref_tag_mask[2] = 0xff;
- pkt->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
- case SCSI_PROT_DIF_TYPE3:
- pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
- pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
- 0x00;
- break;
-
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
- case SCSI_PROT_DIF_TYPE1:
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
- pkt->app_tag = cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
-
- if (!qla2x00_hba_err_chk_enabled(sp))
- break;
-
- /* enable ALL bytes of the ref tag */
- pkt->ref_tag_mask[0] = 0xff;
- pkt->ref_tag_mask[1] = 0xff;
- pkt->ref_tag_mask[2] = 0xff;
- pkt->ref_tag_mask[3] = 0xff;
- break;
}
+
+ pkt->app_tag = cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
}
int
@@ -904,7 +852,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
memset(&sgx, 0, sizeof(struct qla2_sgx));
if (sp) {
cmd = GET_CMD_SP(sp);
- prot_int = cmd->device->sector_size;
+ prot_int = scsi_prot_interval(cmd);
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
@@ -1389,7 +1337,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
struct dsd64 *cur_dsd;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t total_bytes = 0;
@@ -1456,7 +1404,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
- cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
@@ -1545,7 +1493,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Fibre channel byte count */
cmd_pkt->byte_count = cpu_to_le32(total_bytes);
- fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
@@ -1599,12 +1547,17 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
+ struct rsp_que *rsp;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
+ return qla28xx_start_scsi_edif(sp);
+
/* Setup device pointers. */
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1635,9 +1588,21 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1655,7 +1620,7 @@ qla24xx_start_scsi(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1695,10 +1660,16 @@ qla24xx_start_scsi(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1707,6 +1678,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1820,9 +1792,20 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Total Data and protection sg segment(s) */
tot_prot_dsds = nseg;
tot_dsds += nseg;
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1843,7 +1826,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Fill-in common area */
cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -1880,8 +1863,14 @@ qla24xx_dif_start_scsi(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1894,7 +1883,9 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return QLA_FUNCTION_FAILED;
}
@@ -1916,16 +1907,21 @@ qla2xxx_start_scsi_mq(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
+ struct rsp_que *rsp;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
+ if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
+ return qla28xx_start_scsi_edif(sp);
+
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
/* Setup qpair pointers */
req = qpair->req;
+ rsp = qpair->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1955,9 +1951,21 @@ qla2xxx_start_scsi_mq(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1975,7 +1983,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -2015,10 +2023,16 @@ qla2xxx_start_scsi_mq(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2027,6 +2041,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2155,9 +2170,21 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Total Data and protection sg segment(s) */
tot_prot_dsds = nseg;
tot_dsds += nseg;
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2178,7 +2205,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Fill-in common area */
cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -2213,8 +2240,9 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -2232,7 +2260,9 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
return QLA_FUNCTION_FAILED;
}
@@ -2266,17 +2296,22 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
cnt = *req->out_ptr;
else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha))
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ cnt = rd_reg_dword(reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ cnt = rd_reg_dword(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha))
- cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+ cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
+ if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
+ qla_schedule_eeh_work(vha);
+ return NULL;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2305,8 +2340,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
if (IS_QLAFX00(ha)) {
- WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
- WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+ wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
+ wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
} else {
pkt->entry_count = req_cnt;
pkt->handle = handle;
@@ -2344,9 +2379,21 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
- logio->control_flags |= LCF_NVME_PRLI;
+ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
if (sp->vha->flags.nvme_first_burst)
- logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
+ logio->io_parameter[0] =
+ cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
+ if (sp->vha->flags.nvme2_enabled) {
+ /* Set service parameter BIT_7 for NVME CONF support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_CONF);
+ /* Set service parameter BIT_8 for SLER support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_SLER);
+ /* Set service parameter BIT_9 for PI control support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
+ }
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2362,6 +2409,8 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
} else {
@@ -2370,6 +2419,12 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
+ logio->control_flags |=
+ cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
+ logio->io_parameter[0] =
+ cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
+ }
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2489,7 +2544,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->entry_type = TSK_MGMT_IOCB_TYPE;
tsk->entry_count = 1;
- tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+ tsk->handle = make_handle(req->id, tsk->handle);
tsk->nport_handle = cpu_to_le16(fcport->loop_id);
tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->control_flags = cpu_to_le32(flags);
@@ -2505,11 +2560,38 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
}
-void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+static void
+qla2x00_async_done(struct srb *sp, int res)
+{
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ /*
+ * Successfully cancelled the timeout handler
+ * ref: TMR
+ */
+ if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
+ return;
+ }
+ sp->async_done(sp, res);
+}
+
+void
+qla2x00_sp_release(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+
+ sp->free(sp);
+}
+
+void
+qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+ void (*done)(struct srb *sp, int res))
{
timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
- sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->done = qla2x00_async_done;
+ sp->async_done = done;
sp->free = qla2x00_sp_free;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
	sp->start_timer = 1;
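
A minimal userspace model of the INIT/TMR reference scheme the "ref:" comments in this patch describe (a plain counter stands in for the kernel kref; the flow is assumed from those comments, not a verbatim driver path):

#include <stdio.h>

/* Allocation takes the INIT ref, qla2x00_start_sp() takes the TMR ref
 * before arming the timer, and whichever side cancels or fires the
 * timer drops the TMR ref again. */
struct sp {
	int refs;
};

static void sp_get(struct sp *sp)
{
	sp->refs++;
}

static void sp_put(struct sp *sp)
{
	if (--sp->refs == 0)
		printf("sp released\n");
}

int main(void)
{
	struct sp sp = { .refs = 1 };	/* ref: INIT, taken at allocation */

	sp_get(&sp);			/* ref: TMR, before add_timer() */
	sp_put(&sp);			/* timer cancelled: drop TMR ref */
	sp_put(&sp);			/* completion path: drop INIT ref */
	return 0;
}
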
@@ -2596,7 +2678,9 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
return -ENOMEM;
}
- /* Alloc SRB structure */
+ /* Alloc SRB structure
+ * ref: INIT
+ */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
kfree(fcport);
@@ -2617,18 +2701,19 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
- sp->done = qla2x00_els_dcmd_sp_done;
+ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
+ qla2x00_els_dcmd_sp_done);
sp->free = qla2x00_els_dcmd_sp_free;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
GFP_KERNEL);
if (!elsio->u.els_logo.els_logo_pyld) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return QLA_FUNCTION_FAILED;
}
@@ -2651,7 +2736,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return QLA_FUNCTION_FAILED;
}
@@ -2662,7 +2748,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
wait_for_completion(&elsio->u.els_logo.comp);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
@@ -2678,27 +2765,30 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- els_iocb->tx_dsd_count = 1;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
els_iocb->vp_index = vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = 0;
els_iocb->opcode = elsio->u.els_logo.els_cmd;
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
/* For SID the byte order is different than DID */
els_iocb->s_id[1] = vha->d_id.b.al_pa;
els_iocb->s_id[2] = vha->d_id.b.area;
els_iocb->s_id[0] = vha->d_id.b.domain;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
- els_iocb->control_flags = 0;
+ if (vha->hw->flags.edif_enabled)
+ els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
+ else
+ els_iocb->control_flags = 0;
els_iocb->tx_byte_count = els_iocb->tx_len =
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address);
- els_iocb->rx_dsd_count = 1;
+ els_iocb->rx_dsd_count = cpu_to_le16(1);
els_iocb->rx_byte_count = els_iocb->rx_len =
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
@@ -2710,7 +2800,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
- els_iocb->control_flags = 1 << 13;
els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -2730,7 +2819,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->vha->qla_stats.control_requests++;
}
-static void
+void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
@@ -2785,7 +2874,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct qla_work_evt *e;
struct fc_port *conflict_fcport;
port_id_t cid; /* conflict Nport id */
- u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
@@ -2793,12 +2882,14 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
- del_timer(&sp->u.iocb_cmd.timer);
+	/* For edif, set logout on delete to ensure any residual key from FW is flushed. */
+ fcport->logout_on_delete = 1;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- switch (fw_status[0]) {
+ switch (le32_to_cpu(fw_status[0])) {
case CS_DATA_UNDERRUN:
case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
@@ -2808,9 +2899,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case CS_IOCB_ERROR:
- switch (fw_status[1]) {
+ switch (le32_to_cpu(fw_status[1])) {
case LSC_SCODE_PORTID_USED:
- lid = fw_status[2] & 0xffff;
+ lid = le32_to_cpu(fw_status[2]) & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(fcport->port_name),
fcport->d_id, lid, &conflict_fcport);
@@ -2844,9 +2935,11 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case LSC_SCODE_NPORT_USED:
- cid.b.domain = (fw_status[2] >> 16) & 0xff;
- cid.b.area = (fw_status[2] >> 8) & 0xff;
- cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
+ & 0xff;
+ cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
+ & 0xff;
+ cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
@@ -2868,8 +2961,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ break;
}
- /* fall through */
+ fallthrough;
default:
ql_dbg(ql_dbg_disc, vha, 0x20eb,
"%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
@@ -2877,9 +2971,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport,
- DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
break;
@@ -2891,8 +2983,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
@@ -2901,7 +2992,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct srb_iocb *elsio = &sp->u.iocb_cmd;
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
e->u.iosb.sp = sp;
@@ -2919,7 +3011,9 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
int rval = QLA_SUCCESS;
void *ptr, *resp_ptr;
- /* Alloc SRB structure */
+ /* Alloc SRB structure
+ * ref: INIT
+ */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
@@ -2932,24 +3026,22 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
- "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+ "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
- sp->type = SRB_ELS_DCMD;
- sp->name = "ELS_DCMD";
- sp->fcport = fcport;
-
- elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
- init_completion(&elsio->u.els_plogi.comp);
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
+ qla2x00_els_dcmd2_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
- sp->done = qla2x00_els_dcmd2_sp_done;
elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
if (!elsio->u.els_plogi.els_plogi_pyld) {
@@ -2958,7 +3050,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
resp_ptr = elsio->u.els_plogi.els_resp_pyld =
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
if (!elsio->u.els_plogi.els_resp_pyld) {
@@ -2971,17 +3063,23 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
memset(ptr, 0, sizeof(struct els_plogi_payload));
memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
- &ha->plogi_els_payld.data,
- sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+ &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
+ if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
+ struct fc_els_flogi *p = ptr;
+
+ p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
+ }
+
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));
+ init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -3004,11 +3102,49 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
out:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
+/* it is assumed the qpair lock is held */
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+ struct els_entry_24xx *els_iocb,
+ struct qla_els_pt_arg *a)
+{
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = QLA_SKIP_HANDLE;
+ els_iocb->nport_handle = a->nport_handle;
+ els_iocb->rx_xchg_address = a->rx_xchg_address;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
+ els_iocb->vp_index = a->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = cpu_to_le16(0);
+ els_iocb->opcode = a->els_opcode;
+
+ els_iocb->d_id[0] = a->did.b.al_pa;
+ els_iocb->d_id[1] = a->did.b.area;
+ els_iocb->d_id[2] = a->did.b.domain;
+ /* For SID the byte order is different than DID */
+ els_iocb->s_id[1] = vha->d_id.b.al_pa;
+ els_iocb->s_id[2] = vha->d_id.b.area;
+ els_iocb->s_id[0] = vha->d_id.b.domain;
+
+ els_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+ els_iocb->tx_len = cpu_to_le32(a->tx_len);
+ put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
+
+ els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
+ els_iocb->rx_len = cpu_to_le32(a->rx_len);
+ put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
+}
+
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
@@ -3020,7 +3156,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
- els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
@@ -3030,9 +3166,9 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->type == SRB_ELS_CMD_RPT ?
bsg_request->rqst_data.r_els.els_code :
bsg_request->rqst_data.h_els.command_code;
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
els_iocb->control_flags = 0;
els_iocb->rx_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
@@ -3214,7 +3350,7 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
uint8_t additional_cdb_len;
struct ct6_dsd *ctx;
struct scsi_qla_host *vha = sp->vha;
@@ -3308,7 +3444,7 @@ sufficient_dsds:
req_cnt = 1;
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3358,7 +3494,7 @@ sufficient_dsds:
}
cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -3396,7 +3532,7 @@ sufficient_dsds:
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
additional_cdb_len);
*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
@@ -3417,7 +3553,7 @@ sufficient_dsds:
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3429,7 +3565,7 @@ sufficient_dsds:
goto queuing_error;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
@@ -3493,10 +3629,10 @@ sufficient_dsds:
if (ql2xdbwr)
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3530,11 +3666,12 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
struct srb_iocb *aio = &sp->u.iocb_cmd;
scsi_qla_host_t *vha = sp->vha;
struct req_que *req = sp->qpair->req;
+ srb_t *orig_sp = sp->cmd_sp;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle = make_handle(req->id, sp->handle);
if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3542,10 +3679,15 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
}
abt_iocb->handle_to_abort =
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
+ make_handle(le16_to_cpu(aio->u.abt.req_que_no),
+ aio->u.abt.cmd_hndl);
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
+ abt_iocb->req_que_no = aio->u.abt.req_que_no;
+
+ /* need to pass original sp */
+ if (orig_sp)
+ qla_nvme_abort_set_option(abt_iocb, orig_sp);
+
/* Send the command to the firmware */
wmb();
}
@@ -3560,7 +3702,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
for (i = 0; i < sz; i++)
- mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+ mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
}
static void
@@ -3584,7 +3726,7 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -3597,37 +3739,44 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.srr_reject_code = 0;
nack->u.isp24.srr_reject_code_expl = 0;
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
+ sp->vha->hw->flags.edif_enabled) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
+ "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
+ sp->name, sp->handle, sp->fcport->loop_id,
+ sp->fcport->d_id.b24);
+ nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+ }
}
/*
* Build NVME LS request
*/
-static int
+static void
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
struct srb_iocb *nvme;
- int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+ cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
- cmd_pkt->tx_dseg_count = 1;
- cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
- cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+ cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
- cmd_pkt->rx_dseg_count = 1;
- cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
- cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
+ cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
-
- return rval;
}
static void
@@ -3674,6 +3823,9 @@ qla2x00_start_sp(srb_t *sp)
void *pkt;
unsigned long flags;
+ if (vha->hw->flags.eeh_busy)
+ return -EIO;
+
spin_lock_irqsave(qp->qp_lock_ptr, flags);
pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
@@ -3701,6 +3853,10 @@ qla2x00_start_sp(srb_t *sp)
case SRB_ELS_CMD_HST:
qla24xx_els_iocb(sp, pkt);
break;
+ case SRB_ELS_CMD_HST_NOLOGIN:
+ qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
+ ((struct els_entry_24xx *)pkt)->handle = sp->handle;
+ break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_ct_iocb(sp, pkt) :
@@ -3748,12 +3904,25 @@ qla2x00_start_sp(srb_t *sp)
case SRB_PRLO_CMD:
qla24xx_prlo_iocb(sp, pkt);
break;
+ case SRB_SA_UPDATE:
+ qla24xx_sa_update_iocb(sp, pkt);
+ break;
+ case SRB_SA_REPLACE:
+ qla24xx_sa_replace_iocb(sp, pkt);
+ break;
default:
break;
}
- if (sp->start_timer)
+ if (sp->start_timer) {
+		/* ref: TMR timer ref
+		 * This must be done just before the start_iocbs call so
+		 * that the caller never has to drop the TMR ref itself,
+		 * even on failure.
+		 */
+ kref_get(&sp->cmd_kref);
add_timer(&sp->u.iocb_cmd.timer);
+ }
wmb();
qla2x00_start_iocbs(vha, qp->req);
@@ -3891,8 +4060,14 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -3905,7 +4080,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
}
cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
@@ -3931,5 +4106,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
qla2x00_start_iocbs(vha, req);
queuing_error:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e40705d38cea..e19fde304e5c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,11 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
+#include "qla_gbl.h"
#include <linux/delay.h>
#include <linux/slab.h>
@@ -22,15 +22,296 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
+static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
+ struct purex_item *item);
+static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
+ uint16_t size);
+static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
+ void *pkt);
+static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp);
+
+static void
+qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
+{
+ void *pkt = &item->iocb;
+ uint16_t pkt_size = item->size;
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
+ "%s: Enter\n", __func__);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
+ "-------- ELS REQ -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
+ pkt, pkt_size);
+
+ fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
+}
const char *const port_state_str[] = {
- "Unknown",
- "UNCONFIGURED",
- "DEAD",
- "LOST",
- "ONLINE"
+ [FCS_UNKNOWN] = "Unknown",
+ [FCS_UNCONFIGURED] = "UNCONFIGURED",
+ [FCS_DEVICE_DEAD] = "DEAD",
+ [FCS_DEVICE_LOST] = "LOST",
+ [FCS_ONLINE] = "ONLINE"
};
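
The switch to designated initializers ties each label to its FCS_* enum value, so reordering or extending the enum can no longer silently shift the strings. A small self-contained illustration (the enum values here are stand-ins; the real ones live in the driver headers):

#include <stdio.h>

/* Designated initializers bind each string to its index by name. */
enum { FCS_UNKNOWN, FCS_UNCONFIGURED, FCS_DEVICE_DEAD,
       FCS_DEVICE_LOST, FCS_ONLINE };

static const char *const port_state_str[] = {
	[FCS_UNKNOWN]      = "Unknown",
	[FCS_UNCONFIGURED] = "UNCONFIGURED",
	[FCS_DEVICE_DEAD]  = "DEAD",
	[FCS_DEVICE_LOST]  = "LOST",
	[FCS_ONLINE]       = "ONLINE",
};

int main(void)
{
	printf("%s\n", port_state_str[FCS_ONLINE]);	/* -> ONLINE */
	return 0;
}
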
+static void
+qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
+{
+ struct abts_entry_24xx *abts =
+ (struct abts_entry_24xx *)&pkt->iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct els_entry_24xx *rsp_els;
+ struct abts_entry_24xx *abts_rsp;
+ dma_addr_t dma;
+ uint32_t fctl;
+ int rval;
+
+ ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
+
+ ql_log(ql_log_warn, vha, 0x0287,
+ "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
+ abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
+ abts->seq_id, abts->seq_cnt);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
+ "-------- ABTS RCV -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
+ (uint8_t *)abts, sizeof(*abts));
+
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
+ GFP_KERNEL);
+ if (!rsp_els) {
+ ql_log(ql_log_warn, vha, 0x0287,
+		    "Failed to allocate DMA buffer for ABTS/ELS RSP.\n");
+ return;
+ }
+
+ /* terminate exchange */
+ rsp_els->entry_type = ELS_IOCB_TYPE;
+ rsp_els->entry_count = 1;
+ rsp_els->nport_handle = cpu_to_le16(~0);
+ rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
+ rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
+ ql_dbg(ql_dbg_init, vha, 0x0283,
+ "Sending ELS Response to terminate exchange %#x...\n",
+ abts->rx_xch_addr_to_abort);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
+ (uint8_t *)rsp_els, sizeof(*rsp_els));
+ rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0288,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (rsp_els->comp_status) {
+ ql_log(ql_log_warn, vha, 0x0289,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, rsp_els->comp_status,
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x028a,
+ "%s: abort exchange done.\n", __func__);
+ }
+
+ /* send ABTS response */
+ abts_rsp = (void *)rsp_els;
+ memset(abts_rsp, 0, sizeof(*abts_rsp));
+ abts_rsp->entry_type = ABTS_RSP_TYPE;
+ abts_rsp->entry_count = 1;
+ abts_rsp->nport_handle = abts->nport_handle;
+ abts_rsp->vp_idx = abts->vp_idx;
+ abts_rsp->sof_type = abts->sof_type & 0xf0;
+ abts_rsp->rx_xch_addr = abts->rx_xch_addr;
+ abts_rsp->d_id[0] = abts->s_id[0];
+ abts_rsp->d_id[1] = abts->s_id[1];
+ abts_rsp->d_id[2] = abts->s_id[2];
+ abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
+ abts_rsp->s_id[0] = abts->d_id[0];
+ abts_rsp->s_id[1] = abts->d_id[1];
+ abts_rsp->s_id[2] = abts->d_id[2];
+ abts_rsp->cs_ctl = abts->cs_ctl;
+ /* include flipping bit23 in fctl */
+ fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
+ FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
+ abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
+ abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
+ abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
+ abts_rsp->type = FC_TYPE_BLD;
+ abts_rsp->rx_id = abts->rx_id;
+ abts_rsp->ox_id = abts->ox_id;
+ abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
+ abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
+ abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
+ abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
+ ql_dbg(ql_dbg_init, vha, 0x028b,
+ "Sending BA ACC response to ABTS %#x...\n",
+ abts->rx_xch_addr_to_abort);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
+ (uint8_t *)abts_rsp, sizeof(*abts_rsp));
+ rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x028c,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (abts_rsp->comp_status) {
+ ql_log(ql_log_warn, vha, 0x028d,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, abts_rsp->comp_status,
+ abts_rsp->payload.error.subcode1,
+ abts_rsp->payload.error.subcode2);
+ } else {
+		ql_dbg(ql_dbg_init, vha, 0x028e,
+ "%s: done.\n", __func__);
+ }
+
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
+}
+
+/**
+ * __qla_consume_iocb - tell the firmware that the driver has processed or
+ * consumed the head IOCB along with its continuation IOCBs from the
+ * provided response queue.
+ * @vha: host adapter pointer
+ * @pkt: pointer to current packet. On return, this pointer shall move
+ * to the next packet.
+ * @rsp: response queue pointer.
+ *
+ * It is assumed pkt is the head IOCB, not a continuation IOCB.
+ */
+void __qla_consume_iocb(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp)
+{
+ struct rsp_que *rsp_q = *rsp;
+ response_t *new_pkt;
+ uint16_t entry_count_remaining;
+ struct purex_entry_24xx *purex = *pkt;
+
+ entry_count_remaining = purex->entry_count;
+ while (entry_count_remaining > 0) {
+ new_pkt = rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+
+ new_pkt->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+ --entry_count_remaining;
+ }
+}
+
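A toy sketch of the ring-wrap arithmetic __qla_consume_iocb() relies on: the index advances one entry per consumed IOCB and wraps to the ring base at the ring length (length and count below are made-up values):

#include <stdio.h>

int main(void)
{
	unsigned int ring_index = 0, length = 8;

	for (int consumed = 0; consumed < 12; consumed++) {
		ring_index++;
		if (ring_index == length)
			ring_index = 0;	/* like rsp_q->ring_ptr = rsp_q->ring */
		printf("consumed %d -> ring_index %u\n", consumed, ring_index);
	}
	return 0;
}
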
+/**
+ * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
+ * and save to provided buffer
+ * @vha: host adapter pointer
+ * @pkt: pointer to the Purex IOCB
+ * @rsp: response queue
+ * @buf: buffer to copy the extracted ELS payload into
+ * @buf_len: buffer length
+ */
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
+{
+ struct purex_entry_24xx *purex = *pkt;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0;
+ uint16_t entry_count_remaining;
+ u16 tpad;
+
+ entry_count_remaining = purex->entry_count;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
+ - PURX_ELS_HEADER_SIZE;
+
+ /*
+	 * The payload may not end on a 4-byte boundary. Round up /
+	 * pad to leave room for the word swap before saving the data.
+ */
+ tpad = roundup(total_bytes, 4);
+
+ if (buf_len < tpad) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "%s buffer is too small %d < %d\n",
+ __func__, buf_len, tpad);
+ __qla_consume_iocb(vha, pkt, rsp);
+ return -EIO;
+ }
+
+ pending_bytes = total_bytes = tpad;
+ no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
+ sizeof(purex->els_frame_payload) : pending_bytes;
+
+ memcpy(buf, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy((buf + buffer_copy_offset), new_pkt->data,
+ no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+				    "Attempt to copy more than we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy((buf + buffer_copy_offset), new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ /* flush signature */
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ return -EIO;
+ }
+ } while (entry_count_remaining > 0);
+
+ be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
+
+ return 0;
+}
+
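A standalone check of the padding math used in __qla_copy_purex_to_buffer(): total_bytes is rounded up to a 4-byte boundary because the final be32_to_cpu_array() swaps whole 32-bit words (the payload size below is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t total_bytes = 46;		/* ELS payload, not word-aligned */
	uint16_t tpad = (total_bytes + 3) & ~3;	/* roundup(total_bytes, 4) */

	printf("payload %u bytes -> padded to %u, %u words swapped\n",
	       total_bytes, tpad, tpad >> 2);	/* 46 -> 48, 12 words */
	return 0;
}
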
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
* @irq: interrupt number
@@ -67,7 +348,7 @@ qla2100_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break;
if (hccr & HCCR_RISC_PAUSE) {
@@ -79,18 +360,18 @@ qla2100_intr_handler(int irq, void *dev_id)
* bit to be cleared. Schedule a big hammer to get
* out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
- } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
+ } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
break;
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
/* Get mailbox data. */
mb[0] = RD_MAILBOX_REG(ha, reg, 0);
@@ -109,13 +390,13 @@ qla2100_intr_handler(int irq, void *dev_id)
mb[0]);
}
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- RD_REG_WORD(&reg->semaphore);
+ wrt_reg_word(&reg->semaphore, 0);
+ rd_reg_word(&reg->semaphore);
} else {
qla2x00_process_response_queue(rsp);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
}
qla2x00_handle_mbx_completion(ha, status);
@@ -132,12 +413,7 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
- /*
- * Schedule this (only once) on the default system
- * workqueue so that all the adapter workqueues and the
- * DPC thread can be shutdown cleanly.
- */
- schedule_work(&vha->hw->board_disable);
+ qla_schedule_eeh_work(vha);
}
return true;
} else
@@ -187,14 +463,14 @@ qla2300_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
ql_log(ql_log_warn, vha, 0x5026,
@@ -210,10 +486,10 @@ qla2300_intr_handler(int irq, void *dev_id)
* interrupt bit to be cleared. Schedule a big
* hammer to get out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
@@ -228,7 +504,7 @@ qla2300_intr_handler(int irq, void *dev_id)
status |= MBX_INTERRUPT;
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
break;
case 0x12:
mb[0] = MSW(stat);
@@ -256,8 +532,8 @@ qla2300_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD_RELAXED(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word_relaxed(&reg->hccr);
}
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -275,7 +551,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -291,15 +567,15 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
+ wptr = MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
+ wptr = MAILBOX_REG(ha, reg, 8);
if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
else if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
mboxes >>= 1;
@@ -314,19 +590,19 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
int rval;
struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
/* Seed data -- mailbox1 -> mailbox7. */
if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
- wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ wptr = &reg24->mailbox1;
else if (IS_QLA8044(vha->hw))
- wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+ wptr = &reg82->mailbox_out[1];
else
return;
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
- mb[cnt] = RD_REG_WORD(wptr);
+ mb[cnt] = rd_reg_word(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021,
"Inter-Driver Communication %s -- "
@@ -372,7 +648,7 @@ const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
static const char *const link_speeds[] = {
- "1", "2", "?", "4", "8", "16", "32", "10"
+ "1", "2", "?", "4", "8", "16", "32", "64", "10"
};
#define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
@@ -619,6 +895,212 @@ qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
return NULL;
}
+/* Shall be called only on supported adapters. */
+static void
+qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ bool reset_isp_needed = false;
+
+ ql_log(ql_log_warn, vha, 0x02f0,
+ "MPI Heartbeat stop. MPI reset is%s needed. "
+ "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[1] & BIT_8 ? "" : " not",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) == 0)
+ return;
+
+ ql_log(ql_log_warn, vha, 0x02f1,
+ "MPI Heartbeat stop. FW dump needed\n");
+
+ if (ql2xfulldump_on_mpifail) {
+ ha->isp_ops->fw_dump(vha);
+ reset_isp_needed = true;
+ }
+
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+
+ if (reset_isp_needed) {
+ vha->hw->flags.fw_init_done = 0;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+}
+
+static struct purex_item *
+qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
+{
+ struct purex_item *item = NULL;
+ uint8_t item_hdr_size = sizeof(*item);
+
+ if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
+ item = kzalloc(item_hdr_size +
+ (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
+ } else {
+ if (atomic_inc_return(&vha->default_item.in_use) == 1) {
+ item = &vha->default_item;
+ goto initialize_purex_header;
+ } else {
+ item = kzalloc(item_hdr_size, GFP_ATOMIC);
+ }
+ }
+ if (!item) {
+ ql_log(ql_log_warn, vha, 0x5092,
+		    ">> Failed to allocate purex list item.\n");
+
+ return NULL;
+ }
+
+initialize_purex_header:
+ item->vha = vha;
+ item->size = size;
+ return item;
+}
+
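A userspace model of the claim-or-allocate fast path in qla24xx_alloc_purex_item(): the first caller that bumps in_use from 0 to 1 gets the preallocated default_item, and everyone else falls back to allocation (names are illustrative; GFP_ATOMIC has no equivalent here):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_use;

static const char *alloc_item(void)
{
	/* atomic_fetch_add() == 0 mirrors atomic_inc_return() == 1 */
	if (atomic_fetch_add(&in_use, 1) == 0)
		return "preallocated default_item";
	return "freshly allocated item";
}

int main(void)
{
	printf("first:  %s\n", alloc_item());
	printf("second: %s\n", alloc_item());
	return 0;
}
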
+static void
+qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
+ void (*process_item)(struct scsi_qla_host *vha,
+ struct purex_item *pkt))
+{
+ struct purex_list *list = &vha->purex_list;
+ ulong flags;
+
+ pkt->process_item = process_item;
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_add_tail(&pkt->list, &list->head);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
+}
+
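Queueing is the producer half: insert under the list lock, then raise a flag so a worker (the DPC thread in the driver) knows to drain the list. A compact userspace model, with a pthread mutex standing in for the irqsave spinlock:

#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *list_head;
static int process_purex_iocb;		/* models the PROCESS_PUREX_IOCB bit */

static void queue_item(struct item *it)
{
	pthread_mutex_lock(&list_lock);
	it->next = list_head;		/* simple push; driver uses list_add_tail */
	list_head = it;
	pthread_mutex_unlock(&list_lock);
	process_purex_iocb = 1;		/* wake the consumer */
}

int main(void)
{
	struct item it = { 0 };

	queue_item(&it);
	printf("flag=%d head=%p\n", process_purex_iocb, (void *)list_head);
	return 0;
}
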
+/**
+ * qla24xx_copy_std_pkt() - Copy over a purex ELS which is
+ * contained in a single IOCB.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ */
+static struct purex_item
+*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
+{
+ struct purex_item *item;
+
+ item = qla24xx_alloc_purex_item(vha,
+ QLA_DEFAULT_PAYLOAD_SIZE);
+ if (!item)
+ return item;
+
+ memcpy(&item->iocb, pkt, sizeof(item->iocb));
+ return item;
+}
+
+/**
+ * qla27xx_copy_fpin_pkt() - Copy over FPIN packets that can
+ * span multiple IOCBs.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ * @rsp: Response queue
+ */
+static struct purex_item *
+qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp)
+{
+ struct purex_entry_24xx *purex = *pkt;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0;
+ uint16_t entry_count, entry_count_remaining;
+ struct purex_item *item;
+ void *fpin_pkt = NULL;
+
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
+ - PURX_ELS_HEADER_SIZE;
+ pending_bytes = total_bytes;
+ entry_count = entry_count_remaining = purex->entry_count;
+ no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
+ sizeof(purex->els_frame_payload) : pending_bytes;
+ ql_log(ql_log_info, vha, 0x509a,
+ "FPIN ELS, frame_size 0x%x, entry count %d\n",
+ total_bytes, entry_count);
+
+ item = qla24xx_alloc_purex_item(vha, total_bytes);
+ if (!item)
+ return item;
+
+ fpin_pkt = &item->iocb;
+
+ memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "Ran out of IOCBs, partial data 0x%x\n",
+ buffer_copy_offset);
+ cpu_relax();
+ continue;
+ }
+
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy(((uint8_t *)fpin_pkt +
+ buffer_copy_offset), new_pkt->data,
+ no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+				    "Attempt to copy more than we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy(((uint8_t *)fpin_pkt +
+ buffer_copy_offset), new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ qla24xx_free_purex_item(item);
+ return NULL;
+ }
+ } while (entry_count_remaining > 0);
+ host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
+ return item;
+}
+
/**
 * qla2x00_async_event() - Process asynchronous events.
* @vha: SCSI driver HA context
@@ -648,7 +1130,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+ handles[0] = make_handle(mb[2], mb[1]);
handle_cnt = 1;
break;
case MBA_CMPLT_1_16BIT:
@@ -687,10 +1169,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
mb[0] = MBA_SCSI_COMPLETION;
break;
case MBA_CMPLT_2_32BIT:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
- handles[1] = le32_to_cpu(
- ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
- RD_MAILBOX_REG(ha, reg, 6));
+ handles[0] = make_handle(mb[2], mb[1]);
+ handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
+ RD_MAILBOX_REG(ha, reg, 6));
handle_cnt = 2;
mb[0] = MBA_SCSI_COMPLETION;
break;
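The make_handle() conversion is mechanical: as the replaced expression above shows, the old open-coded form packed mb[2] into the upper 16 bits and mb[1] into the lower 16. A one-line equivalent of that encoding:

#include <stdio.h>
#include <stdint.h>

/* First argument lands in the upper 16 bits, second in the lower 16. */
static inline uint32_t make_handle(uint16_t x, uint16_t y)
{
	return ((uint32_t)x << 16) | y;
}

int main(void)
{
	printf("0x%08x\n", make_handle(2, 0x1f));	/* -> 0x0002001f */
	return 0;
}
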
@@ -716,16 +1197,31 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) ?
- RD_REG_WORD(&reg24->mailbox7) : 0;
- ql_log(ql_log_warn, vha, 0x5003,
- "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
- "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
- ha->fw_dump_mpi =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- RD_REG_WORD(&reg24->mailbox7) & BIT_8;
- ha->isp_ops->fw_dump(vha, 1);
+ mbx = 0;
+
+ vha->hw_err_cnt++;
+
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ u16 m[4];
+
+ m[0] = rd_reg_word(&reg24->mailbox4);
+ m[1] = rd_reg_word(&reg24->mailbox5);
+ m[2] = rd_reg_word(&reg24->mailbox6);
+ mbx = m[3] = rd_reg_word(&reg24->mailbox7);
+
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
+ mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
+ } else
+ ql_log(ql_log_warn, vha, 0x5003,
+			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
+ mb[1], mb[2], mb[3]);
+
+ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ rd_reg_word(&reg24->mailbox7) & BIT_8)
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
ha->flags.fw_init_done = 0;
QLA_FW_STOPPED(ha);
@@ -758,6 +1254,8 @@ skip_rio:
ql_log(ql_log_warn, vha, 0x5006,
"ISP Request Transfer Error (%x).\n", mb[1]);
+ vha->hw_err_cnt++;
+
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -765,6 +1263,8 @@ skip_rio:
ql_log(ql_log_warn, vha, 0x5007,
"ISP Response Transfer Error (%x).\n", mb[1]);
+ vha->hw_err_cnt++;
+
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -813,23 +1313,31 @@ skip_rio:
"LOOP UP detected (%s Gbps).\n",
qla2x00_get_link_speed_str(ha, ha->link_data_rate));
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mb[2] & BIT_0)
+ ql_log(ql_log_info, vha, 0x11a0,
+ "FEC=enabled (link up).\n");
+ }
+
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
- if (AUTO_DETECT_SFP_SUPPORT(vha)) {
- set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ if (vha->link_down_time < vha->hw->port_down_retry_count) {
+ vha->short_link_down_cnt++;
+ vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
}
+
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
SAVE_TOPO(ha);
ha->flags.lip_ae = 0;
ha->current_topology = 0;
+ vha->link_down_time = 0;
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
- ? RD_REG_WORD(&reg24->mailbox4) : 0;
- mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ ? rd_reg_word(&reg24->mailbox4) : 0;
+ mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
: mbx;
ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
@@ -846,9 +1354,7 @@ skip_rio:
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
- void *wwpn = ha->init_cb->port_name;
-
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ memcpy(vha->port_name, ha->port_name, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
@@ -1086,9 +1592,9 @@ global_port_update:
if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
break;
- ql_dbg(ql_dbg_async, vha, 0x5013,
- "RSCN database changed -- %04x %04x %04x.\n",
- mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x5013,
+ "RSCN database changed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
@@ -1119,6 +1625,19 @@ global_port_update:
qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
}
break;
+ case MBA_CONGN_NOTI_RECV:
+ if (!ha->flags.scm_enabled ||
+ mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
+ break;
+
+ if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
+ ql_dbg(ql_dbg_async, vha, 0x509b,
+ "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
+ } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
+ ql_log(ql_log_warn, vha, 0x509b,
+ "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
+ }
+ break;
/* case MBA_RIO_RESPONSE: */
case MBA_ZIO_RESPONSE:
ql_dbg(ql_dbg_async, vha, 0x5015,
@@ -1134,6 +1653,7 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5016,
"Discard RND Frame -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ vha->interface_err_cnt++;
break;
case MBA_TRACE_NOTIFICATION:
@@ -1196,7 +1716,7 @@ global_port_update:
break;
case MBA_IDC_NOTIFY:
if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
(mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
@@ -1210,11 +1730,11 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
- /* fall through */
+ fallthrough;
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
- /* Fallthru */
+ fallthrough;
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
IS_QLA8044(ha))
@@ -1223,25 +1743,13 @@ global_port_update:
case MBA_IDC_AEN:
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- ha->flags.fw_init_done = 0;
- ql_log(ql_log_warn, vha, 0xffff,
- "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
- mb[0], mb[1], mb[2], mb[3]);
-
- if ((mb[1] & BIT_8) ||
- (mb[2] & BIT_8)) {
- ql_log(ql_log_warn, vha, 0xd013,
- "MPI Heartbeat stop. FW dump needed\n");
- ha->fw_dump_mpi = 1;
- ha->isp_ops->fw_dump(vha, 1);
- }
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ vha->hw_err_cnt++;
+ qla27xx_handle_8200_aen(vha, mb);
} else if (IS_QLA83XX(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
+ mb[5] = rd_reg_word(&reg24->mailbox5);
+ mb[6] = rd_reg_word(&reg24->mailbox6);
+ mb[7] = rd_reg_word(&reg24->mailbox7);
qla83xx_handle_8200_aen(vha, mb);
} else {
ql_dbg(ql_dbg_async, vha, 0x5052,
@@ -1251,9 +1759,13 @@ global_port_update:
break;
case MBA_DPORT_DIAGNOSTICS:
+ if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
+ (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
+ memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
static char *results[] = {
"start", "done(pass)", "done(error)", "undefined" };
@@ -1284,13 +1796,16 @@ global_port_update:
case MBA_TEMPERATURE_ALERT:
ql_dbg(ql_dbg_async, vha, 0x505e,
"TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
- if (mb[1] == 0x12)
- schedule_work(&ha->board_disable);
break;
case MBA_TRANS_INSERT:
ql_dbg(ql_dbg_async, vha, 0x5091,
"Transceiver Insertion: %04x\n", mb[1]);
+ set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
+ break;
+
+ case MBA_TRANS_REMOVE:
+ ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
break;
default:
@@ -1353,35 +1868,38 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
- srb_t *sp = NULL;
+ srb_t *sp;
uint16_t index;
+ if (pkt->handle == QLA_SKIP_HANDLE)
+ return NULL;
+
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
- "Invalid command index (%x) type %8ph.\n",
- index, iocb);
+ "%s: Invalid command index (%x) type %8ph.\n",
+ func, index, iocb);
if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- goto done;
+ return NULL;
}
sp = req->outstanding_cmds[index];
if (!sp) {
ql_log(ql_log_warn, vha, 0x5032,
- "Invalid completion handle (%x) -- timed-out.\n", index);
- return sp;
+ "%s: Invalid completion handle (%x) -- timed-out.\n",
+ func, index);
+ return NULL;
}
if (sp->handle != index) {
ql_log(ql_log_warn, vha, 0x5033,
- "SRB handle (%x) mismatch %x.\n", sp->handle, index);
+ "%s: SRB handle (%x) mismatch %x.\n", func,
+ sp->handle, index);
return NULL;
}
req->outstanding_cmds[index] = NULL;
-
-done:
return sp;
}
@@ -1476,6 +1994,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct mbx_24xx_entry *pkt)
{
const char func[] = "MBX-IOCB2";
+ struct qla_hw_data *ha = vha->hw;
srb_t *sp;
struct srb_iocb *si;
u16 sz, i;
@@ -1485,11 +2004,23 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
+ if (sp->type == SRB_SCSI_CMD ||
+ sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_TM_CMD) {
+ ql_log(ql_log_warn, vha, 0x509d,
+ "Inconsistent event entry type %d\n", sp->type);
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
si = &sp->u.iocb_cmd;
sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
for (i = 0; i < sz; i++)
- si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
+ si->u.mbx.in_mb[i] = pkt->mb[i];
res = (si->u.mbx.in_mb[0] & MBS_MASK);
@@ -1587,9 +2118,10 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
static void
-qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
+ struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
@@ -1597,18 +2129,58 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
- int res;
+ int res, logit = 1;
struct srb_iocb *els;
+ uint n;
+ scsi_qla_host_t *vha;
+ struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
- sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
if (!sp)
return;
+ bsg_job = sp->u.bsg_job;
+ vha = sp->vha;
type = NULL;
+
+ comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+ fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+ fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
+ type = "rpt hst";
+ break;
+ case SRB_ELS_CMD_HST_NOLOGIN:
type = "els";
+ {
+ struct els_entry_24xx *els = (void *)pkt;
+ struct qla_bsg_auth_els_request *p =
+ (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+ ql_dbg(ql_dbg_user, vha, 0x700f,
+ "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd),
+ e->d_id[2], e->d_id[1], e->d_id[0],
+ comp_status, p->e.extra_rx_xchg_address, bsg_job);
+
+ if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
+ if (sp->remap.remapped) {
+ n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ sp->remap.rsp.buf,
+ sp->remap.rsp.len);
+ ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
+ "%s: SG copied %x of %x\n",
+ __func__, n, sp->remap.rsp.len);
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x700f,
+					"%s: NOT REMAPPED (error)\n",
+ __func__);
+ }
+ }
+ }
break;
case SRB_CT_CMD:
type = "ct pass-through";
@@ -1638,34 +2210,76 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
}
- comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
- fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
-
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
- els->u.els_plogi.fw_status[0] = fw_status[0];
- els->u.els_plogi.fw_status[1] = fw_status[1];
- els->u.els_plogi.fw_status[2] = fw_status[2];
- els->u.els_plogi.comp_status = fw_status[0];
+ els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
+ els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
+ els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
+ els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
if (comp_status == CS_COMPLETE) {
res = DID_OK << 16;
} else {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- els->u.els_plogi.len =
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count);
+ els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
+ ese->total_byte_count));
+
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
+ __func__, e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ logit = 0;
+ }
+
+ } else if (comp_status == CS_PORT_LOGGED_OUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x911e,
+ "%s %d schedule session deletion\n",
+ __func__, __LINE__);
+
+ els->u.els_plogi.len = 0;
+ res = DID_IMM_RETRY << 16;
+ qlt_schedule_sess_for_deletion(sp->fcport);
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
}
+
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (logit) {
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ }
+ if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s rcv reject. Sched delete\n", __func__);
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ }
+ } else if (logit) {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
+ }
}
- ql_dbg(ql_dbg_user, vha, 0x503f,
- "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
- type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
goto els_ct_done;
}
@@ -1681,23 +2295,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+ le32_to_cpu(ese->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
} else {
ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status,
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_1),
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_2));
+ le32_to_cpu(ese->error_subcode_1),
+ le32_to_cpu(ese->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
}
@@ -1727,6 +2338,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
struct srb_iocb *lio;
uint16_t *data;
uint32_t iop[2];
+ int logit = 1;
sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
if (!sp)
@@ -1754,11 +2366,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, fcport->port_name, sp->handle,
- fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ ql_dbg(ql_dbg_async, sp->vha, 0x5036,
+ "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
le32_to_cpu(logio->io_parameter[0]));
vha->hw->exch_starvation = 0;
@@ -1775,6 +2385,10 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
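+	/* Cache PLOGI IO parameter 5 and flag FC-SP (security) capable ports. */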
+ lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
+ if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
+ fcport->flags |= FCF_FCSP_DEVICE;
+
iop[0] = le32_to_cpu(logio->io_parameter[0]);
if (iop[0] & BIT_4) {
fcport->port_type = FCT_TARGET;
@@ -1802,9 +2416,11 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_PORTID_USED:
data[0] = MBS_PORT_ID_USED;
data[1] = LSW(iop[1]);
+ logit = 0;
break;
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
+ logit = 0;
break;
case LSC_SCODE_CMD_FAILED:
if (iop[1] == 0x0606) {
@@ -1831,20 +2447,26 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* fall through */
+ fallthrough;
default:
data[0] = MBS_COMMAND_ERROR;
break;
}
- ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, fcport->port_name,
- sp->handle, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
- le16_to_cpu(logio->comp_status),
- le32_to_cpu(logio->io_parameter[0]),
- le32_to_cpu(logio->io_parameter[1]));
+ if (logit)
+ ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
+ "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
+ le16_to_cpu(logio->comp_status),
+ le32_to_cpu(logio->io_parameter[0]),
+ le32_to_cpu(logio->io_parameter[1]));
+ else
+ ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
+ "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
+ le16_to_cpu(logio->comp_status),
+ le32_to_cpu(logio->io_parameter[0]),
+ le32_to_cpu(logio->io_parameter[1]));
logio_done:
sp->done(sp, 0);
@@ -1859,11 +2481,13 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
srb_t *sp;
struct srb_iocb *iocb;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+ u16 comp_status;
sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
if (!sp)
return;
+ comp_status = le16_to_cpu(sts->comp_status);
iocb = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
@@ -1877,10 +2501,11 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
- type, sp->handle, sts->comp_status);
+ type, sp->handle, comp_status);
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
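+		/* FCP response data is big-endian on the wire; swap it in
+		 * place before it is parsed. */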
+ host_to_fcp_swap(sts->data, sizeof(sts->data));
if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
@@ -1893,6 +2518,30 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
}
}
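+	/* On transport-level errors for an online port, schedule the session
+	 * for deletion so it can be re-established. */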
+ switch (comp_status) {
+ case CS_PORT_LOGGED_OUT:
+ case CS_PORT_CONFIG_CHG:
+ case CS_PORT_BUSY:
+ case CS_INCOMPLETE:
+ case CS_PORT_UNAVAILABLE:
+ case CS_TIMEOUT:
+ case CS_RESET:
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+			    "Port to be marked lost on fcport=%02x%02x%02x, current port state=%s comp_status=%x.\n",
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa,
+ port_state_str[FCS_ONLINE],
+ comp_status);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ default:
+ break;
+ }
+
if (iocb->u.tmf.data != QLA_SUCCESS)
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
sts, sizeof(*sts));
@@ -1909,7 +2558,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS;
- uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ __le16 comp_status = sts->comp_status;
+ int logit = 0;
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
@@ -1919,6 +2569,14 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (unlikely(iocb->u.nvme.aen_op))
atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
+ else
+ sp->qpair->cmd_completion_cnt++;
+
+ if (unlikely(comp_status != CS_COMPLETE))
+ logit = 1;
+
+ fd->transferred_length = fd->payload_length -
+ le32_to_cpu(sts->residual_len);
/*
* State flags: Bit 6 and 0.
@@ -1930,16 +2588,28 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
*/
if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
iocb->u.nvme.rsp_pyld_len = 0;
+ } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
+ (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
+ /* Response already DMA'd to fd->rspaddr. */
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
} else if ((state_flags & SF_FCP_RSP_DMA)) {
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ /*
+ * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
+ * as an error.
+ */
+ iocb->u.nvme.rsp_pyld_len = 0;
+ fd->transferred_length = 0;
+ ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
+ "Unexpected values in NVMe_RSP IU.\n");
+ logit = 1;
} else if (state_flags & SF_NVME_ERSP) {
uint32_t *inbuf, *outbuf;
uint16_t iter;
inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr;
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
- if (unlikely(iocb->u.nvme.rsp_pyld_len >
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
+ if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
sizeof(struct nvme_fc_ersp_iu))) {
if (ql_mask_match(ql_dbg_io)) {
WARN_ONCE(1, "Unexpected response payload length %u.\n",
@@ -1949,22 +2619,34 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb->u.nvme.rsp_pyld_len);
}
iocb->u.nvme.rsp_pyld_len =
- sizeof(struct nvme_fc_ersp_iu);
+ cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
}
- iter = iocb->u.nvme.rsp_pyld_len >> 2;
+ iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
- } else { /* unhandled case */
- ql_log(ql_log_warn, fcport->vha, 0x503a,
- "NVME-%s error. Unhandled state_flags of %x\n",
- sp->name, state_flags);
}
- fd->transferred_length = fd->payload_length -
- le32_to_cpu(sts->residual_len);
+ if (state_flags & SF_NVME_ERSP) {
+ struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
+ u32 tgt_xfer_len;
+
+ tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
+ if (fd->transferred_length != tgt_xfer_len) {
+ ql_log(ql_log_warn, fcport->vha, 0x3079,
+ "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
+ tgt_xfer_len, fd->transferred_length);
+ logit = 1;
+ } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
+ /*
+ * Do not log if this is just an underflow and there
+ * is no data loss.
+ */
+ logit = 0;
+ }
+ }
- if (unlikely(comp_status != CS_COMPLETE))
- ql_log(ql_log_warn, fcport->vha, 0x5060,
+ if (unlikely(logit))
+ ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -1974,7 +2656,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
* If transport error then Failure (HBA rejects request)
* otherwise transport will handle.
*/
- switch (comp_status) {
+ switch (le16_to_cpu(comp_status)) {
case CS_COMPLETE:
break;
@@ -1982,7 +2664,16 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
fcport->nvme_flag |= NVME_FLAG_RESETTING;
- /* fall through */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+			    "Port to be marked lost on fcport=%06x, current "
+			    "port state=%s comp_status=%x.\n",
+ fcport->d_id.b24, port_state_str[FCS_ONLINE],
+ comp_status);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ fallthrough;
case CS_ABORTED:
case CS_PORT_BUSY:
fd->transferred_length = 0;
@@ -2116,7 +2807,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
/* Adjust ring index */
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
@@ -2207,9 +2898,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* For type 3: ref & app tag is all 'f's
* For type 0,1,2: app tag is all 'f's
*/
- if ((a_app_tag == T10_PI_APP_ESCAPE) &&
- ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
- (a_ref_tag == T10_PI_REF_ESCAPE))) {
+ if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
+ (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
+ a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd);
@@ -2261,31 +2952,22 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
/* check guard */
if (e_guard != a_guard) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
/* check ref tag */
if (e_ref_tag != a_ref_tag) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
/* check appl tag */
if (e_app_tag != a_app_tag) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2469,7 +3151,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int logit = 1;
int res = 0;
uint16_t state_flags = 0;
- uint16_t retry_delay = 0;
+ uint16_t sts_qual = 0;
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
@@ -2515,11 +3197,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
return;
}
-
- if (sp->abort)
- sp->aborted = 1;
- else
- sp->completed = 1;
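+	/* Return the IOCB resources this command reserved on its qpair. */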
+ qla_put_iocbs(sp->qpair, &sp->iores);
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
@@ -2548,6 +3226,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
/* Fast path completion. */
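+	/* EDIF: process any pending RX SA delete for this I/O before completing. */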
+ qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
+ sp->qpair->cmd_completion_cnt++;
+
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -2585,13 +3266,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
- /* Valid values of the retry delay timer are 0x1-0xffef */
- if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
- retry_delay = sts24->retry_delay & 0x3fff;
- ql_dbg(ql_dbg_io, sp->vha, 0x3033,
- "%s: scope=%#x retry_delay=%#x\n", __func__,
- sts24->retry_delay >> 14, retry_delay);
- }
+ sts_qual = le16_to_cpu(sts24->status_qualifier);
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -2629,9 +3304,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* Check retry_delay_timer value if we receive a busy or
* queue full.
*/
- if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
- lscsi_status == SAM_STAT_BUSY)
- qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+ if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
+ lscsi_status == SAM_STAT_BUSY))
+ qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
/*
* Based on Host and scsi status generate status code for Linux
@@ -2683,9 +3358,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
- "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
- resid, scsi_bufflen(cp));
+ ql_log(ql_log_warn, fcport->vha, 0x301d,
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ vha->interface_err_cnt++;
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -2708,9 +3385,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* task not completed.
*/
- ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
- "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
- resid, scsi_bufflen(cp));
+ ql_log(ql_log_warn, fcport->vha, 0x301f,
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ vha->interface_err_cnt++;
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -2754,6 +3433,7 @@ check_scsi_status:
case CS_PORT_UNAVAILABLE:
case CS_TIMEOUT:
case CS_RESET:
+ case CS_EDIF_INV_REQ:
/*
* We are going to have the fc class block the rport
@@ -2794,6 +3474,7 @@ check_scsi_status:
case CS_TRANSPORT:
res = DID_ERROR << 16;
+ vha->hw_err_cnt++;
if (!IS_PI_SPLIT_DET_CAPABLE(ha))
break;
@@ -2814,6 +3495,7 @@ check_scsi_status:
ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
pkt, sizeof(*sts24));
res = DID_ERROR << 16;
+ vha->hw_err_cnt++;
break;
default:
res = DID_ERROR << 16;
@@ -2823,14 +3505,12 @@ check_scsi_status:
out:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
- "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
- "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
- comp_status, scsi_status, res, vha->host_no,
- cp->device->id, cp->device->lun, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
- cp->cmnd, scsi_bufflen(cp), rsp_info_len,
- resid_len, fw_resid_len, sp, cp);
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+ comp_status, scsi_status, res, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+ cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len, sp, cp);
if (rsp->status_srb == NULL)
sp->done(sp, res);
@@ -2938,11 +3618,13 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
+ qla_put_iocbs(sp->qpair, &sp->iores);
sp->done(sp, res);
return 0;
}
break;
+ case SA_UPDATE_IOCB_TYPE:
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -2964,7 +3646,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -2980,11 +3662,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)&reg->mailbox1;
+ wptr = &reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
mboxes >>= 1;
wptr++;
@@ -2997,6 +3679,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "ABT_IOCB";
srb_t *sp;
+ srb_t *orig_sp = NULL;
struct srb_iocb *abt;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
@@ -3004,7 +3687,12 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = pkt->comp_status;
+ orig_sp = sp->cmd_sp;
+ /* Need to pass original sp */
+ if (orig_sp)
+ qla_nvme_abort_process_comp_status(pkt, orig_sp);
+
sp->done(sp, 0);
}
@@ -3024,6 +3712,46 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
}
/**
+ * qla_chk_cont_iocb_avail - check that all continuation iocbs have arrived
+ *   before iocb processing can start.
+ * @vha: host adapter pointer
+ * @rsp: response queue
+ * @pkt: head iocb describing how many continuation iocbs follow
+ * @rsp_q_in: response queue in pointer, read from the shadow or device register
+ * Return: 0 if all iocbs have arrived, non-zero if they have not.
+ */
+static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
+{
+ int start_pkt_ring_index;
+ u32 iocb_cnt = 0;
+ int rc = 0;
+
+ if (pkt->entry_count == 1)
+ return rc;
+
+	/* ring_index was pre-incremented; set it back to the current pkt. */
+ if (rsp->ring_index == 0)
+ start_pkt_ring_index = rsp->length - 1;
+ else
+ start_pkt_ring_index = rsp->ring_index - 1;
+
+ if (rsp_q_in < start_pkt_ring_index)
+ /* q in ptr is wrapped */
+ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
+ else
+ iocb_cnt = rsp_q_in - start_pkt_ring_index;
+
+ if (iocb_cnt < pkt->entry_count)
+ rc = -EIO;
+
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
+
+ return rc;
+}
+
+/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
* @rsp: response queue
@@ -3033,15 +3761,33 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
{
struct sts_entry_24xx *pkt;
struct qla_hw_data *ha = vha->hw;
+ struct purex_entry_24xx *purex_entry;
+ struct purex_item *pure_item;
+ u16 rsp_in = 0, cur_ring_index;
+ int is_shadow_hba;
if (!ha->flags.fw_started)
return;
- if (rsp->qpair->cpuid != smp_processor_id())
+ if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+ rsp->qpair->rcv_intr = 1;
qla_cpu_update(rsp->qpair, smp_processor_id());
+ }
- while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
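+/* Latch the response-queue in pointer: shadow-register capable HBAs mirror
+ * it in host memory; otherwise read the device register. */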
+#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
+ do { \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
+ } while (0)
+
+ is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
+
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
+
+ while (rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+ cur_ring_index = rsp->ring_index;
rsp->ring_index++;
if (rsp->ring_index == rsp->length) {
@@ -3083,6 +3829,14 @@ process_err:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
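+			/* In initiator mode, defer ABTS processing to the
+			 * purex work queue. */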
+ if (qla_ini_mode_enabled(vha)) {
+ pure_item = qla24xx_copy_std_pkt(vha, pkt);
+ if (!pure_item)
+ break;
+ qla24xx_queue_purex_item(vha, pure_item,
+ qla24xx_process_abts);
+ break;
+ }
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
/* ensure that the ATIO queue is empty */
@@ -3092,7 +3846,7 @@ process_err:
} else {
qlt_24xx_process_atio_queue(vha, 1);
}
- /* fall through */
+ fallthrough;
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3127,12 +3881,66 @@ process_err:
qla_ctrlvp_completed(vha, rsp->req,
(struct vp_ctrl_entry_24xx *)pkt);
break;
+ case PUREX_IOCB_TYPE:
+ purex_entry = (void *)pkt;
+ switch (purex_entry->els_frame_payload[3]) {
+ case ELS_RDP:
+ pure_item = qla24xx_copy_std_pkt(vha, pkt);
+ if (!pure_item)
+ break;
+ qla24xx_queue_purex_item(vha, pure_item,
+ qla24xx_process_purex_rdp);
+ break;
+ case ELS_FPIN:
+ if (!vha->hw->flags.scm_enabled) {
+ ql_log(ql_log_warn, vha, 0x5094,
+ "SCM not active for this port\n");
+ break;
+ }
+ pure_item = qla27xx_copy_fpin_pkt(vha,
+ (void **)&pkt, &rsp);
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
+ if (!pure_item)
+ break;
+ qla24xx_queue_purex_item(vha, pure_item,
+ qla27xx_process_purex_fpin);
+ break;
+
+ case ELS_AUTH_ELS:
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ /*
+				 * ring_ptr and ring_index were
+				 * pre-incremented above. Reset them
+				 * back to the current packet and wait
+				 * for the next interrupt, when all
+				 * IOCBs should have arrived, before
+				 * re-processing.
+ */
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "Defer processing ELS opcode %#x...\n",
+ purex_entry->els_frame_payload[3]);
+ return;
+ }
+ qla24xx_auth_els(vha, (void **)&pkt, &rsp);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x509c,
+ "Discarding ELS Request opcode 0x%x\n",
+ purex_entry->els_frame_payload[3]);
+ }
+ break;
+ case SA_UPDATE_IOCB_TYPE:
+ qla28xx_sa_update_iocb_entry(vha, rsp->req,
+ (struct sa_update_28xx *)pkt);
+ break;
+
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
- "Received unknown response pkt type %x "
- "entry status=%x.\n",
- pkt->entry_type, pkt->entry_status);
+ "Received unknown response pkt type 0x%x entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -3143,9 +3951,9 @@ process_err:
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
} else {
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
}
@@ -3162,13 +3970,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
return;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
- for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
+ for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3177,11 +3985,11 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto next_test;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
- for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
+ for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3190,13 +3998,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto done;
next_test:
- if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
+ if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
ql_log(ql_log_info, vha, 0x504c,
"Additional code -- 0x55AA.\n");
done:
- WRT_REG_DWORD(&reg->iobase_window, 0x0000);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x0000);
+ rd_reg_dword(&reg->iobase_window);
}
/**
@@ -3240,14 +4048,14 @@ qla24xx_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_warn, vha, 0x504b,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3255,7 +4063,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3272,9 +4080,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3294,8 +4102,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
ndelay(3500);
}
@@ -3334,8 +4142,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3369,22 +4177,23 @@ qla24xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_info, vha, 0x5050,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
hccr);
qla2xxx_check_risc_status(vha);
+ vha->hw_err_cnt++;
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3401,9 +4210,9 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3423,7 +4232,7 @@ qla24xx_msix_default(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3442,6 +4251,25 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
struct device_reg_24xx __iomem *reg;
unsigned long flags;
@@ -3453,15 +4281,12 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
}
ha = qpair->hw;
- /* Clear the interrupt, if enabled, for this response queue */
- if (unlikely(!ha->flags.disable_msix_handshake)) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
- queue_work(ha->wq, &qpair->q_work);
+ queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
@@ -3478,6 +4303,7 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q },
+ { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -3505,10 +4331,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
/* user wants to control IRQ setting for target mode */
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
- ha->msix_count, PCI_IRQ_MSIX);
+ min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
+ PCI_IRQ_MSIX);
} else
ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
- ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&desc);
if (ret < 0) {
@@ -3608,16 +4436,12 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
@@ -3711,6 +4535,8 @@ skip_msi:
ql_dbg(ql_dbg_init, vha, 0x0125,
"INTa mode: Enabled.\n");
ha->flags.mr_intr_valid = 1;
+		/* Set max_qpairs to 0, as neither MSI-X nor MSI is enabled */
+ ha->max_qpairs = 0;
}
clear_risc_ints:
@@ -3718,7 +4544,7 @@ clear_risc_ints:
goto fail;
spin_lock_irq(&ha->hardware_lock);
- WRT_REG_WORD(&reg->isp.semaphore, 0);
+ wrt_reg_word(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock);
fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9e09964f5c0e..359595a64664 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -10,6 +9,12 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+#ifdef CONFIG_PPC
+#define IS_PPCARCH true
+#else
+#define IS_PPCARCH false
+#endif
+
static struct mb_cmd_name {
uint16_t cmd;
const char *str;
@@ -59,6 +64,7 @@ static struct rom_cmd {
{ MBC_IOCB_COMMAND_A64 },
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
+ { MBC_SET_RNID_PARAMS },
{ MBC_GET_RNID_PARAMS },
{ MBC_GET_SET_ZIO_THRESHOLD },
};
@@ -102,11 +108,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
int rval, i;
unsigned long flags = 0;
device_reg_t *reg;
- uint8_t abort_active;
+ uint8_t abort_active, eeh_delay;
uint8_t io_lock_on;
uint16_t command = 0;
uint16_t *iptr;
- uint16_t __iomem *optr;
+ __le16 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -117,10 +123,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x1001,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -137,7 +142,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
-
+ eeh_delay = 0;
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -160,10 +165,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
/* check if ISP abort is active and return cmd with timeout */
- if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
- !is_rom_cmd(mcp->mb[0])) {
+ if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+ !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
ql_log(ql_log_info, vha, 0x1005,
"Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
mcp->mb[0]);
@@ -181,11 +186,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
+ vha->hw_err_cnt++;
atomic_dec(&ha->num_pend_mbx_stage1);
return QLA_FUNCTION_TIMEOUT;
}
atomic_dec(&ha->num_pend_mbx_stage1);
- if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ ha->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0xd035,
+ "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -209,11 +219,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
- optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ optr = &reg->isp82.mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
- optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
+ optr = &reg->isp24.mailbox0;
else
- optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
+ optr = MAILBOX_REG(ha, &reg->isp, 0);
iptr = mcp->mb;
command = mcp->mb[0];
@@ -223,12 +233,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- optr =
- (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
+ optr = MAILBOX_REG(ha, &reg->isp, 8);
if (mboxes & BIT_0) {
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
- WRT_REG_WORD(optr, *iptr);
+ wrt_reg_word(optr, *iptr);
+ } else {
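+			/* Zero mailboxes this command does not use so the
+			 * firmware never sees stale values. */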
+ wrt_reg_word(optr, 0);
}
mboxes >>= 1;
@@ -254,18 +265,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha))
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
+ eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -275,14 +294,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -301,7 +316,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Cmd=%x Polling Mode.\n", command);
if (IS_P3P_TYPE(ha)) {
- if (RD_REG_DWORD(&reg->isp82.hint) &
+ if (rd_reg_dword(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -309,20 +324,23 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
+ vha->hw_err_cnt++;
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
} else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
while (!ha->flags.mbox_int) {
if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -335,14 +353,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (time_after(jiffies, wait_time))
break;
- /*
- * Check if it's UNLOADING, cause we cannot poll in
- * this case, or else a NULL pointer dereference
- * is triggered.
- */
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
- return QLA_FUNCTION_TIMEOUT;
-
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
@@ -414,27 +424,29 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
- mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
- mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
- mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
- mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
- ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
- host_status = RD_REG_DWORD(&reg->isp24.host_status);
- hccr = RD_REG_DWORD(&reg->isp24.hccr);
+ mb[0] = rd_reg_word(&reg->isp24.mailbox0);
+ mb[1] = rd_reg_word(&reg->isp24.mailbox1);
+ mb[2] = rd_reg_word(&reg->isp24.mailbox2);
+ mb[3] = rd_reg_word(&reg->isp24.mailbox3);
+ mb[7] = rd_reg_word(&reg->isp24.mailbox7);
+ ictrl = rd_reg_dword(&reg->isp24.ictrl);
+ host_status = rd_reg_dword(&reg->isp24.host_status);
+ hccr = rd_reg_dword(&reg->isp24.hccr);
ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
mb[7], host_status, hccr);
+ vha->hw_err_cnt++;
} else {
mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
- ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ictrl = rd_reg_word(&reg->isp.ictrl);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
+ vha->hw_err_cnt++;
}
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
@@ -463,7 +475,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* a dump
*/
if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
rval = QLA_FUNCTION_TIMEOUT;
}
}
@@ -507,6 +519,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
"abort.\n", command, mcp->mb[0],
ha->flags.eeh_busy);
+ vha->hw_err_cnt++;
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -531,11 +544,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
 				    "mb[0]=0x%x. Scheduling ISP abort.\n",
command, mcp->mb[0]);
+ vha->hw_err_cnt++;
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* Allow next mbx cmd to come in. */
complete(&ha->mbx_cmd_comp);
- if (ha->isp_ops->abort_isp(vha)) {
+ if (ha->isp_ops->abort_isp(vha) &&
+ !ha->flags.eeh_busy) {
/* Failed. retry later. */
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
@@ -574,20 +589,31 @@ mbx_done:
if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
ql_dbg(ql_dbg_mbx, vha, 0x1198,
"host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
- RD_REG_DWORD(&reg->isp24.host_status),
- RD_REG_DWORD(&reg->isp24.ictrl),
- RD_REG_DWORD(&reg->isp24.istatus));
+ rd_reg_dword(&reg->isp24.host_status),
+ rd_reg_dword(&reg->isp24.ictrl),
+ rd_reg_dword(&reg->isp24.istatus));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1206,
"ctrl_status=%#x ictrl=%#x istatus=%#x\n",
- RD_REG_WORD(&reg->isp.ctrl_status),
- RD_REG_WORD(&reg->isp.ictrl),
- RD_REG_WORD(&reg->isp.istatus));
+ rd_reg_word(&reg->isp.ctrl_status),
+ rd_reg_word(&reg->isp.ictrl),
+ rd_reg_word(&reg->isp.istatus));
}
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
+ i = 500;
+ while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
+ /*
+		 * The caller of this mailbox has encountered a PCI error.
+		 * Hold the thread until the PCIe link reset completes to
+		 * make sure the caller does not unmap DMA buffers while
+		 * recovery is still in progress.
+ */
+ msleep(1);
+ i--;
+ }
return rval;
}
@@ -635,6 +661,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
ql_dbg(ql_dbg_mbx, vha, 0x1023,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
+ vha->hw_err_cnt++;
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
"Done %s.\n", __func__);
@@ -643,30 +670,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
return rval;
}
-#define EXTENDED_BB_CREDITS BIT_0
#define NVME_ENABLE_FLAG BIT_3
-static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
-{
- uint16_t mb4 = BIT_0;
-
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
-
- return mb4;
-}
-
-static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
-{
- uint16_t mb4 = BIT_0;
-
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- struct nvram_81xx *nv = ha->nvram;
-
- mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
- }
-
- return mb4;
-}
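+/* EXECUTE FIRMWARE reports EDIF hardware support via this mb[5] bit on ISP28xx. */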
+#define EDIF_HW_SUPPORT BIT_10
/*
* qla2x00_execute_fw
@@ -690,10 +695,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
struct qla_hw_data *ha = vha->hw;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ u8 semaphore = 0;
+#define EXE_FW_FORCE_SEMAPHORE BIT_7
+ u8 retry = 5;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
"Entered %s.\n", __func__);
+again:
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
@@ -703,25 +712,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[3] = 0;
mcp->mb[4] = 0;
mcp->mb[11] = 0;
- ha->flags.using_lr_setting = 0;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
- IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ql2xautodetectsfp) {
- if (ha->flags.detected_lr_sfp) {
- mcp->mb[4] |=
- qla25xx_set_sfp_lr_dist(ha);
- ha->flags.using_lr_setting = 1;
- }
- } else {
- struct nvram_81xx *nv = ha->nvram;
- /* set LR distance if specified in nvram */
- if (nv->enhanced_features &
- NEF_LR_DIST_ENABLE) {
- mcp->mb[4] |=
- qla25xx_set_nvr_lr_dist(ha);
- ha->flags.using_lr_setting = 1;
- }
- }
+
+ /* Enable BPM? */
+ if (ha->flags.lr_detected) {
+ mcp->mb[4] = BIT_0;
+ if (IS_BPM_RANGE_CAPABLE(ha))
+ mcp->mb[4] |=
+ ha->lr_distance << LR_DIST_FW_POS;
}
if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
@@ -739,6 +736,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
vha->min_supported_speed =
nv->min_supported_speed;
}
+
+ if (IS_PPCARCH)
+ mcp->mb[11] |= BIT_4;
}
if (ha->flags.exlogins_enabled)
@@ -747,8 +747,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
if (ha->flags.exchoffld_enabled)
mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
+ if (semaphore)
+ mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
+
mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
- mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
+ mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
} else {
mcp->mb[1] = LSW(risc_addr);
mcp->out_mb |= MBX_1;
@@ -763,8 +766,24 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
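+		/* mb[1] == 0x27 appears to mean the firmware semaphore is
+		 * held; retry with the force-semaphore bit set. */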
+ if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
+ mcp->mb[1] == 0x27 && retry) {
+ semaphore = 1;
+ retry--;
+ ql_dbg(ql_dbg_async, vha, 0x1026,
+ "Exe FW: force semaphore.\n");
+ goto again;
+ }
+
+ if (retry) {
+ retry--;
+ ql_dbg(ql_dbg_async, vha, 0x509d,
+ "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
+ goto again;
+ }
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ vha->hw_err_cnt++;
return rval;
}
@@ -794,6 +813,12 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
}
}
+ if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
+ ha->flags.edif_hw = 1;
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s: edif HW\n", __func__);
+ }
+
done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
"Done %s.\n", __func__);
@@ -873,7 +898,7 @@ qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
* Context:
* Kernel context.
*/
-#define CONFIG_XLOGINS_MEM 0x3
+#define CONFIG_XLOGINS_MEM 0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
@@ -900,8 +925,9 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x111b,
+ "EXlogin Failed=%x. MB0=%x MB11=%x\n",
+ rval, mcp->mb[0], mcp->mb[11]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
@@ -1120,6 +1146,21 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: FC-NVMe is Enabled (0x%x)\n",
__func__, ha->fw_attributes_h);
}
+
+ /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
+ if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
+ ql_log(ql_log_info, vha, 0xd302,
+ "Firmware supports NVMe2 0x%x\n",
+ ha->fw_attributes_ext[0]);
+ vha->flags.nvme2_enabled = 1;
+ }
+
+ if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
+ (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
+ ha->flags.edif_enabled = 1;
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s: edif is enabled\n", __func__);
+ }
}
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -1137,11 +1178,29 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
if (IS_QLA28XX(ha)) {
- if (mcp->mb[16] & BIT_10) {
- ql_log(ql_log_info, vha, 0xffff,
- "FW support secure flash updates\n");
+ if (mcp->mb[16] & BIT_10)
ha->flags.secure_fw = 1;
- }
+
+ ql_log(ql_log_info, vha, 0xffff,
+ "Secure Flash Update in FW: %s\n",
+ (ha->flags.secure_fw) ? "Supported" :
+ "Not Supported");
+ }
+
+ if (ha->flags.scm_supported_a &&
+ (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
+ ha->flags.scm_supported_f = 1;
+ ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
+ }
+ ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
+ (ha->flags.scm_supported_f) ? "Supported" :
+ "Not Supported");
+
+ if (vha->flags.nvme2_enabled) {
+ /* set BIT_15 of special feature control block for SLER */
+ ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
+			/* set BIT_14 of special feature control block for PI CTRL */
+ ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
}
}
@@ -1315,6 +1374,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
+ vha->hw_err_cnt++;
} else {
/*EMPTY*/
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
@@ -1405,17 +1465,20 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ if (!vha->hw->flags.fw_started)
+ return QLA_INVALID_COMMAND;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
- mcp->mb[2] = MSW(phys_addr);
- mcp->mb[3] = LSW(phys_addr);
+ mcp->mb[2] = MSW(LSD(phys_addr));
+ mcp->mb[3] = LSW(LSD(phys_addr));
mcp->mb[6] = MSW(MSD(phys_addr));
mcp->mb[7] = LSW(MSD(phys_addr));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_2|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = tov;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1424,13 +1487,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
} else {
- sts_entry_t *sts_entry = (sts_entry_t *) buffer;
+ sts_entry_t *sts_entry = buffer;
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
- "Done %s.\n", __func__);
+ "Done %s (status=%x).\n", __func__,
+ sts_entry->entry_status);
}
return rval;
@@ -1475,7 +1539,7 @@ qla2x00_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
else
req = vha->req;
@@ -1649,7 +1713,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
if (IS_FWI2_CAPABLE(vha->hw))
mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
- mcp->in_mb |= MBX_15;
+ mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
+
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1702,8 +1767,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
}
}
- if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
+ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
vha->bbcr = mcp->mb[15];
+ if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
+ ql_log(ql_log_info, vha, 0x11a4,
+ "SCM: EDC ELS completed, flags 0x%x\n",
+ mcp->mb[21]);
+ }
+ if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
+ vha->hw->flags.scm_enabled = 1;
+ vha->scm_fabric_connection_flags |=
+ SCM_FLAG_RDF_COMPLETED;
+ ql_log(ql_log_info, vha, 0x11a5,
+ "SCM: RDF ELS completed, flags 0x%x\n",
+ mcp->mb[23]);
+ }
+ }
}
return rval;
@@ -1816,6 +1895,17 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->mb[14] = sizeof(*ha->ex_init_cb);
mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
}
+
+ if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
+ mcp->mb[1] |= BIT_1;
+ mcp->mb[16] = MSW(ha->sf_init_cb_dma);
+ mcp->mb[17] = LSW(ha->sf_init_cb_dma);
+ mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
+ mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
+ mcp->mb[15] = sizeof(*ha->sf_init_cb);
+ mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
+ }
+
/* 1 and 2 should normally be captured. */
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
@@ -2045,6 +2135,57 @@ gpd_error_out:
return rval;
}
+int
+qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
+ struct port_database_24xx *pdb)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ dma_addr_t pdb_dma;
+ int rval;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
+ "Entered %s.\n", __func__);
+
+ memset(pdb, 0, sizeof(*pdb));
+
+ pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
+ sizeof(*pdb), DMA_FROM_DEVICE);
+ if (!pdb_dma) {
+ ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ mcp->mb[0] = MBC_GET_PORT_DATABASE;
+ mcp->mb[1] = nport_handle;
+ mcp->mb[2] = MSW(LSD(pdb_dma));
+ mcp->mb[3] = LSW(LSD(pdb_dma));
+ mcp->mb[6] = MSW(MSD(pdb_dma));
+ mcp->mb[7] = LSW(MSD(pdb_dma));
+ mcp->mb[9] = 0;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = sizeof(*pdb);
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = vha->hw->login_timeout * 2;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x111a,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
+ sizeof(*pdb), DMA_FROM_DEVICE);
+
+ return rval;
+}
+
/*
* qla2x00_get_firmware_state
* Get adapter firmware state.
@@ -2384,7 +2525,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
- lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->handle = make_handle(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
if (opt & BIT_0)
@@ -2654,7 +2795,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
- lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->handle = make_handle(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
@@ -2905,8 +3046,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
ha->orig_fw_iocb_count = mcp->mb[10];
if (ha->flags.npiv_supported)
ha->max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
ha->fw_max_fcf_count = mcp->mb[12];
}
@@ -2928,7 +3068,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -2968,6 +3109,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -3001,7 +3144,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
struct qla_hw_data *ha = vha->hw;
@@ -3060,20 +3203,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter, dwords;
+ uint32_t *iter = (uint32_t *)stats;
+ ushort dwords = sizeof(*stats)/sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
- mc.mb[2] = MSW(stats_dma);
- mc.mb[3] = LSW(stats_dma);
+ mc.mb[2] = MSW(LSD(stats_dma));
+ mc.mb[3] = LSW(LSD(stats_dma));
mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma));
- mc.mb[8] = sizeof(struct link_statistics) / 4;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16(options);
+ mc.mb[8] = dwords;
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = options;
rval = qla24xx_send_mb_cmd(vha, &mc);
@@ -3086,8 +3230,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
"Done %s.\n", __func__);
/* Re-endianize - firmware data is le32. */
- dwords = sizeof(struct link_statistics) / 4;
- iter = &stats->link_fail_cnt;
for ( ; dwords--; iter++)
le32_to_cpus(iter);
}
@@ -3111,16 +3253,16 @@ qla24xx_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct qla_qpair *qpair = sp->qpair;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
else
- return QLA_FUNCTION_FAILED;
+ return QLA_ERR_NO_QPAIR;
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
@@ -3133,7 +3275,7 @@ qla24xx_abort_command(srb_t *sp)
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
- return QLA_FUNCTION_FAILED;
+ return QLA_ERR_NOT_FOUND;
}
abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
@@ -3145,15 +3287,17 @@ qla24xx_abort_command(srb_t *sp)
abt->entry_type = ABORT_IOCB_TYPE;
abt->entry_count = 1;
- abt->handle = MAKE_HANDLE(req->id, abt->handle);
+ abt->handle = make_handle(req->id, abt->handle);
abt->nport_handle = cpu_to_le16(fcport->loop_id);
- abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
+ abt->handle_to_abort = make_handle(req->id, handle);
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
abt->vp_index = fcport->vha->vp_idx;
abt->req_que_no = cpu_to_le16(req->id);
+ /* Pass the original sp so the NVMe abort option can be set */
+ qla_nvme_abort_set_option(abt, sp);
rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
@@ -3168,7 +3312,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle));
- if (abt->nport_handle == CS_IOCB_ERROR)
+ if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
rval = QLA_FUNCTION_PARAMETER_ERROR;
else
rval = QLA_FUNCTION_FAILED;
@@ -3176,6 +3320,10 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
"Done %s.\n", __func__);
}
+ if (rval == QLA_SUCCESS)
+ qla_nvme_abort_process_comp_status(abt, sp);
+
+ qla_wait_nvme_release_cmd_kref(sp);
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -3224,7 +3372,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
- tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
+ tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->p.tsk.control_flags = cpu_to_le32(type);
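/*
 * The MAKE_HANDLE() -> make_handle() conversions in the hunks above
 * replace a macro with a typed helper. A plausible sketch of that
 * helper (an assumption; the real definition lives in qla_def.h):
 * the request-queue id is folded into the upper 16 bits of the IOCB
 * handle so completions can be routed back to the right queue.
 */
static inline uint32_t make_handle(uint16_t que_id, uint16_t handle)
{
	return ((uint32_t)que_id << 16) | handle;
}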
@@ -3888,21 +4036,46 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_SCAN;
fcport->n2n_flag = 0;
}
+ id.b24 = 0;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ vha->d_id.b24 = 0;
+ vha->d_id.b.al_pa = 1;
+ ha->flags.n2n_bigger = 1;
+
+ id.b.al_pa = 2;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: assign local id %x remote id %x\n",
+ vha->d_id.b24, id.b24);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ ha->flags.n2n_bigger = 0;
+ }
fcport = qla2x00_find_fcport_by_wwpn(vha,
rptid_entry->u.f1.port_name, 1);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
if (fcport) {
fcport->plogi_nack_done_deadline = jiffies + HZ;
- fcport->dm_login_expire = jiffies + 2*HZ;
+ fcport->dm_login_expire = jiffies +
+ QLA_N2N_WAIT_TIME * HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->fc4_type = FS_FC4TYPE_FCP;
if (vha->flags.nvme_enabled)
fcport->fc4_type |= FS_FC4TYPE_NVME;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(fcport->port_name)) {
+ fcport->d_id = id;
+ }
+
switch (fcport->disc_state) {
case DSC_DELETED:
set_bit(RELOGIN_NEEDED,
@@ -3915,25 +4088,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
break;
}
} else {
- id.b24 = 0;
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(rptid_entry->u.f1.port_name)) {
- vha->d_id.b24 = 0;
- vha->d_id.b.al_pa = 1;
- ha->flags.n2n_bigger = 1;
- ha->flags.n2n_ae = 0;
-
- id.b.al_pa = 2;
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: assign local id %x remote id %x\n",
- vha->d_id.b24, id.b24);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Remote login - Waiting for WWPN %8phC.\n",
- rptid_entry->u.f1.port_name);
- ha->flags.n2n_bigger = 0;
- ha->flags.n2n_ae = 1;
- }
qla24xx_post_newsess_work(vha, &id,
rptid_entry->u.f1.port_name,
rptid_entry->u.f1.node_name,
@@ -3945,7 +4099,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
return;
- break;
case TOPO_FL:
ha->current_topology = ISP_CFG_FL;
break;
@@ -4054,6 +4207,16 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.remote_nport_id[1];
fcport->d_id.b.al_pa =
rptid_entry->u.f2.remote_nport_id[0];
+
+ /*
+ * For the case where the remote port sends PRLO, FW
+ * sends up RIDA Format 2 as an indication of session
+ * loss. In other words, the FW state changed from PRLI
+ * complete back to PLOGI complete. Delete the
+ * session and let relogin drive the reconnect.
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qlt_schedule_sess_for_deletion(fcport);
}
}
}
@@ -4195,7 +4358,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
mcp->mb[8] = MSW(addr);
- mcp->out_mb = MBX_8|MBX_0;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_8|MBX_0;
} else {
mcp->mb[0] = MBC_DUMP_RISC_RAM;
mcp->out_mb = MBX_0;
@@ -4387,9 +4551,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
- WRT_REG_DWORD(req->req_q_in, 0);
+ wrt_reg_dword(req->req_q_in, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(req->req_q_out, 0);
+ wrt_reg_dword(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4458,9 +4622,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
- WRT_REG_DWORD(rsp->rsp_q_out, 0);
+ wrt_reg_dword(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(rsp->rsp_q_in, 0);
+ wrt_reg_dword(rsp->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4687,7 +4851,7 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
mbx_cmd_t *mcp = &mc;
int i;
int len;
- uint16_t *str;
+ __le16 *str;
struct qla_hw_data *ha = vha->hw;
if (!IS_P3P_TYPE(ha))
@@ -4696,14 +4860,14 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
"Entered %s.\n", __func__);
- str = (void *)version;
+ str = (__force __le16 *)version;
len = strlen(version);
mcp->mb[0] = MBC_SET_RNID_PARAMS;
mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
mcp->out_mb = MBX_1|MBX_0;
for (i = 4; i < 16 && len; i++, str++, len -= 2) {
- mcp->mb[i] = cpu_to_le16p(str);
+ mcp->mb[i] = le16_to_cpup(str);
mcp->out_mb |= 1<<i;
}
for (; i < 16; i++) {
@@ -4821,12 +4985,88 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = le32_to_cpu(*bp);
+ *bp = le32_to_cpu((__force __le32)*bp);
}
return rval;
}
+#define PUREX_CMD_COUNT 4
+int
+qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint8_t *els_cmd_map;
+ uint8_t active_cnt = 0;
+ dma_addr_t els_cmd_map_dma;
+ uint8_t cmd_opcode[PUREX_CMD_COUNT];
+ uint8_t i, index, purex_bit;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_SUCCESS;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
+ "Entered %s.\n", __func__);
+
+ els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ &els_cmd_map_dma, GFP_KERNEL);
+ if (!els_cmd_map) {
+ ql_log(ql_log_warn, vha, 0x7101,
+ "Failed to allocate ELS command map buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ /* List of Purex ELS */
+ if (ql2xrdpenable) {
+ cmd_opcode[active_cnt] = ELS_RDP;
+ active_cnt++;
+ }
+ if (ha->flags.scm_supported_f) {
+ cmd_opcode[active_cnt] = ELS_FPIN;
+ active_cnt++;
+ }
+ if (ha->flags.edif_enabled) {
+ cmd_opcode[active_cnt] = ELS_AUTH_ELS;
+ active_cnt++;
+ }
+
+ for (i = 0; i < active_cnt; i++) {
+ index = cmd_opcode[i] / 8;
+ purex_bit = cmd_opcode[i] % 8;
+ els_cmd_map[index] |= 1 << purex_bit;
+ }
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
+ mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
+ mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
+ mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
+ mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->buf_size = ELS_CMD_MAP_SIZE;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118d,
+ "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
+ "Done %s.\n", __func__);
+ }
+
+ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ els_cmd_map, els_cmd_map_dma);
+
+ return rval;
+}
+
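/*
 * Sketch of the ELS-opcode bitmap built by
 * qla25xx_set_els_cmds_supported() above: one bit per ELS opcode,
 * eight opcodes per byte, so opcode N lives at byte N/8, bit N%8.
 * The helper names and the map size below are illustrative
 * assumptions.
 */
#include <stdint.h>

#define ELS_CMD_MAP_SIZE 32	/* assumed: 256 opcodes / 8 bits per byte */

static void els_map_set(uint8_t *map, uint8_t opcode)
{
	map[opcode / 8] |= 1 << (opcode % 8);	/* enable PUREX for opcode */
}

static int els_map_test(const uint8_t *map, uint8_t opcode)
{
	return map[opcode / 8] & (1 << (opcode % 8));
}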
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -4880,8 +5120,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mcp->mb[0] = MBC_READ_SFP;
mcp->mb[1] = dev;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[2] = MSW(LSD(sfp_dma));
+ mcp->mb[3] = LSW(LSD(sfp_dma));
mcp->mb[6] = MSW(MSD(sfp_dma));
mcp->mb[7] = LSW(MSD(sfp_dma));
mcp->mb[8] = len;
@@ -4934,8 +5174,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mcp->mb[0] = MBC_WRITE_SFP;
mcp->mb[1] = dev;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[2] = MSW(LSD(sfp_dma));
+ mcp->mb[3] = LSW(LSD(sfp_dma));
mcp->mb[6] = MSW(MSD(sfp_dma));
mcp->mb[7] = LSW(MSD(sfp_dma));
mcp->mb[8] = len;
@@ -5056,7 +5296,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_2|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
@@ -5170,10 +5410,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -5243,7 +5484,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_1|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
@@ -5275,18 +5516,18 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Write the MBC data to the registers */
- WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
- WRT_REG_WORD(&reg->mailbox1, mb[0]);
- WRT_REG_WORD(&reg->mailbox2, mb[1]);
- WRT_REG_WORD(&reg->mailbox3, mb[2]);
- WRT_REG_WORD(&reg->mailbox4, mb[3]);
+ wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+ wrt_reg_word(&reg->mailbox1, mb[0]);
+ wrt_reg_word(&reg->mailbox2, mb[1]);
+ wrt_reg_word(&reg->mailbox3, mb[2]);
+ wrt_reg_word(&reg->mailbox4, mb[3]);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
/* Poll for MBC interrupt */
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (stat & HSRX_RISC_INT) {
stat &= 0xff;
@@ -5294,10 +5535,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
- mb0 = RD_REG_WORD(&reg->mailbox0);
- WRT_REG_DWORD(&reg->hccr,
+ mb0 = rd_reg_word(&reg->mailbox0);
+ wrt_reg_dword(&reg->hccr,
HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rd_reg_dword(&reg->hccr);
break;
}
}
@@ -5399,7 +5640,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mcp->in_mb |= MBX_3;
+ mcp->in_mb |= MBX_4|MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -5407,6 +5648,15 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1107,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mcp->mb[4] & BIT_0)
+ ql_log(ql_log_info, vha, 0x11a2,
+ "FEC=enabled (data rate).\n");
+ }
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
"Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
@@ -5506,7 +5756,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (mb != NULL) {
@@ -5593,7 +5843,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -5628,7 +5878,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -5820,7 +6070,7 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (IS_QLA8031(ha))
mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -5856,7 +6106,7 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA8031(ha))
mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -6066,7 +6316,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1144,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
}
@@ -6111,7 +6361,7 @@ qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
mcp->mb[4]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
if (subcode & BIT_5)
*sector_size = mcp->mb[1];
@@ -6226,6 +6476,54 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
+ struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
+{
+ int rval;
+ dma_addr_t dd_dma;
+ uint size = sizeof(dd->buf);
+ uint16_t options = dd->options;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+ "Entered %s.\n", __func__);
+
+ dd_dma = dma_map_single(&vha->hw->pdev->dev,
+ dd->buf, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+ ql_log(ql_log_warn, vha, 0x1194,
+ "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(dd->buf, 0, size);
+
+ mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(dd_dma));
+ mcp->mb[3] = LSW(LSD(dd_dma));
+ mcp->mb[6] = MSW(MSD(dd_dma));
+ mcp->mb[7] = LSW(MSD(dd_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS * 4;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
+
+ return rval;
+}
+
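/*
 * qla26xx_dport_diagnostics_v2() above follows the standard
 * streaming-DMA shape for a firmware command: map, verify the
 * mapping, issue, unmap. Condensed sketch of the pattern
 * (illustrative helper; the command-issue step is elided):
 */
#include <linux/dma-mapping.h>

static int issue_cmd_with_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;		/* never program an unchecked handle */

	/* ... load dma into the command's address registers and issue ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}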
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
sp->u.iocb_cmd.u.mbx.rc = res;
@@ -6248,23 +6546,21 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
if (!vha->hw->flags.fw_started)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
- sp->type = SRB_MB_IOCB;
- sp->name = mb_to_str(mcp->mb[0]);
-
c = &sp->u.iocb_cmd;
- c->timeout = qla2x00_async_iocb_timeout;
init_completion(&c->u.mbx.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_mb_sp_done);
memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
- sp->done = qla2x00_async_mb_sp_done;
-
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1018,
@@ -6296,7 +6592,8 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
}
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -6325,13 +6622,13 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE;
- mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[1] = fcport->loop_id;
mc.mb[2] = MSW(pd_dma);
mc.mb[3] = LSW(pd_dma);
mc.mb[6] = MSW(MSD(pd_dma));
mc.mb[7] = LSW(MSD(pd_dma));
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16((uint16_t)opt);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = opt;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6394,6 +6691,12 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x2062,
+ "%8phC SVC Param w3 %02x%02x\n",
+ fcport->port_name,
+ pd->prli_svc_param_word_3[1],
+ pd->prli_svc_param_word_3[0]);
+
if (NVME_TARGET(vha->hw, fcport)) {
fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
@@ -6442,7 +6745,7 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
mc.mb[6] = MSW(MSD(id_list_dma));
mc.mb[7] = LSW(MSD(id_list_dma));
mc.mb[8] = 0;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[9] = vha->vp_idx;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6468,8 +6771,8 @@ int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(1);
- mcp->mb[2] = cpu_to_le16(value);
+ mcp->mb[1] = 1;
+ mcp->mb[2] = value;
mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -6494,7 +6797,7 @@ int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
memset(mcp->mb, 0, sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(0);
+ mcp->mb[1] = 0;
mcp->out_mb = MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -6688,3 +6991,120 @@ int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
return rval;
}
+
+int
+ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
+ __func__, options);
+
+ mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
+ mcp->mb[1] = options;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ if (options & BIT_0) {
+ if (options & BIT_1) {
+ mcp->mb[2] = led[2];
+ mcp->out_mb |= MBX_2;
+ }
+ if (options & BIT_2) {
+ mcp->mb[3] = led[0];
+ mcp->out_mb |= MBX_3;
+ }
+ if (options & BIT_3) {
+ mcp->mb[4] = led[1];
+ mcp->out_mb |= MBX_4;
+ }
+ } else {
+ mcp->in_mb |= MBX_4|MBX_3|MBX_2;
+ }
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval) {
+ ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
+ __func__, rval, mcp->mb[0], mcp->mb[1]);
+ return rval;
+ }
+
+ if (options & BIT_0) {
+ ha->beacon_blink_led = 0;
+ ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
+ } else {
+ led[2] = mcp->mb[2];
+ led[0] = mcp->mb[3];
+ led[1] = mcp->mb[4];
+ ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
+ __func__, led[0], led[1], led[2]);
+ }
+
+ return rval;
+}
+
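/*
 * Usage sketch for ql26xx_led_config() above, inferred from this hunk
 * (bit meanings are assumptions): BIT_0 selects set (1) vs. get (0),
 * and BIT_1/BIT_2/BIT_3 choose which of the three LED words in led[]
 * are written.
 */
static int leds_save_then_set(scsi_qla_host_t *vha)
{
	uint16_t led[3];
	int rval;

	rval = ql26xx_led_config(vha, 0, led);	/* get: fills led[0..2] */
	if (rval)
		return rval;

	/* set all three LED words back */
	return ql26xx_led_config(vha, BIT_0 | BIT_1 | BIT_2 | BIT_3, led);
}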
+/**
+ * qla_no_op_mb() - Send a no-op mailbox command to check that the FW
+ * is still alive and able to generate an interrupt; otherwise, a
+ * timeout will trigger a FW dump + reset.
+ * @vha: host adapter pointer
+ * Return: None
+ */
+void qla_no_op_mb(struct scsi_qla_host *vha)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ memset(&mc, 0, sizeof(mc));
+ mcp->mb[0] = 0; /* no-op command = 0 */
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 5;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval) {
+ ql_dbg(ql_dbg_async, vha, 0x7071,
+ "Failed %s %x\n", __func__, rval);
+ }
+}
+
+int qla_mailbox_passthru(scsi_qla_host_t *vha,
+ uint16_t *mbx_in, uint16_t *mbx_out)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = -EINVAL;
+
+ memset(&mc, 0, sizeof(mc));
+ /* Receive the contents of all 32 mailbox registers */
+ memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
+
+ mcp->out_mb = 0xFFFFFFFF;
+ mcp->in_mb = 0xFFFFFFFF;
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ mcp->bufp = NULL;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
+ __func__);
+ /* Pass back the contents of all 32 mailbox registers */
+ memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
+ }
+
+ return rval;
+}
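/*
 * Caller-side sketch for qla_mailbox_passthru() above: the full set of
 * 32 16-bit mailbox registers is passed in and copied back out. The
 * opcode and return-register choices below are illustrative.
 */
static int mbx_passthru_example(scsi_qla_host_t *vha, uint16_t opcode,
				uint16_t *mb1_out)
{
	uint16_t mb_in[32] = { 0 }, mb_out[32] = { 0 };
	int rval;

	mb_in[0] = opcode;	/* command opcode always goes in mb[0] */
	rval = qla_mailbox_passthru(vha, mb_in, mb_out);
	if (rval == QLA_SUCCESS)
		*mb1_out = mb_out[1];
	return rval;
}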
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8ae639d089d1..16a9f22bb860 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_gbl.h"
@@ -66,7 +65,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
- u8 i;
+ u32 i, bailout;
mutex_lock(&ha->vport_lock);
/*
@@ -76,21 +75,29 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- for (i = 0; i < 10; i++) {
- if (wait_event_timeout(vha->vref_waitq,
- !atomic_read(&vha->vref_count), HZ) > 0)
+ bailout = 0;
+ for (i = 0; i < 500; i++) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count) == 0) {
+ list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
+ bailout = 1;
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (bailout)
break;
+ else
+ msleep(20);
}
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- if (atomic_read(&vha->vref_count)) {
- ql_dbg(ql_dbg_vport, vha, 0xfffa,
- "vha->vref_count=%u timeout\n", vha->vref_count.counter);
- vha->vref_count = (atomic_t)ATOMIC_INIT(0);
+ if (!bailout) {
+ ql_log(ql_log_info, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
- list_del(&vha->list);
- qlt_update_vp_map(vha, RESET_VP_IDX);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
ha->num_vhosts--;
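/*
 * Condensed form of the drain-then-unlink loop introduced above: poll
 * the reference count under the lock and give up after
 * 500 iterations * 20 ms = 10 seconds. Illustrative helper under
 * those assumptions.
 */
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

static bool drain_refs(atomic_t *cnt, spinlock_t *lock)
{
	unsigned long flags;
	u32 i;

	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(lock, flags);
		if (atomic_read(cnt) == 0) {
			spin_unlock_irqrestore(lock, flags);
			return true;	/* drained; safe to unlink */
		}
		spin_unlock_irqrestore(lock, flags);
		msleep(20);
	}
	return false;	/* timed out; caller logs and unlinks anyway */
}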
@@ -159,6 +166,14 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_ACTIVE(vha))
+ qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
+ FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
+ /* delete sessions and flush sa_indexes */
+ qla2x00_wait_for_sess_deletion(vha);
+ }
+
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@@ -167,7 +182,8 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->logout_on_delete = 0;
- qla2x00_mark_all_devices_lost(vha);
+ if (!vha->hw->flags.edif_enabled)
+ qla2x00_wait_for_sess_deletion(vha);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -258,13 +274,13 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
- scsi_qla_host_t *vha;
+ scsi_qla_host_t *vha, *tvp;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vha, &ha->vp_list, list) {
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
if (vha->vp_idx) {
if (test_bit(VPORT_DELETE, &vha->dpc_flags))
continue;
@@ -361,6 +377,13 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
+ if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
+ if (atomic_read(&vha->loop_state) == LOOP_READY) {
+ qla24xx_process_purex_list(&vha->purex_list);
+ clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
+ }
+ }
+
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, vha, 0x4016,
"FCPort update scheduled.\n");
@@ -410,7 +433,7 @@ void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *vp;
+ scsi_qla_host_t *vp, *tvp;
unsigned long flags = 0;
if (vha->vp_idx)
@@ -424,7 +447,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
return;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -509,6 +532,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
vha->dpc_flags = 0L;
+ ha->dpc_active = 0;
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
/*
* To fix the issue of processing a parent's RSCN for the vport before
@@ -569,7 +595,6 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
}
kfree(req->outstanding_cmds);
kfree(req);
- req = NULL;
}
static void
@@ -595,7 +620,6 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mutex_unlock(&ha->vport_lock);
}
kfree(rsp);
- rsp = NULL;
}
int
@@ -760,7 +784,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
@@ -798,11 +822,9 @@ static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
- struct scsi_qla_host *vha;
- struct qla_hw_data *ha = qpair->hw;
+ struct scsi_qla_host *vha = qpair->vha;
spin_lock_irqsave(&qpair->qp_lock, flags);
- vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, qpair->rsp);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
@@ -874,7 +896,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
@@ -886,7 +908,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->rsp_q_out);
ret = qla25xx_request_irq(ha, qpair, qpair->msix,
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
+ ha->flags.disable_msix_handshake ?
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
if (ret)
goto que_failed;
@@ -944,6 +967,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
+ /* ref: INIT */
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
return rval;
@@ -951,9 +975,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
sp->comp = &comp;
- sp->done = qla_ctrlvp_sp_done;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla_ctrlvp_sp_done);
sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
@@ -987,6 +1010,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
break;
}
done:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index cad1fc2a1b28..f726eb8449c5 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include <linux/delay.h>
@@ -46,17 +45,16 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint32_t *iptr;
- uint32_t __iomem *optr;
+ __le32 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x115c,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -110,7 +108,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+ optr = &reg->ispfx00.mailbox0;
iptr = mcp->mb;
command = mcp->mb[0];
@@ -118,7 +116,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- WRT_REG_DWORD(optr, *iptr);
+ wrt_reg_dword(optr, *iptr);
mboxes >>= 1;
optr++;
@@ -518,7 +516,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
}
/**
- * qlafx00_warm_reset() - Perform warm reset of iSA(CPUs being reset on SOC).
+ * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
* @vha: HA context
*
*/
@@ -677,14 +675,14 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ wrt_reg_dword(&reg->req_q_in, 0);
+ wrt_reg_dword(&reg->req_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ wrt_reg_dword(&reg->rsp_q_in, 0);
+ wrt_reg_dword(&reg->rsp_q_out, 0);
/* PCI posting */
- RD_REG_DWORD(&reg->rsp_q_out);
+ rd_reg_dword(&reg->rsp_q_out);
}
char *
@@ -741,29 +739,6 @@ qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
}
int
-qlafx00_loop_reset(scsi_qla_host_t *vha)
-{
- int ret;
- struct fc_port *fcport;
- struct qla_hw_data *ha = vha->hw;
-
- if (ql2xtargetreset) {
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- ret = ha->isp_ops->target_reset(fcport, 0, 0);
- if (ret != QLA_SUCCESS) {
- ql_dbg(ql_dbg_taskm, vha, 0x803d,
- "Bus Reset failed: Reset=%d "
- "d_id=%x.\n", ret, fcport->d_id.b24);
- }
- }
- }
- return QLA_SUCCESS;
-}
-
-int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
if (pci_request_selected_regions(ha->pdev, ha->bars,
@@ -913,9 +888,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* 30 seconds wait - Adjust if required */
wait_time = 30;
- pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ pseudo_aen = rd_reg_dword(&reg->pseudoaen);
if (pseudo_aen == 1) {
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
rval = qlafx00_driver_shutdown(vha, 10);
@@ -926,7 +901,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
- aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx = rd_reg_dword(&reg->aenmailbox0);
barrier();
ql_dbg(ql_dbg_mbx, vha, 0x0133,
"aenmbx: 0x%x\n", aenmbx);
@@ -945,15 +920,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
case MBA_FW_RESTART_CMPLT:
/* Set the mbx and rqstq intr code */
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
- WRT_REG_DWORD(&reg->aenmailbox0, 0);
- RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
+ wrt_reg_dword(&reg->aenmailbox0, 0);
+ rd_reg_dword_relaxed(&reg->aenmailbox0);
ql_dbg(ql_dbg_init, vha, 0x0134,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -983,13 +958,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
* 3. issue Get FW State Mbox cmd to determine fw state
* Set the mbx and rqstq intr code from Shadow Regs
*/
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->initval1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
- ha->req_que_len = RD_REG_DWORD(&reg->initval5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ha->req_que_off = rd_reg_dword(&reg->initval1);
+ ha->rsp_que_off = rd_reg_dword(&reg->initval3);
+ ha->req_que_len = rd_reg_dword(&reg->initval5);
+ ha->rsp_que_len = rd_reg_dword(&reg->initval6);
ql_dbg(ql_dbg_init, vha, 0x0135,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -1035,7 +1010,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
if (time_after_eq(jiffies, wtime)) {
ql_dbg(ql_dbg_init, vha, 0x0137,
"Init f/w failed: aen[7]: 0x%x\n",
- RD_REG_DWORD(&reg->aenmailbox7));
+ rd_reg_dword(&reg->aenmailbox7));
rval = QLA_FUNCTION_FAILED;
done = true;
break;
@@ -1429,7 +1404,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
- WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
+ wrt_reg_dword((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED);
pkt++;
}
@@ -1445,13 +1420,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha)
qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
ql_dbg(ql_dbg_disc, vha, 0x2094,
"fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
@@ -1496,7 +1471,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
(!test_bit(UNLOADING, &vha->dpc_flags)) &&
(!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
(ha->mr.fw_hbt_en)) {
- fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
ha->mr.old_fw_hbt_cnt = fw_heart_beat;
ha->mr.fw_hbt_miss_cnt = 0;
@@ -1516,7 +1491,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
/* Reset recovery to be performed in timer routine */
- aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
if (ha->mr.fw_reset_timer_exp) {
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1711,10 +1686,9 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
return;
}
-int
+void
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
- int rval = 0;
uint32_t aen_code, aen_data;
aen_code = FCH_EVT_VENDOR_UNIQUE;
@@ -1765,8 +1739,6 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
fc_host_post_event(vha->host, fc_get_event_number(),
aen_code, aen_data);
-
- return rval;
}
static void
@@ -1815,17 +1787,18 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
struct register_host_info *preg_hsi;
struct new_utsname *p_sysid = NULL;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_FXIOCB_DCMD;
sp->name = "fxdisc";
+ qla2x00_init_async_sp(sp, FXDISC_TIMEOUT,
+ qla2x00_fxdisc_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout;
fdisc = &sp->u.iocb_cmd;
- fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
-
switch (fx_type) {
case FXDISC_GET_CONFIG_INFO:
fdisc->u.fxiocb.flags =
@@ -1926,7 +1899,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
}
fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
- sp->done = qla2x00_fxdisc_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -2002,7 +1974,8 @@ done_unmap_req:
dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -2722,7 +2695,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
uint16_t lreq_q_in = 0;
uint16_t lreq_q_out = 0;
- lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) {
@@ -2784,7 +2757,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
/**
@@ -2815,9 +2788,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2847,13 +2820,13 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
default:
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
- ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
- ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
- ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
- ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
+ ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
+ ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
+ ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
+ ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
ql_dbg(ql_dbg_async, vha, 0x5078,
"AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
@@ -2865,7 +2838,7 @@ qlafx00_async_event(scsi_qla_host_t *vha)
}
/**
- * qlafx00x_mbx_completion() - Process mailbox command completions.
+ * qlafx00_mbx_completion() - Process mailbox command completions.
* @vha: SCSI driver HA context
* @mb0: value to be written into mailbox register 0
*/
@@ -2873,7 +2846,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint32_t __iomem *wptr;
+ __le32 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2883,10 +2856,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint32_t __iomem *)&reg->mailbox17;
+ wptr = &reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
wptr++;
}
}
@@ -2940,13 +2913,13 @@ qlafx00_intr_handler(int irq, void *dev_id)
break;
if (stat & QLAFX00_INTR_MB_CMPLT) {
- mb[0] = RD_REG_WORD(&reg->mailbox16);
+ mb[0] = rd_reg_dword(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
}
if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
- ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
}
@@ -3114,7 +3087,7 @@ qlafx00_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3136,7 +3109,7 @@ qlafx00_start_scsi(srb_t *sp)
memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
- lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+ lcmd_pkt.handle = make_handle(req->id, sp->handle);
lcmd_pkt.reserved_0 = 0;
lcmd_pkt.port_path_ctrl = 0;
lcmd_pkt.reserved_1 = 0;
@@ -3179,7 +3152,7 @@ qlafx00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3206,7 +3179,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ tm_iocb.handle = make_handle(req->id, sp->handle);
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3216,7 +3189,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
sizeof(struct scsi_lun));
}
- memcpy((void *)ptm_iocb, &tm_iocb,
+ memcpy(ptm_iocb, &tm_iocb,
sizeof(struct tsk_mgmt_entry_fx00));
wmb();
}
@@ -3232,13 +3205,12 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- abt_iocb.abort_handle =
- cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.handle = make_handle(req->id, sp->handle);
+ abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
- memcpy((void *)pabt_iocb, &abt_iocb,
+ memcpy(pabt_iocb, &abt_iocb,
sizeof(struct abort_iocb_entry_fx00));
wmb();
}
@@ -3255,7 +3227,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
fx_iocb.entry_type = FX00_IOCB_TYPE;
- fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.handle = sp->handle;
fx_iocb.entry_count = entry_cnt;
if (sp->type == SRB_FXIOCB_DCMD) {
@@ -3272,8 +3244,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.req_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.req_len);
put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
- &fx_iocb.dseg_rq.address);
- fx_iocb.dseg_rq.length =
+ &fx_iocb.dseg_rq[0].address);
+ fx_iocb.dseg_rq[0].length =
cpu_to_le32(fxio->u.fxiocb.req_len);
}
@@ -3282,8 +3254,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.rsp_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.rsp_len);
put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
- &fx_iocb.dseg_rsp.address);
- fx_iocb.dseg_rsp.length =
+ &fx_iocb.dseg_rsp[0].address);
+ fx_iocb.dseg_rsp[0].length =
cpu_to_le32(fxio->u.fxiocb.rsp_len);
}
@@ -3320,7 +3292,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
cpu_to_le16(bsg_job->request_payload.sg_cnt);
tot_dsds =
bsg_job->request_payload.sg_cnt;
- cur_dsd = &fx_iocb.dseg_rq;
+ cur_dsd = &fx_iocb.dseg_rq[0];
avail_dsds = 1;
for_each_sg(bsg_job->request_payload.sg_list, sg,
tot_dsds, index) {
@@ -3375,7 +3347,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.rsp_dsdcnt =
cpu_to_le16(bsg_job->reply_payload.sg_cnt);
tot_dsds = bsg_job->reply_payload.sg_cnt;
- cur_dsd = &fx_iocb.dseg_rsp;
+ cur_dsd = &fx_iocb.dseg_rsp[0];
avail_dsds = 1;
for_each_sg(bsg_job->reply_payload.sg_list, sg,
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4567f0c42486..4f63aff333db 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_MR_H
#define __QLA_MR_H
@@ -96,7 +95,7 @@ struct tsk_mgmt_entry_fx00 {
uint8_t sys_define;
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
uint32_t reserved_0;
@@ -121,13 +120,13 @@ struct abort_iocb_entry_fx00 {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */
__le16 options;
- __le32 abort_handle; /* System handle. */
+ uint32_t abort_handle; /* System handle. */
__le32 reserved_2;
__le16 req_que_no;
@@ -166,7 +165,7 @@ struct fxdisc_entry_fx00 {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0; /* System handle. */
__le16 func_num;
@@ -177,8 +176,12 @@ struct fxdisc_entry_fx00 {
uint8_t flags;
uint8_t reserved_1;
- struct dsd64 dseg_rq;
- struct dsd64 dseg_rsp;
+ /*
+ * Use array size 1 below to keep Coverity from complaining about
+ * the append_dsd64() calls for the two arrays below.
+ */
+ struct dsd64 dseg_rq[1];
+ struct dsd64 dseg_rsp[1];
__le32 dataword;
__le32 adapid;
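/*
 * Illustration of the dseg_rq/dseg_rsp change above: declaring a
 * one-element array gives pointer-walking helpers such as
 * append_dsd64() a genuine array object to index from, which keeps
 * Coverity from flagging the base address as out of bounds.
 * Illustrative struct, not the driver's:
 */
struct demo_iocb {
	struct dsd64 dseg_rq[1];	/* was: struct dsd64 dseg_rq; */
	struct dsd64 dseg_rsp[1];	/* &iocb->dseg_rq[0] indexes cleanly */
};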
@@ -359,47 +362,47 @@ struct config_info_data {
#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
#define QLAFX00_SET_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
value)
#define QLAFX00_CLR_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_RD_INTR_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
#define QLAFX00_CLR_INTR_REG(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_HBA_RST_REG(ha, val)\
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val)
#define QLAFX00_RD_ICNTRL_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
QLAFX00_ICR_ENB_MASK))
#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
QLAFX00_ICR_DIS_MASK))
#define QLAFX00_RD_REG(ha, off) \
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_WR_REG(ha, off, val) \
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
struct qla_mt_iocb_rqst_fx00 {
__le32 reserved_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index bfcd02fdf2b8..02fdeb0d31ec 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -1,14 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -42,7 +43,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
+ req.dev_loss_tmo = fcport->dev_loss_tmo;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -69,6 +70,17 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ fcport->dev_loss_tmo);
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
+ ql_log(ql_log_info, vha, 0x212a,
+ "PortID:%06x Supports SLER\n", req.port_id);
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
+ ql_log(ql_log_info, vha, 0x212b,
+ "PortID:%06x Supports PI control\n", req.port_id);
+
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
@@ -84,8 +96,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
struct qla_qpair *qpair;
- if (!qidx)
- qidx++;
+ /* Map admin queue and 1st IO queue to index 0 */
+ if (qidx)
+ qidx--;
vha = (struct scsi_qla_host *)lport->private;
ha = vha->hw;
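/*
 * The qidx remap above collapses the NVMe admin queue (qidx 0) and the
 * first I/O queue (qidx 1) onto hardware queue pair 0:
 *
 *   nvme-fc qidx : 0 (admin)  1 (io)  2 (io)  3 (io) ...
 *   qpair index  : 0          0       1       2      ...
 */
static unsigned int nvme_qidx_to_qpair_idx(unsigned int qidx)
{
	return qidx ? qidx - 1 : 0;	/* admin + first I/O queue share 0 */
}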
@@ -101,19 +114,24 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
return -EINVAL;
}
- if (ha->queue_pair_map[qidx]) {
- *handle = ha->queue_pair_map[qidx];
- ql_log(ql_log_info, vha, 0x2121,
- "Returning existing qpair of %p for idx=%x\n",
- *handle, qidx);
- return 0;
- }
+ /* Use base qpair if max_qpairs is 0 */
+ if (!ha->max_qpairs) {
+ qpair = ha->base_qpair;
+ } else {
+ if (ha->queue_pair_map[qidx]) {
+ *handle = ha->queue_pair_map[qidx];
+ ql_log(ql_log_info, vha, 0x2121,
+ "Returning existing qpair of %p for idx=%x\n",
+ *handle, qidx);
+ return 0;
+ }
- qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
- if (qpair == NULL) {
- ql_log(ql_log_warn, vha, 0x2122,
- "Failed to allocate qpair\n");
- return -EINVAL;
+ qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+ if (!qpair) {
+ ql_log(ql_log_warn, vha, 0x2122,
+ "Failed to allocate qpair\n");
+ return -EINVAL;
+ }
}
*handle = qpair;
@@ -138,12 +156,13 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
priv->sp = NULL;
sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
+ fd->status = NVME_SC_SUCCESS;
} else {
fd->rcv_rsplen = 0;
fd->transferred_length = 0;
+ fd->status = NVME_SC_INTERNAL;
}
- fd->status = 0;
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd->done(fd);
@@ -151,6 +170,18 @@ out:
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
+static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
+{
+ if (sp->flags & SRB_DMA_VALID) {
+ struct srb_iocb *nvme = &sp->u.iocb_cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+}
+
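/*
 * qla_nvme_ls_unmap() above uses SRB_DMA_VALID as a map-once/
 * unmap-once guard: the flag is set after dma_map_single() succeeds
 * and cleared on unmap, so both the error path and the completion
 * path may call the helper. Generic form of the idiom (illustrative;
 * the driver uses a plain flags word, not atomic bitops):
 */
static void unmap_once(struct device *dev, dma_addr_t dma, size_t len,
		       unsigned long *flags)
{
	if (test_and_clear_bit(0, flags))	/* first caller unmaps */
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}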
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
@@ -167,6 +198,8 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd = priv->fd;
+
+ qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
@@ -213,13 +246,15 @@ static void qla_nvme_abort_work(struct work_struct *work)
srb_t *sp = priv->sp;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
- int rval;
+ int rval, abts_done_called = 1;
+ bool io_wait_for_abort_done;
+ uint32_t handle;
ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
- "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
- __func__, sp, sp->handle, fcport, fcport->deleted);
+ "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
+ __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
- if (!ha->flags.fw_started && fcport->deleted)
+ if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
goto out;
if (ha->flags.host_shutting_down) {
@@ -230,13 +265,36 @@ static void qla_nvme_abort_work(struct work_struct *work)
goto out;
}
+ /*
+	 * sp may no longer be valid after abort_command() if the return
+	 * code is either SUCCESS or ERR_FROM_FW, so cache the values here.
+ */
+ io_wait_for_abort_done = ql2xabts_wait_nvme &&
+ QLA_ABTS_WAIT_ENABLED(sp);
+ handle = sp->handle;
+
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
- sp, sp->handle, fcport, rval);
+ sp, handle, fcport, rval);
+ /*
+ * If async tmf is enabled, the abort callback is called only on
+ * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
+ */
+ if (ql2xasynctmfenable &&
+ rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
+ abts_done_called = 0;
+
+ /*
+	 * Return before decreasing the kref so that I/O requests are
+	 * held until the ABTS completes. The kref is decreased in
+	 * qla24xx_abort_sp_done() instead.
+ */
+ if (abts_done_called && io_wait_for_abort_done)
+ return;
out:
/* kref_get was done before work was schedule. */
kref_put(&sp->cmd_kref, sp->put_fn);
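The handle caching a few lines up is a use-after-free guard: abort_command()
can complete the srb and drop its last reference, so anything needed after
the call (here, the handle for the log message) must be copied out first. The
pattern in isolation, as a hedged sketch that borrows the driver's types:

/* Copy fields out of an object before a call that may free it. */
static int abort_and_log(srb_t *sp, struct qla_hw_data *ha)
{
	u32 handle = sp->handle;	/* cache while sp is still valid */
	int rval = ha->isp_ops->abort_command(sp);	/* may free sp */

	pr_debug("abort handle=%x rval=%x\n", handle, rval);
	return rval;
}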
@@ -276,8 +334,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
srb_t *sp;
-
- if (!fcport || (fcport && fcport->deleted))
+ if (!fcport || fcport->deleted)
return rval;
vha = fcport->vha;
@@ -295,7 +352,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
sp->put_fn = qla_nvme_release_ls_cmd_kref;
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
@@ -313,6 +370,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags |= SRB_DMA_VALID;
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
@@ -320,6 +379,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
+ qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
@@ -361,16 +421,19 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
uint16_t avail_dsds;
struct dsd64 *cur_dsd;
struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
struct srb_iocb *nvme = &sp->u.iocb_cmd;
struct scatterlist *sgl, *sg;
struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+ struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
uint32_t rval = QLA_SUCCESS;
/* Setup qpair pointers */
req = qpair->req;
+ rsp = qpair->rsp;
tot_dsds = fd->sg_cnt;
/* Acquire qpair specific lock */
@@ -383,8 +446,13 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -398,8 +466,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
if (unlikely(!fd->sqid)) {
- struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
-
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&ha->nvme_active_aen_cnt);
@@ -413,7 +479,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
@@ -426,11 +492,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
	/* No data transfer - how do we check buffer len == 0? */
if (fd->io_dir == NVMEFC_FCP_READ) {
- cmd_pkt->control_flags = CF_READ_DATA;
- vha->qla_stats.input_bytes += fd->payload_length;
- vha->qla_stats.input_requests++;
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+ qpair->counters.input_bytes += fd->payload_length;
+ qpair->counters.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
- cmd_pkt->control_flags = CF_WRITE_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) &&
(sp->fcport->nvme_prli_service_param &
NVME_PRLI_SP_FIRST_BURST)) {
@@ -438,14 +504,23 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
sp->fcport->nvme_first_burst_size) ||
(sp->fcport->nvme_first_burst_size == 0))
cmd_pkt->control_flags |=
- CF_NVME_FIRST_BURST_ENABLE;
+ cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
}
- vha->qla_stats.output_bytes += fd->payload_length;
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += fd->payload_length;
+ qpair->counters.output_requests++;
} else if (fd->io_dir == 0) {
cmd_pkt->control_flags = 0;
}
+ if (sp->fcport->edif.enable && fd->io_dir != 0)
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+
+ /* Set BIT_13 of control flags for Async event */
+ if (vha->flags.nvme2_enabled &&
+ cmd->sqe.common.opcode == nvme_admin_async_event) {
+ cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
+ }
+
/* Set NPORT-ID */
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -513,11 +588,20 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->ring_ptr++;
}
+	/* Do not count NVMe async-event commands; their timeout is very long */
+ if (!nvme->u.nvme.aen_op)
+ sp->qpair->cmd_cnt++;
+
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
return rval;
}
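The req->cnt arithmetic above is the classic circular-ring free-space
computation: compare the driver's producer index against the consumer index
read back from the chip (or from its shadow register) and wrap around the
ring length. Factored out as a standalone sketch with a hypothetical helper:

/* Free slots in a ring of 'length' entries, given producer index 'in'
 * and consumer index 'out'; mirrors the req->cnt update above. */
static u32 ring_free_slots(u32 in, u32 out, u32 length)
{
	return (in < out) ? out - in : length - (in - out);
}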
@@ -529,19 +613,30 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fc_port_t *fcport;
struct srb_iocb *nvme;
struct scsi_qla_host *vha;
- int rval = -ENODEV;
+ int rval;
srb_t *sp;
struct qla_qpair *qpair = hw_queue_handle;
struct nvme_private *priv = fd->private;
struct qla_nvme_rport *qla_rport = rport->private;
+ if (!priv) {
+ /* nvme association has been torn down */
+ return -ENODEV;
+ }
+
fcport = qla_rport->fcport;
- if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
- (fcport && fcport->deleted))
- return rval;
+ if (unlikely(!qpair || !fcport || fcport->deleted))
+ return -EBUSY;
+
+ if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ return -ENODEV;
vha = fcport->vha;
+
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ return -EBUSY;
+
/*
* If we know the dev is going away while the transport is still sending
	 * IO's, return busy back to stall the IO Q. This happens when the
@@ -560,7 +655,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
init_waitqueue_head(&sp->nvme_ls_waitq);
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
@@ -568,6 +663,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
sp->put_fn = qla_nvme_release_fcp_cmd_kref;
sp->qpair = qpair;
sp->vha = vha;
+ sp->cmd_sp = sp;
nvme = &sp->u.iocb_cmd;
nvme->u.nvme.desc = fd;
@@ -584,6 +680,14 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return rval;
}
+static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
+ struct blk_mq_queue_map *map)
+{
+ struct scsi_qla_host *vha = lport->private;
+
+ blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+}
+
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
@@ -610,7 +714,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
- .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
@@ -619,7 +722,8 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
- .max_hw_queues = 8,
+ .map_queues = qla_nvme_map_queues,
+ .max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
.dma_boundary = 0xFFFFFFFF,
@@ -636,7 +740,7 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport)
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
- ql_log(ql_log_warn, NULL, 0x2112,
+ ql_log(ql_log_warn, fcport->vha, 0x2112,
"%s: unregister remoteport on %p %8phN\n",
__func__, fcport, fcport->port_name);
@@ -678,7 +782,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret = EINVAL;
+ int ret = -EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
@@ -686,25 +790,51 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
- WARN_ON(vha->nvme_local_port);
- WARN_ON(ha->max_req_queues < 3);
+ if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to: %d\n",
+ ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
+ ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+ } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
+ ql2xnvme_queues, (ha->max_qpairs - 1),
+ (ha->max_qpairs - 1));
+		ql2xnvme_queues = ha->max_qpairs - 1;
+ }
qla_nvme_fc_transport.max_hw_queues =
- min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
- (uint8_t)(ha->max_req_queues - 2));
+ min((uint8_t)(ql2xnvme_queues),
+ (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
+
+ ql_log(ql_log_info, vha, 0xfffb,
+ "Number of NVME queues used for this port: %d\n",
+ qla_nvme_fc_transport.max_hw_queues);
pinfo.node_name = wwn_to_u64(vha->node_name);
pinfo.port_name = wwn_to_u64(vha->port_name);
pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
pinfo.port_id = vha->d_id.b24;
- ql_log(ql_log_info, vha, 0xffff,
- "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
- pinfo.node_name, pinfo.port_name, pinfo.port_id);
- qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
-
- ret = nvme_fc_register_localport(&pinfo, tmpl,
- get_device(&ha->pdev->dev), &vha->nvme_local_port);
+ mutex_lock(&ha->vport_lock);
+ /*
+ * Check again for nvme_local_port to see if any other thread raced
+ * with this one and finished registration.
+ */
+ if (!vha->nvme_local_port) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+ pinfo.node_name, pinfo.port_name, pinfo.port_id);
+ qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+ ret = nvme_fc_register_localport(&pinfo, tmpl,
+ get_device(&ha->pdev->dev),
+ &vha->nvme_local_port);
+ mutex_unlock(&ha->vport_lock);
+ } else {
+ mutex_unlock(&ha->vport_lock);
+ return 0;
+ }
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
@@ -714,3 +844,85 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
return ret;
}
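The re-check of vha->nvme_local_port under ha->vport_lock is double-checked
registration: two threads can reach this function concurrently, so the shared
pointer is tested again with the mutex held and only the winner registers.
The generic shape, with do_register() and res as hypothetical stand-ins:

#include <linux/mutex.h>

static DEFINE_MUTEX(reg_lock);
static void *res;		/* shared, must be registered exactly once */

static int do_register(void **p);	/* hypothetical registration call */

static int register_once(void)
{
	int ret = 0;

	mutex_lock(&reg_lock);
	if (!res)		/* re-check under the lock */
		ret = do_register(&res);
	mutex_unlock(&reg_lock);
	return ret;
}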
+
+void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+ struct qla_hw_data *ha;
+
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+
+ ha = orig_sp->fcport->vha->hw;
+
+ WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
+ /* Use Driver Specified Retry Count */
+ abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
+ abt->drv.abts_rty_cnt = cpu_to_le16(2);
+ /* Use specified response timeout */
+ abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
+ /* set it to 2 * r_a_tov in secs */
+ abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
+}
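A unit note on the timeout above, since the hunk itself does not state it:
assuming ha->r_a_tov is kept in 100 ms units (which the division by 10
suggests), r_a_tov / 10 converts it to seconds and rsp_timeout becomes
2 * R_A_TOV in seconds; for example, r_a_tov = 100 (10 s) gives
2 * (100 / 10) = 20 seconds.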
+
+void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+ u16 comp_status;
+ struct scsi_qla_host *vha;
+
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+
+ vha = orig_sp->fcport->vha;
+
+ comp_status = le16_to_cpu(abt->comp_status);
+ switch (comp_status) {
+ case CS_RESET: /* reset event aborted */
+ case CS_ABORTED: /* IOCB was cleaned */
+ /* N_Port handle is not currently logged in */
+ case CS_TIMEOUT:
+ /* N_Port handle was logged out while waiting for ABTS to complete */
+ case CS_PORT_UNAVAILABLE:
+ /* Firmware found that the port name changed */
+ case CS_PORT_LOGGED_OUT:
+	/* Port configuration changed */
+ case CS_PORT_CONFIG_CHG:
+ ql_dbg(ql_dbg_async, vha, 0xf09d,
+ "Abort I/O IOCB completed with error, comp_status=%x\n",
+ comp_status);
+ break;
+
+ /* BA_RJT was received for the ABTS */
+ case CS_REJECT_RECEIVED:
+ ql_dbg(ql_dbg_async, vha, 0xf09e,
+		    "BA_RJT was received for the ABTS, rjt_vendorUnique = %u",
+ abt->fw.ba_rjt_vendorUnique);
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
+ "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
+ abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
+ break;
+
+ case CS_COMPLETE:
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
+		    "IOCB request completed successfully, comp_status=%x\n",
+ comp_status);
+ break;
+
+ case CS_IOCB_ERROR:
+ ql_dbg(ql_dbg_async, vha, 0xf0a0,
+		    "IOCB request failed, comp_status=%x\n", comp_status);
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xf0a1,
+ "Invalid Abort IO IOCB Completion Status %x\n",
+ comp_status);
+ break;
+ }
+}
+
+inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
+{
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+ kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index ef912902d4e5..d299478371b2 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NVME_H
#define __QLA_NVME_H
@@ -14,8 +13,8 @@
#include "qla_def.h"
#include "qla_dsd.h"
-/* default dev loss time (seconds) before transport tears down ctrl */
-#define NVME_FC_DEV_LOSS_TMO 30
+#define MIN_NVME_HW_QUEUES 1
+#define DEF_NVME_HW_QUEUES 8
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
@@ -48,26 +47,27 @@ struct cmd_nvme {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
+ __le16 dseg_count; /* Data segment count. */
+ __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */
uint64_t rsvd;
- uint16_t control_flags; /* Control Flags */
+ __le16 control_flags; /* Control Flags */
+#define CF_ADMIN_ASYNC_EVENT BIT_13
#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
+ __le16 nvme_cmnd_dseg_len; /* Data segment length. */
__le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */
__le64 nvme_rsp_dseg_address __packed; /* Data segment address. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -82,24 +82,24 @@ struct pt_ls4_request {
uint8_t sys_define;
uint8_t entry_status;
uint32_t handle;
- uint16_t status;
- uint16_t nport_handle;
- uint16_t tx_dseg_count;
+ __le16 status;
+ __le16 nport_handle;
+ __le16 tx_dseg_count;
uint8_t vp_index;
uint8_t rsvd;
- uint16_t timeout;
- uint16_t control_flags;
+ __le16 timeout;
+ __le16 control_flags;
#define CF_LS4_SHIFT 13
#define CF_LS4_ORIGINATOR 0
#define CF_LS4_RESPONDER 1
#define CF_LS4_RESPONDER_TERM 2
- uint16_t rx_dseg_count;
- uint16_t rsvd2;
- uint32_t exchange_address;
- uint32_t rsvd3;
- uint32_t rx_byte_count;
- uint32_t tx_byte_count;
+ __le16 rx_dseg_count;
+ __le16 rsvd2;
+ __le32 exchange_address;
+ __le32 rsvd3;
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
struct dsd64 dsd[2];
};
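The uint16_t/uint32_t to __le16/__le32 churn in these structures is not
cosmetic: the annotations tell sparse which fields hold little-endian
wire/DMA data, so any access that skips the byte-swap helpers is flagged at
build time. A minimal sketch of the convention on a hypothetical structure:

#include <linux/types.h>
#include <asm/byteorder.h>

struct hw_cmd {
	__le16 flags;			/* little-endian in DMA memory */
	__le32 byte_count;
};

static void hw_cmd_fill(struct hw_cmd *c, u16 flags, u32 len)
{
	c->flags = cpu_to_le16(flags);		/* CPU -> LE on write */
	c->byte_count = cpu_to_le32(len);
}

static u32 hw_cmd_len(const struct hw_cmd *c)
{
	return le32_to_cpu(c->byte_count);	/* LE -> CPU on read */
}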
@@ -107,32 +107,32 @@ struct pt_ls4_request {
struct pt_ls4_rx_unsol {
uint8_t entry_type;
uint8_t entry_count;
- uint16_t rsvd0;
- uint16_t rsvd1;
+ __le16 rsvd0;
+ __le16 rsvd1;
uint8_t vp_index;
uint8_t rsvd2;
- uint16_t rsvd3;
- uint16_t nport_handle;
- uint16_t frame_size;
- uint16_t rsvd4;
- uint32_t exchange_address;
+ __le16 rsvd3;
+ __le16 nport_handle;
+ __le16 frame_size;
+ __le16 rsvd4;
+ __le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
be_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
- uint32_t desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- uint32_t desc_len;
- uint32_t payload[3];
+ __le32 desc_len;
+ __le32 payload[3];
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 185c5f34d4c1..6dfb70edb9a6 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include <linux/delay.h>
@@ -336,20 +335,20 @@ static unsigned qla82xx_crb_hub_agt[64] = {
};
/* Device states */
-static char *q_dev_state[] = {
- "Unknown",
- "Cold",
- "Initializing",
- "Ready",
- "Need Reset",
- "Need Quiescent",
- "Failed",
- "Quiescent",
+static const char *const q_dev_state[] = {
+ [QLA8XXX_DEV_UNKNOWN] = "Unknown",
+ [QLA8XXX_DEV_COLD] = "Cold/Re-init",
+ [QLA8XXX_DEV_INITIALIZING] = "Initializing",
+ [QLA8XXX_DEV_READY] = "Ready",
+ [QLA8XXX_DEV_NEED_RESET] = "Need Reset",
+ [QLA8XXX_DEV_NEED_QUIESCENT] = "Need Quiescent",
+ [QLA8XXX_DEV_FAILED] = "Failed",
+ [QLA8XXX_DEV_QUIESCENT] = "Quiescent",
};
-char *qdev_state(uint32_t dev_state)
+const char *qdev_state(uint32_t dev_state)
{
- return q_dev_state[dev_state];
+ return (dev_state < MAX_STATES) ? q_dev_state[dev_state] : "Unknown";
}
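The rewritten state table combines two defensive idioms: designated
initializers bind each string to its enum value even if the enum is later
reordered or grows gaps, and the accessor bounds-checks before indexing.
Reduced to its essentials on a hypothetical enum:

enum dev_state { DEV_UNKNOWN, DEV_COLD, DEV_READY, MAX_DEV_STATES };

static const char *const dev_state_name[] = {
	[DEV_UNKNOWN]	= "Unknown",
	[DEV_COLD]	= "Cold",
	[DEV_READY]	= "Ready",
};

static const char *dev_state_str(unsigned int s)
{
	/* out of range, or a gap left NULL, falls back to "Unknown" */
	return (s < MAX_DEV_STATES && dev_state_name[s]) ?
	    dev_state_name[s] : "Unknown";
}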
/*
@@ -370,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -380,47 +379,6 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
-static inline unsigned long
-qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
-{
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- /* See if we are currently pointing to the region we want to use next */
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
- /* No need to change window. PCIX and PCIEregs are in both
-		/* No need to change window. PCIX and PCIE regs are
-		 * present in both windows.
- return off;
- }
-
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
- /* We are in first CRB window */
- if (ha->curr_window != 0)
- WARN_ON(1);
- return off;
- }
-
- if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
- /* We are in second CRB window */
- off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
-
- if (ha->curr_window != 1)
- return off;
-
- /* We are in the QM or direct access
- * register region - do nothing
- */
- if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
- (off < QLA82XX_PCI_CAMQM_MAX))
- return off;
- }
- /* strange address given */
- ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%s: Warning: unm_nic_pci_set_crbwindow "
- "called with an unknown address(%llx).\n",
- QLA2XXX_DRIVER_NAME, off);
- return off;
-}
-
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
void __iomem **off_out)
@@ -520,7 +478,7 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
qla82xx_crb_win_lock(ha);
qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
}
- data = RD_REG_DWORD(off);
+ data = rd_reg_dword(off);
if (rv == 1) {
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
@@ -531,29 +489,26 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
return data;
}
-#define IDC_LOCK_TIMEOUT 100000000
+/*
+ * Context: task, might sleep
+ */
int qla82xx_idc_lock(struct qla_hw_data *ha)
{
- int i;
- int done = 0, timeout = 0;
+ const int delay_ms = 100, timeout_ms = 2000;
+ int done, total = 0;
- while (!done) {
+ might_sleep();
+
+ while (true) {
/* acquire semaphore5 from PCI HW block */
done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
if (done == 1)
break;
- if (timeout >= IDC_LOCK_TIMEOUT)
+ if (WARN_ON_ONCE(total >= timeout_ms))
return -1;
- timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax();
- }
+ total += delay_ms;
+ msleep(delay_ms);
}
return 0;
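The rewrite above replaces an unbounded spin/schedule loop (and its old
in_interrupt() test) with the usual bounded-poll shape for task context:
sleep between attempts and give up after a total budget. The shape on its
own, with a hypothetical ready() predicate:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Poll ready() every 100 ms; fail after timeout_ms in total. */
static int poll_bounded(bool (*ready)(void), unsigned int timeout_ms)
{
	unsigned int waited = 0;

	might_sleep();
	while (!ready()) {
		if (waited >= timeout_ms)
			return -ETIMEDOUT;
		msleep(100);
		waited += 100;
	}
	return 0;
}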
@@ -937,17 +892,17 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
+ wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
/* Read back value to make sure write has gone through */
- RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+ wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
data);
else
- rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+ rval = rd_reg_dword(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase);
return rval;
@@ -1007,26 +962,21 @@ qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
- long timeout = 0;
- uint32_t done = 1 ;
- uint32_t val;
- int ret = 0;
+ uint32_t val = 0;
+ int i, ret;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
- while ((done != 0) && (ret == 0)) {
+ for (i = 0; i < 50000; i++) {
ret = qla82xx_read_status_reg(ha, &val);
- done = val & 1;
- timeout++;
+ if (ret < 0 || (val & 1) == 0)
+ return ret;
udelay(10);
cond_resched();
- if (timeout >= 50000) {
- ql_log(ql_log_warn, vha, 0xb00d,
- "Timeout reached waiting for write finish.\n");
- return -1;
- }
}
- return ret;
+ ql_log(ql_log_warn, vha, 0xb00d,
+ "Timeout reached waiting for write finish.\n");
+ return -1;
}
static int
@@ -1113,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
return ret;
}
- if (qla82xx_flash_set_write_enable(ha))
+ ret = qla82xx_flash_set_write_enable(ha);
+ if (ret < 0)
goto done_write;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
@@ -1213,6 +1164,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
* Offset 4: Offset and number of addr/value pairs
* that present in CRB initialize sequence
*/
+ n = 0;
if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
qla82xx_rom_fast_read(ha, 4, &n) != 0) {
ql_log(ql_log_fatal, vha, 0x006e,
@@ -1561,14 +1513,14 @@ qla82xx_get_table_desc(const u8 *unirom, int section)
uint32_t i;
struct qla82xx_uri_table_desc *directory =
(struct qla82xx_uri_table_desc *)&unirom[0];
- __le32 offset;
- __le32 tab_type;
- __le32 entries = cpu_to_le32(directory->num_entries);
+ uint32_t offset;
+ uint32_t tab_type;
+ uint32_t entries = le32_to_cpu(directory->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(directory->findex) +
- (i * cpu_to_le32(directory->entry_size));
- tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
+ offset = le32_to_cpu(directory->findex) +
+ (i * le32_to_cpu(directory->entry_size));
+ tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8);
if (tab_type == section)
return (struct qla82xx_uri_table_desc *)&unirom[offset];
@@ -1582,16 +1534,17 @@ qla82xx_get_data_desc(struct qla_hw_data *ha,
u32 section, u32 idx_offset)
{
const u8 *unirom = ha->hablob->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
+ int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] +
+ idx_offset);
struct qla82xx_uri_table_desc *tab_desc = NULL;
- __le32 offset;
+ uint32_t offset;
tab_desc = qla82xx_get_table_desc(unirom, section);
if (!tab_desc)
return NULL;
- offset = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * idx);
+ offset = le32_to_cpu(tab_desc->findex) +
+ (le32_to_cpu(tab_desc->entry_size) * idx);
return (struct qla82xx_uri_data_desc *)&unirom[offset];
}
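Every conversion fixed in this hunk reads a little-endian field out of the
firmware image, so the correct direction is LE to CPU; the old cpu_to_le32()
calls happened to be no-ops on little-endian hosts, which is why they seemed
to work there while silently breaking big-endian ones. get_unaligned_le32()
additionally tolerates byte offsets that are not 4-aligned. A one-function
sketch over a hypothetical blob:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Read a little-endian u32 at an arbitrary byte offset into a blob. */
static u32 blob_read_le32(const u8 *blob, size_t off)
{
	return get_unaligned_le32(blob + off);
}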
@@ -1606,7 +1559,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha,
QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1620,7 +1573,7 @@ static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- return cpu_to_le32(uri_desc->size);
+ return le32_to_cpu(uri_desc->size);
}
return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
@@ -1636,7 +1589,7 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1790,9 +1743,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &icb->request_q_address);
put_unaligned_le64(rsp->dma, &icb->response_q_address);
- WRT_REG_DWORD(&reg->req_q_out[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
+ wrt_reg_dword(&reg->req_q_out[0], 0);
+ wrt_reg_dword(&reg->rsp_q_in[0], 0);
+ wrt_reg_dword(&reg->rsp_q_out[0], 0);
}
static int
@@ -1847,8 +1800,8 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
struct qla82xx_uri_table_desc *ptab_desc = NULL;
const uint8_t *unirom = ha->hablob->fw->data;
uint32_t i;
- __le32 entries;
- __le32 flags, file_chiprev, offset;
+ uint32_t entries;
+ uint32_t flags, file_chiprev, offset;
uint8_t chiprev = ha->chip_revision;
/* Hardcoding mn_present flag for P3P */
int mn_present = 0;
@@ -1859,14 +1812,14 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
if (!ptab_desc)
return -1;
- entries = cpu_to_le32(ptab_desc->num_entries);
+ entries = le32_to_cpu(ptab_desc->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(ptab_desc->findex) +
- (i * cpu_to_le32(ptab_desc->entry_size));
- flags = cpu_to_le32(*((int *)&unirom[offset] +
+ offset = le32_to_cpu(ptab_desc->findex) +
+ (i * le32_to_cpu(ptab_desc->entry_size));
+ flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_FLAGS_OFF));
- file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
+ file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_CHIP_REV_OFF));
flagbit = mn_present ? 1 : 2;
@@ -1996,18 +1949,18 @@ void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+ wptr = &reg->mailbox_out[1];
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
}
@@ -2069,8 +2022,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2082,9 +2035,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2097,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -2135,11 +2088,11 @@ qla82xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
break;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2151,9 +2104,9 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2166,7 +2119,7 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
@@ -2196,11 +2149,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
reg = &ha->iobase->isp82;
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
qla24xx_process_response_queue(vha, rsp);
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
@@ -2213,7 +2166,6 @@ qla82xx_poll(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_82xx __iomem *reg;
- int status = 0;
uint32_t stat;
uint32_t host_int = 0;
uint16_t mb[8];
@@ -2231,24 +2183,23 @@ qla82xx_poll(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
case 0x2:
case 0x10:
case 0x11:
qla82xx_mbx_completion(vha, MSW(stat));
- status |= MBX_INTERRUPT;
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2260,7 +2211,7 @@ qla82xx_poll(int irq, void *dev_id)
			    stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2549,8 +2500,8 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static uint32_t *
-qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+static __le32 *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t length)
{
uint32_t i;
@@ -2675,13 +2626,13 @@ qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
scsi_block_requests(vha->host);
- qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ qla82xx_read_flash_data(vha, buf, offset, length);
scsi_unblock_requests(vha->host);
return buf;
}
static int
-qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
uint32_t faddr, uint32_t dwords)
{
int ret;
@@ -2758,7 +2709,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
}
ret = qla82xx_write_flash_dword(ha, faddr,
- cpu_to_le32(*dwptr));
+ le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_p3p, vha, 0xb020,
"Unable to program flash address=%x data=%x.\n",
@@ -2818,10 +2769,10 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
if (ql2xdbwr)
qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3110,8 +3061,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x00b6,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA8XXX_DEV_INITIALIZING &&
@@ -3234,8 +3184,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
old_dev_state = dev_state;
ql_log(ql_log_info, vha, 0x009b,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
@@ -3256,9 +3205,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
if (loopcount < 5) {
ql_log(ql_log_info, vha, 0x009d,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) :
- "Unknown");
+ dev_state, qdev_state(dev_state));
}
switch (dev_state) {
@@ -3488,8 +3435,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
} else
ql_log(ql_log_info, vha, 0xb031,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
}
/*
@@ -3724,7 +3670,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Minidump related functions */
static int
qla82xx_minidump_process_control(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
struct qla82xx_md_entry_crb *crb_entry;
@@ -3841,12 +3787,12 @@ qla82xx_minidump_process_control(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_rdocm *ocm_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
r_addr = ocm_hdr->read_addr;
@@ -3854,7 +3800,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
+ r_value = rd_reg_dword(r_addr + ha->nx_pcibase);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -3863,12 +3809,12 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
struct qla82xx_md_entry_mux *mux_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
r_addr = mux_hdr->read_addr;
@@ -3889,12 +3835,12 @@ qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_crb *crb_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
r_addr = crb_hdr->addr;
@@ -3912,7 +3858,7 @@ qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
@@ -3921,7 +3867,7 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
uint32_t c_value_w, c_value_r;
struct qla82xx_md_entry_cache *cache_hdr;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -3971,14 +3917,14 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
uint32_t c_value_w;
struct qla82xx_md_entry_cache *cache_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -4006,14 +3952,14 @@ qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t s_addr, r_addr;
uint32_t r_stride, r_value, r_cnt, qid = 0;
uint32_t i, k, loop_cnt;
struct qla82xx_md_entry_queue *q_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
s_addr = q_hdr->select_addr;
@@ -4036,13 +3982,13 @@ qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value;
uint32_t i, loop_cnt;
struct qla82xx_md_entry_rdrom *rom_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
r_addr = rom_hdr->read_addr;
@@ -4062,7 +4008,7 @@ qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value, r_data;
@@ -4070,7 +4016,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
struct qla82xx_md_entry_rdmem *m_hdr;
unsigned long flags;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
r_addr = m_hdr->read_addr;
@@ -4163,12 +4109,12 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
int no_entry_hdr = 0;
qla82xx_md_entry_hdr_t *entry_hdr;
struct qla82xx_md_template_hdr *tmplt_hdr;
- uint32_t *data_ptr;
+ __le32 *data_ptr;
uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
int i = 0, rval = QLA_FUNCTION_FAILED;
tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
- data_ptr = (uint32_t *)ha->md_dump;
+ data_ptr = ha->md_dump;
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xb037,
@@ -4177,7 +4123,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb038,
@@ -4357,7 +4303,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb044,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
md_failed:
@@ -4514,7 +4460,7 @@ exit:
}
void
-qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla82xx_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 230abee10598..6dc80c8ddf79 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NX_H
#define __QLA_NX_H
@@ -541,14 +540,18 @@
#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
/* Every driver should use these Device State */
-#define QLA8XXX_DEV_COLD 1
-#define QLA8XXX_DEV_INITIALIZING 2
-#define QLA8XXX_DEV_READY 3
-#define QLA8XXX_DEV_NEED_RESET 4
-#define QLA8XXX_DEV_NEED_QUIESCENT 5
-#define QLA8XXX_DEV_FAILED 6
-#define QLA8XXX_DEV_QUIESCENT 7
-#define MAX_STATES 8 /* Increment if new state added */
+enum {
+ QLA8XXX_DEV_UNKNOWN,
+ QLA8XXX_DEV_COLD,
+ QLA8XXX_DEV_INITIALIZING,
+ QLA8XXX_DEV_READY,
+ QLA8XXX_DEV_NEED_RESET,
+ QLA8XXX_DEV_NEED_QUIESCENT,
+ QLA8XXX_DEV_FAILED,
+ QLA8XXX_DEV_QUIESCENT,
+ MAX_STATES, /* Increment if new state added */
+};
+
#define QLA8XXX_BAD_VALUE 0xbad0bad0
#define QLA82XX_IDC_VERSION 1
@@ -800,16 +803,16 @@ struct qla82xx_legacy_intr_set {
#define QLA82XX_URI_FIRMWARE_IDX_OFF 29
struct qla82xx_uri_table_desc {
- uint32_t findex;
- uint32_t num_entries;
- uint32_t entry_size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
};
struct qla82xx_uri_data_desc {
- uint32_t findex;
- uint32_t size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
};
/* UNIFIED ROMIMAGE END */
@@ -829,22 +832,22 @@ struct qla82xx_uri_data_desc{
* ISP 8021 I/O Register Set structure definitions.
*/
struct device_reg_82xx {
- uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
- uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
- uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+ __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */
+ __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */
- uint16_t mailbox_in[32]; /* Mail box In registers */
- uint16_t unused_1[32];
- uint32_t hint; /* Host interrupt register */
+ __le16 mailbox_in[32]; /* Mailbox In registers */
+ __le16 unused_1[32];
+ __le32 hint; /* Host interrupt register */
#define HINT_MBX_INT_PENDING BIT_0
- uint16_t unused_2[62];
- uint16_t mailbox_out[32]; /* Mail box Out registers */
- uint32_t unused_3[48];
+ __le16 unused_2[62];
+ __le16 mailbox_out[32]; /* Mailbox Out registers */
+ __le32 unused_3[48];
- uint32_t host_status; /* host status */
+ __le32 host_status; /* host status */
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t host_int; /* Interrupt status. */
+ __le32 host_int; /* Interrupt status. */
#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
};
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index c056f466f1f4..41ff6fbdb933 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include <linux/vmalloc.h>
@@ -140,7 +139,7 @@ qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
uint32_t mask)
{
unsigned long timeout;
- uint32_t temp;
+ uint32_t temp = 0;
/* jiffies after 100ms */
timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
@@ -660,7 +659,7 @@ static int
qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
int duration, uint32_t test_mask, uint32_t test_result)
{
- uint32_t value;
+ uint32_t value = 0;
int timeout_error;
uint8_t retries;
int ret_val = QLA_SUCCESS;
@@ -1441,7 +1440,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)
if (idc_ctrl & GRACEFUL_RESET_BIT1) {
qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
(idc_ctrl & ~GRACEFUL_RESET_BIT1));
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
}
dev_ready:
@@ -1939,8 +1938,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
"Device state is 0x%x = %s\n",
- dev_state, dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
@@ -1953,8 +1951,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
ql_log(ql_log_warn, vha, 0xb0cf,
"%s: Device Init Failed 0x%x = %s\n",
QLA2XXX_DRIVER_NAME, dev_state,
- dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ qdev_state(dev_state));
qla8044_wr_direct(vha,
QLA8044_CRB_DEV_STATE_INDEX,
QLA8XXX_DEV_FAILED);
@@ -1964,8 +1961,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
ql_log(ql_log_info, vha, 0xb0d0,
"Device state is 0x%x = %s\n",
- dev_state, dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
@@ -2029,7 +2025,7 @@ exit_error:
}
/**
- * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * qla8044_check_temp - Check the ISP82XX temperature.
* @vha: adapter block pointer.
*
* Note: The caller should not hold the idc lock.
@@ -2227,19 +2223,16 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
if (opcode & QLA82XX_DBG_OPCODE_WR) {
qla8044_wr_reg_indirect(vha, crb_addr,
crb_entry->value_1);
- opcode &= ~QLA82XX_DBG_OPCODE_WR;
}
if (opcode & QLA82XX_DBG_OPCODE_RW) {
qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
qla8044_wr_reg_indirect(vha, crb_addr, read_value);
- opcode &= ~QLA82XX_DBG_OPCODE_RW;
}
if (opcode & QLA82XX_DBG_OPCODE_AND) {
qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
read_value &= crb_entry->value_2;
- opcode &= ~QLA82XX_DBG_OPCODE_AND;
if (opcode & QLA82XX_DBG_OPCODE_OR) {
read_value |= crb_entry->value_3;
opcode &= ~QLA82XX_DBG_OPCODE_OR;
@@ -2250,7 +2243,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
read_value |= crb_entry->value_3;
qla8044_wr_reg_indirect(vha, crb_addr, read_value);
- opcode &= ~QLA82XX_DBG_OPCODE_OR;
}
if (opcode & QLA82XX_DBG_OPCODE_POLL) {
poll_time = crb_entry->crb_strd.poll_timeout;
@@ -2270,7 +2262,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
crb_addr, &read_value);
}
} while (1);
- opcode &= ~QLA82XX_DBG_OPCODE_POLL;
}
if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
@@ -2284,7 +2275,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
qla8044_rd_reg_indirect(vha, addr, &read_value);
index = crb_entry->crb_ctrl.state_index_v;
tmplt_hdr->saved_state_array[index] = read_value;
- opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
}
if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
@@ -2304,7 +2294,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
}
qla8044_wr_reg_indirect(vha, addr, read_value);
- opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
}
if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
@@ -2317,7 +2306,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
read_value |= crb_entry->value_3;
read_value += crb_entry->value_1;
tmplt_hdr->saved_state_array[index] = read_value;
- opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
}
crb_addr += crb_entry->crb_strd.addr_stride;
}
@@ -2595,7 +2583,7 @@ qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
struct qla8044_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
- uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value = 0;
struct qla8044_minidump_entry_mux *mux_hdr;
uint32_t *data_ptr = *d_ptr;
@@ -2965,7 +2953,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
/* Prepare: Write pex-dma descriptor to MS memory. */
rval = qla8044_ms_mem_write_128b(vha,
- m_hdr->desc_card_addr, (void *)&dma_desc,
+ m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
(sizeof(struct qla8044_pex_dma_descriptor)/16));
if (rval) {
ql_log(ql_log_warn, vha, 0xb14a,
@@ -2987,7 +2975,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
read_size += chunk_size;
}
- *d_ptr = (void *)data_ptr;
+ *d_ptr = (uint32_t *)data_ptr;
error_exit:
if (rdmem_buffer)
@@ -3249,7 +3237,7 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb10e,
@@ -3470,7 +3458,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb110,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
@@ -3487,7 +3475,7 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
if (!qla8044_collect_md_data(vha)) {
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
ha->prev_minidump_failed = 0;
} else {
ql_log(ql_log_fatal, vha, 0xb0db,
@@ -3946,8 +3934,8 @@ qla8044_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
if ((stat & HSRX_RISC_INT) == 0)
break;
@@ -3961,9 +3949,9 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -3976,7 +3964,7 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -4070,7 +4058,7 @@ exit_isp_reset:
}
void
-qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla8044_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 8ba7c1db07c3..2fc902a9fade 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NX2_H
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b520a980d1dc..2c85f3cce726 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
@@ -15,6 +14,9 @@
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
+#include <linux/crash_dump.h>
+#include <linux/trace_events.h>
+#include <linux/trace.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -35,6 +37,18 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+static struct trace_array *qla_trc_array;
+
+int ql2xfulldump_on_mpifail;
+module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
+	"Set this to take a full dump on MPI hang.");
+
+int ql2xenforce_iocb_limit = 1;
+module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+	"Enforce IOCB throttling to avoid FW congestion (default: 1).");
+
/*
* CT6 CTX allocation cache
*/
@@ -44,6 +58,11 @@ static struct kmem_cache *ctx_cachep;
*/
uint ql_errlev = 0x8001;
+int ql2xsecenable;
+module_param(ql2xsecenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xsecenable,
+	"Enable/disable security: 0 (default) - security disabled; 1 - security enabled.");
+
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
@@ -102,6 +121,11 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
+int ql2xextended_error_logging_ktrace = 1;
+module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
+ "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
+
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
@@ -113,7 +137,8 @@ module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
- "0 - no FDMI. Default is 1 - perform FDMI.");
+ "0 - no FDMI registrations. "
+ "1 - provide FDMI registrations (default).");
#define MAX_Q_DEPTH 64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -122,11 +147,7 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 64.");
-#if (IS_ENABLED(CONFIG_NVME_FC))
-int ql2xenabledif;
-#else
int ql2xenabledif = 2;
-#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -190,12 +211,6 @@ MODULE_PARM_DESC(ql2xdbwr,
" 0 -- Regular doorbell.\n"
" 1 -- CAMRAM doorbell (faster).\n");
-int ql2xtargetreset = 1;
-module_param(ql2xtargetreset, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xtargetreset,
- "Enable target reset."
- "Default is 1 - use hw defaults.");
-
int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
@@ -306,11 +321,44 @@ MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
"0 (Default). Based on check.\n"
"1 Force using internal buffers\n");
+int ql2xsmartsan;
+module_param(ql2xsmartsan, int, 0444);
+module_param_named(smartsan, ql2xsmartsan, int, 0444);
+MODULE_PARM_DESC(ql2xsmartsan,
+ "Send SmartSAN Management Attributes for FDMI Registration."
+ " Default is 0 - No SmartSAN registration,"
+ " 1 - Register SmartSAN Management Attributes.");
+
+int ql2xrdpenable;
+module_param(ql2xrdpenable, int, 0444);
+module_param_named(rdpenable, ql2xrdpenable, int, 0444);
+MODULE_PARM_DESC(ql2xrdpenable,
+ "Enables RDP responses. "
+ "0 - no RDP responses (default). "
+ "1 - provide RDP responses.");
+int ql2xabts_wait_nvme = 1;
+module_param(ql2xabts_wait_nvme, int, 0444);
+MODULE_PARM_DESC(ql2xabts_wait_nvme,
+	"Wait for ABTS response on I/O timeouts for NVMe (default: 1).");
+
+static u32 ql2xdelay_before_pci_error_handling = 5;
+module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
+MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
+ "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
+u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+module_param(ql2xnvme_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(ql2xnvme_queues,
+ "Number of NVMe Queues that can be configured.\n"
+ "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
+ "1 - Minimum number of queues supported\n"
+ "8 - Default value");
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
@@ -583,6 +631,9 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
case 3:
speed_str = "8.0GT/s";
break;
+ case 4:
+ speed_str = "16.0GT/s";
+ break;
default:
speed_str = "<unknown>";
break;
@@ -698,10 +749,11 @@ void qla2x00_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- sp->free(sp);
+ /* kref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
- CMD_SP(cmd) = NULL;
- cmd->scsi_done(cmd);
+ sp->type = 0;
+ scsi_done(cmd);
if (comp)
complete(comp);
}
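
For context, the hunk above replaces the direct sp->free() callback with reference counting: kref_put() drops one reference and invokes the release function only when the count reaches zero, so whichever path drops the last reference frees the srb. A minimal userspace sketch of that pattern (hypothetical names, plain C11 atomics; not driver code):

/* Illustrative sketch of the kref pattern; obj_put() frees the
 * object only when the last reference is dropped. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o, void (*release)(struct obj *))
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		release(o);
}

static void obj_release(struct obj *o)
{
	printf("releasing %p\n", (void *)o);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* "INIT" reference */
	obj_get(o);			/* e.g. I/O in flight */
	obj_put(o, obj_release);	/* I/O completes: not freed yet */
	obj_put(o, obj_release);	/* INIT ref dropped: freed here */
	return 0;
}
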
@@ -789,10 +841,11 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
- CMD_SP(cmd) = NULL;
- cmd->scsi_done(cmd);
+ sp->type = 0;
+ scsi_done(cmd);
if (comp)
complete(comp);
}
@@ -819,7 +872,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
uint16_t hwq;
struct qla_qpair *qpair = NULL;
- tag = blk_mq_unique_tag(cmd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq = blk_mq_unique_tag_to_hwq(tag);
qpair = ha->queue_pair_map[hwq];
@@ -859,12 +912,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_fail_command;
}
- if (!fcport) {
- cmd->result = DID_NO_CONNECT << 16;
+ if (!fcport || fcport->deleted) {
+ cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
- if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
ql_dbg(ql_dbg_io, vha, 0x3005,
@@ -889,12 +942,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
+ /* ref: INIT */
qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
-
- CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
@@ -908,13 +960,14 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -932,7 +985,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
srb_t *sp;
int rval;
- rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
+ rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
if (rval) {
cmd->result = rval;
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
@@ -941,12 +994,19 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
goto qc24_fail_command;
}
- if (!fcport) {
+ if (!qpair->online) {
+ ql_dbg(ql_dbg_io, vha, 0x3077,
+ "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
- if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (!fcport || fcport->deleted) {
+ cmd->result = DID_IMM_RETRY << 16;
+ goto qc24_fail_command;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
ql_dbg(ql_dbg_io, vha, 0x3077,
@@ -971,11 +1031,11 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
+ /* ref: INIT */
qla2xxx_init_sp(sp, vha, qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
@@ -983,26 +1043,20 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
- if (rval == QLA_INTERFACE_ERROR)
- goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
return 0;
qc24_host_busy_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
-qc24_free_sp_fail_command:
- sp->free(sp);
- CMD_SP(cmd) = NULL;
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
-
qc24_fail_command:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -1027,6 +1081,7 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
unsigned long wait_iter = ABORT_WAIT_ITER;
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = scsi_cmd_priv(cmd);
int ret = QLA_SUCCESS;
if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
@@ -1035,10 +1090,9 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
return ret;
}
- while (CMD_SP(cmd) && wait_iter--) {
+ while (sp->type && wait_iter--)
msleep(ABORT_POLLING_PERIOD);
- }
- if (CMD_SP(cmd))
+ if (sp->type)
ret = QLA_FUNCTION_FAILED;
return ret;
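
The loop above is the classic bounded-poll idiom: re-check a condition, sleep between checks, and give up once the iteration budget runs out. A minimal sketch (illustrative constants; ABORT_WAIT_ITER and ABORT_POLLING_PERIOD are driver-defined):

/* Poll until *busy clears or the iteration budget is spent. */
#include <stdbool.h>
#include <unistd.h>

#define POLL_ITERS	10
#define POLL_MS		500

static int wait_for_clear(volatile bool *busy)
{
	unsigned long iter = POLL_ITERS;

	while (*busy && iter--)
		usleep(POLL_MS * 1000);	/* sleep between re-checks */

	return *busy ? -1 /* timed out */ : 0;
}
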
@@ -1090,12 +1144,28 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
int res;
+ /* Return 0 = keep sleeping, non-zero = wake up */
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_init, vha, 0x00ec,
"tgt %p, fcport_count=%d\n",
vha, vha->fcport_count);
res = (vha->fcport_count == 0);
+ if (res) {
+ struct fc_port *fcport;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->deleted != QLA_SESS_DELETED) {
+ /* session(s) may not be fully logged in
+ * (i.e. fcport_count=0), but session
+ * deletion thread(s) may still be in flight.
+ */
+
+ res = 0;
+ break;
+ }
+ }
+ }
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
@@ -1177,35 +1247,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-#define ISP_REG_DISCONNECT 0xffffffffU
-/**************************************************************************
-* qla2x00_isp_reg_stat
-*
-* Description:
-* Read the host status register of ISP before aborting the command.
-*
-* Input:
-* ha = pointer to host adapter structure.
-*
-*
-* Returns:
-* Either true or false.
-*
-* Note: Return true if there is register disconnect.
-**************************************************************************/
-static inline
-uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
-{
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-
- if (IS_P3P_TYPE(ha))
- return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
- else
- return ((RD_REG_DWORD(&reg->host_status)) ==
- ISP_REG_DISCONNECT);
-}
-
/**************************************************************************
* qla2xxx_eh_abort
*
@@ -1235,35 +1276,29 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint32_t ratov_j;
struct qla_qpair *qpair;
unsigned long flags;
+ int fast_fail_status = SUCCESS;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
+ /* Save any FAST_IO_FAIL value to return later if abort succeeds */
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
- return ret;
+ fast_fail_status = ret;
sp = scsi_cmd_priv(cmd);
qpair = sp->qpair;
+ vha->cmd_timeout_cnt++;
+
if ((sp->fcport && sp->fcport->deleted) || !qpair)
- return SUCCESS;
+ return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (sp->completed) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return SUCCESS;
- }
-
- if (sp->abort || sp->aborted) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return FAILED;
- }
-
- sp->abort = 1;
sp->comp = &comp;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1296,7 +1331,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
__func__, ha->r_a_tov/10);
ret = FAILED;
} else {
- ret = SUCCESS;
+ ret = fast_fail_status;
}
break;
default:
@@ -1316,21 +1351,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
-int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
- uint64_t l, enum nexus_wait_type type)
+static int
+__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
+ scsi_qla_host_t *vha = qpair->vha;
+ struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (cnt = 1; status == QLA_SUCCESS &&
cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
@@ -1357,12 +1391,32 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
if (!match)
continue;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
status = qla2x00_eh_wait_on_command(cmd);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ return status;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
+{
+ struct qla_qpair *qpair;
+ struct qla_hw_data *ha = vha->hw;
+ int i, status = QLA_SUCCESS;
+ status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
+ type);
+ for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
+ qpair = ha->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
+ type);
+ }
return status;
}
@@ -1374,27 +1428,36 @@ static char *reset_errors[] = {
};
static int
-__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
- struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
+qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct scsi_device *sdev = cmd->device;
+ scsi_qla_host_t *vha = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+ struct qla_hw_data *ha = vha->hw;
int err;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
if (!fcport) {
return FAILED;
}
- err = fc_block_scsi_eh(cmd);
+ err = fc_block_rport(rport);
if (err != 0)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
- "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
- cmd->device->id, cmd->device->lun, cmd);
+ "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
+ sdev->id, sdev->lun, cmd);
err = 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -1403,64 +1466,100 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
goto eh_reset_failed;
}
err = 2;
- if (do_reset(fcport, cmd->device->lun, 1)
+ if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
- cmd->device->lun, type) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+ sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
ql_log(ql_log_info, vha, 0x800e,
- "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
- vha->host_no, cmd->device->id, cmd->device->lun, cmd);
+ "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
+ vha->host_no, sdev->id, sdev->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
- "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
- reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
+ reset_errors[err], vha->host_no, sdev->id, sdev->lun,
cmd);
+ vha->reset_cmd_err_cnt++;
return FAILED;
}
static int
-qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct scsi_device *sdev = cmd->device;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
struct qla_hw_data *ha = vha->hw;
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ int err;
if (qla2x00_isp_reg_stat(ha)) {
- ql_log(ql_log_info, vha, 0x803e,
+ ql_log(ql_log_info, vha, 0x803f,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
- return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
- ha->isp_ops->lun_reset);
-}
+ if (!fcport) {
+ return FAILED;
+ }
-static int
-qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
-{
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- struct qla_hw_data *ha = vha->hw;
+ err = fc_block_rport(rport);
+ if (err != 0)
+ return err;
- if (qla2x00_isp_reg_stat(ha)) {
- ql_log(ql_log_info, vha, 0x803f,
- "PCI/Register disconnect, exiting.\n");
+ if (fcport->deleted)
return FAILED;
+
+ ql_log(ql_log_info, vha, 0x8009,
+ "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
+ sdev->id, cmd);
+
+ err = 0;
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800a,
+ "Wait for hba online failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+ err = 2;
+ if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800c,
+ "target_reset failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
}
+ err = 3;
+ if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+ 0, WAIT_TARGET) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800d,
+ "wait for pending cmds failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0x800e,
+ "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
+ vha->host_no, sdev->id, cmd);
+
+ return SUCCESS;
- return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
- ha->isp_ops->target_reset);
+eh_reset_failed:
+ ql_log(ql_log_info, vha, 0x800f,
+ "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
+ reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ cmd);
+ vha->reset_cmd_err_cnt++;
+ return FAILED;
}
/**************************************************************************
@@ -1482,7 +1581,6 @@ static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int ret = FAILED;
unsigned int id;
uint64_t lun;
@@ -1491,21 +1589,13 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8040,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport) {
- return ret;
- }
-
- ret = fc_block_scsi_eh(cmd);
- if (ret != 0)
- return ret;
- ret = FAILED;
-
if (qla2x00_chip_is_down(vha))
return ret;
@@ -1568,7 +1658,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8041,
"PCI/Register disconnect, exiting.\n");
- schedule_work(&ha->board_disable);
+ qla_pci_set_eeh_busy(vha);
return SUCCESS;
}
@@ -1642,27 +1732,10 @@ int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
int ret;
- struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLAFX00(ha)) {
- return qlafx00_loop_reset(vha);
- }
-
- if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- ret = ha->isp_ops->target_reset(fcport, 0, 0);
- if (ret != QLA_SUCCESS) {
- ql_dbg(ql_dbg_taskm, vha, 0x802c,
- "Bus Reset failed: Reset=%d "
- "d_id=%x.\n", ret, fcport->d_id.b24);
- }
- }
- }
-
+ if (IS_QLAFX00(ha))
+ return QLA_SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1688,6 +1761,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
unsigned long *flags)
__releases(qp->qp_lock_ptr)
@@ -1696,10 +1773,13 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
int rval;
bool ret_cmd;
uint32_t ratov_j;
+ lockdep_assert_held(qp->qp_lock_ptr);
+
if (qla2x00_chip_is_down(vha)) {
sp->done(sp, res);
return;
@@ -1715,7 +1795,6 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
sp->comp = &comp;
- sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
rval = ha->isp_ops->abort_command(sp);
@@ -1739,13 +1818,17 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && (!sp->completed || !sp->aborted))
+ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
sp->done(sp, res);
} else {
sp->done(sp, res);
}
}
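
Note the locking shape in qla2x00_abort_srb() above: the qpair lock is dropped before abort_command() and the completion wait, both of which may sleep, and re-taken afterwards to finish up. A userspace analog of that drop-the-lock-around-a-blocking-call pattern (hypothetical names; not driver code):

/* A sleeping wait must never happen under a spinlock, so release
 * the lock around the blocking calls and re-validate afterwards. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void abort_and_wait(void (*send_abort)(void), void (*wait_done)(void))
{
	pthread_mutex_lock(&lock);
	/* ... inspect/flag the request under the lock ... */
	pthread_mutex_unlock(&lock);	/* drop lock: next calls may sleep */

	send_abort();			/* may block on hardware/firmware */
	wait_done();			/* wait for completion, may sleep */

	pthread_mutex_lock(&lock);
	/* ... re-validate: state may have changed while unlocked ... */
	pthread_mutex_unlock(&lock);
}
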
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
@@ -1792,6 +1875,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
@@ -1861,7 +1948,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
- !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
@@ -1871,7 +1958,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
}
dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
- pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
static void
@@ -1883,8 +1970,8 @@ qla2x00_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1898,8 +1985,8 @@ qla2x00_disable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, 0);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, 0);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1911,8 +1998,8 @@ qla24xx_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
- WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1926,8 +2013,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1993,6 +2080,11 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
+
+ /* Check if FW supports MQ or not */
+ if (!(ha->fw_attributes & BIT_6))
+ goto mqiobase_exit;
+
if (!ql2xmqsupport || !ql2xnvmeenable ||
(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
@@ -2285,7 +2377,7 @@ static struct isp_operations qla81xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -2402,7 +2494,7 @@ static struct isp_operations qla83xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -2499,6 +2591,7 @@ static struct isp_operations qla27xx_isp_ops = {
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla27xx_fwdump,
+ .mpi_fw_dump = qla27xx_mpi_fwdump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
@@ -2723,6 +2816,16 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla_heartbeat_work_fn(struct work_struct *work)
+{
+ struct qla_hw_data *ha = container_of(work,
+ struct qla_hw_data, heartbeat_work);
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!ha->flags.mbox_busy && base_vha->flags.init_done)
+ qla_no_op_mb(base_vha);
+}
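
The worker above recovers its qla_hw_data from the embedded work_struct via container_of(). A self-contained sketch of that idiom (the kernel supplies the real macro; this is a plain-C illustration with hypothetical types):

/* Recover the containing structure from a pointer to one of its
 * embedded members, by subtracting the member's offset. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct hw_data {
	int id;
	struct work heartbeat_work;	/* embedded member */
};

static void worker(struct work *w)
{
	struct hw_data *hw = container_of(w, struct hw_data, heartbeat_work);

	printf("heartbeat for hw id=%d\n", hw->id);
}

int main(void)
{
	struct hw_data hw = { .id = 42 };

	worker(&hw.heartbeat_work);	/* only the member pointer is passed */
	return 0;
}
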
+
static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
@@ -2745,6 +2848,27 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&vha->work_lock, flags);
}
+static void
+qla_trace_init(void)
+{
+ qla_trc_array = trace_array_get_by_name("qla2xxx");
+ if (!qla_trc_array) {
+ ql_log(ql_log_fatal, NULL, 0x0001,
+ "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
+ return;
+ }
+
+ QLA_TRACE_ENABLE(qla_trc_array);
+}
+
+static void
+qla_trace_uninit(void)
+{
+ if (!qla_trc_array)
+ return;
+ trace_array_put(qla_trc_array);
+}
+
/*
* PCI driver interface
*/
@@ -2801,13 +2925,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
+ if (is_kdump_kernel()) {
+ ql2xmqsupport = 0;
+ ql2xallocfwdump = 0;
+ }
+
/* This may fail but that's ok */
pci_enable_pcie_error_reporting(pdev);
- /* Turn off T10-DIF when FC-NVMe is enabled */
- if (ql2xnvmeenable)
- ql2xenabledif = 0;
-
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
@@ -2822,6 +2947,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
+ spin_lock_init(&ha->sadb_lock);
+ INIT_LIST_HEAD(&ha->sadb_tx_index_list);
+ INIT_LIST_HEAD(&ha->sadb_rx_index_list);
+
+ spin_lock_init(&ha->sadb_fp_lock);
+
+ if (qla_edif_sadb_build_free_pool(ha)) {
+ kfree(ha);
+ goto disable_device;
+ }
+
atomic_set(&ha->nvme_active_aen_cnt, 0);
/* Clear our data area */
@@ -3020,8 +3156,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- req_length = REQUEST_ENTRY_CNT_24XX;
- rsp_length = RESPONSE_ENTRY_CNT_2300;
+ req_length = REQUEST_ENTRY_CNT_83XX;
+ rsp_length = RESPONSE_ENTRY_CNT_83XX;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -3111,6 +3247,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
ha->mr.fcport.scan_state = 1;
+ qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
+ QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
+ QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);
+
/* Set the SG table size based on ISP type */
if (!IS_FWI2_CAPABLE(ha)) {
if (IS_QLA2100(ha))
@@ -3145,6 +3285,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->transportt, sht->vendor_id);
INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+ INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
@@ -3235,7 +3376,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (unlikely(!ha->wq)) {
ret = -ENOMEM;
goto probe_failed;
@@ -3277,6 +3418,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ /* Check if FW supports MQ or not for ISP25xx */
+ if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
+ ha->mqenable = 0;
+
if (ha->mqenable) {
bool startit = false;
@@ -3290,6 +3435,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 0; i < ha->max_qpairs; i++)
qla2xxx_create_qpair(base_vha, 5, 0, startit);
}
+ qla_init_iocb_limit(base_vha);
if (ha->flags.running_gold_fw)
goto skip_dpc;
@@ -3404,7 +3550,7 @@ skip_dpc:
qla_dual_mode_enabled(base_vha))
scsi_scan_host(host);
else
- ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ ql_log(ql_log_info, base_vha, 0x0122,
"skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -3439,16 +3585,11 @@ skip_dpc:
if (test_bit(UNLOADING, &base_vha->dpc_flags))
return -ENODEV;
- if (ha->flags.detected_lr_sfp) {
- ql_log(ql_log_info, base_vha, 0xffff,
- "Reset chip to pick up LR SFP setting\n");
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- qla2xxx_wake_dpc(base_vha);
- }
-
return 0;
probe_failed:
+ qla_enode_stop(base_vha);
+ qla_edb_stop(base_vha);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3671,8 +3812,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- ha->msixbase)
+ if (ha->msixbase)
iounmap(ha->msixbase);
}
}
@@ -3720,6 +3860,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ /*
+ * If the UNLOADING flag is already set, return and let the path
+ * that set it first continue the unload.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
if (ha->flags.fw_started)
@@ -3738,21 +3885,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_wait_for_sess_deletion(base_vha);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla_nvme_delete(base_vha);
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
+ qla_enode_stop(base_vha);
+ qla_edb_stop(base_vha);
vfree(base_vha->scan.l);
@@ -3786,7 +3926,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
- qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -3806,6 +3945,22 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static inline void
+qla24xx_free_purex_list(struct purex_list *list)
+{
+ struct purex_item *item, *next;
+ ulong flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_for_each_entry_safe(item, next, &list->head, list) {
+ list_del(&item->list);
+ if (item == &item->vha->default_item)
+ continue;
+ kfree(item);
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
@@ -3832,16 +3987,20 @@ qla2x00_free_device(scsi_qla_host_t *vha)
/* Flush the work queue and remove it */
if (ha->wq) {
- flush_workqueue(ha->wq);
destroy_workqueue(ha->wq);
ha->wq = NULL;
}
+ qla24xx_free_purex_list(&vha->purex_list);
+
qla2x00_mem_free(ha);
qla82xx_md_free(vha);
+ qla_edif_sadb_release_free_pool(ha);
+ qla_edif_sadb_release(ha);
+
qla2x00_free_queues(ha);
}
@@ -3894,6 +4053,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport);
}
+
/*
* We may need to retry the login, so don't change the state of the
* port but do the retries.
@@ -3907,19 +4067,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
-/*
- * qla2x00_mark_all_devices_lost
- * Updates fcport state when device goes offline.
- *
- * Input:
- * ha = adapter block pointer.
- * fcport = port structure pointer.
- *
- * Return:
- * None.
- *
- * Context:
- */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
{
@@ -3929,18 +4076,18 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
"Mark all dev lost\n");
list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) &&
+ fcport->port_type == FCT_TARGET &&
+ !qla2x00_reset_active(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x211a,
+ "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
+ fcport->flags, fcport->port_type,
+ fcport->d_id.b24, fcport->port_name);
+ continue;
+ }
fcport->scan_state = 0;
qlt_schedule_sess_for_deletion(fcport);
-
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
- continue;
-
- /*
- * No point in marking the device as lost, if the device is
- * already DEAD.
- */
- if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
- continue;
}
}
@@ -3970,15 +4117,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
struct req_que **req, struct rsp_que **rsp)
{
char name[16];
+ int rc;
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
&ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
goto fail;
- if (qlt_mem_alloc(ha) < 0)
+ rc = btree_init32(&ha->host_map);
+ if (rc)
goto fail_free_init_cb;
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_btree;
+
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
@@ -3988,7 +4140,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
- if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
/* Allocate cache for CT6 Ctx. */
if (!ctx_cachep) {
ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -4022,7 +4174,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
- if (IS_P3P_TYPE(ha) || ql2xenabledif) {
+ if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
@@ -4063,7 +4215,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ql_dbg_pci(ql_dbg_init, ha->pdev,
0xe0ee, "%s: failed alloc dsd\n",
__func__);
- return 1;
+ return -ENOMEM;
}
ha->dif_bundle_kallocs++;
@@ -4209,6 +4361,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"ex_init_cb=%p.\n", ha->ex_init_cb);
}
+ /* Get consistent memory allocated for Special Features-CB. */
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->sf_init_cb_dma);
+ if (!ha->sf_init_cb)
+ goto fail_sf_init_cb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
+ "sf_init_cb=%p.\n", ha->sf_init_cb);
+ }
+
INIT_LIST_HEAD(&ha->gbl_dsd_list);
/* Get consistent memory allocated for Async Port-Database. */
@@ -4252,8 +4414,36 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
goto fail_flt_buffer;
}
+ /* allocate the purex dma pool */
+ ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ ELS_MAX_PAYLOAD, 8, 0);
+
+ if (!ha->purex_dma_pool) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "Unable to allocate purex_dma_pool.\n");
+ goto fail_flt;
+ }
+
+ ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
+ ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
+ ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
+
+ if (!ha->elsrej.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for els reject cmd.\n");
+ goto fail_elsrej;
+ }
+ ha->elsrej.c->er_cmd = ELS_LS_RJT;
+ ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
+ ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
return 0;
+fail_elsrej:
+ dma_pool_destroy(ha->purex_dma_pool);
+fail_flt:
+ dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ ha->flt, ha->flt_dma);
+
fail_flt_buffer:
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
ha->sfp_data, ha->sfp_data_dma);
@@ -4262,6 +4452,8 @@ fail_sfp_data:
fail_loop_id_map:
dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
+ dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
+fail_sf_init_cb:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
kfree(ha->npiv_info);
@@ -4342,6 +4534,8 @@ fail_free_gid_list:
ha->gid_list_dma = 0;
fail_free_tgt_mem:
qlt_mem_free(ha);
+fail_free_btree:
+ btree_destroy32(&ha->host_map);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
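
The new fail_free_btree label extends the function's goto-unwind error ladder: each acquisition adds one label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A minimal sketch of the idiom (hypothetical resources; not driver code):

/* Each allocation gets an unwind label; failures release the
 * earlier acquisitions in reverse order and return an error. */
#include <stdlib.h>

static int setup(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto fail;
	*b = malloc(16);
	if (!*b)
		goto fail_free_a;
	*c = malloc(16);
	if (!*c)
		goto fail_free_b;
	return 0;

fail_free_b:
	free(*b);
fail_free_a:
	free(*a);
fail:
	return -1;
}
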
@@ -4357,11 +4551,12 @@ int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
int rval;
- uint16_t size, max_cnt, temp;
+ uint16_t size, max_cnt;
+ uint32_t temp;
struct qla_hw_data *ha = vha->hw;
/* Return if we don't need to allocate any extended logins */
- if (!ql2xexlogins)
+ if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
return QLA_SUCCESS;
if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
@@ -4603,23 +4798,21 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev,
EFT_SIZE, ha->eft, ha->eft_dma);
- if (ha->fw_dump)
- vfree(ha->fw_dump);
+ vfree(ha->fw_dump);
ha->fce = NULL;
ha->fce_dma = 0;
ha->flags.fce_enabled = 0;
ha->eft = NULL;
ha->eft_dma = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
ha->fw_dump_cap_flags = 0;
ha->fw_dump_reading = 0;
ha->fw_dump = NULL;
ha->fw_dump_len = 0;
for (j = 0; j < 2; j++, fwdt++) {
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
}
@@ -4684,6 +4877,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
+ if (ha->sf_init_cb)
+ dma_pool_free(ha->s_dma_pool,
+ ha->sf_init_cb, ha->sf_init_cb_dma);
+
if (ha->ex_init_cb)
dma_pool_free(ha->s_dma_pool,
ha->ex_init_cb, ha->ex_init_cb_dma);
@@ -4755,10 +4952,21 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->dif_bundl_pool = NULL;
qlt_mem_free(ha);
+ qla_remove_hostmap(ha);
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
+
+ dma_pool_destroy(ha->purex_dma_pool);
+ ha->purex_dma_pool = NULL;
+
+ if (ha->elsrej.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
+ ha->elsrej.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
@@ -4771,6 +4979,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
kfree(ha->swl);
ha->swl = NULL;
kfree(ha->loop_id_map);
+ ha->sf_init_cb = NULL;
+ ha->sf_init_cb_dma = 0;
ha->loop_id_map = NULL;
}
@@ -4803,7 +5013,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
INIT_LIST_HEAD(&vha->qla_cmd_list);
- INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
@@ -4811,10 +5020,16 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->gpnid_list);
INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
+ INIT_LIST_HEAD(&vha->purex_list.head);
+ spin_lock_init(&vha->purex_list.lock);
+
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
+ qla_enode_init(vha);
+ qla_edb_init(vha);
+
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -4841,7 +5056,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
}
INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
- sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
vha->host, vha->hw, vha,
@@ -4856,6 +5071,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
struct qla_work_evt *e;
uint8_t bail;
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return NULL;
+
QLA_VHA_MARK_BUSY(vha, bail);
if (bail)
return NULL;
@@ -4967,7 +5185,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
switch (code) {
case QLA_UEVENT_CODE_FW_DUMP:
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
vha->host_no);
break;
default:
@@ -5049,12 +5267,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ fcport->tgt_short_link_down_cnt = 0;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
fcport->fc4_type = e->u.new_sess.fc4_type;
+ if (NVME_PRIORITY(vha->hw, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+
if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->dm_login_expire = jiffies +
+ QLA_N2N_WAIT_TIME * HZ;
fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
if (vha->flags.nvme_enabled)
@@ -5168,9 +5394,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
- /*
- * wait link init done before sending login
- */
+
+ schedule_delayed_work(&vha->scan.scan_work, 5);
} else {
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -5300,6 +5525,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
e->u.fcport.fcport, false);
break;
+ case QLA_EVT_SA_REPLACE:
+ rc = qla24xx_issue_sa_replace_iocb(vha, e);
+ break;
}
if (rc == EAGAIN) {
@@ -5349,6 +5577,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (atomic_read(&fcport->state) != FCS_ONLINE &&
fcport->login_retry) {
if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
fcport->disc_state == DSC_LOGIN_COMPLETE)
continue;
@@ -5361,6 +5590,11 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.fcport = fcport;
qla24xx_handle_relogin_event(vha, &ea);
} else if (vha->hw->current_topology ==
+ ISP_CFG_NL &&
+ IS_QLA2XXX_MIDTYPE(vha->hw)) {
+ (void)qla24xx_fcport_handle_login(vha,
+ fcport);
+ } else if (vha->hw->current_topology ==
ISP_CFG_NL) {
fcport->login_retry--;
status =
@@ -5577,25 +5811,10 @@ qla83xx_service_idc_aen(struct work_struct *work)
}
}
-static void
-qla83xx_wait_logic(void)
-{
- int i;
-
- /* Yield CPU */
- if (!in_interrupt()) {
- /*
- * Wait about 200ms before retrying again.
- * This controls the number of retries for single
- * lock operation.
- */
- msleep(100);
- schedule();
- } else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /* This a nop instr on i386 */
- }
-}
+/*
+ * Control the frequency of IDC lock retries
+ */
+#define QLA83XX_WAIT_LOGIC_MS 100
static int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
@@ -5685,7 +5904,7 @@ retry_lockid:
goto exit;
if (o_drv_lockid == n_drv_lockid) {
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
goto retry_lockid;
} else
return QLA_SUCCESS;
@@ -5694,6 +5913,9 @@ exit:
return rval;
}
+/*
+ * Context: task, can sleep
+ */
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
@@ -5701,6 +5923,8 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
+ might_sleep();
+
/* IDC-lock implementation using driver-lock/lock-id remote registers */
retry_lock:
if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
@@ -5719,7 +5943,7 @@ retry_lock:
/* Retry/Perform IDC-Lock recovery */
if (qla83xx_idc_lock_recovery(base_vha)
== QLA_SUCCESS) {
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
goto retry_lock;
} else
ql_log(ql_log_warn, base_vha, 0xb075,
@@ -5731,6 +5955,495 @@ retry_lock:
return;
}
+static bool
+qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
+ struct purex_entry_24xx *purex)
+{
+ char fwstr[16];
+ u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
+ struct port_database_24xx *pdb;
+
+ /* Domain Controller is always logged-out. */
+ /* if RDP request is not from Domain Controller: */
+ if (sid != 0xfffc01)
+ return false;
+
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
+
+ pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
+ if (!pdb) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Failed allocate pdb\n", __func__);
+ } else if (qla24xx_get_port_database(vha,
+ le16_to_cpu(purex->nport_handle), pdb)) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Failed get pdb sid=%x\n", __func__, sid);
+ } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
+ pdb->current_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Port not logged in sid=%#x\n", __func__, sid);
+ } else {
+ /* RDP request is from logged in port */
+ kfree(pdb);
+ return false;
+ }
+ kfree(pdb);
+
+ vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
+ fwstr[strcspn(fwstr, " ")] = 0;
+ /* if FW version allows RDP response length up to 2048 bytes: */
+ if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
+ return false;
+
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
+
+ /* RDP response length must be reduced to a maximum of 256 bytes */
+ return true;
+}
+
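The strcmp() checks above rely on the firmware version string using fixed-width, zero-padded fields ("8.09.00"), which makes lexicographic order agree with numeric order. A small demonstration (assumed format; unpadded version strings would need numeric parsing instead):

/* Zero-padded version fields compare correctly with strcmp();
 * unpadded ones do not. */
#include <assert.h>
#include <string.h>

int main(void)
{
	assert(strcmp("8.09.00", "8.05.65") > 0);	/* 8.09 newer than 8.05 */
	assert(strcmp("8.10.00", "8.09.00") > 0);	/* padding keeps order  */
	/* Contrast: without padding, '9' > '1' lexicographically,
	 * so this compares "newer" even though 8.9.0 < 8.10.0. */
	assert(strcmp("8.9.0", "8.10.0") > 0);		/* numerically wrong! */
	return 0;
}
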
+/*
+ * Function Name: qla24xx_process_purex_rdp
+ *
+ * Description:
+ * Prepare an RDP response and send it to the fabric switch
+ *
+ * PARAMETERS:
+ * vha: SCSI qla host
+ * purex: RDP request received by HBA
+ */
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
+ struct purex_item *item)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct purex_entry_24xx *purex =
+ (struct purex_entry_24xx *)&item->iocb;
+ dma_addr_t rsp_els_dma;
+ dma_addr_t rsp_payload_dma;
+ dma_addr_t stat_dma;
+ dma_addr_t sfp_dma;
+ struct els_entry_24xx *rsp_els = NULL;
+ struct rdp_rsp_payload *rsp_payload = NULL;
+ struct link_statistics *stat = NULL;
+ uint8_t *sfp = NULL;
+ uint16_t sfp_flags = 0;
+ uint rsp_payload_length = sizeof(*rsp_payload);
+ int rval;
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
+ "%s: Enter\n", __func__);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
+ "-------- ELS REQ -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
+ purex, sizeof(*purex));
+
+ if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
+ rsp_payload_length =
+ offsetof(typeof(*rsp_payload), optical_elmt_desc);
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "Reducing RSP payload length to %u bytes...\n",
+ rsp_payload_length);
+ }
+
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+ &rsp_els_dma, GFP_KERNEL);
+ if (!rsp_els) {
+ ql_log(ql_log_warn, vha, 0x0183,
+ "Failed allocate dma buffer ELS RSP.\n");
+ goto dealloc;
+ }
+
+ rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+ &rsp_payload_dma, GFP_KERNEL);
+ if (!rsp_payload) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Failed allocate dma buffer ELS RSP payload.\n");
+ goto dealloc;
+ }
+
+ sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+ &sfp_dma, GFP_KERNEL);
+
+ stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
+ &stat_dma, GFP_KERNEL);
+
+ /* Prepare Response IOCB */
+ rsp_els->entry_type = ELS_IOCB_TYPE;
+ rsp_els->entry_count = 1;
+ rsp_els->sys_define = 0;
+ rsp_els->entry_status = 0;
+ rsp_els->handle = 0;
+ rsp_els->nport_handle = purex->nport_handle;
+ rsp_els->tx_dsd_count = cpu_to_le16(1);
+ rsp_els->vp_index = purex->vp_idx;
+ rsp_els->sof_type = EST_SOFI3;
+ rsp_els->rx_xchg_address = purex->rx_xchg_addr;
+ rsp_els->rx_dsd_count = 0;
+ rsp_els->opcode = purex->els_frame_payload[0];
+
+ rsp_els->d_id[0] = purex->s_id[0];
+ rsp_els->d_id[1] = purex->s_id[1];
+ rsp_els->d_id[2] = purex->s_id[2];
+
+ rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
+ rsp_els->rx_byte_count = 0;
+ rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
+
+ put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
+ rsp_els->tx_len = rsp_els->tx_byte_count;
+
+ rsp_els->rx_address = 0;
+ rsp_els->rx_len = 0;
+
+ /* Prepare Response Payload */
+ rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
+ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
+ sizeof(rsp_payload->hdr));
+
+ /* Link service Request Info Descriptor */
+ rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
+ rsp_payload->ls_req_info_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
+ rsp_payload->ls_req_info_desc.req_payload_word_0 =
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
+
+ /* Link service Request Info Descriptor 2 */
+ rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
+ rsp_payload->ls_req_info_desc2.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
+ rsp_payload->ls_req_info_desc2.req_payload_word_0 =
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
+
+
+ rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
+ rsp_payload->sfp_diag_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
+
+ if (sfp) {
+ /* SFP Flags */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
+ if (!rval) {
+ /* SFP Flags bits 3-0: Port Tx Laser Type */
+ if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
+ sfp_flags |= BIT_0; /* short wave */
+ else if (sfp[0] & BIT_1)
+ sfp_flags |= BIT_1; /* long wave 1310nm */
+ else if (sfp[1] & BIT_4)
+ sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
+ }
+
+ /* SFP Type */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
+ if (!rval) {
+ sfp_flags |= BIT_4; /* optical */
+ if (sfp[0] == 0x3)
+ sfp_flags |= BIT_6; /* sfp+ */
+ }
+
+ rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
+
+ /* SFP Diagnostics */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
+ if (!rval) {
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
+ rsp_payload->sfp_diag_desc.temperature = trx[0];
+ rsp_payload->sfp_diag_desc.vcc = trx[1];
+ rsp_payload->sfp_diag_desc.tx_bias = trx[2];
+ rsp_payload->sfp_diag_desc.tx_power = trx[3];
+ rsp_payload->sfp_diag_desc.rx_power = trx[4];
+ }
+ }
+
+ /* Port Speed Descriptor */
+ rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
+ rsp_payload->port_speed_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
+ rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
+ qla25xx_fdmi_port_speed_capability(ha));
+ rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
+ qla25xx_fdmi_port_speed_currently(ha));
+
+ /* Link Error Status Descriptor */
+ rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
+ rsp_payload->ls_err_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
+
+ if (stat) {
+ rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
+ if (!rval) {
+ rsp_payload->ls_err_desc.link_fail_cnt =
+ cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
+ rsp_payload->ls_err_desc.loss_sync_cnt =
+ cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
+ rsp_payload->ls_err_desc.loss_sig_cnt =
+ cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
+ rsp_payload->ls_err_desc.prim_seq_err_cnt =
+ cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
+ rsp_payload->ls_err_desc.inval_xmit_word_cnt =
+ cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
+ rsp_payload->ls_err_desc.inval_crc_cnt =
+ cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
+ rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
+ }
+ }
+
+ /* Portname Descriptor */
+ rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
+ rsp_payload->port_name_diag_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
+ memcpy(rsp_payload->port_name_diag_desc.WWNN,
+ vha->node_name,
+ sizeof(rsp_payload->port_name_diag_desc.WWNN));
+ memcpy(rsp_payload->port_name_diag_desc.WWPN,
+ vha->port_name,
+ sizeof(rsp_payload->port_name_diag_desc.WWPN));
+
+ /* F-Port Portname Descriptor */
+ rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
+ rsp_payload->port_name_direct_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
+ memcpy(rsp_payload->port_name_direct_desc.WWNN,
+ vha->fabric_node_name,
+ sizeof(rsp_payload->port_name_direct_desc.WWNN));
+ memcpy(rsp_payload->port_name_direct_desc.WWPN,
+ vha->fabric_port_name,
+ sizeof(rsp_payload->port_name_direct_desc.WWPN));
+
+ /* Buffer Credit Descriptor */
+ rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
+ rsp_payload->buffer_credit_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
+ rsp_payload->buffer_credit_desc.fcport_b2b = 0;
+ rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
+ rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
+
+ if (ha->flags.plogi_template_valid) {
+ uint32_t tmp =
+ be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
+ rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
+ }
+
+ if (rsp_payload_length < sizeof(*rsp_payload))
+ goto send;
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[0].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Voltage */
+ rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[1].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Tx Bias Current */
+ rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[2].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Tx Power */
+ rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[3].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Rx Power */
+ rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[4].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+
+ if (sfp) {
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
+ if (!rval) {
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
+ rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
+ rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
+ rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
+ rsp_payload->optical_elmt_desc[0].element_flags =
+ cpu_to_be32(1 << 28);
+
+ /* Optical Element Descriptor, Voltage */
+ rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
+ rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
+ rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
+ rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
+ rsp_payload->optical_elmt_desc[1].element_flags =
+ cpu_to_be32(2 << 28);
+
+ /* Optical Element Descriptor, Tx Bias Current */
+ rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
+ rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
+ rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
+ rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
+ rsp_payload->optical_elmt_desc[2].element_flags =
+ cpu_to_be32(3 << 28);
+
+ /* Optical Element Descriptor, Tx Power */
+ rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
+ rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
+ rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
+ rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
+ rsp_payload->optical_elmt_desc[3].element_flags =
+ cpu_to_be32(4 << 28);
+
+ /* Optical Element Descriptor, Rx Power */
+ rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
+ rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
+ rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
+ rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
+ rsp_payload->optical_elmt_desc[4].element_flags =
+ cpu_to_be32(5 << 28);
+ }
+
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
+ if (!rval) {
+ /* Temperature high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[0].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 7 & 1) << 3 |
+ (sfp[0] >> 6 & 1) << 2 |
+ (sfp[4] >> 7 & 1) << 1 |
+ (sfp[4] >> 6 & 1) << 0);
+
+ /* Voltage high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[1].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 5 & 1) << 3 |
+ (sfp[0] >> 4 & 1) << 2 |
+ (sfp[4] >> 5 & 1) << 1 |
+ (sfp[4] >> 4 & 1) << 0);
+
+ /* Tx Bias Current high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[2].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 3 & 1) << 3 |
+ (sfp[0] >> 2 & 1) << 2 |
+ (sfp[4] >> 3 & 1) << 1 |
+ (sfp[4] >> 2 & 1) << 0);
+
+ /* Tx Power high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[3].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 1 & 1) << 3 |
+ (sfp[0] >> 0 & 1) << 2 |
+ (sfp[4] >> 1 & 1) << 1 |
+ (sfp[4] >> 0 & 1) << 0);
+
+ /* Rx Power high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[4].element_flags |=
+ cpu_to_be32(
+ (sfp[1] >> 7 & 1) << 3 |
+ (sfp[1] >> 6 & 1) << 2 |
+ (sfp[5] >> 7 & 1) << 1 |
+ (sfp[5] >> 6 & 1) << 0);
+ }
+ }
+
+ /* Optical Product Data Descriptor */
+ rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
+ rsp_payload->optical_prod_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
+
+ if (sfp) {
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
+ if (!rval) {
+ memcpy(rsp_payload->optical_prod_desc.vendor_name,
+ sfp + 0,
+ sizeof(rsp_payload->optical_prod_desc.vendor_name));
+ memcpy(rsp_payload->optical_prod_desc.part_number,
+ sfp + 20,
+ sizeof(rsp_payload->optical_prod_desc.part_number));
+ memcpy(rsp_payload->optical_prod_desc.revision,
+ sfp + 36,
+ sizeof(rsp_payload->optical_prod_desc.revision));
+ memcpy(rsp_payload->optical_prod_desc.serial_number,
+ sfp + 48,
+ sizeof(rsp_payload->optical_prod_desc.serial_number));
+ }
+
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
+ if (!rval) {
+ memcpy(rsp_payload->optical_prod_desc.date,
+ sfp + 0,
+ sizeof(rsp_payload->optical_prod_desc.date));
+ }
+ }
+
+send:
+ ql_dbg(ql_dbg_init, vha, 0x0183,
+ "Sending ELS Response to RDP Request...\n");
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
+ rsp_els, sizeof(*rsp_els));
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
+ "-------- ELS RSP PAYLOAD -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
+ rsp_payload, rsp_payload_length);
+
+ rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0188,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (rsp_els->comp_status) {
+ ql_log(ql_log_warn, vha, 0x0189,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, rsp_els->comp_status,
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
+ }
+
+dealloc:
+ if (stat)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
+ stat, stat_dma);
+ if (sfp)
+ dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+ sfp, sfp_dma);
+ if (rsp_payload)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+ rsp_payload, rsp_payload_dma);
+ if (rsp_els)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+ rsp_els, rsp_els_dma);
+}
+
+void
+qla24xx_free_purex_item(struct purex_item *item)
+{
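+ /* the default item is embedded in the vha; only kmalloc'ed items are freed */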
+ if (item == &item->vha->default_item)
+ memset(&item->vha->default_item, 0, sizeof(struct purex_item));
+ else
+ kfree(item);
+}
+
+void qla24xx_process_purex_list(struct purex_list *list)
+{
+ struct list_head head = LIST_HEAD_INIT(head);
+ struct purex_item *item, *next;
+ ulong flags;
+
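+ /* detach the whole list under the lock, then process the entries lock-free */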
+ spin_lock_irqsave(&list->lock, flags);
+ list_splice_init(&list->head, &head);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ list_for_each_entry_safe(item, next, &head, list) {
+ list_del(&item->list);
+ item->process_item(item->vha, item);
+ qla24xx_free_purex_item(item);
+ }
+}
+
+/*
+ * Context: task, can sleep
+ */
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
@@ -5741,6 +6454,8 @@ qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
uint32_t data;
struct qla_hw_data *ha = base_vha->hw;
+ might_sleep();
+
/* IDC-unlock implementation using driver-unlock/lock-id
* remote registers
*/
@@ -5756,7 +6471,7 @@ retry_unlock:
/* SV: XXX: IDC unlock retrying needed here? */
/* Retry for IDC-unlock */
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
"Failed to release IDC lock, retrying=%d\n", retry);
@@ -5764,7 +6479,7 @@ retry_unlock:
}
} else if (retry < 10) {
/* Retry for IDC-unlock */
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
"Failed to read drv-lockid, retrying=%d\n", retry);
@@ -5780,7 +6495,7 @@ retry_unlock2:
if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
if (retry < 10) {
/* Retry for IDC-unlock */
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
"Failed to release IDC lock, retrying=%d\n", retry);
@@ -6044,13 +6759,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
@@ -6061,9 +6769,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
return;
}
- qla2x00_wait_for_sess_deletion(base_vha);
+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
- set_bit(UNLOADING, &base_vha->dpc_flags);
+ qla2x00_wait_for_sess_deletion(base_vha);
qla2x00_delete_all_vps(ha, base_vha);
@@ -6142,6 +6855,9 @@ qla2x00_do_dpc(void *data)
schedule();
+ if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
+ qla_pci_set_eeh_busy(base_vha);
+
if (!base_vha->flags.init_done || ha->flags.mbox_busy)
goto end_loop;
@@ -6254,13 +6970,14 @@ qla2x00_do_dpc(void *data)
}
if (test_and_clear_bit(DETECT_SFP_CHANGE,
- &base_vha->dpc_flags) &&
- !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
- qla24xx_detect_sfp(base_vha);
-
- if (ha->flags.detected_lr_sfp !=
- ha->flags.using_lr_setting)
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ &base_vha->dpc_flags)) {
+ /* Semantics:
+  * - NO-OP -- await the next ISP-ABORT; preferred, as it
+  *            minimizes the disruption of a forced chip-reset.
+  * - Force -- an ISP-ABORT is scheduled immediately.
+  */
+ /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
}
if (test_and_clear_bit
@@ -6287,6 +7004,7 @@ qla2x00_do_dpc(void *data)
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
+ base_vha->flags.online = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
@@ -6301,6 +7019,15 @@ qla2x00_do_dpc(void *data)
}
}
+ if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
+ qla24xx_process_purex_list
+ (&base_vha->purex_list);
+ clear_bit(PROCESS_PUREX_IOCB,
+ &base_vha->dpc_flags);
+ }
+ }
+
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
&base_vha->dpc_flags)) {
qla2x00_update_fcports(base_vha);
@@ -6424,28 +7151,23 @@ intr_on_check:
mutex_unlock(&ha->mq_lock);
}
- if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
- &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
+ u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
+
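+ /* cap the combined threshold at the firmware's exchange count */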
+ if (threshold > ha->orig_fw_xcb_count)
+ threshold = ha->orig_fw_xcb_count;
+
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
- if (qla27xx_set_zio_threshold(base_vha,
- ha->nvme_last_rptd_aen)) {
+ "SET ZIO Activity exchange threshold to %d.\n",
+ threshold);
+ if (qla27xx_set_zio_threshold(base_vha, threshold)) {
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "Unable to SET ZIO Activity exchange threshold to %d.\n",
+ threshold);
}
}
- if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
- &base_vha->dpc_flags)) {
- ql_log(ql_log_info, base_vha, 0xffffff,
- "SET ZIO Activity exchange threshold to %d.\n",
- ha->last_zio_threshold);
- qla27xx_set_zio_threshold(base_vha,
- ha->last_zio_threshold);
- }
-
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -6510,6 +7232,104 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
+static bool qla_do_heartbeat(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ u32 cmpl_cnt;
+ u16 i;
+ bool do_heartbeat = false;
+
+ /*
+ * Allow do_heartbeat only if we don't have any active interrupts,
+ * but there are still IOs outstanding with firmware.
+ */
+ cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
+ if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
+ cmpl_cnt != ha->base_qpair->cmd_cnt) {
+ do_heartbeat = true;
+ goto skip;
+ }
+ ha->base_qpair->prev_completion_cnt = cmpl_cnt;
+
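+ /* apply the same stall check to each additional queue pair */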
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i]) {
+ cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
+ if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
+ cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
+ do_heartbeat = true;
+ break;
+ }
+ ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
+ }
+ }
+
+skip:
+ return do_heartbeat;
+}
+
+static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx)
+ return;
+
+ if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
+ return;
+
+ /*
+ * The dpc thread cannot run while the heartbeat task is running. We
+ * also do not want to starve the heartbeat task; therefore, run it
+ * at least once every 5 seconds.
+ */
+ if (dpc_started &&
+ time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
+ return;
+
+ if (qla_do_heartbeat(vha)) {
+ ha->last_heartbeat_run_jiffies = jiffies;
+ queue_work(ha->wq, &ha->heartbeat_work);
+ }
+}
+
+static void qla_wind_down_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->flags.eeh_busy)
+ return;
+ if (ha->pci_error_state)
+ /* system is trying to recover */
+ return;
+
+ /*
+ * The system is not handling the PCIe error. At this point, this is
+ * a best-effort attempt to wind down the adapter.
+ */
+ if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
+ !ha->flags.eeh_flush) {
+ ql_log(ql_log_info, vha, 0x9009,
+ "PCI Error detected, attempting to reset hardware.\n");
+
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->disable_intrs(ha);
+
+ ha->flags.eeh_flush = EEH_FLUSH_RDY;
+ ha->eeh_jif = jiffies;
+
+ } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
+ time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
+ pci_clear_master(ha->pdev);
+
+ /* flush all commands */
+ qla2x00_abort_isp_cleanup(vha);
+ ha->flags.eeh_flush = EEH_FLUSH_DONE;
+
+ ql_log(ql_log_info, vha, 0x900a,
+ "PCI Error handling complete, all IOs aborted.\n");
+ }
+}
+
/**************************************************************************
* qla2x00_timer
*
@@ -6529,8 +7349,12 @@ qla2x00_timer(struct timer_list *t)
uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ unsigned long flags;
+ fc_port_t *fcport = NULL;
if (ha->flags.eeh_busy) {
+ qla_wind_down_chip(vha);
+
ql_dbg(ql_dbg_timer, vha, 0x6000,
"EEH = %d, restarting timer.\n",
ha->flags.eeh_busy);
@@ -6560,6 +7384,16 @@ qla2x00_timer(struct timer_list *t)
if (!vha->vp_idx && IS_QLAFX00(ha))
qlafx00_timer_routine(vha);
+ if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
+ vha->link_down_time++;
+
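+ /* age each fcport's link-down timer under the session lock */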
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
+ fcport->tgt_link_down_time++;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
/* Loop down handler. */
if (atomic_read(&vha->loop_down_timer) > 0 &&
!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
@@ -6641,6 +7475,10 @@ qla2x00_timer(struct timer_list *t)
}
}
+ /* check if edif running */
+ if (vha->hw->flags.edif_enabled)
+ qla_edif_timer(vha);
+
/* Process any deferred work. */
if (!list_empty(&vha->work_list)) {
unsigned long flags;
@@ -6658,22 +7496,22 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
+ index = atomic_read(&ha->nvme_active_aen_cnt);
if (!vha->vp_idx &&
- (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+ (index != ha->nvme_last_rptd_aen) &&
ha->zio_mode == QLA_ZIO_MODE_6 &&
!ha->flags.host_shutting_down) {
+ ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
ql_log(ql_log_info, vha, 0x3002,
"nvme: Sched: Set ZIO exchange threshold to %d.\n",
ha->nvme_last_rptd_aen);
- ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
- set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+ set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
if (!vha->vp_idx &&
- (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
- (ha->zio_mode == QLA_ZIO_MODE_6) &&
- (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+ atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
+ IS_ZIO_THRESHOLD_CAPABLE(ha)) {
ql_log(ql_log_info, vha, 0x3002,
"Sched: Set ZIO exchange threshold to %d.\n",
ha->last_zio_threshold);
@@ -6682,6 +7520,8 @@ qla2x00_timer(struct timer_list *t)
start_dpc++;
}
+ /* borrowing w to signify dpc will run */
+ w = 0;
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
@@ -6692,7 +7532,8 @@ qla2x00_timer(struct timer_list *t)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -6705,15 +7546,19 @@ qla2x00_timer(struct timer_list *t)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, Process_purex_iocb=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
+ w = 1;
}
+ qla_heart_beat(vha, w);
+
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
@@ -6833,11 +7678,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
struct qla_qpair *qpair = NULL;
- struct scsi_qla_host *vp;
+ struct scsi_qla_host *vp, *tvp;
fc_port_t *fcport;
int i;
unsigned long flags;
+ ql_dbg(ql_dbg_aer, vha, 0x9000,
+ "%s\n", __func__);
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
@@ -6847,34 +7694,22 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
ha->base_qpair->chip_reset;
}
- /* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- complete(&ha->mbx_intr_comp);
- }
-
- i = 0;
-
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
- atomic_read(&ha->num_pend_mbx_stage1)) {
- msleep(20);
- i++;
- if (i > 50)
- break;
- }
-
- ha->flags.purge_mbox = 0;
+ /*
+ * Purging the mailbox might take a while; the Slot Reset/chip reset
+ * will take care of the purge.
+ */
mutex_lock(&ha->mq_lock);
+ ha->base_qpair->online = 0;
list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
qpair->online = 0;
+ wmb();
mutex_unlock(&ha->mq_lock);
qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_mark_all_devices_lost(vp);
@@ -6888,7 +7723,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
list_for_each_entry(fcport, &vp->vp_fcports, list)
@@ -6905,28 +7740,32 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
scsi_qla_host_t *vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = vha->hw;
+ pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
- ql_dbg(ql_dbg_aer, vha, 0x9000,
- "PCI error detected, state %x.\n", state);
+ ql_log(ql_log_warn, vha, 0x9000,
+ "PCI error detected, state %x.\n", state);
+ ha->pci_error_state = QLA_PCI_ERR_DETECTED;
if (!atomic_read(&pdev->enable_cnt)) {
ql_log(ql_log_info, vha, 0xffff,
"PCI device is disabled,state %x\n", state);
- return PCI_ERS_RESULT_NEED_RESET;
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ goto out;
}
switch (state) {
case pci_channel_io_normal:
- ha->flags.eeh_busy = 0;
+ qla_pci_set_eeh_busy(vha);
if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- return PCI_ERS_RESULT_CAN_RECOVER;
+ ret = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
case pci_channel_io_frozen:
- ha->flags.eeh_busy = 1;
- qla_pci_error_cleanup(vha);
- return PCI_ERS_RESULT_NEED_RESET;
+ qla_pci_set_eeh_busy(vha);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
@@ -6934,9 +7773,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- return PCI_ERS_RESULT_DISCONNECT;
+ ret = PCI_ERS_RESULT_DISCONNECT;
}
- return PCI_ERS_RESULT_NEED_RESET;
+out:
+ ql_dbg(ql_dbg_aer, vha, 0x600d,
+ "PCI error detected returning [%x].\n", ret);
+ return ret;
}
static pci_ers_result_t
@@ -6950,20 +7792,31 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ ql_log(ql_log_warn, base_vha, 0x9000,
+ "mmio enabled\n");
+
+ ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
+
if (IS_QLA82XX(ha))
return PCI_ERS_RESULT_RECOVERED;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, base_vha, 0x803f,
+ "During mmio enabled, PCI/Register disconnect still detected.\n");
+ goto out;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
- stat = RD_REG_DWORD(&reg->hccr);
+ stat = rd_reg_word(&reg->hccr);
if (stat & HCCR_RISC_PAUSE)
risc_paused = 1;
} else if (IS_QLA23XX(ha)) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED)
risc_paused = 1;
} else if (IS_FWI2_CAPABLE(ha)) {
- stat = RD_REG_DWORD(&reg24->host_status);
+ stat = rd_reg_dword(&reg24->host_status);
if (stat & HSRX_RISC_PAUSED)
risc_paused = 1;
}
@@ -6972,11 +7825,13 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (risc_paused) {
ql_log(ql_log_info, base_vha, 0x9003,
"RISC paused -- mmio_enabled, Dumping firmware.\n");
- ha->isp_ops->fw_dump(base_vha, 0);
-
- return PCI_ERS_RESULT_NEED_RESET;
- } else
- return PCI_ERS_RESULT_RECOVERED;
+ qla2xxx_dump_fw(base_vha);
+ }
+out:
+ /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
+ ql_dbg(ql_dbg_aer, base_vha, 0x600d,
+ "mmio enabled returning.\n");
+ return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t
@@ -6988,9 +7843,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
int rc;
struct qla_qpair *qpair = NULL;
- ql_dbg(ql_dbg_aer, base_vha, 0x9004,
- "Slot Reset.\n");
+ ql_log(ql_log_warn, base_vha, 0x9004,
+ "Slot Reset.\n");
+ ha->pci_error_state = QLA_PCI_SLOT_RESET;
/* Workaround: the qla2xxx driver, which accesses hardware earlier,
 * needs the error state to be pci_channel_io_online.
 * Otherwise the mailbox command times out.
@@ -7024,16 +7880,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
qpair->online = 1;
mutex_unlock(&ha->mq_lock);
+ ha->flags.eeh_busy = 0;
base_vha->flags.online = 1;
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
- ret = PCI_ERS_RESULT_RECOVERED;
+ ha->isp_ops->abort_isp(base_vha);
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ha->flags.eeh_busy = 1;
+ qla_pci_error_cleanup(base_vha);
+ ql_log(ql_log_warn, base_vha, 0x9005,
+ "Device unable to recover from PCI error.\n");
+ } else {
+ ret = PCI_ERS_RESULT_RECOVERED;
+ }
exit_slot_reset:
ql_dbg(ql_dbg_aer, base_vha, 0x900e,
- "slot_reset return %x.\n", ret);
+ "Slot Reset returning %x.\n", ret);
return ret;
}
@@ -7045,16 +7909,58 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
- ql_dbg(ql_dbg_aer, base_vha, 0x900f,
- "pci_resume.\n");
+ ql_log(ql_log_warn, base_vha, 0x900f,
+ "Pci Resume.\n");
- ha->flags.eeh_busy = 0;
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x9002,
"The device failed to resume I/O from slot/link_reset.\n");
}
+ ha->pci_error_state = QLA_PCI_RESUME;
+ ql_dbg(ql_dbg_aer, base_vha, 0x600d,
+ "Pci Resume returning.\n");
+}
+
+void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ bool do_cleanup = false;
+ unsigned long flags;
+
+ if (ha->flags.eeh_busy)
+ return;
+
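+ /* recheck under work_lock so that only one caller performs the cleanup */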
+ spin_lock_irqsave(&base_vha->work_lock, flags);
+ if (!ha->flags.eeh_busy) {
+ ha->eeh_jif = jiffies;
+ ha->flags.eeh_flush = 0;
+
+ ha->flags.eeh_busy = 1;
+ do_cleanup = true;
+ }
+ spin_unlock_irqrestore(&base_vha->work_lock, flags);
+
+ if (do_cleanup)
+ qla_pci_error_cleanup(base_vha);
+}
+
+/*
+ * This routine schedules a task to pause I/O; it may be called from
+ * interrupt context when the caller sees a PCIe error event
+ * (register reads return all 0xf's).
+ */
+void qla_schedule_eeh_work(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (ha->flags.eeh_busy)
+ return;
+
+ set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
}
static void
@@ -7108,17 +8014,15 @@ qla_pci_reset_done(struct pci_dev *pdev)
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
-static int qla2xxx_map_queues(struct Scsi_Host *shost)
+static void qla2xxx_map_queues(struct Scsi_Host *shost)
{
- int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
- rc = blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
- return rc;
+ blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
@@ -7128,6 +8032,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = qla2xxx_eh_abort,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
.eh_device_reset_handler = qla2xxx_eh_device_reset,
.eh_target_reset_handler = qla2xxx_eh_target_reset,
.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
@@ -7146,7 +8051,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
- .shost_attrs = qla2x00_host_attrs,
+ .shost_groups = qla2x00_host_groups,
.supported_mode = MODE_INITIATOR,
.track_queue_depth = 1,
@@ -7219,13 +8124,19 @@ qla2x00_module_init(void)
{
int ret = 0;
+ BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+ BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
BUILD_BUG_ON(sizeof(request_t) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
@@ -7233,17 +8144,72 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
+ BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
+ BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+ BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
+ BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
+ BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+ BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
+ BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
+ BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
+ BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
+ BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
+ BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
+ BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
+ BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
+ BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+ BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
- BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
- BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sw_info_t) != 32);
+ BUILD_BUG_ON(sizeof(target_id_t) != 2);
+
+ qla_trace_init();
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
@@ -7275,9 +8241,6 @@ qla2x00_module_init(void)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
- if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
- qla_insert_tgt_attrs();
-
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
@@ -7326,6 +8289,8 @@ qlt_exit:
destroy_cache:
kmem_cache_destroy(srb_cachep);
+
+ qla_trace_uninit();
return ret;
}
@@ -7344,6 +8309,7 @@ qla2x00_module_exit(void)
fc_release_transport(qla2xxx_transport_template);
qlt_exit();
kmem_cache_destroy(srb_cachep);
+ qla_trace_uninit();
}
module_init(qla2x00_module_init);
@@ -7352,7 +8318,6 @@ module_exit(qla2x00_module_exit);
MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 2fb7ebfbbc38..a5f3000ae53b 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#define MAX_RETRIES_OF_ISP_ABORT 5
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 76a38bf86cbc..c092a6b1ced4 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
@@ -26,24 +25,24 @@ qla2x00_lock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
while (data & NVR_BUSY) {
udelay(100);
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
}
/* Lock resource */
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
while ((data & BIT_0) == 0) {
/* Lock failed */
udelay(100);
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
}
@@ -58,8 +57,8 @@ qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
@@ -73,15 +72,15 @@ qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
@@ -120,21 +119,21 @@ qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
/* Read data from NVRAM. */
for (cnt = 0; cnt < 16; cnt++) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
data <<= 1;
- reg_data = RD_REG_WORD(&reg->nvram);
+ reg_data = rd_reg_word(&reg->nvram);
if (reg_data & NVR_DATA_IN)
data |= BIT_0;
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
/* Deselect chip. */
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
return data;
@@ -171,8 +170,8 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
@@ -183,7 +182,7 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
* @data: word to program
*/
static void
-qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data)
{
int count;
uint16_t word;
@@ -202,7 +201,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -216,8 +215,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -226,7 +225,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
qla2x00_nv_deselect(ha);
@@ -241,7 +240,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
static int
qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
- uint16_t data, uint32_t tmo)
+ __le16 data, uint32_t tmo)
{
int ret, count;
uint16_t word;
@@ -261,7 +260,7 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -275,11 +274,11 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
do {
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
if (!--tmo) {
ret = QLA_FUNCTION_FAILED;
break;
@@ -308,7 +307,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
int ret, stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
- uint16_t wprot, wprot_old;
+ __le16 wprot, wprot_old;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
@@ -318,7 +317,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
cpu_to_le16(0x1234), 100000);
wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
- if (stat != QLA_SUCCESS || wprot != 0x1234) {
+ if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) {
/* Write enable. */
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
@@ -347,8 +346,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -357,7 +356,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
if (wait_cnt)
@@ -407,8 +406,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -417,7 +416,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
}
@@ -456,11 +455,11 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 30000;
- WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
while (cnt--) {
- if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
- *data = RD_REG_DWORD(&reg->flash_data);
+ if (rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG) {
+ *data = rd_reg_dword(&reg->flash_data);
return QLA_SUCCESS;
}
udelay(10);
@@ -499,11 +498,11 @@ qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 500000;
- WRT_REG_DWORD(&reg->flash_data, data);
- WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_data, data);
+ wrt_reg_dword(&reg->flash_addr, addr | FARX_DATA_FLAG);
while (cnt--) {
- if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+ if (!(rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG))
return QLA_SUCCESS;
udelay(10);
cond_resched();
@@ -549,11 +548,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
- uint16_t cnt, chksum, *wptr;
+ uint16_t cnt, chksum;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct qla_flt_location *fltl = (void *)req->ring;
- uint32_t *dcode = (void *)req->ring;
+ uint32_t *dcode = (uint32_t *)req->ring;
uint8_t *buf = (void *)req->ring, *bcode, last_image;
/*
@@ -610,7 +610,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
if (memcmp(fltl->sig, "QFLT", 4))
goto end;
- wptr = (void *)req->ring;
+ wptr = (__force __le16 *)req->ring;
cnt = sizeof(*fltl) / sizeof(*wptr);
for (chksum = 0; cnt--; wptr++)
chksum += le16_to_cpu(*wptr);
@@ -671,7 +671,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
struct qla_flt_header *flt = ha->flt;
struct qla_flt_region *region = &flt->region[0];
- uint16_t *wptr, cnt, chksum;
+ __le16 *wptr;
+ uint16_t cnt, chksum;
uint32_t start;
/* Assign FCP prio region since older adapters may not have FLT, or
@@ -681,8 +682,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
- wptr = (uint16_t *)ha->flt;
- ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
+ wptr = (__force __le16 *)ha->flt;
+ ha->isp_ops->read_optrom(vha, flt, flt_addr << 2,
(sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
if (le16_to_cpu(*wptr) == 0xffff)
@@ -843,7 +844,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_nvram = start;
break;
case FLT_REG_IMG_PRI_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
@@ -949,7 +950,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
uint16_t cnt, chksum;
- uint16_t *wptr = (void *)req->ring;
+ __le16 *wptr = (__force __le16 *)req->ring;
struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
uint8_t man_id, flash_id;
uint16_t mid = 0, fid = 0;
@@ -1042,14 +1043,14 @@ static void
qla2xxx_get_idc_param(scsi_qla_host_t *vha)
{
#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
- uint32_t *wptr;
+ __le32 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
if (!(IS_P3P_TYPE(ha)))
return;
- wptr = (uint32_t *)req->ring;
+ wptr = (__force __le32 *)req->ring;
ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
if (*wptr == cpu_to_le32(0xffffffff)) {
@@ -1095,7 +1096,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
{
#define NPIV_CONFIG_SIZE (16*1024)
void *data;
- uint16_t *wptr;
+ __le16 *wptr;
uint16_t cnt, chksum;
int i;
struct qla_npiv_header hdr;
@@ -1197,9 +1198,9 @@ qla24xx_unprotect_flash(scsi_qla_host_t *vha)
return qla81xx_fac_do_write_enable(vha, 1);
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
if (!ha->fdt_wrt_disable)
goto done;
@@ -1240,8 +1241,8 @@ qla24xx_protect_flash(scsi_qla_host_t *vha)
skip_wrt_protect:
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
return QLA_SUCCESS;
}
@@ -1265,7 +1266,7 @@ qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
}
static int
-qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t dwords)
{
int ret;
@@ -1352,10 +1353,10 @@ next:
/* Slow write */
ret = qla24xx_write_flash_dword(ha,
- flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
+ flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
- "Failed slopw write %x (%x)\n", faddr, *dwptr);
+ "Failed slow write %x (%x)\n", faddr, *dwptr);
break;
}
}
@@ -1379,11 +1380,11 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
- uint16_t *wptr;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
/* Word reads to NVRAM via registers. */
- wptr = (uint16_t *)buf;
+ wptr = buf;
qla2x00_lock_nvram_access(ha);
for (i = 0; i < bytes >> 1; i++, naddr++)
wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha,
@@ -1456,7 +1457,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
{
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t *dwptr = buf;
+ __le32 *dwptr = buf;
uint32_t i;
int ret;
@@ -1466,9 +1467,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
return ret;
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
/* Disable NVRAM write-protection. */
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
@@ -1478,7 +1479,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
naddr = nvram_data_addr(ha, naddr);
bytes >>= 2;
for (i = 0; i < bytes; i++, naddr++, dwptr++) {
- if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
+ if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) {
ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
naddr, *dwptr);
@@ -1490,9 +1491,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
return ret;
}
@@ -1588,8 +1589,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
/* Set the modified gpio_enable values */
@@ -1598,8 +1599,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
qla2x00_flip_colors(ha, &led_color);
@@ -1614,8 +1615,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1645,8 +1646,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
gpio_enable |= GPIO_LED_MASK;
@@ -1654,8 +1655,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
/* Clear out previously set LED colour. */
@@ -1663,8 +1664,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1731,13 +1732,13 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
/* Save the Original GPIOD. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Set the color bits. */
qla24xx_flip_colors(ha, &led_color);
@@ -1749,8 +1750,8 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
gpio_data |= led_color;
/* Set the modified gpio_data values. */
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1881,12 +1882,12 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1929,12 +1930,12 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Disable the gpio_data reg for update. */
gpio_data &= ~GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_fw_options:
@@ -1970,10 +1971,10 @@ qla2x00_flash_enable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data |= CSR_FLASH_ENABLE;
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -1986,10 +1987,10 @@ qla2x00_flash_disable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data &= ~(CSR_FLASH_ENABLE);
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -2008,7 +2009,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
@@ -2016,11 +2017,11 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- data = RD_REG_WORD(&reg->flash_data);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ data = rd_reg_word(&reg->flash_data);
return (uint8_t)data;
}
@@ -2028,13 +2029,13 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2049,7 +2050,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
} while (data != data2);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
data = qla2x00_debounce_register(&reg->flash_data);
}
@@ -2068,20 +2069,20 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
/* clear out Module Select and Flash Address bits [19:16]. */
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
return;
}
@@ -2089,13 +2090,13 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2103,10 +2104,10 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
}
@@ -2289,12 +2290,12 @@ qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
midpoint = length / 2;
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram);
for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
if (ilength == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
data = qla2x00_read_flash_byte(ha, saddr);
if (saddr % 100)
@@ -2319,11 +2320,11 @@ qla2x00_suspend_hba(struct scsi_qla_host *vha)
/* Pause RISC. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ rd_reg_word(&reg->hccr);
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
@@ -2362,12 +2363,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
midpoint = ha->optrom_size / 2;
qla2x00_flash_enable(ha);
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
for (addr = offset, data = buf; addr < length; addr++, data++) {
if (addr == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
}
*data = qla2x00_read_flash_byte(ha, addr);
@@ -2399,7 +2400,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_number = 0;
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
/* Go with write. */
@@ -2455,7 +2456,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_mask = 0x10000;
break;
}
- /* Fall through... */
+ fallthrough;
case 0x1f: /* Atmel flash. */
/* 512k sector size. */
@@ -2464,7 +2465,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_mask = 0x80000000;
break;
}
- /* Fall through... */
+ fallthrough;
case 0x01: /* AMD flash. */
if (flash_id == 0x38 || flash_id == 0x40 ||
@@ -2497,7 +2498,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_mask = 0x1e000;
break;
}
- /* fall through */
+ fallthrough;
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
@@ -2548,8 +2549,8 @@ update_flash:
}
}
} else if (addr == ha->optrom_size / 2) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
if (flash_id == 0xda && man_id == 0xc1) {
@@ -2610,7 +2611,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
- qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
+ qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2620,10 +2621,11 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
}
static int
-qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
+qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, __le32 *buf,
uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf)
{
- uint32_t *p, check_sum = 0;
+ uint32_t check_sum = 0;
+ __le32 *p;
int i;
p = buf + buf_size_without_sfub;
@@ -2633,14 +2635,14 @@ qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
sizeof(struct secure_flash_update_block));
for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++)
- check_sum += p[i];
+ check_sum += le32_to_cpu(p[i]);
check_sum = (~check_sum) + 1;
- if (check_sum != p[i]) {
+ if (check_sum != le32_to_cpu(p[i])) {
ql_log(ql_log_warn, vha, 0x7097,
"SFUB checksum failed, 0x%x, 0x%x\n",
- check_sum, p[i]);
+ check_sum, le32_to_cpu(p[i]));
return QLA_COMMAND_ERROR;
}
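/*
 * A sketch of the checksum rule enforced above, assuming the secure
 * flash update block is stored little-endian and the word immediately
 * after it carries the expected two's-complement checksum.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static bool demo_sfub_csum_ok(const __le32 *p, int nwords)
{
	u32 sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += le32_to_cpu(p[i]);
	sum = ~sum + 1;			/* two's complement of the running sum */

	return sum == le32_to_cpu(p[nwords]);
}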
@@ -2662,7 +2664,7 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
for (; cnt; cnt--, flt_reg++) {
- if (flt_reg->start == start) {
+ if (le32_to_cpu(flt_reg->start) == start) {
memcpy((uint8_t *)region, flt_reg,
sizeof(struct qla_flt_region));
rval = QLA_SUCCESS;
@@ -2683,7 +2685,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t sec_mask, rest_addr, fdata;
void *optrom = NULL;
dma_addr_t optrom_dma;
- int rval;
+ int rval, ret;
struct secure_flash_update_block *sfub;
dma_addr_t sfub_dma;
uint32_t offset = faddr << 2;
@@ -2691,7 +2693,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_flt_region region;
bool reset_to_rom = false;
uint32_t risc_size, risc_attr = 0;
- uint32_t *fw_array = NULL;
+ __be32 *fw_array = NULL;
/* Retrieve region info - must be a start address passed in */
rval = qla28xx_get_flash_region(vha, offset, &region);
@@ -2720,14 +2722,14 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
if (ha->flags.secure_adapter && region.attribute) {
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
- "Region %x is secure\n", region.code);
+ "Region %x is secure\n", le16_to_cpu(region.code));
- switch (region.code) {
+ switch (le16_to_cpu(region.code)) {
case FLT_REG_FW:
case FLT_REG_FW_SEC_27XX:
case FLT_REG_MPI_PRI_28XX:
case FLT_REG_MPI_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
@@ -2761,7 +2763,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
case FLT_REG_PEP_PRI_28XX:
case FLT_REG_PEP_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
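/*
 * A sketch of the sparse-friendly cast used here: the caller hands in a
 * plain u32 buffer, but the firmware image inside it is big-endian, so
 * the pointer is re-typed with __force before be32_to_cpu(). Function
 * name is illustrative.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static u32 demo_risc_size(const u32 *dwptr)
{
	const __be32 *fw = (__force const __be32 *)dwptr;

	return be32_to_cpu(fw[3]);	/* word 3 holds the segment length */
}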
@@ -2774,7 +2776,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
default:
ql_log(ql_log_warn + ql_dbg_verbose, vha,
0xffff, "Secure region %x not supported\n",
- region.code);
+ le16_to_cpu(region.code));
rval = QLA_COMMAND_ERROR;
goto done;
}
@@ -2789,8 +2791,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
goto done;
}
- rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords,
- buf_size_without_sfub, (uint8_t *)sfub);
+ rval = qla28xx_extract_sfub_and_verify(vha, (__le32 *)dwptr,
+ dwords, buf_size_without_sfub, (uint8_t *)sfub);
if (rval != QLA_SUCCESS)
goto done;
@@ -2892,7 +2894,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
if (region.attribute && buf_size_without_sfub) {
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Sending Secure Flash MB Cmd\n");
- rval = qla28xx_secure_flash_update(vha, 0, region.code,
+ rval = qla28xx_secure_flash_update(vha, 0,
+ le16_to_cpu(region.code),
buf_size_without_sfub, sfub_dma,
sizeof(struct secure_flash_update_block) >> 2);
if (rval != QLA_SUCCESS) {
@@ -2933,17 +2936,17 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
liter += dburst - 1;
faddr += dburst - 1;
dwptr += dburst - 1;
- continue;
}
write_protect:
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
"Protect flash...\n");
- rval = qla24xx_protect_flash(vha);
- if (rval) {
+ ret = qla24xx_protect_flash(vha);
+ if (ret) {
qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
ql_log(ql_log_warn, vha, 0x7099,
"Failed protect flash\n");
+ rval = QLA_COMMAND_ERROR;
}
if (reset_to_rom == true) {
@@ -2951,10 +2954,12 @@ write_protect:
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- rval = qla2x00_wait_for_hba_online(vha);
- if (rval != QLA_SUCCESS)
+ ret = qla2x00_wait_for_hba_online(vha);
+ if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xffff,
"Adapter did not come out of reset\n");
+ rval = QLA_COMMAND_ERROR;
+ }
}
done:
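/*
 * A sketch of the rval/ret split introduced above: cleanup steps report
 * into their own variable, and failures are folded into the function's
 * return code deliberately rather than by accidental overwrite.
 * demo_protect_flash() is a hypothetical stand-in for the real call.
 */
#include <linux/errno.h>

static int demo_protect_flash(void)
{
	return 0;	/* stand-in: pretend the cleanup step succeeded */
}

static int demo_update(void)
{
	int rval = 0, ret;

	/* ... main work sets rval ... */

	ret = demo_protect_flash();
	if (ret)
		rval = -EIO;	/* record the cleanup failure explicitly */

	return rval;
}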
@@ -2978,11 +2983,11 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
/* Go with write. */
if (IS_QLA28XX(ha))
- rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla28xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
else
- rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla24xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
scsi_unblock_requests(vha->host);
@@ -3510,7 +3515,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
} else {
for (i = 0; i < 4; i++)
- ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
+ ha->fw_revision[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
@@ -3525,7 +3531,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
faddr = ha->flt_region_gold_fw;
- qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+ qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at %#x.\n", faddr);
@@ -3534,7 +3540,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
for (i = 0; i < 4; i++)
- ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
+ ha->gold_fw_version[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
return ret;
}
@@ -3614,7 +3621,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
/* read remaining FCP CMD config data from flash */
fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
- len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry);
max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 70081b395fb2..bb754a950802 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -27,8 +27,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
-#include <target/target_core_base.h>
-#include <target/target_core_fabric.h>
#include "qla_def.h"
#include "qla_target.h"
@@ -50,13 +48,6 @@ MODULE_PARM_DESC(qlini_mode,
"when ready "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql_dm_tgt_ex_pct = 0;
-module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
- "For Dual Mode (qlini_mode=dual), this parameter determines "
- "the percentage of exchanges/cmds FW will allocate resources "
- "for Target mode.");
-
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
@@ -186,8 +177,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
return QLA_SUCCESS;
}
-static inline
-struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
be_id_t d_id)
{
struct scsi_qla_host *host;
@@ -200,7 +190,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
key = be_to_port_id(d_id).b24;
- host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ host = btree_lookup32(&vha->hw->host_map, key);
if (!host)
ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
@@ -301,7 +291,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
goto abort;
}
- host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+ host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
@@ -350,7 +340,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
{
- struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
atio->u.isp24.fcp_hdr.d_id);
if (unlikely(NULL == host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe03e,
@@ -380,7 +370,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
qlt_issue_marker(vha, ha_locked);
if ((entry->u.isp24.vp_index != 0xFF) &&
- (entry->u.isp24.nport_handle != 0xFFFF)) {
+ (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
host = qlt_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
@@ -444,7 +434,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
- /* fall through */
+ fallthrough;
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -579,6 +569,18 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
sp->fcport->logout_on_delete = 1;
sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
sp->fcport->send_els_logo = 0;
+
+ if (sp->fcport->flags & FCF_FCSP_DEVICE) {
+ ql_dbg(ql_dbg_edif, vha, 0x20ef,
+ "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
+ sp->fcport->port_name);
+ qla2x00_set_fcport_disc_state(sp->fcport,
+ DSC_LOGIN_AUTH_PEND);
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sp->fcport->d_id.b24);
+ qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
+ 0, sp->fcport);
+ }
break;
case SRB_NACK_PRLI:
@@ -611,7 +613,7 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
@@ -626,6 +628,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
case SRB_NACK_PLOGI:
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
c = "PLOGI";
+ if (vha->hw->flags.edif_enabled &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
+ fcport->flags |= FCF_FCSP_DEVICE;
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
@@ -644,12 +649,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->type = type;
sp->name = "nack";
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_nack_sp_done);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
- sp->done = qla2x00_async_nack_sp_done;
ql_dbg(ql_dbg_disc, vha, 0x20f4,
"Async-%s %8phC hndl %x %s\n",
@@ -662,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
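/*
 * A sketch of the refcounted release that replaces sp->free(sp) in
 * these hunks: the last kref_put(), wherever it happens, runs the
 * release callback. Types and names below are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_sp {
	struct kref cmd_kref;
	/* ... command payload ... */
};

static void demo_sp_release(struct kref *kref)
{
	struct demo_sp *sp = container_of(kref, struct demo_sp, cmd_kref);

	kfree(sp);
}

static void demo_sp_put(struct demo_sp *sp)
{
	kref_put(&sp->cmd_kref, demo_sp_release);	/* frees on last reference */
}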
@@ -695,7 +698,12 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
void qla24xx_delete_sess_fn(struct work_struct *work)
{
fc_port_t *fcport = container_of(work, struct fc_port, del_work);
- struct qla_hw_data *ha = fcport->vha->hw;
+ struct qla_hw_data *ha = NULL;
+
+ if (!fcport || !fcport->vha || !fcport->vha->hw)
+ return;
+
+ ha = fcport->vha->hw;
if (fcport->se_sess) {
ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -919,6 +927,11 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
qlt_port_logo_t *tmp;
int res;
+ if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+ res = 0;
+ goto out;
+ }
+
mutex_lock(&vha->vha_tgt.tgt_mutex);
list_for_each_entry(tmp, &vha->logo_list, list) {
@@ -939,6 +952,7 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
list_del(&logo->list);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+out:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
"Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
@@ -974,6 +988,7 @@ void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ INIT_LIST_HEAD(&logo.list);
if (!own)
qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
@@ -983,8 +998,8 @@ void qlt_free_session_done(struct work_struct *work)
int rc;
if (!own ||
- (own &&
- (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+ sess->logout_completed = 0;
rc = qla2x00_post_async_logout_work(vha, sess,
NULL);
if (rc != QLA_SUCCESS)
@@ -1011,6 +1026,25 @@ void qlt_free_session_done(struct work_struct *work)
sess->nvme_flag |= NVME_FLAG_DELETING;
qla_nvme_unregister_remote_port(sess);
}
+
+ if (ha->flags.edif_enabled &&
+ (!own || (own &&
+ own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ sess->edif.authok = 0;
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+
+ qla_edif_clear_appdata(vha, sess);
+ qla_edif_sess_down(vha, sess);
+ }
}
/*
@@ -1032,7 +1066,12 @@ void qlt_free_session_done(struct work_struct *work)
}
msleep(100);
cnt++;
- if (cnt > 200)
+ /*
+ * The driver timeout is 22 seconds; loop long enough for the
+ * logout to complete before advancing. Otherwise a straddling
+ * logout can interfere with the re-login attempt.
+ */
+ if (cnt > 230)
break;
}
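/*
 * A sketch of the bounded wait tuned above: poll in 100 ms steps and
 * give up after 230 iterations (~23 s), just past the 22 s driver
 * timeout, so a straddling logout cannot collide with the next login.
 * The callback signature is illustrative.
 */
#include <linux/delay.h>
#include <linux/types.h>

static void demo_wait_for_logout(bool (*logout_done)(void *), void *arg)
{
	int cnt = 0;

	while (!logout_done(arg)) {
		msleep(100);
		if (++cnt > 230)	/* ~23 seconds */
			break;
	}
}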
@@ -1113,6 +1152,8 @@ void qlt_free_session_done(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->free_pending = 0;
+ qla2x00_dfs_remove_rport(vha, sess);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1231,14 +1272,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
case DSC_DELETE_PEND:
return;
case DSC_DELETED:
- if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
- wake_up_all(&tgt->waitQ);
- if (sess->vha->fcport_count == 0)
- wake_up_all(&sess->vha->fcport_waitQ);
-
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
- !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
+ !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
+ if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+
+ if (sess->vha->fcport_count == 0)
+ wake_up_all(&sess->vha->fcport_waitQ);
return;
+ }
break;
case DSC_UPD_FCPORT:
/*
@@ -1272,9 +1314,9 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
- "Scheduling sess %p for deletion %8phC\n",
- sess, sess->port_name);
+ ql_dbg(ql_log_warn, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion %8phC fc4_type %x\n",
+ sess, sess->port_name, sess->fc4_type);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
@@ -1515,11 +1557,11 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
"Waiting for sess works (tgt %p)", tgt);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- while (!list_empty(&tgt->sess_works_list)) {
+ do {
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
- flush_scheduled_work();
+ flush_work(&tgt->sess_work);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- }
+ } while (!list_empty(&tgt->sess_works_list));
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
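/*
 * A sketch of the change above: flush only the target's own work item
 * instead of flush_scheduled_work(), which would block on every item
 * queued to the system workqueue. The struct is illustrative.
 */
#include <linux/workqueue.h>

struct demo_tgt {
	struct work_struct sess_work;
};

static void demo_drain_sess_work(struct demo_tgt *tgt)
{
	flush_work(&tgt->sess_work);	/* waits for this item only */
}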
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
@@ -1558,10 +1600,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
return;
}
+ mutex_lock(&tgt->ha->optrom_mutex);
mutex_lock(&vha->vha_tgt.tgt_mutex);
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&tgt->ha->optrom_mutex);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
@@ -1699,7 +1743,7 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1713,6 +1757,12 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.srr_reject_code_expl = srr_explan;
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+ /* TODO qualify this with EDIF enable */
+ if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+ (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+ }
+
ql_dbg(ql_dbg_tgt, vha, 0xe005,
"qla_target(%d): Sending 24xx Notify Ack %d\n",
vha->vp_idx, nack->u.isp24.status);
@@ -1727,7 +1777,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
struct scsi_qla_host *vha = mcmd->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl, h;
+ __le32 f_ctl;
+ uint32_t h;
uint8_t *p;
int rc;
struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
@@ -1760,7 +1811,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
}
- resp->handle = MAKE_HANDLE(qpair->req->id, h);
+ resp->handle = make_handle(qpair->req->id, h);
resp->entry_type = ABTS_RESP_24XX;
resp->entry_count = 1;
resp->nport_handle = abts->nport_handle;
@@ -1784,7 +1835,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -1816,7 +1867,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl;
+ __le32 f_ctl;
uint8_t *p;
ql_dbg(ql_dbg_tgt, vha, 0xe006,
@@ -1859,7 +1910,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -1969,17 +2020,6 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
key = sid_to_key(s_id);
spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- uint32_t op_key;
- u64 op_lun;
-
- op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
- op_lun = scsilun_to_int(
- (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
- if (op_key == key && op_lun == lun)
- op->aborted = true;
- }
-
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key;
u64 op_lun;
@@ -2026,13 +2066,13 @@ static void qlt_do_tmr_work(struct work_struct *work)
struct qla_tgt_mgmt_cmd *mcmd =
container_of(work, struct qla_tgt_mgmt_cmd, work);
struct qla_hw_data *ha = mcmd->vha->hw;
- int rc = EIO;
+ int rc;
uint32_t tag;
unsigned long flags;
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
+ tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
break;
default:
tag = 0;
@@ -2081,6 +2121,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
+ struct qla_tgt_cmd *abort_cmd;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -2108,17 +2149,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
*/
mcmd->se_cmd.cpuid = h->cpuid;
- if (ha->tgt.tgt_ops->find_cmd_by_tag) {
- struct qla_tgt_cmd *abort_cmd;
+ abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
+ le32_to_cpu(abts->exchange_addr_to_abort));
+ if (!abort_cmd) {
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EIO;
+ }
+ mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
- abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
- abts->exchange_addr_to_abort);
- if (abort_cmd && abort_cmd->qpair) {
- mcmd->qpair = abort_cmd->qpair;
- mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
- mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
- mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
- }
+ if (abort_cmd->qpair) {
+ mcmd->qpair = abort_cmd->qpair;
+ mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+ mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+ mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
}
INIT_WORK(&mcmd->work, qlt_do_tmr_work);
@@ -2135,7 +2178,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- uint32_t tag = abts->exchange_addr_to_abort;
+ uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
be_id_t s_id;
int rc;
unsigned long flags;
@@ -2225,7 +2268,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2282,7 +2325,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE;
- ctio->nport_handle = cmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
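/*
 * A sketch of the rule these CTIO hunks enforce: once a field is typed
 * __le16/__le32, every store goes through cpu_to_le*(). The struct is a
 * cut-down illustration, not the real IOCB layout.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_ctio {
	__le16 nport_handle;
	__le16 timeout;
	__le32 exchange_addr;
} __packed;

static void demo_fill_ctio(struct demo_ctio *ctio, u16 loop_id, u32 xchg)
{
	ctio->nport_handle = cpu_to_le16(loop_id);
	ctio->timeout = cpu_to_le16(10);	/* illustrative timeout value */
	ctio->exchange_addr = cpu_to_le32(xchg);
}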
@@ -2486,7 +2529,7 @@ static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out));
+ rd_reg_dword_relaxed(req->req_q_out));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -2562,6 +2605,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
struct ctio7_to_24xx *pkt;
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t temp;
+ struct qla_tgt_cmd *cmd = prm->cmd;
pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
prm->pkt = pkt;
@@ -2582,7 +2626,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
} else
qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle = make_handle(qpair->req->id, h);
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -2594,6 +2638,15 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+ if (cmd->edif) {
+ if (cmd->dma_data_direction == DMA_TO_DEVICE)
+ prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
+ if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+ prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
+
+ pkt->u.status0.edif_flags |= EF_EN_EDIF;
+ }
+
return 0;
}
@@ -2842,10 +2895,14 @@ skip_explict_conf:
cpu_to_le16(SS_SENSE_LEN_VALID);
ctio->u.status1.sense_length =
cpu_to_le16(prm->sense_buffer_len);
- for (i = 0; i < prm->sense_buffer_len/4; i++)
- ((uint32_t *)ctio->u.status1.sense_data)[i] =
- cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+ for (i = 0; i < prm->sense_buffer_len/4; i++) {
+ uint32_t v;
+ v = get_unaligned_be32(
+ &((uint32_t *)prm->sense_buffer)[i]);
+ put_unaligned_le32(v,
+ &((uint32_t *)ctio->u.status1.sense_data)[i]);
+ }
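/*
 * A sketch of the copy loop above: each 32-bit sense word is read
 * big-endian and stored little-endian through the unaligned helpers,
 * so no alignment is assumed for the packed IOCB buffer. Function name
 * is illustrative.
 */
#include <asm/unaligned.h>
#include <linux/types.h>

static void demo_copy_sense(void *dst, const void *src, int len)
{
	int i;

	for (i = 0; i < len / 4; i++) {
		u32 v = get_unaligned_be32(src + 4 * i);

		put_unaligned_le32(v, dst + 4 * i);
	}
}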
qlt_print_dif_err(prm);
} else {
@@ -3095,7 +3152,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
} else
qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle = make_handle(qpair->req->id, h);
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -3116,7 +3173,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
- pkt->dseg_count = prm->tot_dsds;
+ pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
@@ -3138,7 +3195,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
- pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
if (!bundling) {
cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
@@ -3216,8 +3273,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
cmd->state = QLA_TGT_STATE_PROCESSED;
- res = 0;
- goto free;
+ return 0;
}
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3228,8 +3284,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
- if (unlikely(res != 0))
- goto free;
+ if (unlikely(res != 0)) {
+ return res;
+ }
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
@@ -3248,9 +3305,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
res = 0;
- goto free;
+ goto out_unmap_unlock;
}
/* Does F/W have an IOCBs for this request */
@@ -3281,8 +3337,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (xmit_type & QLA_TGT_XMIT_STATUS) {
pkt->u.status0.scsi_status =
cpu_to_le16(prm.rq_result);
- pkt->u.status0.residual =
- cpu_to_le32(prm.residual);
+ if (!cmd->edif)
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+
pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(cmd, 0)) {
@@ -3353,8 +3411,6 @@ out_unmap_unlock:
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-free:
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
@@ -3375,10 +3431,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
prm.sg = NULL;
prm.req_cnt = 1;
- /* Calculate number of entries and segments required */
- if (qlt_pci_map_calc_cnt(&prm) != 0)
- return -EAGAIN;
-
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
@@ -3396,6 +3448,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return 0;
}
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3575,7 +3631,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
/* terminate */
@@ -3649,7 +3705,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3768,6 +3824,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
@@ -3778,7 +3837,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
"multiple abort. %p transport_state %x, t_state %x, "
"se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
- return EIO;
+ return -EIO;
}
cmd->aborted = 1;
cmd->trc_flags |= TRC_ABORT;
@@ -3800,23 +3859,18 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->cmd_in_wq);
- if (cmd->sg_mapped)
- qlt_unmap_sg(cmd->vha, cmd);
-
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);
BUG_ON(cmd->sg_mapped);
cmd->jiffies_at_free = get_jiffies_64();
- if (unlikely(cmd->free_sg))
- kfree(cmd->sg);
if (!sess || !sess->se_sess) {
WARN_ON(1);
return;
}
cmd->jiffies_at_free = get_jiffies_64();
- target_free_tag(sess->se_sess, &cmd->se_cmd);
+ cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -3887,7 +3941,7 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (void *) req->outstanding_cmds[h];
+ cmd = req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3931,6 +3985,12 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
if (cmd == NULL)
return;
+ if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
+ cmd->sess) {
+ qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
+ (struct ctio7_from_24xx *)ctio);
+ }
+
se_cmd = &cmd->se_cmd;
cmd->cmd_sent_to_fw = 0;
@@ -4001,6 +4061,16 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
qlt_handle_dif_error(qpair, cmd, ctio);
return;
}
+
+ case CTIO_FAST_AUTH_ERR:
+ case CTIO_FAST_INCOMP_PAD_LEN:
+ case CTIO_FAST_INVALID_REQ:
+ case CTIO_FAST_SPI_ERR:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
"qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
@@ -4112,7 +4182,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
- cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
+ cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
atio->u.isp24.fcp_cmnd.wrdata) {
@@ -4150,7 +4220,7 @@ out_term:
qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
- target_free_tag(sess->se_sess, &cmd->se_cmd);
+ cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
ha->tgt.tgt_ops->put_sess(sess);
@@ -4277,24 +4347,19 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
{
- struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
- int tag, cpu;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0)
+ cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
+ if (!cmd)
return NULL;
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- memset(cmd, 0, sizeof(struct qla_tgt_cmd));
cmd->cmd_type = TYPE_TGT_CMD;
memcpy(&cmd->atio, atio, sizeof(*atio));
+ INIT_LIST_HEAD(&cmd->sess_cmd_list);
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
- cmd->se_cmd.map_tag = tag;
- cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
@@ -4307,6 +4372,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
qlt_assign_qpair(vha, cmd);
cmd->reset_count = vha->hw->base_qpair->chip_reset;
cmd->vp_idx = vha->vp_idx;
+ cmd->edif = sess->edif.enable;
return cmd;
}
@@ -4426,7 +4492,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
case QLA_TGT_CLEAR_TS:
case QLA_TGT_ABORT_TS:
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- /* fall through */
+ fallthrough;
case QLA_TGT_CLEAR_ACA:
h = qlt_find_qphint(vha, mcmd->unpacked_lun);
mcmd->qpair = h->qpair;
@@ -4648,15 +4714,6 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
((u32)s_id->b.al_pa));
spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
-
- if (op_key == key) {
- op->aborted = true;
- count++;
- }
- }
-
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
@@ -4722,6 +4779,34 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (vha->hw->flags.edif_enabled &&
+ !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
+ iocb->u.isp24.status_subcode == ELS_PLOGI &&
+ !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_INACTIVE(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ } else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
+ !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+ }
+
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
@@ -4747,11 +4832,11 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
qla24xx_post_newsess_work(vha, &port_id,
iocb->u.isp24.port_name,
iocb->u.isp24.u.plogi.node_name,
- pla, FC4_TYPE_UNKNOWN);
+ pla, 0);
else
qla24xx_post_newsess_work(vha, &port_id,
iocb->u.isp24.port_name, NULL,
- pla, FC4_TYPE_UNKNOWN);
+ pla, 0);
goto out;
}
@@ -4787,6 +4872,20 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
+ sess->loop_id = loop_id;
+
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
+ /* remote port has assigned Port ID */
+ if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
+ vha->d_id = sess->d_id;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - send port online\n",
+ __func__, sess->port_name);
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sess->d_id.b24);
+ }
if (iocb->u.isp24.status_subcode == ELS_PRLI) {
sess->fw_login_state = DSC_LS_PRLI_PEND;
@@ -4899,6 +4998,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess = qla2x00_find_fcport_by_wwpn(vha,
iocb->u.isp24.port_name, 1);
+ if (vha->hw->flags.edif_enabled && sess &&
+ (!(sess->flags & FCF_FCSP_DEVICE) ||
+ !sess->edif.authok)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
+
if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
@@ -4947,6 +5056,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
bool delete = false;
int sec;
+ if (vha->hw->flags.edif_enabled && sess &&
+ (!(sess->flags & FCF_FCSP_DEVICE) ||
+ !sess->edif.authok)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to unauthorize prli\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
+
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -5060,7 +5179,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* fall through */
+ fallthrough;
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -5136,7 +5255,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
/*
- * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * ha->hardware_lock is supposed to be held on entry.
+ * Might drop it, then reacquire.
*/
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
@@ -5310,7 +5430,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = sess->loop_id;
+ ctio24->nport_handle = cpu_to_le16(sess->loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -5323,13 +5443,14 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
* CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
* if the explicit conformation is used.
*/
- ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.ox_id =
+ cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
ctio24->u.status1.scsi_status = cpu_to_le16(status);
- ctio24->u.status1.residual = get_datalen_for_atio(atio);
+ ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+ ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
/* Memory Barrier */
wmb();
@@ -5352,9 +5473,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
- int tag, cpu;
unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
@@ -5384,10 +5503,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
if (!sess)
return;
- se_sess = sess->se_sess;
-
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0) {
+ cmd = ha->tgt.tgt_ops->get_cmd(sess);
+ if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3009,
"qla_target(%d): %s: Allocation of cmd failed\n",
vha->vp_idx, __func__);
@@ -5402,9 +5519,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
return;
}
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- memset(cmd, 0, sizeof(struct qla_tgt_cmd));
-
qlt_incr_num_pend_cmds(vha);
INIT_LIST_HEAD(&cmd->cmd_list);
memcpy(&cmd->atio, atio, sizeof(*atio));
@@ -5414,7 +5528,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
- cmd->se_cmd.map_cpu = cpu;
if (qfull) {
cmd->q_full = 1;
@@ -5483,8 +5596,7 @@ qlt_free_qfull_cmds(struct qla_qpair *qpair)
"%s: Unexpected cmd in QFull list %p\n", __func__,
cmd);
- list_del(&cmd->cmd_list);
- list_add_tail(&cmd->cmd_list, &free_list);
+ list_move_tail(&cmd->cmd_list, &free_list);
/* piggy back on hardware_lock for protection */
vha->hw->tgt.num_qfull_cmds_alloc--;
@@ -5566,7 +5678,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
if (unlikely(atio->u.isp24.exchange_addr ==
- ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
ql_dbg(ql_dbg_io, vha, 0x3065,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
@@ -5678,7 +5790,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
/* found existing exchange */
qpair->retry_term_cnt++;
if (qpair->retry_term_cnt >= 5) {
- rc = EIO;
+ rc = -EIO;
qpair->retry_term_cnt = 0;
ql_log(ql_log_warn, vha, 0xffff,
"Unable to send ABTS Respond. Dumping firmware.\n");
@@ -5686,9 +5798,9 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
if (qpair == ha->base_qpair)
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
else
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -5729,8 +5841,8 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
entry->compl_status);
if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
+ if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
+ le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
ha->tgt.tgt_ops->free_mcmd(mcmd);
return;
@@ -5944,11 +6056,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
"qla_target(%d): Async LOOP_UP occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(ha->base_qpair,
- (void *)&tgt->link_reinit_iocb,
+ &tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
@@ -5962,18 +6073,16 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
"qla_target(%d): Async event %#x occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
break;
case MBA_REJECTED_FCP_CMD:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
"qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- if (le16_to_cpu(mailbox[3]) == 1) {
+ if (mailbox[3] == 1) {
/* exchange starvation. */
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -5997,10 +6106,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
"qla_target(%d): Port update async event %#x "
"occurred: updating the ports database (m[0]=%x, m[1]=%x, "
"m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- login_code = le16_to_cpu(mailbox[2]);
+ login_code = mailbox[2];
if (login_code == 0x4) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
@@ -6228,69 +6336,6 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void qlt_tmr_work(struct qla_tgt *tgt,
- struct qla_tgt_sess_work_param *prm)
-{
- struct atio_from_isp *a = &prm->tm_iocb2;
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- unsigned long flags;
- be_id_t s_id;
- int rc;
- u64 unpacked_lun;
- int fn;
- void *iocb;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term2;
-
- s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
- if (!sess) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has got an extra creation ref */
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- if (!sess)
- goto out_term2;
- } else {
- if (sess->deleted) {
- goto out_term2;
- }
-
- if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
- "%s: kref_get fail %8phC\n",
- __func__, sess->port_name);
- goto out_term2;
- }
- }
-
- iocb = a;
- fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
- unpacked_lun =
- scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
-
- rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- ha->tgt.tgt_ops->put_sess(sess);
-
- if (rc != 0)
- goto out_term;
- return;
-
-out_term2:
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-out_term:
- qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
-}
-
static void qlt_sess_work_fn(struct work_struct *work)
{
struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
@@ -6317,9 +6362,6 @@ static void qlt_sess_work_fn(struct work_struct *work)
case QLA_TGT_SESS_WORK_ABORT:
qlt_abort_work(tgt, prm);
break;
- case QLA_TGT_SESS_WORK_TM:
- qlt_tmr_work(tgt, prm);
- break;
default:
BUG_ON(1);
break;
@@ -6406,7 +6448,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
- INIT_LIST_HEAD(&tgt->del_sess_list);
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
@@ -6451,15 +6492,15 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
-void qlt_remove_target_resources(struct qla_hw_data *ha)
+void qla_remove_hostmap(struct qla_hw_data *ha)
{
struct scsi_qla_host *node;
u32 key = 0;
- btree_for_each_safe32(&ha->tgt.host_map, key, node)
- btree_remove32(&ha->tgt.host_map, key);
+ btree_for_each_safe32(&ha->host_map, key, node)
+ btree_remove32(&ha->host_map, key);
- btree_destroy32(&ha->tgt.host_map);
+ btree_destroy32(&ha->host_map);
}
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
@@ -6472,7 +6513,7 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
}
/**
- * qla_tgt_lport_register - register lport with external module
+ * qlt_lport_register - register lport with external module
*
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
* @phys_wwpn: physical port WWPN
@@ -6548,7 +6589,7 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
EXPORT_SYMBOL(qlt_lport_register);
/**
- * qla_tgt_lport_deregister - Degister lport
+ * qlt_lport_deregister - Deregister lport
*
* @vha: Registered scsi_qla_host pointer
*/
@@ -6677,9 +6718,14 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+
+ /*
+ * We are expecting the offline state.
+ * QLA_FUNCTION_FAILED means that the adapter is offline.
+ */
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
ql_dbg(ql_dbg_tgt, vha, 0xe081,
- "qla2x00_wait_for_hba_online() failed\n");
+ "adapter is offline\n");
}
/*
@@ -6745,7 +6791,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
return;
for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt++;
}
@@ -6780,7 +6826,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
&pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
- le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+ pkt->u.isp24.exchange_addr, pkt);
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
@@ -6798,14 +6844,14 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
} else
ha->tgt.atio_ring_ptr++;
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
}
wmb();
}
/* Adjust ring index */
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
@@ -6818,20 +6864,14 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (!QLA_TGT_MODE_ENABLED())
return;
- WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
- RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+ wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
+ rd_reg_dword(ISP_ATIO_Q_OUT(vha));
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= ~BIT_26;
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
@@ -6840,7 +6880,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
/* INTx|MSI */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"%s: Use INTx for ATIOQ.\n", __func__);
}
@@ -7082,13 +7122,11 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
- int rc;
-
+ mutex_init(&base_vha->vha_tgt.tgt_mutex);
if (!QLA_TGT_MODE_ENABLED())
return;
- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
@@ -7096,7 +7134,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
}
- mutex_init(&base_vha->vha_tgt.tgt_mutex);
mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
INIT_LIST_HEAD(&base_vha->unknown_atio_list);
@@ -7105,11 +7142,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_clear_mode(base_vha);
- rc = btree_init32(&ha->tgt.host_map);
- if (rc)
- ql_log(ql_log_info, base_vha, 0xd03d,
- "Unable to initialize ha->host_map btree\n");
-
qlt_update_vp_map(base_vha, SET_VP_IDX);
}
@@ -7230,21 +7262,20 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
u32 key;
int rc;
- if (!QLA_TGT_MODE_ENABLED())
- return;
-
key = vha->d_id.b24;
switch (cmd) {
case SET_VP_IDX:
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
if (!slot) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
"Save vha in host_map %p %06x\n", vha, key);
- rc = btree_insert32(&vha->hw->tgt.host_map,
+ rc = btree_insert32(&vha->hw->host_map,
key, vha, GFP_ATOMIC);
if (rc)
ql_log(ql_log_info, vha, 0xd03e,
@@ -7254,17 +7285,19 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
"replace existing vha in host_map %p %06x\n", vha, key);
- btree_update32(&vha->hw->tgt.host_map, key, vha);
+ btree_update32(&vha->hw->host_map, key, vha);
break;
case RESET_VP_IDX:
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
"clear vha in host_map %p %06x\n", vha, key);
- slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
if (slot)
- btree_remove32(&vha->hw->tgt.host_map, key);
+ btree_remove32(&vha->hw->host_map, key);
vha->d_id.b24 = 0;
break;
}
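/*
 * A sketch of the host_map operations touched above, after the map
 * moved from ha->tgt.host_map to ha->host_map: a 32-bit btree keyed by
 * the 24-bit port ID. Error handling is trimmed; names illustrative.
 */
#include <linux/btree.h>

static void demo_map_host(struct btree_head32 *map, u32 key, void *vha)
{
	void *slot = btree_lookup32(map, key);

	if (!slot)
		btree_insert32(map, key, vha, GFP_ATOMIC);	/* new port ID */
	else
		btree_update32(map, key, vha);			/* replace entry */
}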
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 6539499e9e95..7df86578214f 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -116,7 +116,6 @@
(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
#endif
-#endif
#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
? le16_to_cpu((iocb)->u.isp2x.target.extended) \
@@ -135,37 +134,37 @@ struct nack_to_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t flags;
- uint16_t resp_code;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
- uint16_t srr_reject_code;
+ __le16 flags;
+ __le16 resp_code;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
+ __le16 srr_reject_code;
uint8_t srr_reject_vendor_uniq;
uint8_t srr_reject_code_expl;
uint8_t reserved_2[24];
} isp2x;
struct {
uint32_t handle;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_1;
- uint16_t flags;
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 flags;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
uint8_t reserved_4[19];
uint8_t vp_index;
uint8_t srr_reject_vendor_uniq;
@@ -175,8 +174,9 @@ struct nack_to_isp {
} isp24;
} u;
uint8_t reserved[2];
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
+#define NOTIFY_ACK_FLAGS_FCSP BIT_5
#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
@@ -206,16 +206,16 @@ struct ctio_to_2xxx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
- uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t relative_offset;
- uint32_t residual;
- uint16_t reserved_1[3];
- uint16_t scsi_status;
- uint32_t transfer_length;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 relative_offset;
+ __le32 residual;
+ __le16 reserved_1[3];
+ __le16 scsi_status;
+ __le32 transfer_length;
struct dsd32 dsd[3];
} __packed;
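/*
 * A sketch of what the __le16/__le32 re-typing in this header buys:
 * with sparse (make C=1), mixing CPU-order and wire-order values
 * without a conversion helper is flagged at build time. The struct
 * below is illustrative, not a real IOCB.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire {
	__le16 flags;
	__le32 residual;
} __packed;

static u32 demo_read_residual(const struct demo_wire *w)
{
	/* returning w->residual directly would trip sparse's __bitwise check */
	return le32_to_cpu(w->residual);
}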
#define ATIO_PATH_INVALID 0x07
@@ -239,11 +239,16 @@ struct ctio_to_2xxx {
#define CTIO_PORT_LOGGED_OUT 0x29
#define CTIO_PORT_CONF_CHANGED 0x2A
#define CTIO_SRR_RECEIVED 0x45
+#define CTIO_FAST_AUTH_ERR 0x63
+#define CTIO_FAST_INCOMP_PAD_LEN 0x65
+#define CTIO_FAST_INVALID_REQ 0x66
+#define CTIO_FAST_SPI_ERR 0x67
#endif
#ifndef CTIO_RET_TYPE
#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+#endif
struct fcp_hdr {
uint8_t r_ctl;
@@ -257,8 +262,8 @@ struct fcp_hdr {
uint16_t seq_cnt;
__be16 ox_id;
uint16_t rx_id;
- uint32_t parameter;
-} __packed;
+ __le32 parameter;
+};
struct fcp_hdr_le {
le_id_t d_id;
@@ -267,13 +272,13 @@ struct fcp_hdr_le {
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t parameter;
-} __packed;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 parameter;
+};
#define F_CTL_EXCH_CONTEXT_RESP BIT_23
#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
@@ -306,7 +311,7 @@ struct atio7_fcp_cmnd {
* BUILD_BUG_ON in qlt_init().
*/
uint8_t add_cdb[4];
- /* uint32_t data_length; */
+ /* __le32 data_length; */
} __packed;
/*
@@ -316,31 +321,31 @@ struct atio7_fcp_cmnd {
struct atio_from_isp {
union {
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
uint8_t command_ref;
uint8_t task_codes;
uint8_t task_flags;
uint8_t execution_codes;
uint8_t cdb[MAX_CMDSZ];
- uint32_t data_length;
- uint16_t lun;
+ __le32 data_length;
+ __le16 lun;
uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
- uint16_t reserved_32[6];
- uint16_t ox_id;
+ __le16 reserved_32[6];
+ __le16 ox_id;
} isp2x;
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t fcp_cmnd_len_low;
uint8_t fcp_cmnd_len_high:4;
uint8_t attr:4;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
struct fcp_hdr fcp_hdr;
struct atio7_fcp_cmnd fcp_cmnd;
@@ -352,7 +357,7 @@ struct atio_from_isp {
#define FCP_CMD_LENGTH_MASK 0x0fff
#define FCP_CMD_LENGTH_MIN 0x38
uint8_t data[56];
- uint32_t signature;
+ __le32 signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
@@ -395,36 +400,45 @@ struct ctio7_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t nport_handle;
+ __le16 nport_handle;
#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags;
le_id_t initiator_id;
uint8_t reserved;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
union {
struct {
- uint16_t reserved1;
+ __le16 reserved1;
__le16 flags;
- uint32_t residual;
+ union {
+ __le32 residual;
+ struct {
+ uint8_t rsvd1;
+ uint8_t edif_flags;
+#define EF_EN_EDIF BIT_0
+#define EF_NEW_SA BIT_1
+ uint16_t rsvd2;
+ };
+ };
__le16 ox_id;
- uint16_t scsi_status;
- uint32_t relative_offset;
- uint32_t reserved2;
- uint32_t transfer_length;
- uint32_t reserved3;
+ __le16 scsi_status;
+ __le32 relative_offset;
+ __le32 reserved2;
+ __le32 transfer_length;
+ __le32 reserved3;
struct dsd64 dsd;
} status0;
struct {
- uint16_t sense_length;
+ __le16 sense_length;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint16_t response_len;
- uint16_t reserved;
+ __le16 scsi_status;
+ __le16 response_len;
+ __le16 reserved;
uint8_t sense_data[24];
} status1;
} u;
@@ -440,18 +454,18 @@ struct ctio7_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t status;
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 status;
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t reserved1[5];
- uint32_t exchange_address;
- uint16_t reserved2;
- uint16_t flags;
- uint32_t residual;
- uint16_t ox_id;
- uint16_t reserved3;
- uint32_t relative_offset;
+ __le32 exchange_address;
+ __le16 edif_sa_index;
+ __le16 flags;
+ __le32 residual;
+ __le16 ox_id;
+ __le16 reserved3;
+ __le32 relative_offset;
uint8_t reserved4[24];
} __packed;
@@ -489,29 +503,29 @@ struct ctio_crc2_to_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
__le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
le_id_t initiator_id; /* initiator ID */
uint8_t reserved1;
- uint32_t exchange_addr; /* rcv exchange address */
- uint16_t reserved2;
+ __le32 exchange_addr; /* rcv exchange address */
+ __le16 reserved2;
__le16 flags; /* refer to CTIO7 flags values */
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
+ __le16 scsi_status;
__le32 relative_offset;
- uint32_t reserved5;
+ __le32 reserved5;
__le32 transfer_length; /* total fc transfer length */
- uint32_t reserved6;
+ __le32 reserved6;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
- uint16_t reserved_1; /* MUST be set to 0. */
+ __le16 crc_context_len; /* Data segment length. */
+ __le16 reserved_1; /* MUST be set to 0. */
};
/* CTIO Type CRC_x Status IOCB */
@@ -522,20 +536,20 @@ struct ctio_crc_from_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t status;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t reserved1;
- uint16_t state_flags;
+ __le16 status;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 reserved1;
+ __le16 state_flags;
#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
- uint32_t exchange_address; /* rcv exchange address */
- uint16_t reserved2;
- uint16_t flags;
- uint32_t resid_xfer_length;
- uint16_t ox_id;
+ __le32 exchange_address; /* rcv exchange address */
+ __le16 reserved2;
+ __le16 flags;
+ __le32 resid_xfer_length;
+ __le16 ox_id;
uint8_t reserved3[12];
- uint16_t runt_guard; /* reported runt blk guard */
+ __le16 runt_guard; /* reported runt blk guard */
uint8_t actual_dif[8];
uint8_t expected_dif[8];
} __packed;
@@ -558,29 +572,29 @@ struct abts_recv_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint8_t reserved_1[6];
- uint16_t nport_handle;
+ __le16 nport_handle;
uint8_t reserved_2[2];
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[16];
- uint32_t exchange_addr_to_abort;
+ __le32 exchange_addr_to_abort;
} __packed;
#define ABTS_PARAM_ABORT_SEQ BIT_0
struct ba_acc_le {
- uint16_t reserved;
+ __le16 reserved;
uint8_t seq_id_last;
uint8_t seq_id_valid;
#define SEQ_ID_VALID 0x80
#define SEQ_ID_INVALID 0x00
- uint16_t rx_id;
- uint16_t ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} __packed;
struct ba_rjt_le {
@@ -604,21 +618,21 @@ struct abts_resp_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t reserved_1;
- uint16_t nport_handle;
- uint16_t control_flags;
+ __le16 reserved_1;
+ __le16 nport_handle;
+ __le16 control_flags;
#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
union {
struct ba_acc_le ba_acct;
struct ba_rjt_le ba_rjt;
} __packed payload;
- uint32_t reserved_4;
- uint32_t exchange_addr_to_abort;
+ __le32 reserved_4;
+ __le32 exchange_addr_to_abort;
} __packed;
/*
@@ -634,21 +648,21 @@ struct abts_resp_from_24xx_fw {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t compl_status;
+ __le16 compl_status;
#define ABTS_RESP_COMPL_SUCCESS 0
#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
- uint16_t nport_handle;
- uint16_t reserved_1;
+ __le16 nport_handle;
+ __le16 reserved_1;
uint8_t reserved_2;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[8];
- uint32_t error_subcode1;
+ __le32 error_subcode1;
#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
- uint32_t error_subcode2;
- uint32_t exchange_addr_to_abort;
+ __le32 error_subcode2;
+ __le32 exchange_addr_to_abort;
} __packed;
/********************************************************************\
@@ -671,6 +685,8 @@ struct qla_tgt_func_tmpl {
void (*handle_data)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
+ struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+ void (*rel_cmd)(struct qla_tgt_cmd *);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
void (*free_session)(struct fc_port *);
@@ -797,9 +813,6 @@ struct qla_tgt {
/* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
int sess_count;
- /* Protected by hardware_lock */
- struct list_head del_sess_list;
-
spinlock_t sess_work_lock;
struct list_head sess_works_list;
struct work_struct sess_work;
@@ -854,6 +867,7 @@ struct qla_tgt_cmd {
uint8_t cmd_type;
uint8_t pad[7];
struct se_cmd se_cmd;
+ struct list_head sess_cmd_list;
struct fc_port *sess;
struct qla_qpair *qpair;
uint32_t reset_count;
@@ -866,12 +880,12 @@ struct qla_tgt_cmd {
/* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
- unsigned int free_sg:1;
unsigned int write_data_transferred:1;
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
+ unsigned int edif:1;
/*
* This variable may be set from outside the LIO and I/O completion
@@ -928,7 +942,6 @@ struct qla_tgt_sess_work_param {
struct list_head sess_works_list_entry;
#define QLA_TGT_SESS_WORK_ABORT 1
-#define QLA_TGT_SESS_WORK_TM 2
int type;
union {
@@ -1062,8 +1075,6 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
-extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
- struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 5b0c057def2b..b0a74b036cf4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_tmpl.h"
@@ -48,7 +47,7 @@ qla27xx_read8(void __iomem *window, void *buf, ulong *len)
uint8_t value = ~0;
if (buf) {
- value = RD_REG_BYTE(window);
+ value = rd_reg_byte(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -59,7 +58,7 @@ qla27xx_read16(void __iomem *window, void *buf, ulong *len)
uint16_t value = ~0;
if (buf) {
- value = RD_REG_WORD(window);
+ value = rd_reg_word(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -70,7 +69,7 @@ qla27xx_read32(void __iomem *window, void *buf, ulong *len)
uint32_t value = ~0;
if (buf) {
- value = RD_REG_DWORD(window);
+ value = rd_reg_dword(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -99,7 +98,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
if (buf) {
void __iomem *window = (void __iomem *)reg + offset;
- WRT_REG_DWORD(window, data);
+ wrt_reg_dword(window, data);
}
}
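The upper-case RD_REG_*/WRT_REG_* macros give way to typed inline accessors here. A minimal sketch of what those lower-case helpers look like, assuming they are thin wrappers around the kernel's MMIO primitives (the exact qla_def.h definitions are not part of this hunk):

static inline u8 rd_reg_byte(const volatile void __iomem *addr)
{
	return readb(addr);	/* 8-bit MMIO read */
}

static inline u16 rd_reg_word(const volatile void __iomem *addr)
{
	return readw(addr);	/* 16-bit MMIO read */
}

static inline void wrt_reg_dword(volatile void __iomem *addr, u32 data)
{
	writel(data, addr);	/* 32-bit MMIO write */
}

Being real functions rather than macros, they type-check the __iomem pointer and the access width at every call site.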
@@ -436,8 +435,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20a,
"%s: reset risc [%lx]\n", __func__, *len);
- if (buf)
- WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
+ if (buf) {
+ if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "%s: unable to soft reset\n", __func__);
+ return INVALID_ENTRY;
+ }
+ }
return qla27xx_next_entry(ent);
}
@@ -870,7 +874,7 @@ bailout:
static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
- tmp->capture_timestamp = jiffies;
+ tmp->capture_timestamp = cpu_to_le32(jiffies);
}
static void
@@ -879,25 +883,26 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
WARN_ON_ONCE(sscanf(qla2x00_version_str,
- "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
- v+0, v+1, v+2, v+3, v+4, v+5) != 6);
+ "%hhu.%hhu.%hhu.%hhu",
+ v + 0, v + 1, v + 2, v + 3) != 4);
- tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
- tmp->driver_info[1] = v[5] << 8 | v[4];
- tmp->driver_info[2] = 0x12345678;
+ tmp->driver_info[0] = cpu_to_le32(
+ v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
+ tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
+ tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678);
}
static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
struct qla27xx_fwdt_template *tmp)
{
- tmp->firmware_version[0] = vha->hw->fw_major_version;
- tmp->firmware_version[1] = vha->hw->fw_minor_version;
- tmp->firmware_version[2] = vha->hw->fw_subminor_version;
- tmp->firmware_version[3] =
- vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
- tmp->firmware_version[4] =
- vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+ tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
+ tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
+ tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
+ tmp->firmware_version[3] = cpu_to_le32(
+ vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
+ tmp->firmware_version[4] = cpu_to_le32(
+ vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
}
static void
@@ -928,7 +933,8 @@ qla27xx_template_checksum(void *p, ulong size)
static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+ return qla27xx_template_checksum(tmp,
+ le32_to_cpu(tmp->template_size)) == 0;
}
static inline int
@@ -944,7 +950,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
- len = tmp->template_size;
+ len = le32_to_cpu(tmp->template_size);
tmp = memcpy(buf, tmp, len);
ql27xx_edit_template(vha, tmp);
qla27xx_walk_template(vha, tmp, buf, &len);
@@ -960,7 +966,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
- len = tmp->template_size;
+ len = le32_to_cpu(tmp->template_size);
qla27xx_walk_template(vha, tmp, NULL, &len);
}
@@ -972,7 +978,7 @@ qla27xx_fwdt_template_size(void *p)
{
struct qla27xx_fwdt_template *tmp = p;
- return tmp->template_size;
+ return le32_to_cpu(tmp->template_size);
}
int
@@ -997,14 +1003,65 @@ qla27xx_fwdt_template_valid(void *p)
}
void
-qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
-#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-#endif
+ if (!vha->hw->mpi_fw_dump) {
+ ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
+ } else {
+ struct fwdt *fwdt = &vha->hw->fwdt[1];
+ ulong len;
+ void *buf = vha->hw->mpi_fw_dump;
+ bool walk_template_only = false;
+
+ if (vha->hw->mpi_fw_dumped) {
+ /* Use the spare area for any further dumps. */
+ buf += fwdt->dump_size;
+ walk_template_only = true;
+ ql_log(ql_log_warn, vha, 0x02f4,
+ "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
+ buf);
+ }
+
+ ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x02f6,
+ "-> fwdt1 no template\n");
+ goto bailout;
+ }
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0x02f7,
+ "-> fwdt1 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ }
+ vha->hw->stat.num_mpi_reset++;
+ if (walk_template_only)
+ goto bailout;
+
+ vha->hw->mpi_fw_dump_len = len;
+ vha->hw->mpi_fw_dumped = 1;
+
+ ql_log(ql_log_warn, vha, 0x02f8,
+ "-> MPI firmware dump saved to buffer (%lu/%p)\n",
+ vha->host_no, vha->hw->mpi_fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
+
+bailout:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha)
+{
+ lockdep_assert_held(&vha->hw->hardware_lock);
if (!vha->hw->fw_dump) {
ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
@@ -1014,42 +1071,30 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
vha->hw->fw_dump);
} else {
struct fwdt *fwdt = vha->hw->fwdt;
- uint j;
ulong len;
void *buf = vha->hw->fw_dump;
- uint count = vha->hw->fw_dump_mpi ? 2 : 1;
-
- for (j = 0; j < count; j++, fwdt++, buf += len) {
- ql_log(ql_log_warn, vha, 0xd011,
- "-> fwdt%u running...\n", j);
- if (!fwdt->template) {
- ql_log(ql_log_warn, vha, 0xd012,
- "-> fwdt%u no template\n", j);
- break;
- }
- len = qla27xx_execute_fwdt_template(vha,
- fwdt->template, buf);
- if (len == 0) {
- goto bailout;
- } else if (len != fwdt->dump_size) {
- ql_log(ql_log_warn, vha, 0xd013,
- "-> fwdt%u fwdump residual=%+ld\n",
- j, fwdt->dump_size - len);
- }
+
+ ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "-> fwdt0 no template\n");
+ return;
}
- vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
- vha->hw->fw_dumped = 1;
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ return;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "-> fwdt0 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ }
+
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = true;
ql_log(ql_log_warn, vha, 0xd015,
"-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
-
-bailout:
- vha->hw->fw_dump_mpi = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-#endif
}
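Note the changed locking contract: qla27xx_fwdump() no longer takes hardware_lock itself (the old #ifndef __CHECKER__ dance), it now requires the caller to hold it and states that with lockdep_assert_held(). The pattern, as a small self-contained sketch:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Caller must hold *lock; with CONFIG_LOCKDEP=y a violation splats at
 * runtime, and the check compiles away on non-debug builds. */
static void touch_protected_state(spinlock_t *lock)
{
	lockdep_assert_held(lock);
	/* ... access state guarded by *lock ... */
}

This is both documentation and an enforceable assertion, unlike the removed conditional locking, which sparse's __CHECKER__ had to be shielded from.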
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index d2a0014e8b21..6e0987edfceb 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_DMP27_H__
@@ -13,21 +12,21 @@
struct __packed qla27xx_fwdt_template {
__le32 template_type;
__le32 entry_offset;
- uint32_t template_size;
+ __le32 template_size;
uint32_t count; /* borrow field for running/residual count */
__le32 entry_count;
uint32_t template_version;
- uint32_t capture_timestamp;
+ __le32 capture_timestamp;
uint32_t template_checksum;
uint32_t reserved_2;
- uint32_t driver_info[3];
+ __le32 driver_info[3];
uint32_t saved_state[16];
uint32_t reserved_3[8];
- uint32_t firmware_version[5];
+ __le32 firmware_version[5];
};
#define TEMPLATE_TYPE_FWDUMP 99
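The uint32_t to __le32 conversions above (and throughout this series) feed sparse's __bitwise endianness checking: a field holding firmware data is declared with its wire byte order, and every access must then go through an explicit conversion or sparse warns. The resulting access pattern, taken from the qla_tmpl.c hunks above:

/* field declared in wire (little-endian) byte order */
__le32 template_size;

/* read: convert to host byte order */
ulong len = le32_to_cpu(tmp->template_size);

/* write: convert from host byte order */
tmp->capture_timestamp = cpu_to_le32(jiffies);

On little-endian hosts the conversions compile to nothing; on big-endian hosts they byte-swap, so the same source is correct either way.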
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index bb03c022e023..03f3e2cd62b5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.22-k"
+#define QLA2XXX_VERSION "10.02.07.900-k"
#define QLA_DRIVER_MAJOR_VER 10
-#define QLA_DRIVER_MINOR_VER 1
-#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 0
+#define QLA_DRIVER_MINOR_VER 2
+#define QLA_DRIVER_PATCH_VER 7
+#define QLA_DRIVER_BETA_VER 900
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index abe7f79bb789..8fa0056b56dd 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -255,6 +255,7 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
cmd->cmd_in_wq = 0;
@@ -265,9 +266,36 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
cmd->trc_flags |= TRC_CMD_FREE;
cmd->cmd_sent_to_fw = 0;
+ spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags);
+ list_del_init(&cmd->sess_cmd_list);
+ spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags);
+
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
+static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag, cpu;
+
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ return NULL;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+ cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
+
+ return cmd;
+}
+
+static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
+{
+ target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
+}
+
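The new get_cmd/rel_cmd callbacks replace per-command kmem-cache allocation with the session's pre-allocated tag pool (sess_tag_pool/sess_cmd_map above). The LLD side of the pairing lives in qla_target.c rather than in this file; roughly, as a sketch:

struct qla_tgt_cmd *cmd;

cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);	/* grab a tag + map slot */
if (unlikely(!cmd))
	return -ENOMEM;				/* pool exhausted, e.g. queue full */

/* ... fill in the command from the ATIO and hand it to the core ... */

vha->hw->tgt.tgt_ops->rel_cmd(cmd);		/* error path: return the tag */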
/*
* Called from qla_target_template->free_cmd(), and will call
* tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
@@ -340,15 +368,10 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
struct fc_port *sess = se_sess->fabric_sess_ptr;
- struct scsi_qla_host *vha;
- unsigned long flags;
BUG_ON(!sess);
- vha = sess->vha;
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ target_stop_session(se_sess);
sess->explicit_logout = 1;
tcm_qla2xxx_put_sess(sess);
@@ -428,13 +451,14 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
struct se_portal_group *se_tpg;
struct tcm_qla2xxx_tpg *tpg;
#endif
- int flags = TARGET_SCF_ACK_KREF;
+ int rc, target_flags = TARGET_SCF_ACK_KREF;
+ unsigned long flags;
if (bidi)
- flags |= TARGET_SCF_BIDI_OP;
+ target_flags |= TARGET_SCF_BIDI_OP;
if (se_cmd->cpuid != WORK_CPU_UNBOUND)
- flags |= TARGET_SCF_USE_CPUID;
+ target_flags |= TARGET_SCF_USE_CPUID;
sess = cmd->sess;
if (!sess) {
@@ -456,11 +480,24 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return 0;
}
#endif
-
cmd->qpair->tgt_counters.qla_core_sbt_cmd++;
- return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
- cmd->unpacked_lun, data_length, fcp_task_attr,
- data_dir, flags);
+
+ spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+ list_add_tail(&cmd->sess_cmd_list, &sess->sess_cmd_list);
+ spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
+
+ rc = target_init_cmd(se_cmd, se_sess, &cmd->sense_buffer[0],
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, target_flags);
+ if (rc)
+ return rc;
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return 0;
+
+ target_submit(se_cmd);
+ return 0;
}
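The one-shot target_submit_cmd() call is replaced by a three-stage sequence, which lets the driver put the command on its own sess_cmd_list before the core can complete it: target_init_cmd() only fails when the session is being torn down (the caller still owns the command then), target_submit_prep() parses the CDB and on failure completes the command internally so the caller just returns, and target_submit() finally queues the command for execution. Stripped of the driver specifics, the flow is:

rc = target_init_cmd(se_cmd, se_sess, sense_buf, unpacked_lun,
		     data_length, task_attr, data_dir, flags);
if (rc)			/* session shutting down: nothing was queued */
	return rc;

if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, GFP_KERNEL))
	return 0;	/* core already completed the command with an error */

target_submit(se_cmd);	/* hand off for execution */
return 0;

The NULL/0 scatterlist arguments mean the fabric driver lets the core allocate the data buffers, as tcm_qla2xxx does.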
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
@@ -551,13 +588,11 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
struct fc_port *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
int transl_tmr_func = 0;
- int flags = TARGET_SCF_ACK_KREF;
switch (tmr_func) {
case QLA_TGT_ABTS:
pr_debug("%ld: ABTS received\n", sess->vha->host_no);
transl_tmr_func = TMR_ABORT_TASK;
- flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
break;
case QLA_TGT_2G_ABORT_TASK:
pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
@@ -590,31 +625,26 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
}
return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
- transl_tmr_func, GFP_ATOMIC, tag, flags);
+ transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess,
uint64_t tag)
{
- struct qla_tgt_cmd *cmd = NULL;
- struct se_cmd *secmd;
+ struct qla_tgt_cmd *cmd;
unsigned long flags;
if (!sess->se_sess)
return NULL;
- spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
- list_for_each_entry(secmd, &sess->se_sess->sess_cmd_list, se_cmd_list) {
- /* skip task management functions, including tmr->task_cmd */
- if (secmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- continue;
-
- if (secmd->tag == tag) {
- cmd = container_of(secmd, struct qla_tgt_cmd, se_cmd);
- break;
- }
+ spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list) {
+ if (cmd->se_cmd.tag == tag)
+ goto done;
}
- spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);
+ cmd = NULL;
+done:
+ spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
return cmd;
}
@@ -623,7 +653,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- struct scsi_qla_host *vha = cmd->vha;
if (cmd->aborted) {
/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
@@ -636,7 +665,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->se_cmd.transport_state,
cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0;
}
@@ -664,7 +692,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- struct scsi_qla_host *vha = cmd->vha;
int xmit_type = QLA_TGT_XMIT_STATUS;
if (cmd->aborted) {
@@ -678,7 +705,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd, kref_read(&cmd->se_cmd.cmd_kref),
cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0;
}
cmd->bufflen = se_cmd->data_length;
@@ -744,11 +770,19 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
- struct qla_tgt_cmd *cmd = container_of(se_cmd,
- struct qla_tgt_cmd, se_cmd);
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
- if (qlt_abort_cmd(cmd))
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
return;
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+ spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags);
+ list_del_init(&cmd->sess_cmd_list);
+ spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags);
+
+ qlt_abort_cmd(cmd);
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -797,7 +831,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
{
- target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_stop_session(sess->se_sess);
}
static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
@@ -827,7 +861,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
\
- return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+ return sprintf(page, "%d\n", tpg->tpg_attrib.name); \
} \
\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \
@@ -881,40 +915,17 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
/* End items for tcm_qla2xxx_tpg_attrib_cit */
-static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
- char *page)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
-
- return snprintf(page, PAGE_SIZE, "%d\n",
- atomic_read(&tpg->lport_tpg_enabled));
-}
-
-static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
+static int tcm_qla2xxx_enable_tpg(struct se_portal_group *se_tpg,
+ bool enable)
{
- struct se_portal_group *se_tpg = to_tpg(item);
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- unsigned long op;
- int rc;
- rc = kstrtoul(page, 0, &op);
- if (rc < 0) {
- pr_err("kstrtoul() returned %d\n", rc);
- return -EINVAL;
- }
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %lu\n", op);
- return -EINVAL;
- }
- if (op) {
+ if (enable) {
if (atomic_read(&tpg->lport_tpg_enabled))
return -EEXIST;
@@ -922,13 +933,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
qlt_enable_vha(vha);
} else {
if (!atomic_read(&tpg->lport_tpg_enabled))
- return count;
+ return 0;
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
- return count;
+ return 0;
}
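The boilerplate removed here (kstrtoul(), the 0/1 range check) moved into target core: fabric drivers now publish a fabric_enable_tpg() op and the core implements the configfs "enable" attribute once. A sketch of what the core-side store plausibly reduces to (paraphrased, not the verbatim core code):

bool op;
int ret = kstrtobool(page, &op);	/* parsing done once, in the core */

if (ret)
	return ret;
ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, op);
return ret ? ret : count;

That is why both enable handlers in this file now return 0/-errno instead of a byte count.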
static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item,
@@ -969,12 +981,10 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
}
-CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
- &tcm_qla2xxx_tpg_attr_enable,
&tcm_qla2xxx_tpg_attr_dynamic_sessions,
&tcm_qla2xxx_tpg_attr_fabric_prot_type,
NULL,
@@ -1048,35 +1058,17 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
kfree(tpg);
}
-static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item,
- char *page)
+static int tcm_qla2xxx_npiv_enable_tpg(struct se_portal_group *se_tpg,
+ bool enable)
{
- return tcm_qla2xxx_tpg_enable_show(item, page);
-}
-
-static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
- const char *page, size_t count)
-{
- struct se_portal_group *se_tpg = to_tpg(item);
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- unsigned long op;
- int rc;
- rc = kstrtoul(page, 0, &op);
- if (rc < 0) {
- pr_err("kstrtoul() returned %d\n", rc);
- return -EINVAL;
- }
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %lu\n", op);
- return -EINVAL;
- }
- if (op) {
+ if (enable) {
if (atomic_read(&tpg->lport_tpg_enabled))
return -EEXIST;
@@ -1084,22 +1076,16 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
qlt_enable_vha(vha);
} else {
if (!atomic_read(&tpg->lport_tpg_enabled))
- return count;
+ return 0;
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
- return count;
+ return 0;
}
-CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable);
-
-static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
- &tcm_qla2xxx_npiv_tpg_attr_enable,
- NULL,
-};
-
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
const char *name)
{
@@ -1369,8 +1355,6 @@ static void tcm_qla2xxx_free_session(struct fc_port *sess)
struct se_session *se_sess;
struct tcm_qla2xxx_lport *lport;
- BUG_ON(in_interrupt());
-
se_sess = sess->se_sess;
if (!se_sess) {
pr_err("struct fc_port->se_sess is NULL\n");
@@ -1544,11 +1528,13 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
/*
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/
-static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag,
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
+ .get_cmd = tcm_qla2xxx_get_cmd,
+ .rel_cmd = tcm_qla2xxx_rel_cmd,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
.free_session = tcm_qla2xxx_free_session,
@@ -1842,6 +1828,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.fabric_make_wwn = tcm_qla2xxx_make_lport,
.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_make_tpg,
+ .fabric_enable_tpg = tcm_qla2xxx_enable_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
@@ -1882,11 +1869,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
+ .fabric_enable_tpg = tcm_qla2xxx_npiv_enable_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
- .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs,
};
static int tcm_qla2xxx_register_configfs(void)
@@ -1933,6 +1920,21 @@ static int __init tcm_qla2xxx_init(void)
{
int ret;
+ BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32);
+ BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64);
+ BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12);
+ BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4);
+ BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+ BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24);
+ BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24);
+ BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64);
+
ret = tcm_qla2xxx_register_configfs();
if (ret < 0)
return ret;
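These BUILD_BUG_ON() lines pin the layout of every structure this file exchanges with the ISP: response/ATIO entries must fill exactly one 64-byte queue slot and the FC headers exactly 24 bytes, so any padding accidentally introduced by the __le16/__le32 rework breaks the build instead of silently corrupting the wire format. For reference, the C11 spelling of the same guarantee would be:

/* equivalent compile-time checks via <linux/build_bug.h> static_assert */
static_assert(sizeof(struct fcp_hdr) == 24, "FC frame header size");
static_assert(sizeof(struct ctio7_to_24xx) == 64, "IOCB must fill one queue slot");

BUILD_BUG_ON() has to live inside a function, which is why the checks sit at the top of module init rather than next to the structure definitions.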
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 4bdf31b1407a..2fa249db6656 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -4,6 +4,6 @@ config SCSI_QLA_ISCSI
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
select ISCSI_BOOT_SYSFS
- ---help---
+ help
This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
and 8032 (ISP83XX) iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 638f72c5ab05..db41d90a5b6e 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/ratelimit.h>
@@ -473,8 +472,7 @@ int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
} else if (device_map[i].device_type == ISCSI_CLASS) {
if (drv_active & (1 << device_map[i].func_num)) {
if (!iscsi_present ||
- (iscsi_present &&
- (iscsi_func_low > device_map[i].func_num)))
+ iscsi_func_low > device_map[i].func_num)
iscsi_func_low = device_map[i].func_num;
iscsi_present++;
@@ -1406,16 +1404,16 @@ exit_isp_reset:
static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
{
u32 val = 0, val1 = 0;
- int i, status = QLA_SUCCESS;
+ int i;
- status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
/* Port 0 Rx Buffer Pause Threshold Registers. */
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
for (i = 0; i < 8; i++) {
- status = qla4_83xx_rd_reg_indirect(ha,
+ qla4_83xx_rd_reg_indirect(ha,
QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
DEBUG2(pr_info("0x%x ", val));
}
@@ -1426,7 +1424,7 @@ static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
for (i = 0; i < 8; i++) {
- status = qla4_83xx_rd_reg_indirect(ha,
+ qla4_83xx_rd_reg_indirect(ha,
QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
DEBUG2(pr_info("0x%x ", val));
}
@@ -1437,7 +1435,7 @@ static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
for (i = 0; i < 4; i++) {
- status = qla4_83xx_rd_reg_indirect(ha,
+ qla4_83xx_rd_reg_indirect(ha,
QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
DEBUG2(pr_info("0x%x ", val));
}
@@ -1448,7 +1446,7 @@ static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
for (i = 0; i < 4; i++) {
- status = qla4_83xx_rd_reg_indirect(ha,
+ qla4_83xx_rd_reg_indirect(ha,
QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
DEBUG2(pr_info("0x%x ", val));
}
@@ -1459,15 +1457,11 @@ static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
for (i = 7; i >= 0; i--) {
- status = qla4_83xx_rd_reg_indirect(ha,
- QLA83XX_PORT0_RXB_TC_STATS,
- &val);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
(val | (i << 29)));
- status = qla4_83xx_rd_reg_indirect(ha,
- QLA83XX_PORT0_RXB_TC_STATS,
- &val);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
DEBUG2(pr_info("0x%x ", val));
}
@@ -1477,24 +1471,18 @@ static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
for (i = 7; i >= 0; i--) {
- status = qla4_83xx_rd_reg_indirect(ha,
- QLA83XX_PORT1_RXB_TC_STATS,
- &val);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
(val | (i << 29)));
- status = qla4_83xx_rd_reg_indirect(ha,
- QLA83XX_PORT1_RXB_TC_STATS,
- &val);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
DEBUG2(pr_info("0x%x ", val));
}
DEBUG2(pr_info("\n"));
- status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
- &val);
- status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
- &val1);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val);
+ qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1);
DEBUG2(ql4_printk(KERN_INFO, ha,
"IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index 775fdf9fcc87..f10167c71dd3 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL483XX_H
@@ -87,23 +86,6 @@
#define QLA83XX_FW_API 0x356C
#define QLA83XX_DRV_OP_MODE 0x3570
-static const uint32_t qla4_83xx_reg_tbl[] = {
- QLA83XX_PEG_HALT_STATUS1,
- QLA83XX_PEG_HALT_STATUS2,
- QLA83XX_PEG_ALIVE_COUNTER,
- QLA83XX_CRB_DRV_ACTIVE,
- QLA83XX_CRB_DEV_STATE,
- QLA83XX_CRB_DRV_STATE,
- QLA83XX_CRB_DRV_SCRATCH,
- QLA83XX_CRB_DEV_PART_INFO1,
- QLA83XX_CRB_IDC_VER_MAJOR,
- QLA83XX_FW_VER_MAJOR,
- QLA83XX_FW_VER_MINOR,
- QLA83XX_FW_VER_SUB,
- QLA83XX_CMDPEG_STATE,
- QLA83XX_ASIC_TEMP,
-};
-
#define QLA83XX_CRB_WIN_BASE 0x3800
#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4))
#define QLA83XX_SEM_LOCK_BASE 0x3840
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 463239c972b0..abfa6ef60480 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
@@ -331,21 +330,30 @@ static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
-struct device_attribute *qla4xxx_host_attrs[] = {
- &dev_attr_fw_version,
- &dev_attr_serial_num,
- &dev_attr_iscsi_version,
- &dev_attr_optrom_version,
- &dev_attr_board_id,
- &dev_attr_fw_state,
- &dev_attr_phy_port_cnt,
- &dev_attr_phy_port_num,
- &dev_attr_iscsi_func_cnt,
- &dev_attr_hba_model,
- &dev_attr_fw_timestamp,
- &dev_attr_fw_build_user,
- &dev_attr_fw_ext_timestamp,
- &dev_attr_fw_load_src,
- &dev_attr_fw_uptime,
+static struct attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_serial_num.attr,
+ &dev_attr_iscsi_version.attr,
+ &dev_attr_optrom_version.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_state.attr,
+ &dev_attr_phy_port_cnt.attr,
+ &dev_attr_phy_port_num.attr,
+ &dev_attr_iscsi_func_cnt.attr,
+ &dev_attr_hba_model.attr,
+ &dev_attr_fw_timestamp.attr,
+ &dev_attr_fw_build_user.attr,
+ &dev_attr_fw_ext_timestamp.attr,
+ &dev_attr_fw_load_src.attr,
+ &dev_attr_fw_uptime.attr,
NULL,
};
+
+static const struct attribute_group qla4xxx_host_attr_group = {
+ .attrs = qla4xxx_host_attrs
+};
+
+const struct attribute_group *qla4xxx_host_groups[] = {
+ &qla4xxx_host_attr_group,
+ NULL
+};
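Wrapping the attributes in an attribute_group array follows the SCSI midlayer's move from bare device_attribute lists to attribute groups. The matching host-template hookup is made elsewhere in this series (not visible in this hunk); it would look something like:

static struct scsi_host_template qla4xxx_driver_template = {
	/* ... */
	.shost_groups	= qla4xxx_host_groups,	/* was: .shost_attrs = qla4xxx_host_attrs */
};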
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 415ee5eb3fc7..c447a9d598a1 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2011-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
@@ -805,7 +804,7 @@ static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
/**
* qla4xxx_process_vendor_specific - handle vendor specific bsg request
- * @job: iscsi_bsg_job to handle
+ * @bsg_job: iscsi_bsg_job to handle
**/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
@@ -852,7 +851,7 @@ int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
/**
* qla4xxx_bsg_request - handle bsg request from ISCSI transport
- * @job: iscsi_bsg_job to handle
+ * @bsg_job: iscsi_bsg_job to handle
*/
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
index 88c2401910c0..06db38561566 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.h
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2011 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_BSG_H
#define __QL4_BSG_H
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 5649e9ef59a8..f43e675c5693 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index 51c365bcf912..171c89165009 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 817f312023a9..5f82c8afd5e0 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_DEF_H
@@ -217,11 +216,21 @@
#define IDC_COMP_TOV 5
#define LINK_UP_COMP_TOV 30
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+/*
+ * Note: the data structure below does not have a struct iscsi_cmd member since
+ * the qla4xxx driver does not use libiscsi for SCSI I/O.
+ */
+struct qla4xxx_cmd_priv {
+ struct srb *srb;
+};
+
+static inline struct qla4xxx_cmd_priv *qla4xxx_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
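This is the replacement for the CMD_SP() abuse of scsi_cmnd::SCp deleted just above: the midlayer now reserves driver-private space behind every command, sized by the host template, and scsi_cmd_priv() returns it. A sketch of the two ends of the hookup, assuming the host-template change made elsewhere in this series:

static struct scsi_host_template qla4xxx_driver_template = {
	/* ... */
	.cmd_size = sizeof(struct qla4xxx_cmd_priv),	/* per-command private area */
};

/* I/O path: attach/retrieve the srb */
qla4xxx_cmd_priv(cmd)->srb = srb;	/* instead of CMD_SP(cmd) = (char *)srb */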
/*
- * SCSI Request Block structure (srb) that is placed
- * on cmd->SCp location of every I/O [We have 22 bytes available]
+ * SCSI Request Block structure (srb) that is associated with each scsi_cmnd.
*/
struct srb {
struct list_head list; /* (8) */
@@ -367,13 +376,13 @@ struct qla4_work_evt {
struct {
enum iscsi_host_event_code code;
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
} aen;
struct {
uint32_t status;
uint32_t pid;
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
} ping;
} u;
};
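The data[0] members become C99 flexible array members, the form the compiler (and fortify/array-bounds checking) actually understands for trailing variable-length buffers. Allocation typically pairs such a struct with struct_size() from <linux/overflow.h>; an illustrative allocation (not the driver's actual helper):

struct qla4_work_evt *e;

/* sizeof(*e) + data_size * sizeof(e->u.aen.data[0]), with integer
 * overflow checking folded in */
e = kzalloc(struct_size(e, u.aen.data, data_size), GFP_ATOMIC);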
@@ -436,9 +445,9 @@ struct isp_operations {
void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
- int (*idc_lock) (struct scsi_qla_host *);
+ int (*idc_lock) (struct scsi_qla_host *); /* Context: task, can sleep */
void (*idc_unlock) (struct scsi_qla_host *);
- void (*rom_lock_recovery) (struct scsi_qla_host *);
+ void (*rom_lock_recovery) (struct scsi_qla_host *); /* Context: task, can sleep */
void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
};
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 699575efc9ba..860ec61b51b9 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QLA4X_FW_H
@@ -1029,7 +1028,7 @@ struct crash_record {
uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
uint8_t in_RISC_reg_dump[256]; /*180 -27F */
- uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */
+ uint8_t in_out_RISC_stack_dump[]; /*280 - ??? */
};
struct conn_event_log_entry {
@@ -1182,7 +1181,6 @@ struct status_entry {
uint32_t handle; /* 04-07 */
uint8_t scsiStatus; /* 08 */
-#define SCSI_CHECK_CONDITION 0x02
uint8_t iscsiFlags; /* 09 */
#define ISCSI_FLAG_RESIDUAL_UNDER 0x02
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index bce96a58f14e..c0873381508d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA4x_GBL_H
@@ -115,7 +114,6 @@ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
-int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
@@ -288,5 +286,6 @@ extern int ql4xenablemsix;
extern int ql4xmdcapmask;
extern int ql4xenablemd;
-extern struct device_attribute *qla4xxx_host_attrs[];
+extern const struct attribute_group *qla4xxx_host_groups[];
+
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 2bf5e3e639e1..301bc09c8365 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <scsi/iscsi_if.h>
@@ -14,7 +13,6 @@
static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
{
uint32_t value;
- uint8_t func_number;
unsigned long flags;
/* Get the function number */
@@ -22,7 +20,6 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
value = readw(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- func_number = (uint8_t) ((value >> 4) & 0x30);
switch (value & ISP_CONTROL_FN_MASK) {
case ISP_CONTROL_FN0_SCSI:
ha->mac_index = 1;
@@ -122,8 +119,8 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
* the interrupt_handler to think there are responses to be
* processed when there aren't.
*/
- ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
- ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+ ha->shadow_regs->req_q_out = cpu_to_le32(0);
+ ha->shadow_regs->rsp_q_in = cpu_to_le32(0);
wmb();
writel(0, &ha->reg->req_q_in);
@@ -667,6 +664,9 @@ void qla4xxx_pci_config(struct scsi_qla_host *ha)
pci_set_master(ha->pdev);
status = pci_set_mwi(ha->pdev);
+ if (status)
+ ql4_printk(KERN_WARNING, ha, "Failed to set MWI\n");
+
/*
* We want to respect framework's setting of PCI configuration space
* command register and also want to make sure that all bits of
@@ -945,6 +945,7 @@ void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
/**
* qla4xxx_initialize_adapter - initializes hba
* @ha: Pointer to host adapter structure.
+ * @is_reset: Is this init path or reset path
*
* This routine performs all of the steps necessary to initialize the adapter.
*
@@ -1156,9 +1157,10 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
/**
* qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @state: Device state
+ * @conn_err: Unused
*
* This routine processes a Device Database Changed AEN Event.
**/
@@ -1167,7 +1169,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
uint32_t state, uint32_t conn_err)
{
struct ddb_entry *ddb_entry;
- int status = QLA_ERROR;
/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
@@ -1189,7 +1190,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
- return status;
+ return QLA_ERROR;
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 655b7bb644d9..9ced6b325cb3 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 17222eb49762..28eab07935ba 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
@@ -78,7 +77,7 @@ static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
* @ha: Pointer to host adapter structure.
* @ddb_entry: Pointer to device database entry
* @lun: SCSI LUN
- * @marker_type: marker identifier
+ * @mrkr_mod: marker identifier
*
* This routine issues a marker IOCB.
**/
@@ -161,7 +160,7 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
- cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+ cmd_entry->ttlByteCnt = cpu_to_le32(0);
return;
}
@@ -289,7 +288,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
/* Acquire hardware specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- index = (uint32_t)cmd->request->tag;
+ index = scsi_cmd_to_rq(cmd)->tag;
/*
* Check to see if adapter is online before placing request on
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index d2cd33d8d67f..6f0e77dc2a34 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
@@ -183,7 +182,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
cmd->result = DID_OK << 16 | scsi_status;
- if (scsi_status != SCSI_CHECK_CONDITION)
+ if (scsi_status != SAM_STAT_CHECK_CONDITION)
break;
/* Copy Sense Data into sense buffer. */
@@ -582,7 +581,7 @@ exit_prq_error:
/**
* qla4_83xx_loopback_in_progress: Is loopback in progress?
* @ha: Pointer to host adapter structure.
- * @ret: 1 = loopback in progress, 0 = loopback not in progress
+ * returns: 1 = loopback in progress, 0 = loopback not in progress
**/
static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
{
@@ -651,7 +650,7 @@ static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
* @ha: Pointer to host adapter structure.
- * @mailbox_status: Mailbox status.
+ * @mbox_status: Mailbox status.
*
* This routine decodes the mailbox status during the ISR.
* Hardware_lock locked upon entry. Runs in interrupt context.
@@ -1044,6 +1043,7 @@ void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
/**
* qla4_82xx_interrupt_service_routine - isr
* @ha: pointer to host adapter structure.
+ * @intr_status: Local interrupt status/type.
*
* This is the main interrupt service routine.
* hardware_lock locked upon entry. Runs in interrupt context.
@@ -1069,6 +1069,7 @@ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
/**
* qla4xxx_interrupt_service_routine - isr
* @ha: pointer to host adapter structure.
+ * @intr_status: Local interrupt status/type.
*
* This is the main interrupt service routine.
* hardware_lock locked upon entry. Runs in interrupt context.
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 02636b4785c5..cd71074f3abe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/ctype.h>
@@ -45,9 +44,9 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
}
/**
- * qla4xxx_is_intr_poll_mode – Are we allowed to poll for interrupts?
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
* @ha: Pointer to host adapter structure.
- * @ret: 1=polling mode, 0=non-polling mode
+ * returns: 1=polling mode, 0=non-polling mode
**/
static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
{
@@ -646,8 +645,8 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Fill in the request and response queue information. */
init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
- init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH);
init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
@@ -657,20 +656,20 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Set up required options. */
init_fw_cb->fw_options |=
- __constant_cpu_to_le16(FWOPT_SESSION_MODE |
- FWOPT_INITIATOR_MODE);
+ cpu_to_le16(FWOPT_SESSION_MODE |
+ FWOPT_INITIATOR_MODE);
if (is_qla80XX(ha))
init_fw_cb->fw_options |=
- __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+ cpu_to_le16(FWOPT_ENABLE_CRBDB);
- init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+ init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE);
init_fw_cb->add_fw_options = 0;
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+ cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
init_fw_cb->add_fw_options |=
- __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
+ cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
!= QLA_SUCCESS) {
@@ -810,7 +809,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
return QLA_SUCCESS;
}
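The __constant_cpu_to_le16()/__constant_cpu_to_le32() forms dropped throughout these hunks were never needed: the plain cpu_to_leXX() macros already fold to a compile-time constant when their argument is one (and checkpatch warns about the __constant_ spellings in new code). The two lines below generate identical object code; the second is the preferred form:

init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH);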
-/**
+/*
* qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
@@ -934,7 +933,7 @@ int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
}
/**
- * qla4xxx_set_fwddb_entry - sets a ddb entry.
+ * qla4xxx_set_ddb_entry - sets a ddb entry.
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
* @fw_ddb_entry_dma: dma address of ddb entry
@@ -1259,8 +1258,7 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
/**
* qla4xxx_reset_target - issues target Reset
* @ha: Pointer to host adapter structure.
- * @db_entry: Pointer to device database entry
- * @un_entry: Pointer to lun entry structure
+ * @ddb_entry: Pointer to device database entry
*
* This routine performs a TARGET RESET on the specified target.
* The caller must ensure that the ddb_entry pointers
@@ -1615,7 +1613,7 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
- chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
@@ -1657,7 +1655,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
chap_table->secret_len = strlen(password);
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
- chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+ chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
if (is_qla40XX(ha)) {
chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
@@ -1723,7 +1721,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
mutex_lock(&ha->chap_sem);
chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
- if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) {
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
}
@@ -1786,7 +1784,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
free_index = i;
continue;
@@ -2107,18 +2105,18 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
if (conn->max_recv_dlength)
fw_ddb_entry->iscsi_max_rcv_data_seg_len =
- __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
+ cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
if (sess->max_r2t)
fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
if (sess->first_burst)
fw_ddb_entry->iscsi_first_burst_len =
- __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
+ cpu_to_le16((sess->first_burst / BYTE_UNITS));
if (sess->max_burst)
fw_ddb_entry->iscsi_max_burst_len =
- __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
+ cpu_to_le16((sess->max_burst / BYTE_UNITS));
if (sess->time2wait)
fw_ddb_entry->iscsi_def_time2wait =
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 3bf418fbd432..f08a5abcb31a 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index e97d79ff16f7..b96c06f50402 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QL4XNVRM_H_
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 5a31877c9d04..47adff9f0506 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/delay.h>
#include <linux/io.h>
@@ -376,6 +375,35 @@ qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+
+/*
+ * Context: atomic
+ */
+static int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore3 from PCI HW block */
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+ udelay(10);
+ }
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
+ return 0;
+}
+
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
+{
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+}
+
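qla4_82xx_crb_win_lock() moves ahead of its caller, becomes static, and drops the in_interrupt() guesswork (deleted copy below): this lock can be taken with spinlocks held, so it must always busy-wait instead of scheduling. A sketch of the pattern, with hypothetical register helpers:

static int demo_sem_lock_atomic(struct demo_hw *hw)
{
	int timeout = 0;

	/* Read-to-lock hardware semaphore; 1 means we now own it. */
	while (demo_rd32(hw, DEMO_SEM_LOCK) != 1) {
		if (timeout++ >= CRB_WIN_LOCK_TIMEOUT)
			return -1;		/* give up, caller recovers */
		udelay(10);			/* atomic context: no sleeping */
	}
	demo_wr32(hw, DEMO_SEM_OWNER, hw->func_num);	/* record the owner */
	return 0;
}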
void
qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
{
@@ -476,40 +504,6 @@ int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
return rval;
}
-#define CRB_WIN_LOCK_TIMEOUT 100000000
-
-int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
-{
- int i;
- int done = 0, timeout = 0;
-
- while (!done) {
- /* acquire semaphore3 from PCI HW block */
- done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
- if (done == 1)
- break;
- if (timeout >= CRB_WIN_LOCK_TIMEOUT)
- return -1;
-
- timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /*This a nop instr on i386*/
- }
- }
- qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
- return 0;
-}
-
-void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
-{
- qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
-}
-
#define IDC_LOCK_TIMEOUT 100000000
/**
@@ -518,12 +512,15 @@ void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
*
* General purpose lock used to synchronize access to
* CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
+ *
+ * Context: task, can sleep
**/
int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
{
- int i;
int done = 0, timeout = 0;
+ might_sleep();
+
while (!done) {
/* acquire semaphore5 from PCI HW block */
done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
@@ -533,14 +530,7 @@ int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
return -1;
timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /*This a nop instr on i386*/
- }
+ msleep(100);
}
return 0;
}
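qla4_82xx_idc_lock(), by contrast, is only ever called from task context, so instead of probing in_interrupt() it documents that contract with might_sleep() and yields the CPU between polls. The sleeping counterpart of the sketch above (same hypothetical helpers):

static int demo_sem_lock_task(struct demo_hw *hw)
{
	int timeout = 0;

	might_sleep();		/* warns if misused from atomic context */
	while (demo_rd32(hw, DEMO_SEM_LOCK) != 1) {
		if (timeout++ >= IDC_LOCK_TIMEOUT)
			return -1;
		msleep(100);	/* task context: sleep between polls */
	}
	return 0;
}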
@@ -881,15 +871,18 @@ qla4_82xx_decode_crb_addr(unsigned long addr)
static long rom_max_timeout = 100;
static long qla4_82xx_rom_lock_timeout = 100;
+/*
+ * Context: task, can sleep
+ */
static int
qla4_82xx_rom_lock(struct scsi_qla_host *ha)
{
- int i;
int done = 0, timeout = 0;
+ might_sleep();
+
while (!done) {
/* acquire semaphore2 from PCI HW block */
-
done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
@@ -897,14 +890,7 @@ qla4_82xx_rom_lock(struct scsi_qla_host *ha)
return -1;
timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /*This a nop instr on i386*/
- }
+ msleep(20);
}
qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
return 0;
@@ -974,10 +960,10 @@ qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
return ret;
}
-/**
+/*
* This routine does CRB initialize sequence
* to put the ISP into operational state
- **/
+ */
static int
qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
{
@@ -1781,7 +1767,7 @@ qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
{
- int rval = QLA_ERROR;
+ int rval;
/*
* FW Load priority:
@@ -2645,7 +2631,7 @@ static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
uint32_t addr1, addr2, value, data, temp, wrval;
uint8_t stride, stride2;
uint16_t count;
- uint32_t poll, mask, data_size, modify_mask;
+ uint32_t poll, mask, modify_mask;
uint32_t wait_count = 0;
uint32_t *data_ptr = *d_ptr;
struct qla8044_minidump_entry_rddfe *rddfe;
@@ -2661,7 +2647,6 @@ static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
poll = le32_to_cpu(rddfe->poll);
mask = le32_to_cpu(rddfe->mask);
modify_mask = le32_to_cpu(rddfe->modify_mask);
- data_size = le32_to_cpu(rddfe->data_size);
addr2 = addr1 + stride;
@@ -2742,7 +2727,7 @@ static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
uint8_t stride1, stride2;
uint32_t addr3, addr4, addr5, addr6, addr7;
uint16_t count, loop_cnt;
- uint32_t poll, mask;
+ uint32_t mask;
uint32_t *data_ptr = *d_ptr;
struct qla8044_minidump_entry_rdmdio *rdmdio;
@@ -2754,7 +2739,6 @@ static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
stride2 = le32_to_cpu(rdmdio->stride_2);
count = le32_to_cpu(rdmdio->count);
- poll = le32_to_cpu(rdmdio->poll);
mask = le32_to_cpu(rdmdio->mask);
value2 = le32_to_cpu(rdmdio->value_2);
@@ -2813,7 +2797,7 @@ static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
- uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+ uint32_t addr1, addr2, value1, value2, poll, r_value;
struct qla8044_minidump_entry_pollwr *pollwr_hdr;
uint32_t wait_count = 0;
uint32_t rval = QLA_SUCCESS;
@@ -2825,7 +2809,6 @@ static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
value2 = le32_to_cpu(pollwr_hdr->value_2);
poll = le32_to_cpu(pollwr_hdr->poll);
- mask = le32_to_cpu(pollwr_hdr->mask);
while (wait_count < poll) {
ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
@@ -3220,6 +3203,7 @@ md_failed:
/**
* qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
* @ha: pointer to adapter structure
+ * @code: uevent code to act upon
**/
static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
{
@@ -3228,7 +3212,7 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
switch (code) {
case QL4_UEVENT_CODE_FW_DUMP:
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
ha->host_no);
break;
default:
@@ -3650,12 +3634,6 @@ flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
return hw->flash_conf_off | faddr;
}
-static inline uint32_t
-flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
-{
- return hw->flash_data_off | faddr;
-}
-
static uint32_t *
qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
uint32_t faddr, uint32_t length)
@@ -3680,7 +3658,7 @@ qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
"Do ROM fast read failed\n");
goto done_read;
}
- dwptr[i] = __constant_cpu_to_le32(val);
+ dwptr[i] = cpu_to_le32(val);
}
done_read:
@@ -3688,9 +3666,9 @@ done_read:
return dwptr;
}
-/**
+/*
* Address and length are byte address
- **/
+ */
static uint8_t *
qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
uint32_t offset, uint32_t length)
@@ -3743,9 +3721,9 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
goto no_flash_data;
}
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
- if (flt->version != __constant_cpu_to_le16(1)) {
+ if (flt->version != cpu_to_le16(1)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -3848,7 +3826,7 @@ qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -3905,7 +3883,7 @@ qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
QLA82XX_IDC_PARAM_ADDR , 8);
- if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ if (*wptr == cpu_to_le32(0xffffffff)) {
ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
} else {
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 98fe78613eb7..52a5209ae42a 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA_NX_H
#define __QLA_NX_H
@@ -599,23 +598,6 @@ enum qla_regs {
QLA8XXX_CRB_TEMP_STATE,
};
-static const uint32_t qla4_82xx_reg_tbl[] = {
- QLA82XX_PEG_HALT_STATUS1,
- QLA82XX_PEG_HALT_STATUS2,
- QLA82XX_PEG_ALIVE_COUNTER,
- QLA82XX_CRB_DRV_ACTIVE,
- QLA82XX_CRB_DEV_STATE,
- QLA82XX_CRB_DRV_STATE,
- QLA82XX_CRB_DRV_SCRATCH,
- QLA82XX_CRB_DEV_PART_INFO,
- QLA82XX_CRB_DRV_IDC_VERSION,
- QLA82XX_FW_VERSION_MAJOR,
- QLA82XX_FW_VERSION_MINOR,
- QLA82XX_FW_VERSION_SUB,
- CRB_CMDPEG_STATE,
- CRB_TEMP_STATE,
-};
-
/* Every driver should use these Device State */
#define QLA8XXX_DEV_COLD 1
#define QLA8XXX_DEV_INITIALIZING 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5504ab11decc..9e849f6b0d0f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
@@ -188,11 +187,46 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
+static const uint32_t qla4_82xx_reg_tbl[] = {
+ QLA82XX_PEG_HALT_STATUS1,
+ QLA82XX_PEG_HALT_STATUS2,
+ QLA82XX_PEG_ALIVE_COUNTER,
+ QLA82XX_CRB_DRV_ACTIVE,
+ QLA82XX_CRB_DEV_STATE,
+ QLA82XX_CRB_DRV_STATE,
+ QLA82XX_CRB_DRV_SCRATCH,
+ QLA82XX_CRB_DEV_PART_INFO,
+ QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_FW_VERSION_MAJOR,
+ QLA82XX_FW_VERSION_MINOR,
+ QLA82XX_FW_VERSION_SUB,
+ CRB_CMDPEG_STATE,
+ CRB_TEMP_STATE,
+};
+
+static const uint32_t qla4_83xx_reg_tbl[] = {
+ QLA83XX_PEG_HALT_STATUS1,
+ QLA83XX_PEG_HALT_STATUS2,
+ QLA83XX_PEG_ALIVE_COUNTER,
+ QLA83XX_CRB_DRV_ACTIVE,
+ QLA83XX_CRB_DEV_STATE,
+ QLA83XX_CRB_DRV_STATE,
+ QLA83XX_CRB_DRV_SCRATCH,
+ QLA83XX_CRB_DEV_PART_INFO1,
+ QLA83XX_CRB_IDC_VER_MAJOR,
+ QLA83XX_FW_VER_MAJOR,
+ QLA83XX_FW_VER_MINOR,
+ QLA83XX_FW_VER_SUB,
+ QLA83XX_CMDPEG_STATE,
+ QLA83XX_ASIC_TEMP,
+};
+
static struct scsi_host_template qla4xxx_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME,
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
+ .cmd_size = sizeof(struct qla4xxx_cmd_priv),
.eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
@@ -208,7 +242,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
- .shost_attrs = qla4xxx_host_attrs,
+ .shost_groups = qla4xxx_host_groups,
.host_reset = qla4xxx_host_reset,
.vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};
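The template now publishes its sysfs attributes as attribute groups (shost_groups) instead of a bare device_attribute list (shost_attrs), and reserves per-command driver data via cmd_size. The group array is typically produced by the ATTRIBUTE_GROUPS() helper; a sketch with a hypothetical attribute:

static struct attribute *demo_host_attrs[] = {
	&dev_attr_fw_version.attr,	/* hypothetical attribute */
	NULL,
};

ATTRIBUTE_GROUPS(demo_host);		/* emits demo_host_groups[] */

The template then points .shost_groups at demo_host_groups.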
@@ -226,6 +260,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.start_conn = qla4xxx_conn_start,
.create_conn = qla4xxx_conn_create,
.bind_conn = qla4xxx_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
@@ -585,7 +620,7 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
}
/**
- * qla4xxx_create chap_list - Create CHAP list from FLASH
+ * qla4xxx_create_chap_list - Create CHAP list from FLASH
* @ha: pointer to adapter structure
*
* Read flash and make a list of CHAP entries, during login when a CHAP entry
@@ -636,7 +671,6 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
goto exit_chap_list;
}
- memset(ha->chap_list, 0, chap_size);
memcpy(ha->chap_list, chap_flash_data, chap_size);
exit_chap_list:
@@ -652,7 +686,6 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
- rval = QLA_ERROR;
goto exit_get_chap;
}
@@ -664,14 +697,12 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
if (chap_index > max_chap_entries) {
ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
- rval = QLA_ERROR;
goto exit_get_chap;
}
*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
if ((*chap_entry)->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
- rval = QLA_ERROR;
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
*chap_entry = NULL;
} else {
rval = QLA_SUCCESS;
@@ -714,7 +745,7 @@ static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if ((chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+ cpu_to_le16(CHAP_VALID_COOKIE)) &&
(i > MAX_RESRV_CHAP_IDX)) {
free_index = i;
break;
@@ -763,7 +794,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
for (i = chap_tbl_idx; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+ cpu_to_le16(CHAP_VALID_COOKIE))
continue;
chap_rec->chap_tbl_idx = i;
@@ -784,8 +815,6 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
valid_chap_entries++;
if (valid_chap_entries == *num_entries)
break;
- else
- continue;
}
mutex_unlock(&ha->chap_sem);
@@ -811,7 +840,7 @@ static int __qla4xxx_is_chap_active(struct device *dev, void *data)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
- if (iscsi_session_chkready(cls_session))
+ if (iscsi_is_session_online(cls_session))
goto exit_is_chap_active;
if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
@@ -894,7 +923,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
goto exit_delete_chap;
}
- chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+ chap_table->cookie = cpu_to_le16(0xFFFF);
offset = FLASH_CHAP_OFFSET |
(chap_tbl_idx * sizeof(struct ql4_chap_table));
@@ -966,7 +995,7 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
"%s: No such sysfs attribute\n", __func__);
rc = -ENOSYS;
goto exit_set_chap;
- };
+ }
}
if (chap_rec.chap_type == CHAP_TYPE_IN)
@@ -1220,7 +1249,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
if (ql_iscsi_stats)
- dma_free_coherent(&ha->pdev->dev, host_stats_size,
+ dma_free_coherent(&ha->pdev->dev, stats_size,
ql_iscsi_stats, iscsi_stats_dma);
ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
@@ -1845,12 +1874,10 @@ exit_get_stats:
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *session;
- struct iscsi_session *sess;
unsigned long flags;
enum blk_eh_timer_return ret = BLK_EH_DONE;
session = starget_to_session(scsi_target(sc->device));
- sess = session->dd_data;
spin_lock_irqsave(&session->lock, flags);
if (session->state == ISCSI_SESSION_FAILED)
@@ -2875,7 +2902,7 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
chap_tbl.secret_len);
}
}
- /* fall through */
+ fallthrough;
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
@@ -3059,7 +3086,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct ddb_entry *ddb_entry;
uint16_t ddb_index;
struct iscsi_session *sess;
- struct sockaddr *dst_addr;
int ret;
if (!ep) {
@@ -3068,7 +3094,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
}
qla_ep = ep->dd_data;
- dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
ha->host_no));
@@ -3208,6 +3233,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
+ iscsi_put_endpoint(ep);
return 0;
}
@@ -3613,7 +3639,6 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
struct dev_db_entry *fw_ddb_entry)
{
uint16_t options;
- int rc = 0;
options = le16_to_cpu(fw_ddb_entry->options);
SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
@@ -3712,7 +3737,7 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
COPY_ISID(fw_ddb_entry->isid, sess->isid);
- return rc;
+ return 0;
}
static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
@@ -4028,7 +4053,7 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
- CMD_SP(cmd) = (void *)srb;
+ qla4xxx_cmd_priv(cmd)->srb = srb;
return srb;
}
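CMD_SP(cmd) used to stash the srb pointer in the scsi_cmnd itself; with .cmd_size set in the host template, the midlayer allocates driver-private space behind every command, reached through scsi_cmd_priv(). A sketch of the accessor matching these conversions, presumably mirroring what the patch adds in ql4_def.h:

struct qla4xxx_cmd_priv {
	struct srb *srb;
};

static inline struct qla4xxx_cmd_priv *qla4xxx_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);	/* tail of the scsi_cmnd allocation */
}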
@@ -4041,7 +4066,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
- CMD_SP(cmd) = NULL;
+ qla4xxx_cmd_priv(cmd)->srb = NULL;
}
void qla4xxx_srb_compl(struct kref *ref)
@@ -4054,7 +4079,7 @@ void qla4xxx_srb_compl(struct kref *ref)
mempool_free(srb, ha->srb_mempool);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/**
@@ -4128,7 +4153,7 @@ qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -4145,8 +4170,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
- if (ha->fw_dump)
- vfree(ha->fw_dump);
+ vfree(ha->fw_dump);
ha->queues_len = 0;
ha->queues = NULL;
@@ -4166,8 +4190,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_pool_destroy(ha->chap_dma_pool);
- if (ha->chap_list)
- vfree(ha->chap_list);
+ vfree(ha->chap_list);
ha->chap_list = NULL;
dma_pool_destroy(ha->fw_ddb_dma_pool);
@@ -4185,8 +4208,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
iounmap(ha->reg);
}
- if (ha->reset_tmplt.buff)
- vfree(ha->reset_tmplt.buff);
+ vfree(ha->reset_tmplt.buff);
pci_release_regions(ha->pdev);
}
@@ -4515,7 +4537,7 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
/**
* qla4xxx_timer - checks every second for work to do.
- * @ha: Pointer to host adapter structure.
+ * @t: Context to obtain pointer to host adapter structure.
**/
static void qla4xxx_timer(struct timer_list *t)
{
@@ -4617,7 +4639,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
* the scsi/block layer is going to prevent
* the tag from being released.
*/
- if (cmd != NULL && CMD_SP(cmd))
+ if (cmd != NULL && qla4xxx_cmd_priv(cmd)->srb)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5073,7 +5095,7 @@ int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" start scan\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
- scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ queue_work(ddb_entry->sess->workq, &ddb_entry->sess->scan_work);
}
return QLA_SUCCESS;
}
@@ -5269,7 +5291,7 @@ static void qla4xxx_do_work(struct scsi_qla_host *ha)
/**
* qla4xxx_do_dpc - dpc routine
- * @data: in our case pointer to adapter structure
+ * @work: Context to obtain pointer to host adapter structure.
*
* This routine is a task that is scheduled by the interrupt handler
* to perform the background processing for interrupts. We put it
@@ -5492,7 +5514,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
{
int status = 0;
- unsigned long mem_base, mem_len, db_base, db_len;
+ unsigned long mem_base, mem_len;
struct pci_dev *pdev = ha->pdev;
status = pci_request_regions(pdev, DRIVER_NAME);
@@ -5536,9 +5558,6 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
((uint8_t *)ha->nx_pcibase);
}
- db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
- db_len = pci_resource_len(pdev, 4);
-
return 0;
iospace_error_exit:
return -ENOMEM;
@@ -5714,7 +5733,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@@ -5823,7 +5842,7 @@ qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
(char *)&boot_conn->chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
@@ -6023,7 +6042,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
- __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ cpu_to_le16(CHAP_VALID_COOKIE)) {
continue;
}
@@ -6249,14 +6268,12 @@ kset_free:
static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
struct ql4_tuple_ddb *tddb)
{
- struct scsi_qla_host *ha;
struct iscsi_cls_session *cls_sess;
struct iscsi_cls_conn *cls_conn;
struct iscsi_session *sess;
struct iscsi_conn *conn;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
- ha = ddb_entry->ha;
cls_sess = ddb_entry->sess;
sess = cls_sess->dd_data;
cls_conn = ddb_entry->conn;
@@ -6375,10 +6392,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
}
exit_check:
- if (fw_tddb)
- vfree(fw_tddb);
- if (tmp_tddb)
- vfree(tmp_tddb);
+ vfree(fw_tddb);
+ vfree(tmp_tddb);
return ret;
}
@@ -6530,10 +6545,8 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
}
exit_check:
- if (fw_tddb)
- vfree(fw_tddb);
- if (tmp_tddb)
- vfree(tmp_tddb);
+ vfree(fw_tddb);
+ vfree(tmp_tddb);
return ret;
}
@@ -6940,7 +6953,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
if (is_reset == RESET_ADAPTER) {
iscsi_block_session(cls_sess);
/* Use the relogin path to discover new devices
- * by short-circuting the logic of setting
+ * by short-circuiting the logic of setting
* timer to relogin - instead set the flags
* to initiate login right away.
*/
@@ -7813,10 +7826,8 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
ret = -ESRCH;
exit_ddb_logout:
- if (flash_tddb)
- vfree(flash_tddb);
- if (tmp_tddb)
- vfree(tmp_tddb);
+ vfree(flash_tddb);
+ vfree(tmp_tddb);
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
@@ -8591,7 +8602,7 @@ exit_login_resp:
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
- * @pci_device_id: pointer to pci_device entry
+ * @ent: pointer to pci_device entry
*
* This routine will probe for Qlogic 4xxx iSCSI host adapters.
* It returns zero if successful. It also initializes all data necessary for
@@ -8977,7 +8988,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
}
/**
* qla4xxx_remove_adapter - callback function to remove adapter.
- * @pci_dev: PCI device pointer
+ * @pdev: PCI device pointer
**/
static void qla4xxx_remove_adapter(struct pci_dev *pdev)
{
@@ -9067,7 +9078,7 @@ struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
if (!cmd)
return srb;
- srb = (struct srb *)CMD_SP(cmd);
+ srb = qla4xxx_cmd_priv(cmd)->srb;
if (!srb)
return srb;
@@ -9109,7 +9120,7 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
do {
/* Checking to see if its returned to OS */
- rp = (struct srb *) CMD_SP(cmd);
+ rp = qla4xxx_cmd_priv(cmd)->srb;
if (rp == NULL) {
done++;
break;
@@ -9144,8 +9155,8 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
/**
* qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
* @ha: pointer to HBA
- * @t: target id
- * @l: lun id
+ * @stgt: pointer to SCSI target
+ * @sdev: pointer to SCSI device
*
* This function waits for all outstanding commands to a lun to complete. It
* returns 0 if all pending commands are returned and 1 otherwise.
@@ -9203,7 +9214,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- srb = (struct srb *) CMD_SP(cmd);
+ srb = qla4xxx_cmd_priv(cmd)->srb;
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
@@ -9270,7 +9281,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
- cmd, jiffies, cmd->request->timeout / HZ,
+ cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
rval = qla4xxx_isp_check_reg(ha);
@@ -9337,7 +9348,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
- ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+ ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
rval = qla4xxx_isp_check_reg(ha);
@@ -9612,9 +9623,10 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
}
/**
- * qla4xxx_pci_mmio_enabled() gets called if
+ * qla4xxx_pci_mmio_enabled() - gets called if
* qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
* and read/write to the device still works.
+ * @pdev: PCI device pointer
**/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f11eaa773339..fb1c14269f00 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 8f709002f746..8f05e3707d69 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -31,8 +31,12 @@
#include <asm/irq.h>
#include <asm/dma.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"
/* Set the following to 2 to use normal interrupt (active high/totempole-
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 136681ad18a5..3e065d5fc80c 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -4,9 +4,9 @@
Use at your own risk. Support Tort Reform so you won't have to read all
these silly disclaimers.
- Copyright 1994, Tom Zerucha.
+ Copyright 1994, Tom Zerucha.
tz@execpc.com
-
+
Additional Code, and much appreciated help by
Michael A. Griffith
grif@cs.ucr.edu
@@ -22,12 +22,12 @@
Functions as standalone, loadable, and PCMCIA driver, the latter from
Dave Hinds' PCMCIA package.
-
+
Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
SCSI driver cleanup and audit. This driver still needs work on the
following
- - Non terminating hardware waits
- - Some layering violations with its pcmcia stub
+ - Non terminating hardware waits
+ - Some layering violations with its pcmcia stub
Redistributable under terms of the GNU General Public License
@@ -55,8 +55,12 @@
#include <asm/irq.h>
#include <asm/dma.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"
/*----------------------------------------------------------------*/
@@ -92,8 +96,9 @@ static void ql_zap(struct qlogicfas408_priv *priv)
/*
* Do a pseudo-dma transfer
*/
-
-static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
+
+static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request,
+ int reqlen)
{
int j;
int qbase = priv->qbase;
@@ -108,7 +113,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */
- if ((j = inb(qbase + 8)) & 4)
+ if ((j = inb(qbase + 8)) & 4)
{
insl(qbase + 4, request, 21);
reqlen -= 84;
@@ -123,11 +128,11 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
/* until both empty and int (or until reclen is 0) */
rtrc(7)
j = 0;
- while (reqlen && !((j & 0x10) && (j & 0xc0)))
+ while (reqlen && !((j & 0x10) && (j & 0xc0)))
{
/* while bytes to receive and not empty */
j &= 0xc0;
- while (reqlen && !((j = inb(qbase + 8)) & 0x10))
+ while (reqlen && !((j = inb(qbase + 8)) & 0x10))
{
*request++ = inb(qbase + 4);
reqlen--;
@@ -161,7 +166,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
j = 0;
while (reqlen && !((j & 2) && (j & 0xc0))) {
/* while bytes to send and not full */
- while (reqlen && !((j = inb(qbase + 8)) & 2))
+ while (reqlen && !((j = inb(qbase + 8)) & 2))
{
outb(*request++, qbase + 4);
reqlen--;
@@ -175,7 +180,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
}
/*
- * Wait for interrupt flag (polled - not real hardware interrupt)
+ * Wait for interrupt flag (polled - not real hardware interrupt)
*/
static int ql_wai(struct qlogicfas408_priv *priv)
@@ -205,14 +210,14 @@ static int ql_wai(struct qlogicfas408_priv *priv)
}
/*
- * Initiate scsi command - queueing handler
+ * Initiate scsi command - queueing handler
* caller must hold host lock
*/
static void ql_icmd(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
- int qbase = priv->qbase;
+ int qbase = priv->qbase;
int int_type = priv->int_type;
unsigned int i;
@@ -253,14 +258,13 @@ static void ql_icmd(struct scsi_cmnd *cmd)
}
/*
- * Process scsi command - usually after interrupt
+ * Process scsi command - usually after interrupt
*/
-static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
+static void ql_pcmd(struct scsi_cmnd *cmd)
{
unsigned int i, j;
unsigned long k;
- unsigned int result; /* ultimate return result */
unsigned int status; /* scsi returned status */
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
@@ -274,13 +278,15 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
j = inb(qbase + 6);
i = inb(qbase + 5);
if (i == 0x20) {
- return (DID_NO_CONNECT << 16);
+ set_host_byte(cmd, DID_NO_CONNECT);
+ return;
}
i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
if (i != 0x18) {
printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
ql_zap(priv);
- return (DID_BAD_INTR << 16);
+ set_host_byte(cmd, DID_BAD_INTR);
+ return;
}
j &= 7; /* j = inb( qbase + 7 ) >> 5; */
@@ -293,9 +299,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
j, i, inb(qbase + 7) & 0x1f);
ql_zap(priv);
- return (DID_ERROR << 16);
+ set_host_byte(cmd, DID_ERROR);
+ return;
}
- result = DID_OK;
+
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
@@ -314,28 +321,31 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
if (priv->qabort) {
REG0;
- return ((priv->qabort == 1 ?
- DID_ABORT : DID_RESET) << 16);
+ set_host_byte(cmd,
+ priv->qabort == 1 ?
+ DID_ABORT : DID_RESET);
}
buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break;
}
REG0;
- rtrc(2)
+ rtrc(2);
/*
* Wait for irq (split into second state of irq handler
- * if this can take time)
+ * if this can take time)
*/
- if ((k = ql_wai(priv)))
- return (k << 16);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
k = inb(qbase + 5); /* should be 0x10, bus service */
}
/*
- * Enter Status (and Message In) Phase
+ * Enter Status (and Message In) Phase
*/
-
+
k = jiffies + WATCHDOG;
while (time_before(jiffies, k) && !priv->qabort &&
@@ -344,57 +354,72 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
if (time_after_eq(jiffies, k)) {
ql_zap(priv);
- return (DID_TIME_OUT << 16);
+ set_host_byte(cmd, DID_TIME_OUT);
+ return;
}
/* FIXME: timeout ?? */
while (inb(qbase + 5))
cpu_relax(); /* clear pending ints */
- if (priv->qabort)
- return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ if (priv->qabort) {
+ set_host_byte(cmd,
+ priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ return;
+ }
outb(0x11, qbase + 3); /* get status and message */
- if ((k = ql_wai(priv)))
- return (k << 16);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
i = inb(qbase + 5); /* get chip irq stat */
j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
status = inb(qbase + 2);
message = inb(qbase + 2);
/*
- * Should get function complete int if Status and message, else
- * bus serv if only status
+ * Should get function complete int if Status and message, else
+ * bus serv if only status
*/
if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
- result = DID_ERROR;
+ set_host_byte(cmd, DID_ERROR);
}
outb(0x12, qbase + 3); /* done, disconnect */
- rtrc(1)
- if ((k = ql_wai(priv)))
- return (k << 16);
+ rtrc(1);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
/*
- * Should get bus service interrupt and disconnect interrupt
+ * Should get bus service interrupt and disconnect interrupt
*/
-
+
i = inb(qbase + 5); /* should be bus service */
while (!priv->qabort && ((i & 0x20) != 0x20)) {
barrier();
cpu_relax();
i |= inb(qbase + 5);
}
- rtrc(0)
+ rtrc(0);
- if (priv->qabort)
- return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
-
- return (result << 16) | (message << 8) | (status & STATUS_MASK);
+ if (priv->qabort) {
+ set_host_byte(cmd,
+ priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ return;
+ }
+
+ set_host_byte(cmd, DID_OK);
+ if (message != COMMAND_COMPLETE)
+ scsi_msg_to_host_byte(cmd, message);
+ set_status_byte(cmd, status);
+ return;
}
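ql_pcmd() used to encode its outcome as (host << 16) | (message << 8) | status for the caller to assign to cmd->result; it now writes each byte through the scsi_cmnd accessors and returns void. A minimal sketch of how those accessors compose scsi_cmnd::result the same way the old shifts did:

static void demo_set_result(struct scsi_cmnd *cmd, u8 status)
{
	set_host_byte(cmd, DID_OK);	/* was: cmd->result = DID_OK << 16 */
	set_status_byte(cmd, status);	/* was: cmd->result |= status */
}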
/*
- * Interrupt handler
+ * Interrupt handler
*/
static void ql_ihandl(void *dev_id)
@@ -415,13 +440,13 @@ static void ql_ihandl(void *dev_id)
return;
}
icmd = priv->qlcmd;
- icmd->result = ql_pcmd(icmd);
+ ql_pcmd(icmd);
priv->qlcmd = NULL;
/*
- * If result is CHECK CONDITION done calls qcommand to request
- * sense
+ * If result is CHECK CONDITION done calls qcommand to request
+ * sense
*/
- (icmd->scsi_done) (icmd);
+ scsi_done(icmd);
}
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
@@ -439,17 +464,19 @@ irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
* Queued command
*/
-static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done) (struct scsi_cmnd *))
+static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
if (scmd_id(cmd) == priv->qinitid) {
- cmd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmd, DID_BAD_TARGET);
done(cmd);
return 0;
}
- cmd->scsi_done = done;
/* wait for the last command's interrupt to finish */
while (priv->qlcmd != NULL) {
barrier();
@@ -461,8 +488,8 @@ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
DEF_SCSI_QCMD(qlogicfas408_queuecommand)
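With the queuecommand prototype change, the locked variant receives only the command; the completion callback is no longer carried in the scsi_cmnd, and every driver completes through scsi_done(). The shape of the convention, in miniature:

static int demo_queuecommand_lck(struct scsi_cmnd *cmd)
{
	/* ... start the command on the hardware ... */
	scsi_done(cmd);		/* completed immediately in this sketch */
	return 0;
}

DEF_SCSI_QCMD(demo_queuecommand)	/* generates the host-lock wrapper */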
-/*
- * Return bios parameters
+/*
+ * Return bios parameters
*/
int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
@@ -487,7 +514,7 @@ int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
/*
* Abort a command in progress
*/
-
+
int qlogicfas408_abort(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
@@ -566,9 +593,9 @@ void qlogicfas408_setup(int qbase, int id, int int_type)
int qlogicfas408_detect(int qbase, int int_type)
{
- REG1;
+ REG1;
return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
- ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
+ ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
}
/*
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index d539beef3ce8..8c961ff03fcd 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -30,6 +30,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/firmware.h>
+#include <linux/pgtable.h>
#include <asm/byteorder.h>
@@ -37,7 +38,6 @@
#include <asm/dma.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -200,15 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
/* Write mailbox command registers. */
switch (mbox_param[param[0]] >> 4) {
case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
- /* Fall through */
+ fallthrough;
case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
- /* Fall through */
+ fallthrough;
case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
- /* Fall through */
+ fallthrough;
case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
- /* Fall through */
+ fallthrough;
case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
- /* Fall through */
+ fallthrough;
case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
}
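The fallthrough pseudo-keyword expands to __attribute__((__fallthrough__)) on compilers that support it, so a deliberate case cascade is verified by -Wimplicit-fallthrough instead of living in a comment the compiler ignores. In miniature:

static void demo_cascade(int n)
{
	switch (n) {
	case 2:
		pr_info("two\n");
		fallthrough;	/* deliberate: also do the case 1 work */
	case 1:
		pr_info("one\n");
		break;
	}
}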
@@ -259,15 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
/* Read back output parameters. */
switch (mbox_param[param[0]] & 0xf) {
case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
- /* Fall through */
+ fallthrough;
case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
- /* Fall through */
+ fallthrough;
case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
- /* Fall through */
+ fallthrough;
case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
- /* Fall through */
+ fallthrough;
case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
- /* Fall through */
+ fallthrough;
case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
}
@@ -890,7 +890,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
cmd->control_flags |= CFLAG_WRITE;
else
cmd->control_flags |= CFLAG_READ;
- cmd->time_out = Cmnd->request->timeout/HZ;
+ cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ;
memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
@@ -909,7 +909,8 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
sg_count = dma_map_sg(&qpti->op->dev, sg,
scsi_sg_count(Cmnd),
Cmnd->sc_data_direction);
-
+ if (!sg_count)
+ return -1;
ds = cmd->dataseg;
cmd->segment_cnt = sg_count;
@@ -1013,16 +1014,15 @@ static int qlogicpti_slave_configure(struct scsi_device *sdev)
*
* "This code must fly." -davem
*/
-static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
+static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct Scsi_Host *host = Cmnd->device->host;
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
struct Command_Entry *cmd;
u_int out_ptr;
int in_ptr;
- Cmnd->scsi_done = done;
-
in_ptr = qpti->req_in_ptr;
cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
out_ptr = sbus_readw(qpti->qregs + MBOX4);
@@ -1214,7 +1214,7 @@ static irqreturn_t qpti_intr(int irq, void *dev_id)
struct scsi_cmnd *next;
next = (struct scsi_cmnd *) dq->host_scribble;
- dq->scsi_done(dq);
+ scsi_done(dq);
dq = next;
} while (dq != NULL);
}
@@ -1468,22 +1468,10 @@ static struct platform_driver qpti_sbus_driver = {
.probe = qpti_sbus_probe,
.remove = qpti_sbus_remove,
};
-
-static int __init qpti_init(void)
-{
- return platform_driver_register(&qpti_sbus_driver);
-}
-
-static void __exit qpti_exit(void)
-{
- platform_driver_unregister(&qpti_sbus_driver);
-}
+module_platform_driver(qpti_sbus_driver);
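module_platform_driver() generates the registration boilerplate it replaces; the expansion is approximately:

static int __init qpti_sbus_driver_init(void)
{
	return platform_driver_register(&qpti_sbus_driver);
}
module_init(qpti_sbus_driver_init);

static void __exit qpti_sbus_driver_exit(void)
{
	platform_driver_unregister(&qpti_sbus_driver);
}
module_exit(qpti_sbus_driver_exit);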
MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");
-
-module_init(qpti_init);
-module_exit(qpti_exit);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 930e4803d888..c59eac7a32f2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,7 +55,6 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -86,28 +85,6 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-/*
- * Domain for asynchronous system resume operations. It is marked 'exclusive'
- * to avoid being included in the async_synchronize_full() that is invoked by
- * dpm_resume().
- */
-ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
-EXPORT_SYMBOL(scsi_sd_pm_domain);
-
-/**
- * scsi_put_command - Free a scsi command block
- * @cmd: command block to free
- *
- * Returns: Nothing.
- *
- * Notes: The command must not belong to any lists.
- */
-void scsi_put_command(struct scsi_cmnd *cmd)
-{
- scsi_del_cmd_from_list(cmd);
- BUG_ON(delayed_work_pending(&cmd->abort_work));
-}
-
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
@@ -158,7 +135,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
(level > 1)) {
scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
- if (status_byte(cmd->result) == CHECK_CONDITION)
+ if (scsi_status_is_check_condition(cmd->result))
scsi_print_sense(cmd);
if (level > 3)
scmd_printk(KERN_INFO, cmd,
@@ -199,19 +176,12 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
if (atomic_read(&sdev->device_blocked))
atomic_set(&sdev->device_blocked, 0);
- /*
- * If we have valid sense information, then some kind of recovery
- * must have taken place. Make a note of this.
- */
- if (SCSI_SENSE_VALID(cmd))
- cmd->result |= (DRIVER_SENSE << 24);
-
SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
"Notifying upper driver of completion "
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd);
- if (!blk_rq_is_passthrough(cmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
@@ -228,6 +198,15 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
scsi_io_completion(cmd, good_bytes);
}
+
+/*
+ * 4096 is big enough for saturating fast SCSI LUNs.
+ */
+int scsi_device_max_queue_depth(struct scsi_device *sdev)
+{
+ return min_t(int, sdev->host->can_queue, 4096);
+}
+
/**
* scsi_change_queue_depth - change a device's queue depth
* @sdev: SCSI Device in question
@@ -237,6 +216,8 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
*/
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
+ depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
+
if (depth > 0) {
sdev->queue_depth = depth;
wmb();
@@ -245,6 +226,8 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
if (sdev->request_queue)
blk_set_queue_depth(sdev->request_queue, depth);
+ sbitmap_resize(&sdev->budget_map, sdev->queue_depth);
+
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
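Requested depths are now clamped to min(shost->can_queue, 4096) and mirrored into the per-device budget bitmap, so a low-level driver's ->change_queue_depth callback can stay a plain forwarder. A hedged sketch:

static int demo_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	/* Midlayer clamps qdepth and resizes sdev->budget_map. */
	return scsi_change_queue_depth(sdev, qdepth);
}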
@@ -338,6 +321,31 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return get_unaligned_be16(&buffer[2]) + 4;
}
+static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+{
+ unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ int result;
+
+ /*
+ * Fetch the VPD page header to find out how big the page
+ * is. This is done to prevent problems on legacy devices
+ * which can not handle allocation lengths as large as
+ * potentially requested by the caller.
+ */
+ result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ if (result < 0)
+ return 0;
+
+ if (result < SCSI_VPD_HEADER_SIZE) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: short VPD page 0x%02x length: %d bytes\n",
+ __func__, page, result);
+ return 0;
+ }
+
+ return result;
+}
+
/**
* scsi_get_vpd_page - Get Vital Product Data from a SCSI device
* @sdev: The device to ask
@@ -347,47 +355,38 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
*
* SCSI devices may optionally supply Vital Product Data. Each 'page'
* of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
- * If the device supports this VPD page, this routine returns a pointer
- * to a buffer containing the data from that page. The caller is
- * responsible for calling kfree() on this pointer when it is no longer
- * needed. If we cannot retrieve the VPD page this routine returns %NULL.
+ * If the device supports this VPD page, this routine fills @buf
+ * with the data from that page and returns 0. If the VPD page is not
+ * supported or its content cannot be retrieved, -EINVAL is returned.
*/
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
int buf_len)
{
- int i, result;
-
- if (sdev->skip_vpd_pages)
- goto fail;
-
- /* Ask for all the pages supported by this device */
- result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
- if (result < 4)
- goto fail;
+ int result, vpd_len;
- /* If the user actually wanted this page, we can skip the rest */
- if (page == 0)
- return 0;
+ if (!scsi_device_supports_vpd(sdev))
+ return -EINVAL;
- for (i = 4; i < min(result, buf_len); i++)
- if (buf[i] == page)
- goto found;
+ vpd_len = scsi_get_vpd_size(sdev, page);
+ if (vpd_len <= 0)
+ return -EINVAL;
- if (i < result && i >= buf_len)
- /* ran off the end of the buffer, give us benefit of doubt */
- goto found;
- /* The device claims it doesn't support the requested page */
- goto fail;
+ vpd_len = min(vpd_len, buf_len);
- found:
- result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
+ /*
+ * Fetch the actual page. Since the appropriate size was reported
+ * by the device it is now safe to ask for something bigger.
+ */
+ memset(buf, 0, buf_len);
+ result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
if (result < 0)
- goto fail;
+ return -EINVAL;
+ else if (result > vpd_len)
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: VPD page 0x%02x result %d > %d bytes\n",
+ __func__, page, result, vpd_len);
return 0;
-
- fail:
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
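Callers see the new contract as: 0 means @buf holds the page, -EINVAL means the page is unsupported or unreadable, and a short buffer simply truncates the page. A hedged usage sketch:

static void demo_read_block_limits(struct scsi_device *sdev)
{
	unsigned char buf[64];

	if (scsi_get_vpd_page(sdev, 0xb0, buf, sizeof(buf)))
		return;		/* page unsupported or not retrievable */

	/* ... parse the Block Limits VPD page in buf[] ... */
}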
@@ -401,9 +400,17 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
struct scsi_vpd *vpd_buf;
- int vpd_len = SCSI_VPD_PG_LEN, result;
+ int vpd_len, result;
+
+ vpd_len = scsi_get_vpd_size(sdev, page);
+ if (vpd_len <= 0)
+ return NULL;
retry_pg:
+ /*
+ * Fetch the actual page. Since the appropriate size was reported
+ * by the device it is now safe to ask for something bigger.
+ */
vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
if (!vpd_buf)
return NULL;
@@ -414,6 +421,9 @@ retry_pg:
return NULL;
}
if (result > vpd_len) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: VPD page 0x%02x result %d > %d bytes\n",
+ __func__, page, result, vpd_len);
vpd_len = result;
kfree(vpd_buf);
goto retry_pg;
@@ -473,6 +483,12 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
if (vpd_buf->data[i] == 0x89)
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
+ if (vpd_buf->data[i] == 0xb0)
+ scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
+ if (vpd_buf->data[i] == 0xb1)
+ scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
+ if (vpd_buf->data[i] == 0xb2)
+ scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
}
kfree(vpd_buf);
}
@@ -493,22 +509,33 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
- int result;
+ int result, request_len;
if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
return -EINVAL;
+ /* RSOC header + size of command we are asking about */
+ request_len = 4 + COMMAND_SIZE(opcode);
+ if (request_len > len) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: len %u bytes, opcode 0x%02x needs %u\n",
+ __func__, len, opcode, request_len);
+ return -EINVAL;
+ }
+
memset(cmd, 0, 16);
cmd[0] = MAINTENANCE_IN;
cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
cmd[2] = 1; /* One command format */
cmd[3] = opcode;
- put_unaligned_be32(len, &cmd[6]);
+ put_unaligned_be32(request_len, &cmd[6]);
memset(buffer, 0, len);
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
- &sshdr, 30 * HZ, 3, NULL);
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
+ request_len, &sshdr, 30 * HZ, 3, NULL);
+ if (result < 0)
+ return result;
if (result && scsi_sense_valid(&sshdr) &&
sshdr.sense_key == ILLEGAL_REQUEST &&
(sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
@@ -559,8 +586,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- module_put(sdev->host->hostt->module);
+ struct module *mod = sdev->host->hostt->module;
+
put_device(&sdev->sdev_gendev);
+ module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
@@ -764,17 +793,10 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
-/* This should go away in the future, it doesn't do anything anymore */
-bool scsi_use_blk_mq = true;
-module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-
static int __init init_scsi(void)
{
int error;
- error = scsi_init_queue();
- if (error)
- return error;
error = scsi_init_procfs();
if (error)
goto cleanup_queue;
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
deleted file mode 100644
index 4fd75a3aff66..000000000000
--- a/drivers/scsi/scsi.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * scsi.h Copyright (C) 1992 Drew Eckhardt
- * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
- * generic SCSI package header file by
- * Initial versions: Drew Eckhardt
- * Subsequent revisions: Eric Youngdale
- *
- * <drew@colorado.edu>
- *
- * Modified by Eric Youngdale eric@andante.org to
- * add scatter-gather, multiple outstanding request, and other
- * enhancements.
- */
-/*
- * NOTE: this file only contains compatibility glue for old drivers. All
- * these wrappers will be removed sooner or later. For new code please use
- * the interfaces declared in the headers in include/scsi/
- */
-
-#ifndef _SCSI_H
-#define _SCSI_H
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi.h>
-
-/*
- * Some defs, in case these are not defined elsewhere.
- */
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-struct Scsi_Host;
-struct scsi_cmnd;
-struct scsi_device;
-struct scsi_target;
-struct scatterlist;
-
-#endif /* _SCSI_H */
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
new file mode 100644
index 000000000000..96ee35256a16
--- /dev/null
+++ b/drivers/scsi/scsi_bsg.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bsg.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/sg.h>
+#include "scsi_priv.h"
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout)
+{
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
+
+ if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+ hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+ return -EINVAL;
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
+ rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ rq->timeout = timeout;
+
+ scmd = blk_mq_rq_to_pdu(rq);
+ scmd->cmd_len = hdr->request_len;
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_put_request;
+ }
+
+ ret = -EFAULT;
+ if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len))
+ goto out_put_request;
+ ret = -EPERM;
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ goto out_put_request;
+
+ ret = 0;
+ if (hdr->dout_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+ hdr->dout_xfer_len, GFP_KERNEL);
+ } else if (hdr->din_xfer_len) {
+ ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+ hdr->din_xfer_len, GFP_KERNEL);
+ }
+
+ if (ret)
+ goto out_put_request;
+
+ bio = rq->bio;
+ blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+
+ /*
+ * fill in all the output members
+ */
+ hdr->device_status = scmd->result & 0xff;
+ hdr->transport_status = host_byte(scmd->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(scmd->result))
+ hdr->driver_status = DRIVER_SENSE;
+ hdr->info = 0;
+ if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->response_len = 0;
+
+ if (scmd->sense_len && hdr->response) {
+ int len = min_t(unsigned int, hdr->max_response_len,
+ scmd->sense_len);
+
+ if (copy_to_user(uptr64(hdr->response), scmd->sense_buffer,
+ len))
+ ret = -EFAULT;
+ else
+ hdr->response_len = len;
+ }
+
+ if (rq_data_dir(rq) == READ)
+ hdr->din_resid = scmd->resid_len;
+ else
+ hdr->dout_resid = scmd->resid_len;
+
+ blk_rq_unmap_user(bio);
+
+out_put_request:
+ blk_mq_free_request(rq);
+ return ret;
+}
+
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
+{
+ return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
+ dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn);
+}
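
For reference, a minimal userspace sketch (not part of this patch) of how the
handler above is reached: bsg nodes accept the SG_IO ioctl with a v4 header
(guard 'Q'). The device path is an example only and error handling is elided.

/* build: cc -o bsg_inq bsg_inq.c */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char buf[96], sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDONLY);	/* example path */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';				/* v4 header magic */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (uintptr_t)cdb;
	hdr.din_xfer_len = sizeof(buf);		/* data-in -> REQ_OP_DRV_IN */
	hdr.din_xferp = (uintptr_t)buf;
	hdr.max_response_len = sizeof(sense);
	hdr.response = (uintptr_t)sense;
	hdr.timeout = 60000;				/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("device_status=%u din_resid=%d\n",
		       hdr.device_status, hdr.din_resid);
	close(fd);
	return 0;
}
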
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 90349498f686..6e50e81a8216 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -7,9 +7,18 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
+MODULE_LICENSE("GPL v2");
+
+/* Command group 3 is reserved and should never be used. */
+const unsigned char scsi_command_size_tbl[8] = {
+ 6, 10, 10, 12, 16, 12, 10, 10
+};
+EXPORT_SYMBOL(scsi_command_size_tbl);
+
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
* encouraged once assigned by ANSI/INCITS T10).
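
The table is indexed by the three-bit group code in the opcode's top bits,
mirroring the kernel's COMMAND_SIZE() convention; a standalone sketch:

#include <stdio.h>

static const unsigned char scsi_command_size_tbl[8] = {
	6, 10, 10, 12, 16, 12, 10, 10
};

int main(void)
{
	unsigned char opcode = 0x28;		/* READ(10) */
	unsigned int group = (opcode >> 5) & 7;

	/* prints: opcode 0x28: group 1, cdb length 10 */
	printf("opcode 0x%02x: group %u, cdb length %u\n",
	       opcode, group, scsi_command_size_tbl[group]);
	return 0;
}
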
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 44cb054d5e66..629853662b82 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7,23 +7,22 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2018 Douglas Gilbert
+ * Copyright (C) 2001 - 2021 Douglas Gilbert
*
- * For documentation see http://sg.danny.cz/sg/sdebug26.html
+ * For documentation see http://sg.danny.cz/sg/scsi_debug.html
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
-
+#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
@@ -38,6 +37,10 @@
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
+#include <linux/msdos_partition.h>
+#include <linux/random.h>
+#include <linux/xarray.h>
+#include <linux/prefetch.h>
#include <net/checksum.h>
@@ -56,8 +59,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20190125";
+#define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20210520";
#define MY_NAME "scsi_debug"
@@ -79,6 +82,7 @@ static const char *sdebug_version_date = "20190125";
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
+#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
@@ -90,6 +94,12 @@ static const char *sdebug_version_date = "20190125";
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
+#define UNALIGNED_WRITE_ASCQ 0x4
+#define WRITE_BOUNDARY_ASCQ 0x5
+#define READ_INVDATA_ASCQ 0x6
+#define READ_BOUNDARY_ASCQ 0x7
+#define ATTEMPT_ACCESS_GAP 0x9
+#define INSUFF_ZONE_ASCQ 0xe
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -104,9 +114,12 @@ static const char *sdebug_version_date = "20190125";
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
+#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
+#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
+#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
@@ -125,6 +138,7 @@ static const char *sdebug_version_date = "20190125";
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
+#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
@@ -138,9 +152,15 @@ static const char *sdebug_version_date = "20190125";
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
+#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
+/* Default parameters for ZBC drives */
+#define DEF_ZBC_ZONE_SIZE_MB 128
+#define DEF_ZBC_MAX_OPEN_ZONES 8
+#define DEF_ZBC_NR_CONV_ZONES 1
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -154,7 +174,7 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
-#define SDEBUG_OPT_ALL_TSF 0x400
+#define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
@@ -169,44 +189,28 @@ static const char *sdebug_version_date = "20190125";
SDEBUG_OPT_SHORT_TRANSFER | \
SDEBUG_OPT_HOST_BUSY | \
SDEBUG_OPT_CMD_ABORT)
-/* When "every_nth" > 0 then modulo "every_nth" commands:
- * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
- * - a RECOVERED_ERROR is simulated on successful read and write
- * commands if SDEBUG_OPT_RECOVERED_ERR is set.
- * - a TRANSPORT_ERROR is simulated on successful read and write
- * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
- * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
- * CMD_ABORT
- *
- * When "every_nth" < 0 then after "- every_nth" commands the selected
- * error will be injected. The error will be injected on every subsequent
- * command until some other action occurs; for example, the user writing
- * a new value (other than -1 or 1) to every_nth:
- * echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
- */
+#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
+ SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
* priority order. In the subset implemented here lower numbers have higher
* priority. The UA numbers should be a sequence starting from 0 with
* SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
-#define SDEBUG_UA_BUS_RESET 1
-#define SDEBUG_UA_MODE_CHANGED 2
-#define SDEBUG_UA_CAPACITY_CHANGED 3
-#define SDEBUG_UA_LUNS_CHANGED 4
-#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
-#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
-#define SDEBUG_NUM_UAS 7
+#define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
+#define SDEBUG_UA_BUS_RESET 2
+#define SDEBUG_UA_MODE_CHANGED 3
+#define SDEBUG_UA_CAPACITY_CHANGED 4
+#define SDEBUG_UA_LUNS_CHANGED 5
+#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
+#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
+#define SDEBUG_NUM_UAS 8
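
A standalone model (simplified, names illustrative) of how this priority
ordering plays out: pending UAs sit in a per-device bitmap and the lowest set
bit -- the highest-priority UA -- is reported and cleared first.

#include <stdio.h>

#define MODEL_NUM_UAS 8

static unsigned long uas_bm;	/* bit n set => UA number n is pending */

static int next_ua(void)
{
	for (int k = 0; k < MODEL_NUM_UAS; k++) {
		if (uas_bm & (1UL << k)) {
			uas_bm &= ~(1UL << k);	/* delivered only once */
			return k;
		}
	}
	return -1;				/* no UA pending */
}

int main(void)
{
	uas_bm = (1UL << 5) | (1UL << 1);	/* LUNS_CHANGED + POOCCUR */
	printf("%d %d %d\n", next_ua(), next_ua(), next_ua()); /* 1 5 -1 */
	return 0;
}
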
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
-/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
- * or "peripheral device" addressing (value 0) */
-#define SAM2_LUN_ADDRESS_METHOD 0
-
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
* (for response) per submit queue at one time. Can be reduced by max_queue
* option. Command responses are not queued when jdelay=0 and ndelay=0. The
@@ -216,23 +220,25 @@ static const char *sdebug_version_date = "20190125";
*/
#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
-#define DEF_CMD_PER_LUN 255
+#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
-#define F_D_IN 1
-#define F_D_OUT 2
+/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
+#define F_D_IN 1 /* Data-in command (e.g. READ) */
+#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
-#define F_RL_WLUN_OK 0x10
-#define F_SKIP_UA 0x20
-#define F_DELAY_OVERR 0x40
-#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
-#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
-#define F_INV_OP 0x200
-#define F_FAKE_RW 0x400
-#define F_M_ACCESS 0x800 /* media access */
-#define F_SSU_DELAY 0x1000
-#define F_SYNC_DELAY 0x2000
-
+#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
+#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
+#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
+#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
+#define F_INV_OP 0x200 /* invalid opcode (not supported) */
+#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
+#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
+#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
+#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
+
+/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
@@ -242,6 +248,37 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_MAX_CMD_LEN 32
+#define SDEB_XA_NOT_IN_USE XA_MARK_1
+
+/* Zone types (zbcr05 table 25) */
+enum sdebug_z_type {
+ ZBC_ZTYPE_CNV = 0x1,
+ ZBC_ZTYPE_SWR = 0x2,
+ ZBC_ZTYPE_SWP = 0x3,
+ /* ZBC_ZTYPE_SOBR = 0x4, */
+ ZBC_ZTYPE_GAP = 0x5,
+};
+
+/* enumeration names taken from table 26, zbcr05 */
+enum sdebug_z_cond {
+ ZBC_NOT_WRITE_POINTER = 0x0,
+ ZC1_EMPTY = 0x1,
+ ZC2_IMPLICIT_OPEN = 0x2,
+ ZC3_EXPLICIT_OPEN = 0x3,
+ ZC4_CLOSED = 0x4,
+ ZC6_READ_ONLY = 0xd,
+ ZC5_FULL = 0xe,
+ ZC7_OFFLINE = 0xf,
+};
+
+struct sdeb_zone_state { /* ZBC: per zone state */
+ enum sdebug_z_type z_type;
+ enum sdebug_z_cond z_cond;
+ bool z_non_seq_resource;
+ unsigned int z_size;
+ sector_t z_start;
+ sector_t z_wp;
+};
struct sdebug_dev_info {
struct list_head dev_list;
@@ -252,31 +289,58 @@ struct sdebug_dev_info {
struct sdebug_host_info *sdbg_host;
unsigned long uas_bm[1];
atomic_t num_in_q;
- atomic_t stopped;
+ atomic_t stopped; /* 1: by SSU, 2: device start */
bool used;
+
+ /* For ZBC devices */
+ enum blk_zoned_model zmodel;
+ unsigned int zcap;
+ unsigned int zsize;
+ unsigned int zsize_shift;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int nr_seq_zones;
+ unsigned int nr_imp_open;
+ unsigned int nr_exp_open;
+ unsigned int nr_closed;
+ unsigned int max_open;
+ ktime_t create_ts; /* time since bootup that this device was created */
+ struct sdeb_zone_state *zstate;
};
struct sdebug_host_info {
struct list_head host_list;
+ int si_idx; /* sdeb_store_info (per host) xarray index */
struct Scsi_Host *shost;
struct device dev;
struct list_head dev_info_list;
};
+/* There is an xarray of pointers to this struct's objects, one per host */
+struct sdeb_store_info {
+ rwlock_t macc_lck; /* for atomic media access on this store */
+ u8 *storep; /* user data storage (ram) */
+ struct t10_pi_tuple *dif_storep; /* protection info */
+ void *map_storep; /* provisioning map */
+};
+
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
- SDEB_DEFER_WQ = 2};
+ SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
+	ktime_t cmpl_ts; /* time since boot to complete this cmd */
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
+ int hc_idx; /* hostwide tag index */
int issuing_cpu;
bool init_hrt;
bool init_wq;
+ bool init_poll;
bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -287,13 +351,6 @@ struct sdebug_queued_cmd {
*/
struct sdebug_defer *sd_dp;
struct scsi_cmnd *a_cmnd;
- unsigned int inj_recovered:1;
- unsigned int inj_transport:1;
- unsigned int inj_dif:1;
- unsigned int inj_dix:1;
- unsigned int inj_short:1;
- unsigned int inj_host_busy:1;
- unsigned int inj_cmd_abort:1;
};
struct sdebug_queue {
@@ -307,6 +364,8 @@ static atomic_t sdebug_cmnd_count; /* number of incoming commands */
static atomic_t sdebug_completions; /* count of deferred completions */
static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
+static atomic_t sdeb_inject_pending;
+static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
struct opcode_info_t {
u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
@@ -338,7 +397,7 @@ enum sdeb_opcode_index {
SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
- SDEB_I_VERIFY = 16, /* 10 only */
+ SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
@@ -351,7 +410,10 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 26, /* 10, 16 */
SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
SDEB_I_COMP_WRITE = 28,
- SDEB_I_LAST_ELEMENT = 29, /* keep this last (previous + 1) */
+ SDEB_I_PRE_FETCH = 29, /* 10, 16 */
+ SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
+ SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
+ SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
};
@@ -367,7 +429,7 @@ static const unsigned char opcode_ind_arr[256] = {
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
- 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
+ 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
@@ -381,8 +443,10 @@ static const unsigned char opcode_ind_arr[256] = {
0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
- SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
+ SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
+ 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
+ SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
@@ -423,11 +487,25 @@ static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+
+static int sdebug_do_add_host(bool mk_new_store);
+static int sdebug_add_host_helper(int per_host_idx);
+static void sdebug_do_remove_host(bool the_end);
+static int sdebug_add_store(void);
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
+static void sdebug_erase_all_stores(bool apart_from_first);
/*
* The following are overflow arrays for cdbs that "hit" the same index in
@@ -467,6 +545,12 @@ static const struct opcode_info_t write_iarr[] = {
0xbf, 0xc7, 0, 0, 0, 0} },
};
+static const struct opcode_info_t verify_iarr[] = {
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
+ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+};
+
static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -513,11 +597,35 @@ static const struct opcode_info_t sync_cache_iarr[] = {
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
+static const struct opcode_info_t pre_fetch_iarr[] = {
+ {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
+};
+
+static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
+ {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
+ {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
+ {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
+ {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
+ {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
+};
+
+static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
+ {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
+ {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
+};
+
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
* plus the terminating elements for logic that scans this table such as
* REPORT SUPPORTED OPERATION CODES. */
-static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
+static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -567,9 +675,10 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(verify_iarr), 0x8f, 0,
+ F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
+ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
{32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
@@ -608,23 +717,38 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
-
-/* 29 */
+ {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
+ resp_pre_fetch, pre_fetch_iarr,
+ {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* PRE-FETCH (10) */
+
+/* 30 */
+ {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
+	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
+ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
+ {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
+	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
+ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
+/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
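
A simplified model (not verbatim driver code) of the dispatch these tables
feed: the opcode byte indexes opcode_ind_arr[] to pick a slot in
opcode_info_arr[], and when several commands share a slot the leading entry's
overflow array is scanned by service action (F_SA_LOW means the SA is in cdb
byte 1, bits 4..0).

#include <stddef.h>

struct model_op {
	unsigned char num_attached;	/* entries in arrp; 0xff = sentinel */
	unsigned char opcode;
	unsigned short sa;		/* service action */
	const struct model_op *arrp;	/* overflow array, may be NULL */
};

static const struct model_op *
model_find(const struct model_op *oip, const unsigned char *cdb)
{
	unsigned short sa = cdb[1] & 0x1f;	/* the F_SA_LOW case */

	if (oip->sa == sa)
		return oip;			/* leading entry matches */
	for (int k = 0; k < oip->num_attached; k++) {
		if (oip->arrp[k].sa == sa)
			return &oip->arrp[k];
	}
	return NULL;				/* SA not supported */
}
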
-static int sdebug_add_host = DEF_NUM_HOST;
+static int sdebug_num_hosts;
+static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
-static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
+static int sdebug_host_max_queue; /* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
@@ -643,6 +767,7 @@ static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
+static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
@@ -655,16 +780,29 @@ static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
+static bool sdebug_random = DEF_RANDOM;
+static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
+static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
+/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
+static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
+static char *sdeb_zbc_model_s;
+
+enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
+ SAM_LUN_AM_FLAT = 0x1,
+ SAM_LUN_AM_LOGICAL_UNIT = 0x2,
+ SAM_LUN_AM_EXTENDED = 0x3};
+static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
+static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -678,9 +816,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
-static unsigned char *fake_storep; /* ramdisk storage */
-static struct t10_pi_tuple *dif_storep; /* protection info */
-static void *map_storep; /* provisioning map */
+static struct xarray per_store_arr;
+static struct xarray *per_store_ap = &per_store_arr;
+static int sdeb_first_idx = -1; /* invalid index ==> none created */
+static int sdeb_most_recent_idx = -1;
+static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
static unsigned long map_size;
static int num_aborts;
@@ -692,10 +832,21 @@ static int dix_writes;
static int dix_reads;
static int dif_errors;
+/* ZBC global data */
+static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
+static int sdeb_zbc_zone_cap_mb;
+static int sdeb_zbc_zone_size_mb;
+static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
+static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
+
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
+static int poll_queues; /* io_uring iopoll interface. */
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
+static DEFINE_RWLOCK(atomic_rw2);
+
+static rwlock_t *ramdisk_lck_a[2];
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
@@ -708,13 +859,15 @@ static struct device_driver sdebug_driverfs_driver = {
};
static const int check_condition_result =
- (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ SAM_STAT_CHECK_CONDITION;
static const int illegal_condition_result =
- (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+ (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static const int device_qfull_result =
- (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+ (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
+
+static const int condition_met_result = SAM_STAT_CONDITION_MET;
/* Only do the extra work involved in logical block provisioning if one or
@@ -727,18 +880,25 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
-static void *lba2fake_store(unsigned long long lba)
+static void *lba2fake_store(struct sdeb_store_info *sip,
+ unsigned long long lba)
{
- lba = do_div(lba, sdebug_store_sectors);
+ struct sdeb_store_info *lsip = sip;
- return fake_storep + lba * sdebug_sector_size;
+ lba = do_div(lba, sdebug_store_sectors);
+ if (!sip || !sip->storep) {
+ WARN_ON_ONCE(true);
+ lsip = xa_load(per_store_ap, 0); /* should never be NULL */
+ }
+ return lsip->storep + lba * sdebug_sector_size;
}
-static struct t10_pi_tuple *dif_store(sector_t sector)
+static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
+ sector_t sector)
{
sector = sector_div(sector, sdebug_store_sectors);
- return dif_storep + sector;
+ return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
@@ -779,7 +939,7 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
}
asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
+ scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
memset(sks, 0, sizeof(sks));
sks[0] = 0x80;
if (c_d)
@@ -805,17 +965,14 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
- unsigned char *sbuff;
-
- sbuff = scp->sense_buffer;
- if (!sbuff) {
+ if (!scp->sense_buffer) {
sdev_printk(KERN_ERR, scp->device,
"%s: sense_buffer is NULL\n", __func__);
return;
}
- memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
+ scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
if (sdebug_verbose)
sdev_printk(KERN_INFO, scp->device,
@@ -933,6 +1090,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (sdebug_verbose)
cp = "power on reset";
break;
+ case SDEBUG_UA_POOCCUR:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+ POWER_ON_OCCURRED_ASCQ);
+ if (sdebug_verbose)
+ cp = "power on occurred";
+ break;
case SDEBUG_UA_BUS_RESET:
mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
BUS_RESET_ASCQ);
@@ -1040,7 +1203,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
n = scsi_bufflen(scp) - (off_dst + act_len);
- scsi_set_resid(scp, min(scsi_get_resid(scp), n));
+ scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
return 0;
}
@@ -1353,13 +1516,15 @@ static int inquiry_vpd_b0(unsigned char *arr)
}
/* Block device characteristics VPD page (SBC-3) */
-static int inquiry_vpd_b1(unsigned char *arr)
+static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
memset(arr, 0, 0x3c);
arr[0] = 0;
arr[1] = 1; /* non rotating medium (e.g. solid state) */
arr[2] = 0;
arr[3] = 5; /* less than 1.8" */
+ if (devip->zmodel == BLK_ZONED_HA)
+ arr[4] = 1 << 4; /* zoned field = 01b */
return 0x3c;
}
@@ -1383,6 +1548,32 @@ static int inquiry_vpd_b2(unsigned char *arr)
return 0x4;
}
+/* Zoned block device characteristics VPD page (ZBC mandatory) */
+static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
+{
+ memset(arr, 0, 0x3c);
+ arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
+ /*
+ * Set Optimal number of open sequential write preferred zones and
+ * Optimal number of non-sequentially written sequential write
+ * preferred zones fields to 'not reported' (0xffffffff). Leave other
+	 * fields set to zero, apart from the maximum number of open SWR zones field.
+ */
+ put_unaligned_be32(0xffffffff, &arr[4]);
+ put_unaligned_be32(0xffffffff, &arr[8]);
+ if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
+ put_unaligned_be32(devip->max_open, &arr[12]);
+ else
+ put_unaligned_be32(0xffffffff, &arr[12]);
+ if (devip->zcap < devip->zsize) {
+ arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
+ put_unaligned_be64(devip->zsize, &arr[20]);
+ } else {
+ arr[19] = 0;
+ }
+ return 0x3c;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
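
From the initiator side the page carries a 4-byte header, so the in-function
offsets above shift by four (e.g. &arr[12] becomes page bytes 16..19). A
hedged decoding sketch; the struct and helper names here are mine:

#include <stdint.h>

static inline uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

struct zbc_b6_info {
	int urswrz;		/* unrestricted reads in SWR zones */
	uint32_t max_open;	/* 0xffffffff => not reported */
};

static void parse_vpd_b6(const uint8_t *page, struct zbc_b6_info *out)
{
	out->urswrz = page[4] & 0x1;
	out->max_open = get_be32(&page[16]);
}
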
@@ -1391,14 +1582,17 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char pq_pdt;
unsigned char *arr;
unsigned char *cmd = scp->cmnd;
- int alloc_len, n, ret;
- bool have_wlun, is_disk;
+ u32 alloc_len, n;
+ int ret;
+ bool have_wlun, is_disk, is_zbc, is_disk_zbc;
alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
is_disk = (sdebug_ptype == TYPE_DISK);
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
pq_pdt = TYPE_WLUN; /* present, wlun */
@@ -1412,7 +1606,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
kfree(arr);
return check_condition_result;
} else if (0x1 & cmd[1]) { /* EVPD bit set */
- int lu_id_num, port_group_id, target_dev_id, len;
+ int lu_id_num, port_group_id, target_dev_id;
+ u32 len;
char lu_id_str[6];
int host_no = devip->sdbg_host->shost->host_no;
@@ -1436,11 +1631,14 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0x86; /* extended inquiry */
arr[n++] = 0x87; /* mode page policy */
arr[n++] = 0x88; /* SCSI ports */
- if (is_disk) { /* SBC only */
+ if (is_disk_zbc) { /* SBC or ZBC */
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits */
arr[n++] = 0xb1; /* Block characteristics */
- arr[n++] = 0xb2; /* Logical Block Prov */
+ if (is_disk)
+ arr[n++] = 0xb2; /* LB Provisioning */
+ if (is_zbc)
+ arr[n++] = 0xb6; /* ZB dev. char. */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
@@ -1479,27 +1677,30 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (0x88 == cmd[2]) { /* SCSI Ports */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
- } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
+ } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
- } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
+ } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
- } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
+ } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_vpd_b1(&arr[4]);
+ arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
+ } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_vpd_b6(devip, &arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
return check_condition_result;
}
- len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
ret = fill_from_dev_buffer(scp, arr,
- min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
kfree(arr);
return ret;
}
@@ -1529,83 +1730,79 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
put_unaligned_be16(0x525, arr + n);
n += 2;
+ } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
+ put_unaligned_be16(0x624, arr + n);
+ n += 2;
}
put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
ret = fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_LONG_INQ_SZ));
+ min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
return ret;
}
+/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
0, 0, 0x0, 0x0};
static int resp_requests(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
- unsigned char *sbuff;
unsigned char *cmd = scp->cmnd;
- unsigned char arr[SCSI_SENSE_BUFFERSIZE];
- bool dsense;
- int len = 18;
+ unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
+ bool dsense = !!(cmd[1] & 1);
+ u32 alloc_len = cmd[4];
+ u32 len = 18;
+ int stopped_state = atomic_read(&devip->stopped);
memset(arr, 0, sizeof(arr));
- dsense = !!(cmd[1] & 1);
- sbuff = scp->sense_buffer;
- if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
+ if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
+ if (dsense) {
+ arr[0] = 0x72;
+ arr[1] = NOT_READY;
+ arr[2] = LOGICAL_UNIT_NOT_READY;
+ arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
+ len = 8;
+ } else {
+ arr[0] = 0x70;
+ arr[2] = NOT_READY; /* NO_SENSE in sense_key */
+ arr[7] = 0xa; /* 18 byte sense buffer */
+ arr[12] = LOGICAL_UNIT_NOT_READY;
+ arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
+ }
+ } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
+ /* Information exceptions control mode page: TEST=1, MRIE=6 */
if (dsense) {
arr[0] = 0x72;
arr[1] = 0x0; /* NO_SENSE in sense_key */
arr[2] = THRESHOLD_EXCEEDED;
- arr[3] = 0xff; /* TEST set and MRIE==6 */
+ arr[3] = 0xff; /* Failure prediction(false) */
len = 8;
} else {
arr[0] = 0x70;
arr[2] = 0x0; /* NO_SENSE in sense_key */
arr[7] = 0xa; /* 18 byte sense buffer */
arr[12] = THRESHOLD_EXCEEDED;
- arr[13] = 0xff; /* TEST set and MRIE==6 */
+ arr[13] = 0xff; /* Failure prediction(false) */
}
- } else {
- memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
- if (arr[0] >= 0x70 && dsense == sdebug_dsense)
- ; /* have sense and formats match */
- else if (arr[0] <= 0x70) {
- if (dsense) {
- memset(arr, 0, 8);
- arr[0] = 0x72;
- len = 8;
- } else {
- memset(arr, 0, 18);
- arr[0] = 0x70;
- arr[7] = 0xa;
- }
- } else if (dsense) {
- memset(arr, 0, 8);
- arr[0] = 0x72;
- arr[1] = sbuff[2]; /* sense key */
- arr[2] = sbuff[12]; /* asc */
- arr[3] = sbuff[13]; /* ascq */
+ } else { /* nothing to report */
+ if (dsense) {
len = 8;
+ memset(arr, 0, len);
+ arr[0] = 0x72;
} else {
- memset(arr, 0, 18);
+ memset(arr, 0, len);
arr[0] = 0x70;
- arr[2] = sbuff[1];
arr[7] = 0xa;
- arr[12] = sbuff[1];
- arr[13] = sbuff[3];
}
-
}
- mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
- return fill_from_dev_buffer(scp, arr, len);
+ return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
-static int resp_start_stop(struct scsi_cmnd *scp,
- struct sdebug_dev_info *devip)
+static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
unsigned char *cmd = scp->cmnd;
- int power_cond, stop;
+ int power_cond, want_stop, stopped_state;
bool changing;
power_cond = (cmd[4] & 0xf0) >> 4;
@@ -1613,10 +1810,33 @@ static int resp_start_stop(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
return check_condition_result;
}
- stop = !(cmd[4] & 1);
- changing = atomic_read(&devip->stopped) == !stop;
- atomic_xchg(&devip->stopped, stop);
- if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
+ want_stop = !(cmd[4] & 1);
+ stopped_state = atomic_read(&devip->stopped);
+ if (stopped_state == 2) {
+ ktime_t now_ts = ktime_get_boottime();
+
+ if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
+ u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
+
+ if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
+				/* tur_ms_to_ready timer has expired */
+ atomic_set(&devip->stopped, 0);
+ stopped_state = 0;
+ }
+ }
+ if (stopped_state == 2) {
+ if (want_stop) {
+ stopped_state = 1; /* dummy up success */
+ } else { /* Disallow tur_ms_to_ready delay to be overridden */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
+ return check_condition_result;
+ }
+ }
+ }
+ changing = (stopped_state != want_stop);
+ if (changing)
+ atomic_xchg(&devip->stopped, want_stop);
+ if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
return SDEG_RES_IMMED_MASK;
else
return 0;
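
The readiness test reduces to one comparison in nanoseconds. A sketch of the
arithmetic with the tur_ms_to_ready parameter this patch introduces (e.g.
2500 ms becomes a 2.5e9 ns threshold):

#include <stdint.h>

static int became_ready(uint64_t now_ns, uint64_t create_ns,
			uint64_t tur_ms_to_ready)
{
	return now_ns > create_ns &&
	       (now_ns - create_ns) >= tur_ms_to_ready * 1000000ULL;
}
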
@@ -1658,7 +1878,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
{
unsigned char *cmd = scp->cmnd;
unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
- int alloc_len;
+ u32 alloc_len;
alloc_len = get_unaligned_be32(cmd + 10);
/* following just in case virtual_gb changed */
@@ -1679,6 +1899,13 @@ static int resp_readcap16(struct scsi_cmnd *scp,
arr[14] |= 0x40;
}
+ /*
+ * Since the scsi_debug READ CAPACITY implementation always reports the
+ * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
+ */
+ if (devip->zmodel == BLK_ZONED_HM)
+ arr[12] |= 1 << 4;
+
arr[15] = sdebug_lowest_aligned & 0xff;
if (have_dif_prot) {
@@ -1687,7 +1914,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
}
return fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
+ min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
@@ -1698,8 +1925,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
unsigned char *cmd = scp->cmnd;
unsigned char *arr;
int host_no = devip->sdbg_host->shost->host_no;
- int n, ret, alen, rlen;
int port_group_a, port_group_b, port_a, port_b;
+ u32 alen, n, rlen;
+ int ret;
alen = get_unaligned_be32(cmd + 6);
arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
@@ -1761,9 +1989,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
* - The constructed command length
* - The maximum array size
*/
- rlen = min(alen,n);
+ rlen = min(alen, n);
ret = fill_from_dev_buffer(scp, arr,
- min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
+ min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
kfree(arr);
return ret;
}
@@ -2113,12 +2341,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
{
int pcontrol, pcode, subpcode, bd_len;
unsigned char dev_spec;
- int alloc_len, offset, len, target_dev_id;
+ u32 alloc_len, offset, len;
+ int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -2127,7 +2356,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
- if (is_disk && !dbd)
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ if ((is_disk || is_zbc) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2139,8 +2369,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
(devip->target * 1000) - 3;
- /* for disks set DPOFUA bit and clear write protect (WP) bit */
- if (is_disk) {
+ /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
+ if (is_disk || is_zbc) {
dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
if (sdebug_wp)
dev_spec |= 0x80;
@@ -2200,7 +2430,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
bad_pcode = true;
break;
case 0x8: /* Caching page, direct access */
- if (is_disk) {
+ if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
} else
@@ -2238,6 +2468,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
target);
len += resp_caching_pg(ap + len, pcontrol,
target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol,
+ target);
}
len += resp_ctrl_m_pg(ap + len, pcontrol, target);
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2265,7 +2498,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
- return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
+ return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -2296,11 +2529,11 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- if (md_len > 2) {
+ off = bd_len + (mselect6 ? 4 : 8);
+ if (md_len > 2 || off >= res) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
- off = bd_len + (mselect6 ? 4 : 8);
mpage = arr[off] & 0x3f;
ps = !!(arr[off] & 0x80);
if (ps) {
@@ -2375,12 +2608,25 @@ static int resp_ie_l_pg(unsigned char *arr)
return sizeof(ie_l_pg);
}
+static int resp_env_rep_l_spg(unsigned char *arr)
+{
+ unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
+ 0x0, 40, 72, 0xff, 45, 18, 0, 0,
+ 0x1, 0x0, 0x23, 0x8,
+ 0x0, 55, 72, 35, 55, 45, 0, 0,
+ };
+
+ memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
+ return sizeof(env_rep_l_spg);
+}
+
#define SDEBUG_MAX_LSENSE_SZ 512
static int resp_log_sense(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
- int ppc, sp, pcode, subpcode, alloc_len, len, n;
+ int ppc, sp, pcode, subpcode;
+ u32 alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
unsigned char *cmd = scp->cmnd;
@@ -2426,38 +2672,311 @@ static int resp_log_sense(struct scsi_cmnd *scp,
arr[n++] = 0xff; /* this page */
arr[n++] = 0xd;
arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0xd;
+ arr[n++] = 0x1; /* Environment reporting */
+ arr[n++] = 0xd;
+ arr[n++] = 0xff; /* all 0xd subpages */
arr[n++] = 0x2f;
arr[n++] = 0x0; /* Informational exceptions */
+ arr[n++] = 0x2f;
+ arr[n++] = 0xff; /* all 0x2f subpages */
arr[3] = n - 4;
break;
case 0xd: /* Temperature subpages */
n = 4;
arr[n++] = 0xd;
arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0xd;
+ arr[n++] = 0x1; /* Environment reporting */
+ arr[n++] = 0xd;
+ arr[n++] = 0xff; /* these subpages */
arr[3] = n - 4;
break;
case 0x2f: /* Informational exceptions subpages */
n = 4;
arr[n++] = 0x2f;
arr[n++] = 0x0; /* Informational exceptions */
+ arr[n++] = 0x2f;
+ arr[n++] = 0xff; /* these subpages */
arr[3] = n - 4;
break;
default:
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
return check_condition_result;
}
+ } else if (subpcode > 0) {
+ arr[0] |= 0x40;
+ arr[1] = subpcode;
+ if (pcode == 0xd && subpcode == 1)
+ arr[3] = resp_env_rep_l_spg(arr + 4);
+ else {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+ }
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
return fill_from_dev_buffer(scp, arr,
- min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
+}
+
+static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
+{
+ return devip->nr_zones != 0;
+}
+
+static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
+ unsigned long long lba)
+{
+ u32 zno = lba >> devip->zsize_shift;
+ struct sdeb_zone_state *zsp;
+
+ if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
+ return &devip->zstate[zno];
+
+ /*
+ * If the zone capacity is less than the zone size, adjust for gap
+ * zones.
+ */
+ zno = 2 * zno - devip->nr_conv_zones;
+ WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
+ zsp = &devip->zstate[zno];
+ if (lba >= zsp->z_start + zsp->z_size)
+ zsp++;
+ WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
+ return zsp;
+}
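
A standalone worked example of that index mapping for the zcap < zsize case,
using toy geometry (zone size 8 LBAs, capacity 6, one conventional zone)
rather than driver defaults:

#include <stdio.h>

int main(void)
{
	unsigned int nr_conv = 1, zsize_shift = 3, zcap = 6, zsize = 8;
	unsigned long long lba;

	for (lba = 20; lba <= 23; lba++) {
		unsigned int zno = lba >> zsize_shift;	/* logical zone 2 */
		unsigned int idx = 2 * zno - nr_conv;	/* SWR entry 3 */

		if (lba >= (unsigned long long)zno * zsize + zcap)
			idx++;		/* past the capacity: gap zone */
		printf("lba %llu -> zstate[%u]\n", lba, idx);
	}
	return 0;	/* lba 20,21 -> zstate[3]; lba 22,23 -> zstate[4] */
}
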
+
+static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
+{
+ return zsp->z_type == ZBC_ZTYPE_CNV;
+}
+
+static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
+{
+ return zsp->z_type == ZBC_ZTYPE_GAP;
}
-static inline int check_device_access_params(struct scsi_cmnd *scp,
- unsigned long long lba, unsigned int num, bool write)
+static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
+ return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
+}
+
+static void zbc_close_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (!zbc_zone_is_seq(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
+ return;
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ devip->nr_imp_open--;
+ else
+ devip->nr_exp_open--;
+
+ if (zsp->z_wp == zsp->z_start) {
+ zsp->z_cond = ZC1_EMPTY;
+ } else {
+ zsp->z_cond = ZC4_CLOSED;
+ devip->nr_closed++;
+ }
+}
+
+static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
+ zbc_close_zone(devip, zsp);
+ return;
+ }
+ }
+}
+
+static void zbc_open_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool explicit)
+{
+ enum sdebug_z_cond zc;
+
+ if (!zbc_zone_is_seq(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
+ (!explicit && zc == ZC2_IMPLICIT_OPEN))
+ return;
+
+ /* Close an implicit open zone if necessary */
+ if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ else if (devip->max_open &&
+ devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
+ zbc_close_imp_open_zone(devip);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ if (explicit) {
+ zsp->z_cond = ZC3_EXPLICIT_OPEN;
+ devip->nr_exp_open++;
+ } else {
+ zsp->z_cond = ZC2_IMPLICIT_OPEN;
+ devip->nr_imp_open++;
+ }
+}
+
+static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ switch (zsp->z_cond) {
+ case ZC2_IMPLICIT_OPEN:
+ devip->nr_imp_open--;
+ break;
+ case ZC3_EXPLICIT_OPEN:
+ devip->nr_exp_open--;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
+ zsp->z_start, zsp->z_cond);
+ break;
+ }
+ zsp->z_cond = ZC5_FULL;
+}
+
+static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ unsigned long long lba, unsigned int num)
+{
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
+
+ if (!zbc_zone_is_seq(zsp))
+ return;
+
+ if (zsp->z_type == ZBC_ZTYPE_SWR) {
+ zsp->z_wp += num;
+ if (zsp->z_wp >= zend)
+ zbc_set_zone_full(devip, zsp);
+ return;
+ }
+
+ while (num) {
+ if (lba != zsp->z_wp)
+ zsp->z_non_seq_resource = true;
+
+ end = lba + num;
+ if (end >= zend) {
+ n = zend - lba;
+ zsp->z_wp = zend;
+ } else if (end > zsp->z_wp) {
+ n = num;
+ zsp->z_wp = end;
+ } else {
+ n = num;
+ }
+ if (zsp->z_wp >= zend)
+ zbc_set_zone_full(devip, zsp);
+
+ num -= n;
+ lba += n;
+ if (num) {
+ zsp++;
+ zend = zsp->z_start + zsp->z_size;
+ }
+ }
+}
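
Worked numbers for the sequential-write-required branch above (toy zone:
start 0, size 8, write pointer at 5): a 3-block write lands the pointer on
zend, so the zone goes full.

#include <stdio.h>

int main(void)
{
	unsigned long long z_start = 0, z_size = 8, z_wp = 5;
	unsigned long long zend = z_start + z_size;
	unsigned int num = 3;			/* blocks written */

	z_wp += num;				/* SWR zones only advance */
	printf("wp=%llu full=%d\n", z_wp, z_wp >= zend); /* wp=8 full=1 */
	return 0;
}
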
+
+static int check_zbc_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num, bool write)
+{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
+
+ if (!write) {
+ if (devip->zmodel == BLK_ZONED_HA)
+ return 0;
+		/* For host-managed, reads cannot cross zone type boundaries */
+ if (zsp->z_type != zsp_end->z_type) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ READ_INVDATA_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ /* Writing into a gap zone is not allowed */
+ if (zbc_zone_is_gap(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+ ATTEMPT_ACCESS_GAP);
+ return check_condition_result;
+ }
+
+ /* No restrictions for writes within conventional zones */
+ if (zbc_zone_is_conv(zsp)) {
+ if (!zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ if (zsp->z_type == ZBC_ZTYPE_SWR) {
+ /* Writes cannot cross sequential zone boundaries */
+ if (zsp_end != zsp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ /* Cannot write full zones */
+ if (zsp->z_cond == ZC5_FULL) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+ /* Writes must be aligned to the zone WP */
+ if (lba != zsp->z_wp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+ }
+
+ /* Handle implicit open of closed and empty zones */
+ if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
+ if (devip->max_open &&
+ devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT,
+ INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ return check_condition_result;
+ }
+ zbc_open_zone(devip, zsp, false);
+ }
+
+ return 0;
+}
+
+static inline int check_device_access_params(struct scsi_cmnd *scp,
+	unsigned long long lba, unsigned int num,
+	bool write)
+{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+
if (lba + num > sdebug_capacity) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
@@ -2472,17 +2991,37 @@ static inline int check_device_access_params(struct scsi_cmnd *scp,
mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
return check_condition_result;
}
+ if (sdebug_dev_is_zoned(devip))
+ return check_zbc_access_params(scp, lba, num, write);
+
return 0;
}
+/*
+ * Note: if BUG_ON() fires it usually indicates a problem with the parser
+ * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
+ * that access any of the "stores" in struct sdeb_store_info should call this
+ * function with bug_if_fake_rw set to true.
+ */
+static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
+ bool bug_if_fake_rw)
+{
+ if (sdebug_fake_rw) {
+ BUG_ON(bug_if_fake_rw); /* See note above */
+ return NULL;
+ }
+ return xa_load(per_store_ap, devip->sdbg_host->si_idx);
+}
+
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
- u32 num, bool do_write)
+static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
+ u32 sg_skip, u64 lba, u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
- struct scsi_data_buffer *sdb = &scmd->sdb;
enum dma_data_direction dir;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ u8 *fsp;
if (do_write) {
dir = DMA_TO_DEVICE;
@@ -2491,24 +3030,25 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
dir = DMA_FROM_DEVICE;
}
- if (!sdb->length)
+ if (!sdb->length || !sip)
return 0;
- if (scmd->sc_data_direction != dir)
+ if (scp->sc_data_direction != dir)
return -1;
+ fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
if (block + num > sdebug_store_sectors)
rest = block + num - sdebug_store_sectors;
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep + (block * sdebug_sector_size),
+ fsp + (block * sdebug_sector_size),
(num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep, rest * sdebug_sector_size,
+ fsp, rest * sdebug_sector_size,
sg_skip + ((num - rest) * sdebug_sector_size),
do_write);
}
@@ -2516,34 +3056,49 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
return ret;
}
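
The store is treated as a ring over sdebug_store_sectors, so one transfer may
split into a tail copy plus a wrap ("rest") copy from offset 0. A worked
example of the split computed above:

#include <stdio.h>

int main(void)
{
	unsigned long long lba = 96, store_sectors = 100;
	unsigned int num = 8;
	unsigned long long block = lba % store_sectors;	/* do_div() analogue */
	unsigned long long rest = 0;

	if (block + num > store_sectors)
		rest = block + num - store_sectors;
	/* prints: first copy 4 sectors at 96, wrap copy 4 sectors at 0 */
	printf("first copy %llu sectors at %llu, wrap copy %llu sectors at 0\n",
	       num - rest, block, rest);
	return 0;
}
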
-/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into lba2fake_store(lba,num) and return true. If comparison fails then
+/* Returns number of bytes copied or -1 if error. */
+static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
+{
+ struct scsi_data_buffer *sdb = &scp->sdb;
+
+ if (!sdb->length)
+ return 0;
+ if (scp->sc_data_direction != DMA_TO_DEVICE)
+ return -1;
+ return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
+ num * sdebug_sector_size, 0, true);
+}
+
+/* If sip->storep+lba compares equal to arr(num), then copy top half of
+ * arr into sip->storep+lba and return true. If comparison fails then
* return false. */
-static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
+static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
+ const u8 *arr, bool compare_only)
{
bool res;
u64 block, rest = 0;
u32 store_blks = sdebug_store_sectors;
u32 lb_size = sdebug_sector_size;
+ u8 *fsp = sip->storep;
block = do_div(lba, store_blks);
if (block + num > store_blks)
rest = block + num - store_blks;
- res = !memcmp(fake_storep + (block * lb_size), arr,
- (num - rest) * lb_size);
+ res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (!res)
return res;
if (rest)
- res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
+ res = memcmp(fsp, arr + ((num - rest) * lb_size),
rest * lb_size);
if (!res)
return res;
+ if (compare_only)
+ return true;
arr += num * lb_size;
- memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
+ memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (rest)
- memcpy(fake_storep, arr + ((num - rest) * lb_size),
- rest * lb_size);
+ memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
return res;
}
@@ -2586,24 +3141,27 @@ static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
return 0;
}
-static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
unsigned int sectors, bool read)
{
size_t resid;
void *paddr;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *dif_storep = sip->dif_storep;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
struct sg_mapping_iter miter;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
- sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
- scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
- (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+ sg_miter_start(&miter, scsi_prot_sglist(scp),
+ scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
while (sg_miter_next(&miter) && resid > 0) {
- size_t len = min(miter.length, resid);
- void *start = dif_store(sector);
+ size_t len = min_t(size_t, miter.length, resid);
+ void *start = dif_store(sip, sector);
size_t rest = 0;
if (dif_store_end < start + len)
@@ -2629,45 +3187,119 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
sg_miter_stop(&miter);
}
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
+ int ret = 0;
unsigned int i;
- struct t10_pi_tuple *sdt;
sector_t sector;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *sdt;
for (i = 0; i < sectors; i++, ei_lba++) {
- int ret;
-
sector = start_sec + i;
- sdt = dif_store(sector);
+ sdt = dif_store(sip, sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
- if (ret) {
- dif_errors++;
- return ret;
+ /*
+ * Because scsi_debug acts as both initiator and
+ * target we proceed to verify the PI even if
+ * RDPROTECT=3. This is done so the "initiator" knows
+ * which type of error to return. Otherwise we would
+ * have to iterate over the PI twice.
+ */
+ if (scp->cmnd[1] >> 5) { /* RDPROTECT */
+ ret = dif_verify(sdt, lba2fake_store(sip, sector),
+ sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ break;
+ }
}
}
- dif_copy_prot(SCpnt, start_sec, sectors, true);
+ dif_copy_prot(scp, start_sec, sectors, true);
dix_reads++;
- return 0;
+ return ret;
+}
+
+static inline void
+sdeb_read_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_lock(&sip->macc_lck);
+ else
+ read_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_read_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_unlock(&sip->macc_lck);
+ else
+ read_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_write_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_lock(&sip->macc_lck);
+ else
+ write_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_write_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_unlock(&sip->macc_lck);
+ else
+ write_unlock(&sdeb_fake_rw_lck);
+ }
}
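
With no_rwlock set these helpers serialize nothing; __acquire()/__release() are sparse context annotations (no-ops at run time) that keep static lock-balance checking consistent. A hedged userspace analog of the same switchable-locking pattern:

	#include <pthread.h>
	#include <stdbool.h>

	static bool no_rwlock;		/* analog of sdebug_no_rwlock */

	static void store_read_lock(pthread_rwlock_t *lck)
	{
		if (!no_rwlock)
			pthread_rwlock_rdlock(lck);
	}

	static void store_read_unlock(pthread_rwlock_t *lck)
	{
		if (!no_rwlock)
			pthread_rwlock_unlock(lck);
	}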
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- struct sdebug_queued_cmd *sqcp;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ u8 *cmd = scp->cmnd;
switch (cmd[0]) {
case READ_16:
@@ -2720,15 +3352,11 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
"to DIF device\n");
}
- if (unlikely(sdebug_any_injecting_opt)) {
- sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
-
- if (sqcp) {
- if (sqcp->inj_short)
- num /= 2;
- }
- } else
- sqcp = NULL;
+ if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
+ atomic_read(&sdeb_inject_pending))) {
+ num /= 2;
+ atomic_set(&sdeb_inject_pending, 0);
+ }
ret = check_device_access_params(scp, lba, num, false);
if (ret)
@@ -2749,69 +3377,63 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- read_lock_irqsave(&atomic_rw, iflags);
+ sdeb_read_lock(sip);
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
- int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
-
- if (prot_ret) {
- read_unlock_irqrestore(&atomic_rw, iflags);
- mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
- return illegal_condition_result;
+ switch (prot_verify_read(scp, lba, num, ei_lba)) {
+ case 1: /* Guard tag error */
+ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+ sdeb_read_unlock(sip);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return check_condition_result;
+ } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+ sdeb_read_unlock(sip);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ }
+ break;
+ case 3: /* Reference tag error */
+ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+ sdeb_read_unlock(sip);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+ return check_condition_result;
+ } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+ sdeb_read_unlock(sip);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+ return illegal_condition_result;
+ }
+ break;
}
}
- ret = do_device_access(scp, 0, lba, num, false);
- read_unlock_irqrestore(&atomic_rw, iflags);
+ ret = do_device_access(sip, scp, 0, lba, num, false);
+ sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
scsi_set_resid(scp, scsi_bufflen(scp) - ret);
- if (unlikely(sqcp)) {
- if (sqcp->inj_recovered) {
- mk_sense_buffer(scp, RECOVERED_ERROR,
- THRESHOLD_EXCEEDED, 0);
- return check_condition_result;
- } else if (sqcp->inj_transport) {
- mk_sense_buffer(scp, ABORTED_COMMAND,
- TRANSPORT_PROBLEM, ACK_NAK_TO);
+ if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
+ atomic_read(&sdeb_inject_pending))) {
+ if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
+ mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
+ atomic_set(&sdeb_inject_pending, 0);
return check_condition_result;
- } else if (sqcp->inj_dif) {
+ } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
/* Logical block guard check failed */
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ atomic_set(&sdeb_inject_pending, 0);
return illegal_condition_result;
- } else if (sqcp->inj_dix) {
+ } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ atomic_set(&sdeb_inject_pending, 0);
return illegal_condition_result;
}
}
return 0;
}
-static void dump_sector(unsigned char *buf, int len)
-{
- int i, j, n;
-
- pr_err(">>> Sector Dump <<<\n");
- for (i = 0 ; i < len ; i += 16) {
- char b[128];
-
- for (j = 0, n = 0; j < 16; j++) {
- unsigned char c = buf[i+j];
-
- if (c >= 0x20 && c < 0x7e)
- n += scnprintf(b + n, sizeof(b) - n,
- " %c ", buf[i+j]);
- else
- n += scnprintf(b + n, sizeof(b) - n,
- "%02x ", buf[i+j]);
- }
- pr_err("%04d: %s\n", i, b);
- }
-}
-
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
@@ -2857,10 +3479,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
sdt = piter.addr + ppage_offset;
daddr = diter.addr + dpage_offset;
- ret = dif_verify(sdt, daddr, sector, ei_lba);
- if (ret) {
- dump_sector(daddr, sdebug_sector_size);
- goto out;
+ if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
+ ret = dif_verify(sdt, daddr, sector, ei_lba);
+ if (ret)
+ goto out;
}
sector++;
@@ -2901,7 +3523,8 @@ static sector_t map_index_to_lba(unsigned long index)
return lba;
}
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int *num)
{
sector_t end;
unsigned int mapped;
@@ -2909,19 +3532,20 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
unsigned long next;
index = lba_to_map_index(lba);
- mapped = test_bit(index, map_storep);
+ mapped = test_bit(index, sip->map_storep);
if (mapped)
- next = find_next_zero_bit(map_storep, map_size, index);
+ next = find_next_zero_bit(sip->map_storep, map_size, index);
else
- next = find_next_bit(map_storep, map_size, index);
+ next = find_next_bit(sip->map_storep, map_size, index);
end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
*num = end - lba;
return mapped;
}
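
map_state() builds on the provisioning bitmap's LBA-to-bit mapping, where one bit covers sdebug_unmap_granularity LBs offset by sdebug_unmap_alignment. A sketch of that mapping with illustrative constants (not the driver's defaults):

	#include <stdint.h>

	#define GRANULARITY	8	/* LBs per map bit (assumed) */
	#define ALIGNMENT	0	/* LBA where bit 0 starts (assumed) */

	static uint64_t lba_to_index(uint64_t lba)
	{
		if (ALIGNMENT)
			lba += GRANULARITY - ALIGNMENT;
		return lba / GRANULARITY;
	}

	static uint64_t index_to_lba(uint64_t index)
	{
		uint64_t lba = index * GRANULARITY;

		if (ALIGNMENT)
			lba -= GRANULARITY - ALIGNMENT;
		return lba;
	}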
-static void map_region(sector_t lba, unsigned int len)
+static void map_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
@@ -2929,15 +3553,17 @@ static void map_region(sector_t lba, unsigned int len)
unsigned long index = lba_to_map_index(lba);
if (index < map_size)
- set_bit(index, map_storep);
+ set_bit(index, sip->map_storep);
lba = map_index_to_lba(index + 1);
}
}
-static void unmap_region(sector_t lba, unsigned int len)
+static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
+ u8 *fsp = sip->storep;
while (lba < end) {
unsigned long index = lba_to_map_index(lba);
@@ -2945,17 +3571,16 @@ static void unmap_region(sector_t lba, unsigned int len)
if (lba == map_index_to_lba(index) &&
lba + sdebug_unmap_granularity <= end &&
index < map_size) {
- clear_bit(index, map_storep);
+ clear_bit(index, sip->map_storep);
if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
- memset(fake_storep +
- lba * sdebug_sector_size,
+ memset(fsp + lba * sdebug_sector_size,
(sdebug_lbprz & 1) ? 0 : 0xff,
sdebug_sector_size *
sdebug_unmap_granularity);
}
- if (dif_storep) {
- memset(dif_storep + lba, 0xff,
- sizeof(*dif_storep) *
+ if (sip->dif_storep) {
+ memset(sip->dif_storep + lba, 0xff,
+ sizeof(*sip->dif_storep) *
sdebug_unmap_granularity);
}
}
@@ -2965,13 +3590,13 @@ static void unmap_region(sector_t lba, unsigned int len)
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ u8 *cmd = scp->cmnd;
switch (cmd[0]) {
case WRITE_16:
@@ -3024,26 +3649,49 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
}
+
+ sdeb_write_lock(sip);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ sdeb_write_unlock(sip);
return ret;
- write_lock_irqsave(&atomic_rw, iflags);
+ }
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
- int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
-
- if (prot_ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
- mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
- return illegal_condition_result;
+ switch (prot_verify_write(scp, lba, num, ei_lba)) {
+ case 1: /* Guard tag error */
+ if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+ sdeb_write_unlock(sip);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+ sdeb_write_unlock(sip);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return check_condition_result;
+ }
+ break;
+ case 3: /* Reference tag error */
+ if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+ sdeb_write_unlock(sip);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+ return illegal_condition_result;
+ } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+ sdeb_write_unlock(sip);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+ return check_condition_result;
+ }
+ break;
}
}
- ret = do_device_access(scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
- write_unlock_irqrestore(&atomic_rw, iflags);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
+ sdeb_write_unlock(sip);
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
@@ -3052,23 +3700,21 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
"%s: write: cdb indicated=%u, IO sent=%d bytes\n",
my_name, num * sdebug_sector_size, ret);
- if (unlikely(sdebug_any_injecting_opt)) {
- struct sdebug_queued_cmd *sqcp =
- (struct sdebug_queued_cmd *)scp->host_scribble;
-
- if (sqcp) {
- if (sqcp->inj_recovered) {
- mk_sense_buffer(scp, RECOVERED_ERROR,
- THRESHOLD_EXCEEDED, 0);
- return check_condition_result;
- } else if (sqcp->inj_dif) {
- /* Logical block guard check failed */
- mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
- return illegal_condition_result;
- } else if (sqcp->inj_dix) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
- return illegal_condition_result;
- }
+ if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
+ atomic_read(&sdeb_inject_pending))) {
+ if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
+ mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
+ atomic_set(&sdeb_inject_pending, 0);
+ return check_condition_result;
+ } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ atomic_set(&sdeb_inject_pending, 0);
+ return illegal_condition_result;
+ } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ atomic_set(&sdeb_inject_pending, 0);
+ return illegal_condition_result;
}
}
return 0;
@@ -3084,13 +3730,13 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u8 *lrdp = NULL;
u8 *up;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
u8 wrprotect;
u16 lbdof, num_lrd, k;
u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
- unsigned long iflags;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
@@ -3152,7 +3798,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
goto err_out;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ sdeb_write_lock(sip);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
@@ -3195,9 +3841,12 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
+ map_region(sip, lba, num);
if (unlikely(-1 == ret)) {
ret = DID_ERROR << 16;
goto err_out_unlock;
@@ -3206,28 +3855,24 @@ static int resp_write_scat(struct scsi_cmnd *scp,
"%s: write: cdb indicated=%u, IO sent=%d bytes\n",
my_name, num_by, ret);
- if (unlikely(sdebug_any_injecting_opt)) {
- struct sdebug_queued_cmd *sqcp =
- (struct sdebug_queued_cmd *)scp->host_scribble;
-
- if (sqcp) {
- if (sqcp->inj_recovered) {
- mk_sense_buffer(scp, RECOVERED_ERROR,
- THRESHOLD_EXCEEDED, 0);
- ret = illegal_condition_result;
- goto err_out_unlock;
- } else if (sqcp->inj_dif) {
- /* Logical block guard check failed */
- mk_sense_buffer(scp, ABORTED_COMMAND,
- 0x10, 1);
- ret = illegal_condition_result;
- goto err_out_unlock;
- } else if (sqcp->inj_dix) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST,
- 0x10, 1);
- ret = illegal_condition_result;
- goto err_out_unlock;
- }
+ if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
+ atomic_read(&sdeb_inject_pending))) {
+ if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
+ mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
+ atomic_set(&sdeb_inject_pending, 0);
+ ret = check_condition_result;
+ goto err_out_unlock;
+ } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ atomic_set(&sdeb_inject_pending, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ atomic_set(&sdeb_inject_pending, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
}
}
sg_off += num_by;
@@ -3235,7 +3880,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
ret = 0;
err_out_unlock:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ sdeb_write_unlock(sip);
err_out:
kfree(lrdp);
return ret;
@@ -3244,27 +3889,34 @@ err_out:
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
- int ret;
- unsigned long iflags;
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
unsigned long long i;
- u32 lb_size = sdebug_sector_size;
u64 block, lbaa;
+ u32 lb_size = sdebug_sector_size;
+ int ret;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
u8 *fs1p;
+ u8 *fsp;
+
+ sdeb_write_lock(sip);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ sdeb_write_unlock(sip);
return ret;
-
- write_lock_irqsave(&atomic_rw, iflags);
+ }
if (unmap && scsi_debug_lbp()) {
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
goto out;
}
lbaa = lba;
block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
- fs1p = fake_storep + (block * lb_size);
+ fsp = sip->storep;
+ fs1p = fsp + (block * lb_size);
if (ndob) {
memset(fs1p, 0, lb_size);
ret = 0;
@@ -3272,7 +3924,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+ sdeb_write_unlock(sip);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
@@ -3283,12 +3935,15 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
for (i = 1 ; i < num ; i++) {
lbaa = lba + i;
block = do_div(lbaa, sdebug_store_sectors);
- memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+ memmove(fsp + (block * lb_size), fs1p, lb_size);
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ sdeb_write_unlock(sip);
return 0;
}
@@ -3400,12 +4055,11 @@ static int resp_comp_write(struct scsi_cmnd *scp,
{
u8 *cmd = scp->cmnd;
u8 *arr;
- u8 *fake_storep_hold;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
u64 lba;
u32 dnum;
u32 lb_size = sdebug_sector_size;
u8 num;
- unsigned long iflags;
int ret;
int retval = 0;
@@ -3434,14 +4088,9 @@ static int resp_comp_write(struct scsi_cmnd *scp,
return check_condition_result;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ sdeb_write_lock(sip);
- /* trick do_device_access() to fetch both compare and write buffers
- * from data-in into arr. Safe (atomic) since write_lock held. */
- fake_storep_hold = fake_storep;
- fake_storep = arr;
- ret = do_device_access(scp, 0, 0, dnum, true);
- fake_storep = fake_storep_hold;
+ ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
retval = DID_ERROR << 16;
goto cleanup;
@@ -3449,15 +4098,15 @@ static int resp_comp_write(struct scsi_cmnd *scp,
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
- if (!comp_write_worker(lba, num, arr)) {
+ if (!comp_write_worker(sip, lba, num, arr, false)) {
mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
retval = check_condition_result;
goto cleanup;
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
cleanup:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ sdeb_write_unlock(sip);
kfree(arr);
return retval;
}
@@ -3472,10 +4121,9 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
unsigned char *buf;
struct unmap_block_desc *desc;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
unsigned int i, payload_len, descriptors;
int ret;
- unsigned long iflags;
-
if (!scsi_debug_lbp())
return 0; /* fib and say its done */
@@ -3502,7 +4150,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
desc = (void *)&buf[8];
- write_lock_irqsave(&atomic_rw, iflags);
+ sdeb_write_lock(sip);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -3512,13 +4160,13 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (ret)
goto out;
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
}
ret = 0;
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ sdeb_write_unlock(sip);
kfree(buf);
return ret;
@@ -3532,8 +4180,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u64 lba;
u32 alloc_len, mapped, num;
- u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
int ret;
+ u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
lba = get_unaligned_be64(cmd + 2);
alloc_len = get_unaligned_be32(cmd + 10);
@@ -3545,9 +4193,11 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
if (ret)
return ret;
- if (scsi_debug_lbp())
- mapped = map_state(lba, &num);
- else {
+ if (scsi_debug_lbp()) {
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+
+ mapped = map_state(sip, lba, &num);
+ } else {
mapped = 1;
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
@@ -3585,13 +4235,62 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
}
- if (!write_since_sync || cmd[1] & 0x2)
+ if (!write_since_sync || (cmd[1] & 0x2))
res = SDEG_RES_IMMED_MASK;
else /* delay if write_since_sync and IMMED clear */
write_since_sync = false;
return res;
}
+/*
+ * Assuming lba+num_blocks is not out of range, this function returns
+ * CONDITION MET if the specified blocks will fit (or have already been
+ * fetched) into the cache, and GOOD status otherwise. We model a disk
+ * with a big cache, so CONDITION MET is always returned. As a side
+ * effect, it actually tries to bring the given range of main memory
+ * into the cache associated with the CPU(s).
+ */
+static int resp_pre_fetch(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 lba;
+ u64 block, rest = 0;
+ u32 nblks;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ u8 *fsp = sip->storep;
+
+ if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
+ lba = get_unaligned_be32(cmd + 2);
+ nblks = get_unaligned_be16(cmd + 7);
+ } else { /* PRE-FETCH(16) */
+ lba = get_unaligned_be64(cmd + 2);
+ nblks = get_unaligned_be32(cmd + 10);
+ }
+ if (lba + nblks > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ if (!fsp)
+ goto fini;
+ /* PRE-FETCH spec says nothing about LBP or PI so skip them */
+ block = do_div(lba, sdebug_store_sectors);
+ if (block + nblks > sdebug_store_sectors)
+ rest = block + nblks - sdebug_store_sectors;
+
+ /* Try to bring the PRE-FETCH range into CPU's cache */
+ sdeb_read_lock(sip);
+ prefetch_range(fsp + (sdebug_sector_size * block),
+ (nblks - rest) * sdebug_sector_size);
+ if (rest)
+ prefetch_range(fsp, rest * sdebug_sector_size);
+ sdeb_read_unlock(sip);
+fini:
+ if (cmd[1] & 0x2)
+ res = SDEG_RES_IMMED_MASK;
+ return res | condition_met_result;
+}
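
prefetch_range() simply walks the span issuing CPU prefetch hints one cache line at a time. A rough userspace approximation, assuming a 64-byte line:

	#include <stddef.h>

	static void prefetch_span(const void *addr, size_t len)
	{
		const char *p = addr;
		const char *end = p + len;

		for (; p < end; p += 64)
			__builtin_prefetch(p, 0 /* read */, 3 /* keep */);
	}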
+
#define RL_BUCKET_ELEMS 8
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
@@ -3675,6 +4374,8 @@ static int resp_report_luns(struct scsi_cmnd *scp,
if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
break;
int_to_scsilun(lun++, lun_p);
+ if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
+ lun_p->scsi_lun[0] |= 0x40;
}
if (j < RL_BUCKET_ELEMS)
break;
@@ -3693,17 +4394,532 @@ static int resp_report_luns(struct scsi_cmnd *scp,
return res;
}
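
The 0x40 OR above selects the SAM flat-space address method in byte 0 of the 8-byte LUN structure. A sketch of building such a LUN, covering only the 14-bit flat format (helper name is hypothetical):

	#include <stdint.h>

	static void int_to_flat_lun(uint16_t lun, uint8_t lun8[8])
	{
		int i;

		lun8[0] = 0x40 | ((lun >> 8) & 0x3f);	/* 01b = flat space */
		lun8[1] = lun & 0xff;
		for (i = 2; i < 8; i++)
			lun8[i] = 0;
	}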
+static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ bool is_bytchk3 = false;
+ u8 bytchk;
+ int ret, j;
+ u32 vnum, a_num, off;
+ const u32 lb_size = sdebug_sector_size;
+ u64 lba;
+ u8 *arr;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+
+ bytchk = (cmd[1] >> 1) & 0x3;
+ if (bytchk == 0) {
+ return 0; /* always claim internal verify okay */
+ } else if (bytchk == 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
+ return check_condition_result;
+ } else if (bytchk == 3) {
+ is_bytchk3 = true; /* 1 block sent, compared repeatedly */
+ }
+ switch (cmd[0]) {
+ case VERIFY_16:
+ lba = get_unaligned_be64(cmd + 2);
+ vnum = get_unaligned_be32(cmd + 10);
+ break;
+ case VERIFY: /* is VERIFY(10) */
+ lba = get_unaligned_be32(cmd + 2);
+ vnum = get_unaligned_be16(cmd + 7);
+ break;
+ default:
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ if (vnum == 0)
+ return 0; /* not an error */
+ a_num = is_bytchk3 ? 1 : vnum;
+ /* Treat following check like one for read (i.e. no write) access */
+ ret = check_device_access_params(scp, lba, a_num, false);
+ if (ret)
+ return ret;
+
+ arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ /* Not changing store, so only need read access */
+ sdeb_read_lock(sip);
+
+ ret = do_dout_fetch(scp, a_num, arr);
+ if (ret == -1) {
+ ret = DID_ERROR << 16;
+ goto cleanup;
+ } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, __func__, a_num * lb_size, ret);
+ }
+ if (is_bytchk3) {
+ for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
+ memcpy(arr + off, arr, lb_size);
+ }
+ ret = 0;
+ if (!comp_write_worker(sip, lba, vnum, arr, true)) {
+ mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
+ ret = check_condition_result;
+ goto cleanup;
+ }
+cleanup:
+ sdeb_read_unlock(sip);
+ kfree(arr);
+ return ret;
+}
+
+#define RZONES_DESC_HD 64
+
+/* Report zones depending on start LBA and reporting options */
+static int resp_report_zones(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned int rep_max_zones, nrz = 0;
+ int ret = 0;
+ u32 alloc_len, rep_opts, rep_len;
+ bool partial;
+ u64 lba, zs_lba;
+ u8 *arr = NULL, *desc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp = NULL;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ zs_lba = get_unaligned_be64(cmd + 2);
+ alloc_len = get_unaligned_be32(cmd + 10);
+ if (alloc_len == 0)
+ return 0; /* not an error */
+ rep_opts = cmd[14] & 0x3f;
+ partial = cmd[14] & 0x80;
+
+ if (zs_lba >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+
+ rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
+
+ arr = kzalloc(alloc_len, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ sdeb_read_lock(sip);
+
+ desc = arr + 64;
+ for (lba = zs_lba; lba < sdebug_capacity;
+ lba = zsp->z_start + zsp->z_size) {
+ if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
+ break;
+ zsp = zbc_zone(devip, lba);
+ switch (rep_opts) {
+ case 0x00:
+ /* All zones */
+ break;
+ case 0x01:
+ /* Empty zones */
+ if (zsp->z_cond != ZC1_EMPTY)
+ continue;
+ break;
+ case 0x02:
+ /* Implicit open zones */
+ if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
+ continue;
+ break;
+ case 0x03:
+ /* Explicit open zones */
+ if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
+ continue;
+ break;
+ case 0x04:
+ /* Closed zones */
+ if (zsp->z_cond != ZC4_CLOSED)
+ continue;
+ break;
+ case 0x05:
+ /* Full zones */
+ if (zsp->z_cond != ZC5_FULL)
+ continue;
+ break;
+ case 0x06:
+ case 0x07:
+ case 0x10:
+ /*
+			 * Read-only, offline and reset-WP-recommended
+			 * zones are not emulated: no zones to report.
+ */
+ continue;
+ case 0x11:
+ /* non-seq-resource set */
+ if (!zsp->z_non_seq_resource)
+ continue;
+ break;
+ case 0x3e:
+ /* All zones except gap zones. */
+ if (zbc_zone_is_gap(zsp))
+ continue;
+ break;
+ case 0x3f:
+ /* Not write pointer (conventional) zones */
+ if (zbc_zone_is_seq(zsp))
+ continue;
+ break;
+ default:
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ ret = check_condition_result;
+ goto fini;
+ }
+
+ if (nrz < rep_max_zones) {
+ /* Fill zone descriptor */
+ desc[0] = zsp->z_type;
+ desc[1] = zsp->z_cond << 4;
+ if (zsp->z_non_seq_resource)
+ desc[1] |= 1 << 1;
+ put_unaligned_be64((u64)zsp->z_size, desc + 8);
+ put_unaligned_be64((u64)zsp->z_start, desc + 16);
+ put_unaligned_be64((u64)zsp->z_wp, desc + 24);
+ desc += 64;
+ }
+
+ if (partial && nrz >= rep_max_zones)
+ break;
+
+ nrz++;
+ }
+
+ /* Report header */
+ /* Zone list length. */
+ put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
+ /* Maximum LBA */
+ put_unaligned_be64(sdebug_capacity - 1, arr + 8);
+ /* Zone starting LBA granularity. */
+ if (devip->zcap < devip->zsize)
+ put_unaligned_be64(devip->zsize, arr + 16);
+
+ rep_len = (unsigned long)desc - (unsigned long)arr;
+ ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
+
+fini:
+ sdeb_read_unlock(sip);
+ kfree(arr);
+ return ret;
+}
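
The reply assembled above is a 64-byte header followed by 64-byte zone descriptors. A schematic overlay of that layout, matching the offsets used in the code (illustrative only; the real fields are big-endian on the wire, so a raw struct cast is not portable):

	#include <stdint.h>

	struct rz_header {			/* 64 bytes */
		uint32_t zone_list_len;		/* nrz * 64 */
		uint32_t reserved;
		uint64_t max_lba;
		uint64_t zone_start_granularity; /* only if zcap < zsize */
		uint8_t pad[40];
	};

	struct rz_desc {			/* 64 bytes per zone */
		uint8_t z_type;
		uint8_t z_cond_flags;		/* condition << 4, non-seq bit 1 */
		uint8_t reserved[6];
		uint64_t z_size;		/* in logical blocks */
		uint64_t z_start;
		uint64_t z_wp;
		uint8_t pad[32];
	};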
+
+/* Logic transplanted from tcmu-runner, file_zbc.c */
+static void zbc_open_all(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC4_CLOSED)
+ zbc_open_zone(devip, &devip->zstate[i], true);
+ }
+}
+
+static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ enum sdebug_z_cond zc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ sdeb_write_lock(sip);
+
+ if (all) {
+ /* Check if all closed zones can be open */
+ if (devip->max_open &&
+ devip->nr_exp_open + devip->nr_closed > devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+ /* Open all closed zones */
+ zbc_open_all(devip);
+ goto fini;
+ }
+
+ /* Open the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zc = zsp->z_cond;
+ if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
+ goto fini;
+
+ if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_open_zone(devip, zsp, true);
+fini:
+ sdeb_write_unlock(sip);
+ return res;
+}
+
+static void zbc_close_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_close_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_close_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ sdeb_write_lock(sip);
+
+ if (all) {
+ zbc_close_all(devip);
+ goto fini;
+ }
+
+ /* Close specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_close_zone(devip, zsp);
+fini:
+ sdeb_write_unlock(sip);
+ return res;
+}
+
+static void zbc_finish_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool empty)
+{
+ enum sdebug_z_cond zc = zsp->z_cond;
+
+ if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
+ zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ zsp->z_wp = zsp->z_start + zsp->z_size;
+ zsp->z_cond = ZC5_FULL;
+ }
+}
+
+static void zbc_finish_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_finish_zone(devip, &devip->zstate[i], false);
+}
+
+static int resp_finish_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ sdeb_write_lock(sip);
+
+ if (all) {
+ zbc_finish_all(devip);
+ goto fini;
+ }
+
+ /* Finish the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_finish_zone(devip, zsp, true);
+fini:
+ sdeb_write_unlock(sip);
+ return res;
+}
+
+static void zbc_rwp_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+
+ if (!zbc_zone_is_seq(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+
+ if (zsp->z_wp > zsp->z_start)
+ memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
+ (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
+
+ zsp->z_non_seq_resource = false;
+ zsp->z_wp = zsp->z_start;
+ zsp->z_cond = ZC1_EMPTY;
+}
+
+static void zbc_rwp_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_rwp_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ sdeb_write_lock(sip);
+
+ if (all) {
+ zbc_rwp_all(devip);
+ goto fini;
+ }
+
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_rwp_zone(devip, zsp);
+fini:
+ sdeb_write_unlock(sip);
+ return res;
+}
+
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
- u32 tag = blk_mq_unique_tag(cmnd->request);
- u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+ u16 hwq;
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
+
+ hwq = blk_mq_unique_tag_to_hwq(tag);
pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
if (WARN_ON_ONCE(hwq >= submit_queues))
hwq = 0;
+
return sdebug_q_arr + hwq;
}
+static u32 get_tag(struct scsi_cmnd *cmnd)
+{
+ return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
+}
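
blk_mq_unique_tag() packs the hardware queue index into the top 16 bits of the tag and the per-queue tag into the bottom 16, which is what get_queue() unpacks. Spelled out:

	#include <stdint.h>

	static inline uint16_t unique_tag_to_hwq(uint32_t unique_tag)
	{
		return unique_tag >> 16;
	}

	static inline uint16_t unique_tag_to_tag(uint32_t unique_tag)
	{
		return unique_tag & 0xffff;
	}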
+
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
@@ -3716,7 +4932,6 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
- sd_dp->defer_t = SDEB_DEFER_NONE;
if (unlikely(aborted))
sd_dp->aborted = false;
qc_idx = sd_dp->qc_idx;
@@ -3731,12 +4946,13 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
return;
}
spin_lock_irqsave(&sqp->qc_lock, iflags);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
sqcp = &sqp->qc_arr[qc_idx];
scp = sqcp->a_cmnd;
if (unlikely(scp == NULL)) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
- sd_dp->sqa_idx, qc_idx);
+ pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
+ sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
return;
}
devip = (struct sdebug_dev_info *)scp->device->hostdata;
@@ -3775,7 +4991,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
pr_info("bypassing scsi_done() due to aborted cmd\n");
return;
}
- scp->scsi_done(scp); /* callback to mid level */
+ scsi_done(scp); /* callback to mid level */
}
/* When high resolution timer goes off this function is called. */
@@ -3798,6 +5014,118 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
static bool got_shared_uuid;
static uuid_t shared_uuid;
+static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ sector_t capacity = get_sdebug_capacity();
+ sector_t conv_capacity;
+ sector_t zstart = 0;
+ unsigned int i;
+
+ /*
+ * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
+ * a zone size allowing for at least 4 zones on the device. Otherwise,
+ * use the specified zone size checking that at least 2 zones can be
+ * created for the device.
+ */
+ if (!sdeb_zbc_zone_size_mb) {
+ devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ while (capacity < devip->zsize << 2 && devip->zsize >= 2)
+ devip->zsize >>= 1;
+ if (devip->zsize < 2) {
+ pr_err("Device capacity too small\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
+ pr_err("Zone size is not a power of 2\n");
+ return -EINVAL;
+ }
+ devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ if (devip->zsize >= capacity) {
+ pr_err("Zone size too large for device capacity\n");
+ return -EINVAL;
+ }
+ }
+
+ devip->zsize_shift = ilog2(devip->zsize);
+ devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
+
+ if (sdeb_zbc_zone_cap_mb == 0) {
+ devip->zcap = devip->zsize;
+ } else {
+ devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
+ ilog2(sdebug_sector_size);
+ if (devip->zcap > devip->zsize) {
+ pr_err("Zone capacity too large\n");
+ return -EINVAL;
+ }
+ }
+
+ conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
+ if (conv_capacity >= capacity) {
+ pr_err("Number of conventional zones too large\n");
+ return -EINVAL;
+ }
+ devip->nr_conv_zones = sdeb_zbc_nr_conv;
+ devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
+ devip->zsize_shift;
+ devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
+
+ /* Add gap zones if zone capacity is smaller than the zone size */
+ if (devip->zcap < devip->zsize)
+ devip->nr_zones += devip->nr_seq_zones;
+
+ if (devip->zmodel == BLK_ZONED_HM) {
+ /* zbc_max_open_zones can be 0, meaning "not reported" */
+ if (sdeb_zbc_max_open >= devip->nr_zones - 1)
+ devip->max_open = (devip->nr_zones - 1) / 2;
+ else
+ devip->max_open = sdeb_zbc_max_open;
+ }
+
+ devip->zstate = kcalloc(devip->nr_zones,
+ sizeof(struct sdeb_zone_state), GFP_KERNEL);
+ if (!devip->zstate)
+ return -ENOMEM;
+
+ for (i = 0; i < devip->nr_zones; i++) {
+ zsp = &devip->zstate[i];
+
+ zsp->z_start = zstart;
+
+ if (i < devip->nr_conv_zones) {
+ zsp->z_type = ZBC_ZTYPE_CNV;
+ zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+ zsp->z_wp = (sector_t)-1;
+ zsp->z_size =
+ min_t(u64, devip->zsize, capacity - zstart);
+ } else if ((zstart & (devip->zsize - 1)) == 0) {
+ if (devip->zmodel == BLK_ZONED_HM)
+ zsp->z_type = ZBC_ZTYPE_SWR;
+ else
+ zsp->z_type = ZBC_ZTYPE_SWP;
+ zsp->z_cond = ZC1_EMPTY;
+ zsp->z_wp = zsp->z_start;
+ zsp->z_size =
+ min_t(u64, devip->zcap, capacity - zstart);
+ } else {
+ zsp->z_type = ZBC_ZTYPE_GAP;
+ zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+ zsp->z_wp = (sector_t)-1;
+ zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
+ capacity - zstart);
+ }
+
+ WARN_ON_ONCE((int)zsp->z_size <= 0);
+ zstart += zsp->z_size;
+ }
+
+ return 0;
+}
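
A worked example of the sizing logic above, assuming the default 128 MiB zone size (DEF_ZBC_ZONE_SIZE_MB) and 512-byte sectors: a 2 GiB store holds 4194304 LBs, so zsize = 262144 LBs, zsize_shift = 18 and nr_zones = 16; with zone_nr_conv=2 the first two zones are conventional and the remaining fourteen are sequential-write zones.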
+
static struct sdebug_dev_info *sdebug_device_create(
struct sdebug_host_info *sdbg_host, gfp_t flags)
{
@@ -3817,6 +5145,18 @@ static struct sdebug_dev_info *sdebug_device_create(
}
}
devip->sdbg_host = sdbg_host;
+ if (sdeb_zbc_in_use) {
+ devip->zmodel = sdeb_zbc_model;
+ if (sdebug_device_create_zones(devip)) {
+ kfree(devip);
+ return NULL;
+ }
+ } else {
+ devip->zmodel = BLK_ZONED_NONE;
+ }
+ devip->create_ts = ktime_get_boottime();
+ atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
}
return devip;
@@ -3833,6 +5173,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
pr_err("Host info NULL\n");
return NULL;
}
+
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
(devip->target == sdev->id) &&
@@ -3856,7 +5197,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
open_devip->lun = sdev->lun;
open_devip->sdbg_host = sdbg_host;
atomic_set(&open_devip->num_in_q, 0);
- set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
+ set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
open_devip->used = true;
return open_devip;
}
@@ -3948,8 +5289,8 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
if (sd_dp) {
- l_defer_t = sd_dp->defer_t;
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
} else
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
@@ -3988,8 +5329,8 @@ static void stop_all_queued(void)
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
if (sd_dp) {
- l_defer_t = sd_dp->defer_t;
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
} else
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
@@ -4143,11 +5484,10 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
return SUCCESS;
}
-static void __init sdebug_build_parts(unsigned char *ramp,
- unsigned long store_size)
+static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
- struct partition *pp;
- int starts[SDEBUG_MAX_PARTS + 2];
+ struct msdos_partition *pp;
+ int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
int sectors_per_part, num_sectors, k;
int heads_by_sects, start_sec, end_sec;
@@ -4158,23 +5498,27 @@ static void __init sdebug_build_parts(unsigned char *ramp,
sdebug_num_parts = SDEBUG_MAX_PARTS;
pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
}
- num_sectors = (int)sdebug_store_sectors;
+ num_sectors = (int)get_sdebug_capacity();
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
starts[0] = sdebug_sectors_per;
- for (k = 1; k < sdebug_num_parts; ++k)
+ max_part_secs = sectors_per_part;
+ for (k = 1; k < sdebug_num_parts; ++k) {
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
+ if (starts[k] - starts[k - 1] < max_part_secs)
+ max_part_secs = starts[k] - starts[k - 1];
+ }
starts[sdebug_num_parts] = num_sectors;
starts[sdebug_num_parts + 1] = 0;
ramp[510] = 0x55; /* magic partition markings */
ramp[511] = 0xAA;
- pp = (struct partition *)(ramp + 0x1be);
+ pp = (struct msdos_partition *)(ramp + 0x1be);
for (k = 0; starts[k + 1]; ++k, ++pp) {
start_sec = starts[k];
- end_sec = starts[k + 1] - 1;
+ end_sec = starts[k] + max_part_secs - 1;
pp->boot_ind = 0;
pp->cyl = start_sec / heads_by_sects;
@@ -4226,26 +5570,15 @@ static void clear_queue_stats(void)
atomic_set(&sdebug_a_tsf, 0);
}
-static void setup_inject(struct sdebug_queue *sqp,
- struct sdebug_queued_cmd *sqcp)
+static bool inject_on_this_cmd(void)
{
- if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
- if (sdebug_every_nth > 0)
- sqcp->inj_recovered = sqcp->inj_transport
- = sqcp->inj_dif
- = sqcp->inj_dix = sqcp->inj_short
- = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
- return;
- }
- sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
- sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
- sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
- sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
- sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
- sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
- sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
+ if (sdebug_every_nth == 0)
+ return false;
+ return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
+#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -4257,8 +5590,12 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
+ bool new_sd_dp;
+ bool inject = false;
+ bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
+ int k, num_in_q, qdepth;
unsigned long iflags;
- int k, num_in_q, qdepth, inject;
+ u64 ns_from_boot = 0;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
@@ -4274,7 +5611,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- /* schedule the response at a later time if resources permit */
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
@@ -4283,7 +5619,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
num_in_q = atomic_read(&devip->num_in_q);
qdepth = cmnd->device->queue_depth;
- inject = 0;
if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
if (scsi_result) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
@@ -4297,7 +5632,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
(atomic_inc_return(&sdebug_a_tsf) >=
abs(sdebug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0);
- inject = 1;
+ inject = true;
scsi_result = device_qfull_result;
}
}
@@ -4307,44 +5642,54 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (scsi_result)
goto respond_in_thread;
- else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
- scsi_result = device_qfull_result;
+ scsi_result = device_qfull_result;
if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp,
- "%s: max_queue=%d exceeded, %s\n",
- __func__, sdebug_max_queue,
- (scsi_result ? "status: TASK SET FULL" :
- "report: host busy"));
- if (scsi_result)
- goto respond_in_thread;
- else
- return SCSI_MLQUEUE_HOST_BUSY;
+ sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
+ __func__, sdebug_max_queue);
+ goto respond_in_thread;
}
- __set_bit(k, sqp->in_use_bm);
+ set_bit(k, sqp->in_use_bm);
atomic_inc(&devip->num_in_q);
sqcp = &sqp->qc_arr[k];
sqcp->a_cmnd = cmnd;
cmnd->host_scribble = (unsigned char *)sqcp;
sd_dp = sqcp->sd_dp;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
- setup_inject(sqp, sqcp);
- if (sd_dp == NULL) {
+
+ if (!sd_dp) {
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (sd_dp == NULL)
+ if (!sd_dp) {
+ atomic_dec(&devip->num_in_q);
+ clear_bit(k, sqp->in_use_bm);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ new_sd_dp = true;
+ } else {
+ new_sd_dp = false;
}
- cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
+ /* Set the hostwide tag */
+ if (sdebug_host_max_queue)
+ sd_dp->hc_idx = get_tag(cmnd);
+
+ if (polled)
+ ns_from_boot = ktime_get_boottime_ns();
+
+ /* one of the resp_*() response functions is called here */
+ cmnd->result = pfp ? pfp(cmnd, devip) : 0;
if (cmnd->result & SDEG_RES_IMMED_MASK) {
- /*
- * This is the F_DELAY_OVERR case. No delay.
- */
cmnd->result &= ~SDEG_RES_IMMED_MASK;
delta_jiff = ndelay = 0;
}
if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
+ if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
+ if (atomic_read(&sdeb_inject_pending)) {
+ mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
+ atomic_set(&sdeb_inject_pending, 0);
+ cmnd->result = check_condition_result;
+ }
+ }
if (unlikely(sdebug_verbose && cmnd->result))
sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
@@ -4354,48 +5699,105 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
ktime_t kt;
if (delta_jiff > 0) {
- kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
- } else
- kt = ndelay;
- if (!sd_dp->init_hrt) {
- sd_dp->init_hrt = true;
- sqcp->sd_dp = sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
+ u64 ns = jiffies_to_nsecs(delta_jiff);
+
+ if (sdebug_random && ns < U32_MAX) {
+ ns = prandom_u32_max((u32)ns);
+ } else if (sdebug_random) {
+ ns >>= 12; /* scale to 4 usec precision */
+ if (ns < U32_MAX) /* over 4 hours max */
+ ns = prandom_u32_max((u32)ns);
+ ns <<= 12;
+ }
+ kt = ns_to_ktime(ns);
+ } else { /* ndelay has a 4.2 second max */
+ kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
+ (u32)ndelay;
+ if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
+ u64 d = ktime_get_boottime_ns() - ns_from_boot;
+
+ if (kt <= d) { /* elapsed duration >= kt */
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ sqcp->a_cmnd = NULL;
+ atomic_dec(&devip->num_in_q);
+ clear_bit(k, sqp->in_use_bm);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (new_sd_dp)
+ kfree(sd_dp);
+ /* call scsi_done() from this thread */
+ scsi_done(cmnd);
+ return 0;
+ }
+ /* otherwise reduce kt by elapsed time */
+ kt -= d;
+ }
+ }
+ if (polled) {
+ sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (!sd_dp->init_poll) {
+ sd_dp->init_poll = true;
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ }
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else {
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
+ sqcp->sd_dp = sd_dp;
+ hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ }
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
+ /* schedule the invocation of scsi_done() for a later time */
+ hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
- sd_dp->defer_t = SDEB_DEFER_HRT;
- hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (!sd_dp->init_wq) {
- sd_dp->init_wq = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+ if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
+ atomic_read(&sdeb_inject_pending)))
+ sd_dp->aborted = true;
+ if (polled) {
+ sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (!sd_dp->init_poll) {
+ sd_dp->init_poll = true;
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ }
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else {
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+ }
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
+ schedule_work(&sd_dp->ew.work);
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
- sd_dp->defer_t = SDEB_DEFER_WQ;
- if (unlikely(sqcp->inj_cmd_abort))
- sd_dp->aborted = true;
- schedule_work(&sd_dp->ew.work);
- if (unlikely(sqcp->inj_cmd_abort)) {
+ if (unlikely(sd_dp->aborted)) {
sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
- cmnd->request->tag);
- blk_abort_request(cmnd->request);
+ scsi_cmd_to_rq(cmnd)->tag);
+ blk_abort_request(scsi_cmd_to_rq(cmnd));
+ atomic_set(&sdeb_inject_pending, 0);
+ sd_dp->aborted = false;
}
}
- if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
- (scsi_result == device_qfull_result)))
- sdev_printk(KERN_INFO, sdp,
- "%s: num_in_q=%d +1, %s%s\n", __func__,
- num_in_q, (inject ? "<inject> " : ""),
- "status: TASK SET FULL");
+ if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
+ sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
+ num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
return 0;
respond_in_thread: /* call back to mid-layer using invocation thread */
@@ -4403,7 +5805,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result &= ~SDEG_RES_IMMED_MASK;
if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
return 0;
}
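
The random-delay scaling in the hrtimer branch above works around prandom_u32_max() accepting only a u32 bound: delays of about 4.3 s and up are shifted down to 4 usec granularity before randomizing, then shifted back. A userspace analog (rand() stands in for the kernel PRNG):

	#include <stdint.h>
	#include <stdlib.h>

	static uint64_t random_delay_ns(uint64_t ns)
	{
		if (ns == 0)
			return 0;
		if (ns < UINT32_MAX)
			return rand() % (uint32_t)ns;
		ns >>= 12;			/* scale to 4 usec precision */
		if (ns < UINT32_MAX)
			ns = rand() % (uint32_t)ns;
		return ns << 12;
	}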
@@ -4426,55 +5828,70 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
-module_param_string(inq_vendor, sdebug_inq_vendor_id,
- sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
+module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
- sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
- sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
+module_param_string(inq_vendor, sdebug_inq_vendor_id,
+ sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
+module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
-module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
+module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
+module_param_named(medium_error_count, sdebug_medium_error_count, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(medium_error_start, sdebug_medium_error_start, int,
+ S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
+module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(per_host_store, sdebug_per_host_store, bool,
+ S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
+module_param_named(poll_queues, poll_queues, int, S_IRUGO);
+module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
-module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
+module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
+module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
+module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
+module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
+module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
+module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
-MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(add_host, "add n hosts; if negative (sysfs only), remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
@@ -4487,36 +5904,44 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
-MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(host_max_queue,
+ "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
SDEBUG_VERSION "\")");
+MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(lbprz,
+ "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(lbprz,
- "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
+MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
-MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
+MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
+MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
+MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
+MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
@@ -4527,6 +5952,11 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
+MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
+MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
+MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
+MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
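+
+/*
+ * Illustrative load-time combination of the zoned parameters above
+ * (values are examples, not defaults):
+ *
+ *   modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=0
+ *
+ * simulates a host-managed ZBC disk with 128 MiB zones and no
+ * conventional zones. zbc= also accepts 'none', 'aware' and the numeric
+ * forms 0/1/2 (see sdeb_zbc_model_str() below).
+ */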
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
@@ -4575,6 +6005,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
int f, j, l;
struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -4594,11 +6025,12 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
dix_reads, dix_writes, dif_errors);
seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
sdebug_statistics);
- seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+ seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
atomic_read(&sdebug_cmnd_count),
atomic_read(&sdebug_completions),
"miss_cpus", atomic_read(&sdebug_miss_cpus),
- atomic_read(&sdebug_a_tsf));
+ atomic_read(&sdebug_a_tsf),
+ atomic_read(&sdeb_mq_poll_count));
seq_printf(m, "submit_queues=%d\n", submit_queues);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
@@ -4610,6 +6042,34 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
"first,last bits", f, l);
}
}
+
+ seq_printf(m, "this host_no=%d\n", host->host_no);
+ if (!xa_empty(per_store_ap)) {
+ bool niu;
+ int idx;
+ unsigned long l_idx;
+ struct sdeb_store_info *sip;
+
+ seq_puts(m, "\nhost list:\n");
+ j = 0;
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ idx = sdhp->si_idx;
+ seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
+ sdhp->shost->host_no, idx);
+ ++j;
+ }
+ seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
+ sdeb_most_recent_idx);
+ j = 0;
+ xa_for_each(per_store_ap, l_idx, sip) {
+ niu = xa_get_mark(per_store_ap, l_idx,
+ SDEB_XA_NOT_IN_USE);
+ idx = (int)l_idx;
+ seq_printf(m, " %d: idx=%d%s\n", j, idx,
+ (niu ? " not_in_use" : ""));
+ ++j;
+ }
+ }
return 0;
}
@@ -4733,7 +6193,13 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
{
int n;
+ /* Cannot change from or to TYPE_ZBC with sysfs */
+ if (sdebug_ptype == TYPE_ZBC)
+ return -EINVAL;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ if (n == TYPE_ZBC)
+ return -EINVAL;
sdebug_ptype = n;
return count;
}
@@ -4765,25 +6231,41 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n, idx;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ bool want_store = (n == 0);
+ struct sdebug_host_info *sdhp;
+
n = (n > 0);
sdebug_fake_rw = (sdebug_fake_rw > 0);
- if (sdebug_fake_rw != n) {
- if ((0 == n) && (NULL == fake_storep)) {
- unsigned long sz =
- (unsigned long)sdebug_dev_size_mb *
- 1048576;
-
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 9\n");
- return -ENOMEM;
+ if (sdebug_fake_rw == n)
+ return count; /* not transitioning so do nothing */
+
+ if (want_store) { /* 1 --> 0 transition, set up store */
+ if (sdeb_first_idx < 0) {
+ idx = sdebug_add_store();
+ if (idx < 0)
+ return idx;
+ } else {
+ idx = sdeb_first_idx;
+ xa_clear_mark(per_store_ap, idx,
+ SDEB_XA_NOT_IN_USE);
+ }
+ /* make all hosts use same store */
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ if (sdhp->si_idx != idx) {
+ xa_set_mark(per_store_ap, sdhp->si_idx,
+ SDEB_XA_NOT_IN_USE);
+ sdhp->si_idx = idx;
}
}
- sdebug_fake_rw = n;
+ sdeb_most_recent_idx = idx;
+ } else { /* 0 --> 1 transition is trigger for shrink */
+ sdebug_erase_all_stores(true /* apart from first */);
}
+ sdebug_fake_rw = n;
return count;
}
return -EINVAL;
@@ -4831,6 +6313,24 @@ static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(dev_size_mb);
+static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
+}
+
+static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_per_host_store = v;
+ return count;
+}
+static DRIVER_ATTR_RW(per_host_store);
+
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
@@ -4845,20 +6345,67 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
{
int nth;
+ char work[20];
- if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
- sdebug_every_nth = nth;
- if (nth && !sdebug_statistics) {
- pr_info("every_nth needs statistics=1, set it\n");
- sdebug_statistics = true;
+ if (sscanf(buf, "%10s", work) == 1) {
+ if (strncasecmp(work, "0x", 2) == 0) {
+ if (kstrtoint(work + 2, 16, &nth) == 0)
+ goto every_nth_done;
+ } else {
+ if (kstrtoint(work, 10, &nth) == 0)
+ goto every_nth_done;
}
- tweak_cmnd_count();
- return count;
}
return -EINVAL;
+
+every_nth_done:
+ sdebug_every_nth = nth;
+ if (nth && !sdebug_statistics) {
+ pr_info("every_nth needs statistics=1, set it\n");
+ sdebug_statistics = true;
+ }
+ tweak_cmnd_count();
+ return count;
}
static DRIVER_ATTR_RW(every_nth);
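
/*
 * every_nth_store() accepts either base, so writing "0x100" or "256" to
 * /sys/bus/pseudo/drivers/scsi_debug/every_nth sets the same value (an
 * illustrative use; see the note above the attribute array further down).
 */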
+static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
+}
+static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+ bool changed;
+
+ if (kstrtoint(buf, 0, &n))
+ return -EINVAL;
+ if (n >= 0) {
+ if (n > (int)SAM_LUN_AM_FLAT) {
+ pr_warn("only LUN address methods 0 and 1 are supported\n");
+ return -EINVAL;
+ }
+ changed = ((int)sdebug_lun_am != n);
+ sdebug_lun_am = n;
+ if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(lun_format);
+
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
@@ -4911,7 +6458,8 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
struct sdebug_queue *sqp;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
- (n <= SDEBUG_CANQUEUE)) {
+ (n <= SDEBUG_CANQUEUE) &&
+ (sdebug_host_max_queue == 0)) {
block_unblock_all_queues(true);
k = 0;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
@@ -4934,6 +6482,34 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
}
static DRIVER_ATTR_RW(max_queue);
+static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
+}
+
+static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
+}
+
+static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_no_rwlock = v;
+ return count;
+}
+static DRIVER_ATTR_RW(no_rwlock);
+
+/*
+ * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
+ * in range [0, sdebug_host_max_queue), we can't change it.
+ */
+static DRIVER_ATTR_RO(host_max_queue);
+
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
@@ -4956,6 +6532,10 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
int n;
bool changed;
+ /* Ignore capacity change for ZBC drives for now */
+ if (sdeb_zbc_in_use)
+ return -ENOTSUPP;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
changed = (sdebug_virtual_gb != n);
sdebug_virtual_gb = n;
@@ -4983,26 +6563,42 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
+	/* the absolute number of currently active hosts is what is shown */
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-static int sdebug_add_adapter(void);
-static void sdebug_remove_adapter(void);
-
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
if (delta_hosts > 0) {
do {
- sdebug_add_adapter();
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
+ }
} while (--delta_hosts);
} else if (delta_hosts < 0) {
do {
- sdebug_remove_adapter();
+ sdebug_do_remove_host(false);
} while (++delta_hosts);
}
return count;
@@ -5086,14 +6682,19 @@ static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
- ssize_t count;
+ ssize_t count = 0;
if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
- count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
- (int)map_size, map_storep);
+ if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
+ struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
+
+ if (sip)
+ count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ (int)map_size, sip->map_storep);
+ }
buf[count++] = '\n';
buf[count] = '\0';
@@ -5101,6 +6702,24 @@ static ssize_t map_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(map);
+static ssize_t random_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
+}
+
+static ssize_t random_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_random = v;
+ return count;
+}
+static DRIVER_ATTR_RW(random);
+
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
@@ -5177,12 +6796,57 @@ static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
}
static DRIVER_ATTR_RW(cdb_len);
+static const char * const zbc_model_strs_a[] = {
+ [BLK_ZONED_NONE] = "none",
+ [BLK_ZONED_HA] = "host-aware",
+ [BLK_ZONED_HM] = "host-managed",
+};
+
+static const char * const zbc_model_strs_b[] = {
+ [BLK_ZONED_NONE] = "no",
+ [BLK_ZONED_HA] = "aware",
+ [BLK_ZONED_HM] = "managed",
+};
+
+static const char * const zbc_model_strs_c[] = {
+ [BLK_ZONED_NONE] = "0",
+ [BLK_ZONED_HA] = "1",
+ [BLK_ZONED_HM] = "2",
+};
+
+static int sdeb_zbc_model_str(const char *cp)
+{
+ int res = sysfs_match_string(zbc_model_strs_a, cp);
+
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_b, cp);
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_c, cp);
+ if (res < 0)
+ return -EINVAL;
+ }
+ }
+ return res;
+}
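+
+/*
+ * So, illustratively, "host-managed", "managed" and "2" all resolve to
+ * BLK_ZONED_HM, while an unrecognized string such as "foo" yields -EINVAL.
+ */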
+
+static ssize_t zbc_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ zbc_model_strs_a[sdeb_zbc_model]);
+}
+static DRIVER_ATTR_RO(zbc);
+
+static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
+}
+static DRIVER_ATTR_RO(tur_ms_to_ready);
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
directory) is that auxiliary actions can be triggered when an attribute
- is changed. For example see: sdebug_add_host_store() above.
+ is changed. For example see: add_host_store() above.
*/
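
/*
 * Illustrative use of the add_host attribute described above: writing "2"
 * to /sys/bus/pseudo/drivers/scsi_debug/add_host builds two more hosts,
 * while writing "-1" removes the most recently added one (see
 * add_host_store()).
 */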
static struct attribute *sdebug_drv_attrs[] = {
@@ -5191,17 +6855,21 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ptype.attr,
&driver_attr_dsense.attr,
&driver_attr_fake_rw.attr,
+ &driver_attr_host_max_queue.attr,
&driver_attr_no_lun_0.attr,
&driver_attr_num_tgts.attr,
&driver_attr_dev_size_mb.attr,
&driver_attr_num_parts.attr,
&driver_attr_every_nth.attr,
+ &driver_attr_lun_format.attr,
&driver_attr_max_luns.attr,
&driver_attr_max_queue.attr,
+ &driver_attr_no_rwlock.attr,
&driver_attr_no_uld.attr,
&driver_attr_scsi_level.attr,
&driver_attr_virtual_gb.attr,
&driver_attr_add_host.attr,
+ &driver_attr_per_host_store.attr,
&driver_attr_vpd_use_hostno.attr,
&driver_attr_sector_size.attr,
&driver_attr_statistics.attr,
@@ -5211,12 +6879,15 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_guard.attr,
&driver_attr_ato.attr,
&driver_attr_map.attr,
+ &driver_attr_random.attr,
&driver_attr_removable.attr,
&driver_attr_host_lock.attr,
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
&driver_attr_cdb_len.attr,
+ &driver_attr_tur_ms_to_ready.attr,
+ &driver_attr_zbc.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5225,11 +6896,13 @@ static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
+ bool want_store = (sdebug_fake_rw == 0);
unsigned long sz;
- int host_to_add;
- int k;
- int ret;
+ int k, ret, hosts_to_add;
+ int idx = -1;
+ ramdisk_lck_a[0] = &atomic_rw;
+ ramdisk_lck_a[1] = &atomic_rw2;
atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
@@ -5282,9 +6955,19 @@ static int __init scsi_debug_init(void)
pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
return -EINVAL;
}
+
+ sdebug_lun_am = sdebug_lun_am_i;
+ if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
+ pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
+ sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
+ }
+
if (sdebug_max_luns > 256) {
- pr_warn("max_luns can be no more than 256, use default\n");
- sdebug_max_luns = DEF_MAX_LUNS;
+ if (sdebug_max_luns > 16384) {
+ pr_warn("max_luns can be no more than 16384, use default\n");
+ sdebug_max_luns = DEF_MAX_LUNS;
+ }
+ sdebug_lun_am = SAM_LUN_AM_FLAT;
}
if (sdebug_lowest_aligned > 0x3fff) {
@@ -5296,6 +6979,26 @@ static int __init scsi_debug_init(void)
pr_err("submit_queues must be 1 or more\n");
return -EINVAL;
}
+
+ if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
+ pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
+ return -EINVAL;
+ }
+
+ if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
+ (sdebug_host_max_queue < 0)) {
+ pr_err("host_max_queue must be in range [0 %d]\n",
+ SDEBUG_CANQUEUE);
+ return -EINVAL;
+ }
+
+ if (sdebug_host_max_queue &&
+ (sdebug_max_queue != sdebug_host_max_queue)) {
+ sdebug_max_queue = sdebug_host_max_queue;
+ pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
+ sdebug_max_queue);
+ }
+
sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
GFP_KERNEL);
if (sdebug_q_arr == NULL)
@@ -5303,6 +7006,41 @@ static int __init scsi_debug_init(void)
for (k = 0; k < submit_queues; ++k)
spin_lock_init(&sdebug_q_arr[k].qc_lock);
+ /*
+ * check for host managed zoned block device specified with
+ * ptype=0x14 or zbc=XXX.
+ */
+ if (sdebug_ptype == TYPE_ZBC) {
+ sdeb_zbc_model = BLK_ZONED_HM;
+ } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
+ k = sdeb_zbc_model_str(sdeb_zbc_model_s);
+ if (k < 0) {
+ ret = k;
+ goto free_q_arr;
+ }
+ sdeb_zbc_model = k;
+ switch (sdeb_zbc_model) {
+ case BLK_ZONED_NONE:
+ case BLK_ZONED_HA:
+ sdebug_ptype = TYPE_DISK;
+ break;
+ case BLK_ZONED_HM:
+ sdebug_ptype = TYPE_ZBC;
+ break;
+ default:
+ pr_err("Invalid ZBC model\n");
+ ret = -EINVAL;
+ goto free_q_arr;
+ }
+ }
+ if (sdeb_zbc_model != BLK_ZONED_NONE) {
+ sdeb_zbc_in_use = true;
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
+ }
+
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
if (sdebug_dev_size_mb < 1)
sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)sdebug_dev_size_mb * 1048576;
@@ -5325,36 +7063,6 @@ static int __init scsi_debug_init(void)
sdebug_cylinders_per = (unsigned long)sdebug_capacity /
(sdebug_sectors_per * sdebug_heads);
}
-
- if (sdebug_fake_rw == 0) {
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 1\n");
- ret = -ENOMEM;
- goto free_q_arr;
- }
- if (sdebug_num_parts > 0)
- sdebug_build_parts(fake_storep, sz);
- }
-
- if (sdebug_dix) {
- int dif_size;
-
- dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
- dif_storep = vmalloc(dif_size);
-
- pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
-
- if (dif_storep == NULL) {
- pr_err("out of mem. (DIX)\n");
- ret = -ENOMEM;
- goto free_vm;
- }
-
- memset(dif_storep, 0xff, dif_size);
- }
-
- /* Logical Block Provisioning */
if (scsi_debug_lbp()) {
sdebug_unmap_max_blocks =
clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
@@ -5370,26 +7078,16 @@ static int __init scsi_debug_init(void)
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
ret = -EINVAL;
- goto free_vm;
+ goto free_q_arr;
}
-
- map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- map_storep = vmalloc(array_size(sizeof(long),
- BITS_TO_LONGS(map_size)));
-
- pr_info("%lu provisioning blocks\n", map_size);
-
- if (map_storep == NULL) {
- pr_err("out of mem. (MAP)\n");
- ret = -ENOMEM;
- goto free_vm;
+ }
+ xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ if (want_store) {
+ idx = sdebug_add_store();
+ if (idx < 0) {
+ ret = idx;
+ goto free_q_arr;
}
-
- bitmap_zero(map_storep, map_size);
-
- /* Map first 1KB for partition table */
- if (sdebug_num_parts)
- map_region(0, 2);
}
pseudo_primary = root_device_register("pseudo_0");
@@ -5409,18 +7107,28 @@ static int __init scsi_debug_init(void)
goto bus_unreg;
}
- host_to_add = sdebug_add_host;
+ hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
- pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
+ for (k = 0; k < hosts_to_add; k++) {
+ if (want_store && k == 0) {
+ ret = sdebug_add_host_helper(idx);
+ if (ret < 0) {
+ pr_err("add_host_helper k=%d, error=%d\n",
+ k, -ret);
+ break;
+ }
+ } else {
+ ret = sdebug_do_add_host(want_store &&
+ sdebug_per_host_store);
+ if (ret < 0) {
+ pr_err("add_host k=%d error=%d\n", k, -ret);
+ break;
+ }
}
}
-
if (sdebug_verbose)
- pr_info("built %d host(s)\n", sdebug_add_host);
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
return 0;
@@ -5429,9 +7137,7 @@ bus_unreg:
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
+ sdebug_erase_store(idx, NULL);
free_q_arr:
kfree(sdebug_q_arr);
return ret;
@@ -5439,19 +7145,18 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k = sdebug_add_host;
+ int k = sdebug_num_hosts;
stop_all_queued();
for (; k; k--)
- sdebug_remove_adapter();
+ sdebug_do_remove_host(true);
free_all_queued();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
+ sdebug_erase_all_stores(false);
+ xa_destroy(per_store_ap);
kfree(sdebug_q_arr);
}
@@ -5466,29 +7171,146 @@ static void sdebug_release_adapter(struct device *dev)
kfree(sdbg_host);
}
-static int sdebug_add_adapter(void)
+/* idx must be valid; if sip is NULL it will be obtained using idx */
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
- int k, devs_per_host;
- int error = 0;
+ if (idx < 0)
+ return;
+ if (!sip) {
+ if (xa_empty(per_store_ap))
+ return;
+ sip = xa_load(per_store_ap, idx);
+ if (!sip)
+ return;
+ }
+ vfree(sip->map_storep);
+ vfree(sip->dif_storep);
+ vfree(sip->storep);
+ xa_erase(per_store_ap, idx);
+ kfree(sip);
+}
+
+/* Assume apart_from_first==false only in shutdown case. */
+static void sdebug_erase_all_stores(bool apart_from_first)
+{
+ unsigned long idx;
+ struct sdeb_store_info *sip = NULL;
+
+ xa_for_each(per_store_ap, idx, sip) {
+ if (apart_from_first)
+ apart_from_first = false;
+ else
+ sdebug_erase_store(idx, sip);
+ }
+ if (apart_from_first)
+ sdeb_most_recent_idx = sdeb_first_idx;
+}
+
+/*
+ * Returns store xarray new element index (idx) if >=0 else negated errno.
+ * Limit the number of stores to 65536.
+ */
+static int sdebug_add_store(void)
+{
+ int res;
+ u32 n_idx;
+ unsigned long iflags;
+ unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
+ struct sdeb_store_info *sip = NULL;
+ struct xa_limit xal = { .max = 1 << 16, .min = 0 };
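+	/* struct xa_limit bounds are inclusive, so indices 0..65536 fit */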
+
+ sip = kzalloc(sizeof(*sip), GFP_KERNEL);
+ if (!sip)
+ return -ENOMEM;
+
+ xa_lock_irqsave(per_store_ap, iflags);
+ res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
+ if (unlikely(res < 0)) {
+ xa_unlock_irqrestore(per_store_ap, iflags);
+ kfree(sip);
+ pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
+ return res;
+ }
+ sdeb_most_recent_idx = n_idx;
+ if (sdeb_first_idx < 0)
+ sdeb_first_idx = n_idx;
+ xa_unlock_irqrestore(per_store_ap, iflags);
+
+ res = -ENOMEM;
+ sip->storep = vzalloc(sz);
+ if (!sip->storep) {
+ pr_err("user data oom\n");
+ goto err;
+ }
+ if (sdebug_num_parts > 0)
+ sdebug_build_parts(sip->storep, sz);
+
+ /* DIF/DIX: what T10 calls Protection Information (PI) */
+ if (sdebug_dix) {
+ int dif_size;
+
+ dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
+ sip->dif_storep = vmalloc(dif_size);
+
+ pr_info("dif_storep %u bytes @ %pK\n", dif_size,
+ sip->dif_storep);
+
+ if (!sip->dif_storep) {
+ pr_err("DIX oom\n");
+ goto err;
+ }
+ memset(sip->dif_storep, 0xff, dif_size);
+ }
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
+ map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+ sip->map_storep = vmalloc(array_size(sizeof(long),
+ BITS_TO_LONGS(map_size)));
+
+ pr_info("%lu provisioning blocks\n", map_size);
+
+ if (!sip->map_storep) {
+ pr_err("LBP map oom\n");
+ goto err;
+ }
+
+ bitmap_zero(sip->map_storep, map_size);
+
+ /* Map first 1KB for partition table */
+ if (sdebug_num_parts)
+ map_region(sip, 0, 2);
+ }
+
+ rwlock_init(&sip->macc_lck);
+ return (int)n_idx;
+err:
+ sdebug_erase_store((int)n_idx, sip);
+ pr_warn("%s: failed, errno=%d\n", __func__, -res);
+ return res;
+}
+
+static int sdebug_add_host_helper(int per_host_idx)
+{
+ int k, devs_per_host, idx;
+ int error = -ENOMEM;
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
- if (sdbg_host == NULL) {
- pr_err("out of memory at line %d\n", __LINE__);
+ if (!sdbg_host)
return -ENOMEM;
- }
+ idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
+ if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
+ xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ sdbg_host->si_idx = idx;
INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
- if (!sdbg_devinfo) {
- pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ if (!sdbg_devinfo)
goto clean;
- }
}
spin_lock(&sdebug_host_list_lock);
@@ -5498,44 +7320,77 @@ static int sdebug_add_adapter(void)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
-
if (error)
goto clean;
- ++sdebug_add_host;
- return error;
+ ++sdebug_num_hosts;
+ return 0;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
-
kfree(sdbg_host);
+ pr_warn("%s: failed, errno=%d\n", __func__, -error);
return error;
}
-static void sdebug_remove_adapter(void)
+static int sdebug_do_add_host(bool mk_new_store)
+{
+ int ph_idx = sdeb_most_recent_idx;
+
+ if (mk_new_store) {
+ ph_idx = sdebug_add_store();
+ if (ph_idx < 0)
+ return ph_idx;
+ }
+ return sdebug_add_host_helper(ph_idx);
+}
+
+static void sdebug_do_remove_host(bool the_end)
{
+ int idx = -1;
struct sdebug_host_info *sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host2;
spin_lock(&sdebug_host_list_lock);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
- list_del(&sdbg_host->host_list);
+ idx = sdbg_host->si_idx;
+ }
+ if (!the_end && idx >= 0) {
+ bool unique = true;
+
+ list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
+ if (sdbg_host2 == sdbg_host)
+ continue;
+ if (idx == sdbg_host2->si_idx) {
+ unique = false;
+ break;
+ }
+ }
+ if (unique) {
+ xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ if (idx == sdeb_most_recent_idx)
+ --sdeb_most_recent_idx;
+ }
}
+ if (sdbg_host)
+ list_del(&sdbg_host->host_list);
spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
device_unregister(&sdbg_host->dev);
- --sdebug_add_host;
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -5551,12 +7406,15 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
}
num_in_q = atomic_read(&devip->num_in_q);
+ if (qdepth > SDEBUG_CANQUEUE) {
+		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n",
+			__func__, qdepth, SDEBUG_CANQUEUE);
+		qdepth = SDEBUG_CANQUEUE;
+ }
if (qdepth < 1)
qdepth = 1;
- /* allow to exceed max host qc_arr elements for testing */
- if (qdepth > SDEBUG_CANQUEUE + 10)
- qdepth = SDEBUG_CANQUEUE + 10;
- scsi_change_queue_depth(sdev, qdepth);
+ if (qdepth != sdev->queue_depth)
+ scsi_change_queue_depth(sdev, qdepth);
if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
@@ -5580,10 +7438,170 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false;
}
-static bool fake_host_busy(struct scsi_cmnd *scp)
+/* Response to TUR or media access command when device stopped */
+static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
- (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
+ int stopped_state;
+ u64 diff_ns = 0;
+ ktime_t now_ts = ktime_get_boottime();
+ struct scsi_device *sdp = scp->device;
+
+ stopped_state = atomic_read(&devip->stopped);
+ if (stopped_state == 2) {
+ if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
+ diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
+ if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
+ /* tur_ms_to_ready timer extinguished */
+ atomic_set(&devip->stopped, 0);
+ return 0;
+ }
+ }
+ mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: Not ready: in process of becoming ready\n", my_name);
+ if (scp->cmnd[0] == TEST_UNIT_READY) {
+ u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
+
+ if (diff_ns <= tur_nanosecs_to_ready)
+ diff_ns = tur_nanosecs_to_ready - diff_ns;
+ else
+ diff_ns = tur_nanosecs_to_ready;
+ /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
+ do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
+ scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ diff_ns);
+ return check_condition_result;
+ }
+ }
+ mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
+ my_name);
+ return check_condition_result;
+}
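+
+/*
+ * Worked example (illustrative): with tur_ms_to_ready=2000 and a TEST UNIT
+ * READY arriving 500 ms after device creation, diff_ns becomes 1.5e9 after
+ * the subtraction above and do_div() reduces it to 1500, so the sense
+ * INFORMATION field reports the milliseconds remaining until ready.
+ */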
+
+static void sdebug_map_queues(struct Scsi_Host *shost)
+{
+ int i, qoff;
+
+ if (shost->nr_hw_queues == 1)
+ return;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = submit_queues - poll_queues;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = poll_queues;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ map->queue_offset = qoff;
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ }
+}
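+
+/*
+ * Resulting layout, illustratively: with submit_queues=4 and poll_queues=1,
+ * HCTX_TYPE_DEFAULT gets 3 queues at offset 0 and HCTX_TYPE_POLL gets 1
+ * queue at offset 3; HCTX_TYPE_READ keeps nr_queues == 0 and is skipped.
+ */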
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ bool first;
+ bool retiring = false;
+ int num_entries = 0;
+ unsigned int qc_idx = 0;
+ unsigned long iflags;
+ ktime_t kt_from_boot = ktime_get_boottime();
+ struct sdebug_queue *sqp;
+ struct sdebug_queued_cmd *sqcp;
+ struct scsi_cmnd *scp;
+ struct sdebug_dev_info *devip;
+ struct sdebug_defer *sd_dp;
+
+ sqp = sdebug_q_arr + queue_num;
+
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ goto unlock;
+
+ for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
+ if (first) {
+ first = false;
+ if (!test_bit(qc_idx, sqp->in_use_bm))
+ continue;
+ } else {
+ qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
+ }
+ if (qc_idx >= sdebug_max_queue)
+ break;
+
+ sqcp = &sqp->qc_arr[qc_idx];
+ sd_dp = sqcp->sd_dp;
+ if (unlikely(!sd_dp))
+ continue;
+ scp = sqcp->a_cmnd;
+ if (unlikely(scp == NULL)) {
+ pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
+ queue_num, qc_idx, __func__);
+ break;
+ }
+ if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
+ if (kt_from_boot < sd_dp->cmpl_ts)
+ continue;
+
+		} else	/* ignore non-REQ_POLLED requests */
+ continue;
+ devip = (struct sdebug_dev_info *)scp->device->hostdata;
+ if (likely(devip))
+ atomic_dec(&devip->num_in_q);
+ else
+ pr_err("devip=NULL from %s\n", __func__);
+ if (unlikely(atomic_read(&retired_max_queue) > 0))
+ retiring = true;
+
+ sqcp->a_cmnd = NULL;
+ if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+ pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
+ sqp, queue_num, qc_idx, __func__);
+ break;
+ }
+ if (unlikely(retiring)) { /* user has reduced max_queue */
+ int k, retval;
+
+ retval = atomic_read(&retired_max_queue);
+ if (qc_idx >= retval) {
+ pr_err("index %d too large\n", retval);
+ break;
+ }
+ k = find_last_bit(sqp->in_use_bm, retval);
+ if ((k < sdebug_max_queue) || (k == retval))
+ atomic_set(&retired_max_queue, 0);
+ else
+ atomic_set(&retired_max_queue, k + 1);
+ }
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ scsi_done(scp); /* callback to mid level */
+ num_entries++;
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
+ break;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+
+ if (num_entries > 0)
+ atomic_add(num_entries, &sdeb_mq_poll_count);
+ return num_entries;
}
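
/*
 * The running total kept in sdeb_mq_poll_count above is what
 * scsi_debug_show_info() reports as "mq_polls" earlier in this file.
 */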
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
@@ -5599,14 +7617,20 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
int k, na;
int errsts = 0;
+ u64 lun_index = sdp->lun & 0x3FFF;
u32 flags;
u16 sa;
u8 opcode = cmd[0];
bool has_wlun_rl;
+ bool inject_now;
scsi_set_resid(scp, 0);
- if (sdebug_statistics)
+ if (sdebug_statistics) {
atomic_inc(&sdebug_cmnd_count);
+ inject_now = inject_on_this_cmd();
+ } else {
+ inject_now = false;
+ }
if (unlikely(sdebug_verbose &&
!(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
char b[120];
@@ -5622,12 +7646,12 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
(u32)cmd[k]);
}
sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
- blk_mq_unique_tag(scp->request), b);
+ blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
}
- if (fake_host_busy(scp))
+ if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
- if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
+ if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
goto err_out;
sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
@@ -5638,6 +7662,9 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
if (NULL == devip)
goto err_out;
}
+ if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
+ atomic_set(&sdeb_inject_pending, 1);
+
na = oip->num_attached;
r_pfp = oip->pfp;
if (na) { /* multiple commands with this opcode */
@@ -5702,14 +7729,11 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
if (errsts)
goto check_cond;
}
- if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
- mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
- if (sdebug_verbose)
- sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
- "%s\n", my_name, "initializing command "
- "required");
- errsts = check_condition_result;
- goto fini;
+ if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
+ atomic_read(&devip->stopped))) {
+ errsts = resp_not_ready(scp, devip);
+ if (errsts)
+ goto fini;
}
if (sdebug_fake_rw && (F_FAKE_RW & flags))
goto fini;
@@ -5723,7 +7747,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
fini:
- if (F_DELAY_OVERR & flags)
+ if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
sdebug_ndelay > 10000)) {
@@ -5759,6 +7783,8 @@ static struct scsi_host_template sdebug_driver_template = {
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
.change_queue_depth = sdebug_change_qdepth,
+ .map_queues = sdebug_map_queues,
+ .mq_poll = sdebug_blk_mq_poll,
.eh_abort_handler = scsi_debug_abort,
.eh_device_reset_handler = scsi_debug_device_reset,
.eh_target_reset_handler = scsi_debug_target_reset,
@@ -5784,6 +7810,7 @@ static int sdebug_driver_probe(struct device *dev)
sdbg_host = to_sdebug_host(dev);
sdebug_driver_template.can_queue = sdebug_max_queue;
+ sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
if (!sdebug_clustering)
sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
@@ -5798,9 +7825,36 @@ static int sdebug_driver_probe(struct device *dev)
my_name, submit_queues, nr_cpu_ids);
submit_queues = nr_cpu_ids;
}
- /* Decide whether to tell scsi subsystem that we want mq */
- /* Following should give the same answer for each host */
+ /*
+ * Decide whether to tell scsi subsystem that we want mq. The
+ * following should give the same answer for each host.
+ */
hpnt->nr_hw_queues = submit_queues;
+ if (sdebug_host_max_queue)
+ hpnt->host_tagset = 1;
+
+ /* poll queues are possible for nr_hw_queues > 1 */
+ if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
+ pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
+ my_name, poll_queues, hpnt->nr_hw_queues);
+ poll_queues = 0;
+ }
+
+ /*
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+ * left over for non-polled I/O.
+	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
+ */
+ if (poll_queues >= submit_queues) {
+ if (submit_queues < 3)
+ pr_warn("%s: trim poll_queues to 1\n", my_name);
+ else
+ pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
+ my_name, submit_queues - 1);
+ poll_queues = 1;
+ }
+ if (poll_queues)
+ hpnt->nr_maps = 3;
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
@@ -5865,34 +7919,30 @@ static int sdebug_driver_probe(struct device *dev)
pr_err("scsi_add_host failed\n");
error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else {
scsi_scan_host(hpnt);
+ }
return error;
}
-static int sdebug_driver_remove(struct device *dev)
+static void sdebug_driver_remove(struct device *dev)
{
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = to_sdebug_host(dev);
- if (!sdbg_host) {
- pr_err("Unable to locate host info\n");
- return -ENODEV;
- }
-
scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
scsi_host_put(sdbg_host->shost);
- return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index c19ea7ab54cb..217b70c678c3 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,8 +8,8 @@
#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
static const char *const scsi_cmd_flags[] = {
SCSI_CMD_FLAG_NAME(TAGGED),
- SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
SCSI_CMD_FLAG_NAME(INITIALIZED),
+ SCSI_CMD_FLAG_NAME(LAST),
};
#undef SCSI_CMD_FLAG_NAME
@@ -33,14 +33,12 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
- struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
- const u8 *const cdb = READ_ONCE(cmd->cmnd);
char buf[80] = "(?)";
- if (cdb)
- __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
+ __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
cmd->retries, cmd->result);
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index eed31021e788..c7080454aea9 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -171,6 +171,7 @@ static struct {
{"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
+ {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36},
{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
@@ -184,6 +185,7 @@ static struct {
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
{"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
@@ -239,6 +241,7 @@ static struct {
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
BLIST_INQUIRY_36},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
@@ -558,7 +561,8 @@ static int scsi_dev_info_list_add_str(char *dev_list)
}
/**
- * get_device_flags - get device specific flags from the dynamic device list.
+ * scsi_get_device_flags - get device specific flags from the dynamic
+ * device list.
* @sdev: &scsi_device to get flags for
* @vendor: vendor name
* @model: model name
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 42f0550d6b11..7b56e00c7df6 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * SCSI device handler infrastruture.
+ * SCSI device handler infrastructure.
*
* Copyright IBM Corporation, 2007
* Authors:
@@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
{"LENOVO", "DE_Series", "rdac", },
+ {"FUJITSU", "ETERNUS_AHB", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ae2fa170f6ad..6995c8979230 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,8 +50,6 @@
#include <asm/unaligned.h>
-static void scsi_eh_done(struct scsi_cmnd *scmd);
-
/*
* These should *probably* be handled by the host itself.
* Since it is allowed to sleep, it probably should.
@@ -60,8 +58,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd);
#define HOST_RESET_SETTLE_TIME (10)
static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
-static int scsi_try_to_abort_cmd(struct scsi_host_template *,
- struct scsi_cmnd *);
+static enum scsi_disposition scsi_try_to_abort_cmd(struct scsi_host_template *,
+ struct scsi_cmnd *);
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
@@ -116,13 +114,32 @@ static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
return 1;
}
+static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd)
+{
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return true;
+
+ return ++cmd->retries <= cmd->allowed;
+}
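+
+/*
+ * Given the pre-increment above, a command with allowed == 3 gets "true"
+ * from this helper three times before being failed, while
+ * SCSI_CMD_RETRIES_NO_LIMIT bypasses the count entirely.
+ */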
+
+static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *host = sdev->host;
+
+ if (host->hostt->eh_should_retry_cmd)
+ return host->hostt->eh_should_retry_cmd(cmd);
+
+ return true;
+}
+
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
*
* Note: this function must be called only for a command that has timed out.
* Because the block layer marks a request as complete before it calls
- * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
+ * scsi_timeout(), a .scsi_done() call from the LLD for a command that has
 * timed out does not have any effect. Hence it is safe to call
* scsi_finish_command() from this function.
*/
@@ -132,46 +149,71 @@ scmd_eh_abort_handler(struct work_struct *work)
struct scsi_cmnd *scmd =
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
- int rtn;
+ struct Scsi_Host *shost = sdev->host;
+ enum scsi_disposition rtn;
+ unsigned long flags;
- if (scsi_host_eh_past_deadline(sdev->host)) {
+ if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not aborting\n"));
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
+ goto out;
+ }
+
+ SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"aborting command\n"));
- rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
- if (rtn == SUCCESS) {
- set_host_byte(scmd, DID_TIME_OUT);
- if (scsi_host_eh_past_deadline(sdev->host)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "eh timeout, not retrying "
- "aborted command\n"));
- } else if (!scsi_noretry_cmd(scmd) &&
- (++scmd->retries <= scmd->allowed)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "retry aborted command\n"));
- scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
- return;
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "finish aborted command\n"));
- scsi_finish_command(scmd);
- return;
- }
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "cmd abort %s\n",
- (rtn == FAST_IO_FAIL) ?
- "not send" : "failed"));
- }
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn != SUCCESS) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "cmd abort %s\n",
+ (rtn == FAST_IO_FAIL) ?
+ "not send" : "failed"));
+ goto out;
+ }
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "eh timeout, not retrying "
+ "aborted command\n"));
+ goto out;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+
+ /*
+ * If the abort succeeds, and there is no further
+ * EH action, clear the ->last_reset time.
+ */
+ if (list_empty(&shost->eh_abort_list) &&
+ list_empty(&shost->eh_cmd_q))
+ if (shost->eh_deadline != -1)
+ shost->last_reset = 0;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!scsi_noretry_cmd(scmd) &&
+ scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "retry aborted command\n"));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "finish aborted command\n"));
+ scsi_finish_command(scmd);
}
+ return;
+
+out:
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+ spin_unlock_irqrestore(shost->host_lock, flags);
scsi_eh_scmd_add(scmd);
}
@@ -203,6 +245,8 @@ scsi_abort_command(struct scsi_cmnd *scmd)
spin_lock_irqsave(shost->host_lock, flags);
if (shost->eh_deadline != -1 && !shost->last_reset)
shost->last_reset = jiffies;
+ BUG_ON(!list_empty(&scmd->eh_entry));
+ list_add_tail(&scmd->eh_entry, &shost->eh_abort_list);
spin_unlock_irqrestore(shost->host_lock, flags);
scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
@@ -222,7 +266,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
*/
static void scsi_eh_reset(struct scsi_cmnd *scmd)
{
- if (!blk_rq_is_passthrough(scmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_reset)
sdrv->eh_reset(scmd);
@@ -272,7 +316,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
}
/**
- * scsi_times_out - Timeout function for normal scsi commands.
+ * scsi_timeout - Timeout function for normal scsi commands.
* @req: request that is timing out.
*
* Notes:
@@ -281,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-enum blk_eh_timer_return scsi_times_out(struct request *req)
+enum blk_eh_timer_return scsi_timeout(struct request *req)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum blk_eh_timer_return rtn = BLK_EH_DONE;
@@ -290,6 +334,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
+ atomic_inc(&scmd->device->iotmo_cnt);
if (host->eh_deadline != -1 && !host->last_reset)
host->last_reset = jiffies;
@@ -419,14 +464,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
scsi_report_lun_change(sdev);
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
"LUN assignments on this target have "
"changed. The Linux SCSI layer does not "
"automatically remap LUN assignments.\n");
} else if (sshdr->asc == 0x3f)
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
+ "Operating parameters on this target have "
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
@@ -440,8 +483,13 @@ static void scsi_report_sense(struct scsi_device *sdev,
if (sshdr->asc == 0x29) {
evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
- sdev_printk(KERN_WARNING, sdev,
- "Power-on or device reset occurred\n");
+ /*
+ * Do not print message if it is an expected side-effect
+ * of runtime PM.
+ */
+ if (!sdev->silence_suspend)
+ sdev_printk(KERN_WARNING, sdev,
+ "Power-on or device reset occurred\n");
}
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
@@ -467,6 +515,11 @@ static void scsi_report_sense(struct scsi_device *sdev,
}
}
+static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status)
+{
+ cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+}
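+
+/*
+ * The mask above preserves everything except byte 1 of cmd->result, which
+ * holds the mid-layer byte (status is byte 0, host byte is byte 2), so
+ * e.g. SCSIML_STAT_NOSPC lands in bits 8..15.
+ */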
+
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
@@ -478,7 +531,7 @@ static void scsi_report_sense(struct scsi_device *sdev,
* When a deferred error is detected the current command has
* not been executed and needs retrying.
*/
-int scsi_check_sense(struct scsi_cmnd *scmd)
+enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
{
struct scsi_device *sdev = scmd->device;
struct scsi_sense_hdr sshdr;
@@ -492,7 +545,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
return NEEDS_RETRY;
if (sdev->handler && sdev->handler->check_sense) {
- int rc;
+ enum scsi_disposition rc;
rc = sdev->handler->check_sense(sdev, &sshdr);
if (rc != SCSI_RETURN_NOT_HANDLED)
@@ -500,7 +553,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
/* handler does not care. Drop down to default handling */
}
- if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+ if (scmd->cmnd[0] == TEST_UNIT_READY &&
+ scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER)
/*
* nasty: for mid-layer issued TURs, we need to return the
* actual sense data without any recovery attempt. For eh
@@ -596,22 +650,22 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
case DATA_PROTECT:
if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
/* Thin provisioning hard threshold reached */
- set_host_byte(scmd, DID_ALLOC_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC);
return SUCCESS;
}
- /* FALLTHROUGH */
+ fallthrough;
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
case BLANK_CHECK:
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
return SUCCESS;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- set_host_byte(scmd, DID_MEDIUM_ERROR);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR);
return SUCCESS;
}
return NEEDS_RETRY;
@@ -620,8 +674,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- set_host_byte(scmd, DID_TARGET_FAILURE);
- /* FALLTHROUGH */
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
+ fallthrough;
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
@@ -630,7 +684,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26 || /* Parameter value invalid */
sshdr.asc == 0x27) { /* Write protected */
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
}
return SUCCESS;
@@ -703,7 +757,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
* don't allow for the possibility of retries here, and we are a lot
* more restrictive about what we consider acceptable.
*/
-static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
/*
* first check the host byte, to see if there is anything in there
@@ -722,41 +776,35 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
return FAILED;
/*
- * next, check the message byte.
- */
- if (msg_byte(scmd->result) != COMMAND_COMPLETE)
- return FAILED;
-
- /*
* now, check the status byte to see if this indicates
* anything special.
*/
- switch (status_byte(scmd->result)) {
- case GOOD:
+ switch (get_status_byte(scmd)) {
+ case SAM_STAT_GOOD:
scsi_handle_queue_ramp_up(scmd->device);
- /* FALLTHROUGH */
- case COMMAND_TERMINATED:
+ fallthrough;
+ case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;
- case CHECK_CONDITION:
+ case SAM_STAT_CHECK_CONDITION:
return scsi_check_sense(scmd);
- case CONDITION_GOOD:
- case INTERMEDIATE_GOOD:
- case INTERMEDIATE_C_GOOD:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_INTERMEDIATE:
+ case SAM_STAT_INTERMEDIATE_CONDITION_MET:
/*
* who knows? FIXME(eric)
*/
return SUCCESS;
- case RESERVATION_CONFLICT:
+ case SAM_STAT_RESERVATION_CONFLICT:
if (scmd->cmnd[0] == TEST_UNIT_READY)
/* it is a success, we probed the device and
* found it */
return SUCCESS;
/* otherwise, we failed to send the command */
return FAILED;
- case QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
scsi_handle_queue_full(scmd->device);
- /* fall through */
- case BUSY:
+ fallthrough;
+ case SAM_STAT_BUSY:
return NEEDS_RETRY;
default:
return FAILED;
@@ -768,7 +816,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
* scsi_eh_done - Completion function for error handling.
* @scmd: Cmd that is done.
*/
-static void scsi_eh_done(struct scsi_cmnd *scmd)
+void scsi_eh_done(struct scsi_cmnd *scmd)
{
struct completion *eh_action;
@@ -784,10 +832,10 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
* scsi_try_host_reset - ask host adapter to reset itself
* @scmd: SCSI cmd to send host reset.
*/
-static int scsi_try_host_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
- int rtn;
+ enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
@@ -814,10 +862,10 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
* scsi_try_bus_reset - ask host to perform a bus reset
* @scmd: SCSI cmd to send bus reset.
*/
-static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
- int rtn;
+ enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
@@ -856,10 +904,10 @@ static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
* timer on it, and set the host back to a consistent state prior to
* returning.
*/
-static int scsi_try_target_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
- int rtn;
+ enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
@@ -887,9 +935,9 @@ static int scsi_try_target_reset(struct scsi_cmnd *scmd)
* timer on it, and set the host back to a consistent state prior to
* returning.
*/
-static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
- int rtn;
+ enum scsi_disposition rtn;
struct scsi_host_template *hostt = scmd->device->host->hostt;
if (!hostt->eh_device_reset_handler)
@@ -918,8 +966,8 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
* if the device is temporarily unavailable (eg due to a
* link down on FibreChannel)
*/
-static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
- struct scsi_cmnd *scmd)
+static enum scsi_disposition
+scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
{
if (!hostt->eh_abort_handler)
return FAILED;
@@ -941,7 +989,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
* @scmd: SCSI command structure to hijack
* @ses: structure to save restore information
* @cmnd: CDB to send. Can be NULL if no new cmnd is needed
- * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB)
+ * @cmnd_size: size in bytes of @cmnd (must be <= MAX_COMMAND_SIZE)
 * @sense_bytes: size of sense data to copy, or 0 (if != 0, @cmnd is ignored)
*
 * This function is used to save a SCSI command's state before re-execution
@@ -963,22 +1011,21 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
* command.
*/
ses->cmd_len = scmd->cmd_len;
- ses->cmnd = scmd->cmnd;
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->result = scmd->result;
- ses->resid_len = scmd->req.resid_len;
+ ses->resid_len = scmd->resid_len;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
ses->eh_eflags = scmd->eh_eflags;
scmd->prot_op = SCSI_PROT_NORMAL;
scmd->eh_eflags = 0;
- scmd->cmnd = ses->eh_cmnd;
- memset(scmd->cmnd, 0, BLK_MAX_CDB);
+ memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
+ memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->result = 0;
- scmd->req.resid_len = 0;
+ scmd->resid_len = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -994,7 +1041,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
} else {
scmd->sc_data_direction = DMA_NONE;
if (cmnd) {
- BUG_ON(cmnd_size > BLK_MAX_CDB);
+ BUG_ON(cmnd_size > sizeof(scmd->cmnd));
memcpy(scmd->cmnd, cmnd, cmnd_size);
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
}
@@ -1027,11 +1074,11 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
* Restore original data
*/
scmd->cmd_len = ses->cmd_len;
- scmd->cmnd = ses->cmnd;
+ memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->result = ses->result;
- scmd->req.resid_len = ses->resid_len;
+ scmd->resid_len = ses->resid_len;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
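
The scsi_eh_prep_cmnd()/scsi_eh_restore_cmnd() hunks stop stashing the cmnd pointer and copy the CDB bytes instead, as required once scmd->cmnd becomes an array embedded in struct scsi_cmnd rather than a pointer into the request. A minimal standalone sketch of that save/restore pattern (the struct layouts are illustrative stand-ins, not the kernel definitions):

#include <string.h>

#define MAX_COMMAND_SIZE 16

struct eh_cmd  { unsigned char cmnd[MAX_COMMAND_SIZE]; };
struct eh_save { unsigned char cmnd[MAX_COMMAND_SIZE]; };

/* With an embedded array there is no pointer to swap: the CDB bytes are
 * copied out before the command is hijacked, and copied back when the
 * original command is restored. */
static void save_cdb(struct eh_save *ses, struct eh_cmd *scmd)
{
	memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
	memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
}

static void restore_cdb(struct eh_cmd *scmd, const struct eh_save *ses)
{
	memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));
}

int main(void)
{
	struct eh_cmd scmd = { .cmnd = { 0x12 } };	/* INQUIRY */
	struct eh_save ses;

	save_cdb(&ses, &scmd);		/* scmd.cmnd is now zeroed */
	restore_cdb(&scmd, &ses);	/* original CDB back in place */
	return scmd.cmnd[0] == 0x12 ? 0 : 1;
}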
@@ -1052,8 +1099,8 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY
*/
-static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
- int cmnd_size, int timeout, unsigned sense_bytes)
+static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd,
+ unsigned char *cmnd, int cmnd_size, int timeout, unsigned sense_bytes)
{
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
@@ -1068,7 +1115,7 @@ retry:
shost->eh_action = &done;
scsi_log_send(scmd);
- scmd->scsi_done = scsi_eh_done;
+ scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
/*
* Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
@@ -1089,12 +1136,13 @@ retry:
if (sdev->sdev_state != SDEV_BLOCK)
rtn = shost->hostt->queuecommand(shost, scmd);
else
- rtn = SCSI_MLQUEUE_DEVICE_BUSY;
+ rtn = FAILED;
mutex_unlock(&sdev->state_mutex);
if (rtn) {
if (timeleft > stall_for) {
scsi_eh_restore_cmnd(scmd, &ses);
+
timeleft -= stall_for;
msleep(jiffies_to_msecs(stall_for));
goto retry;
@@ -1160,14 +1208,15 @@ retry:
* that we obtain it on our own. This function will *not* return until
* the command either times out, or it completes.
*/
-static int scsi_request_sense(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
{
return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
}
-static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
+static enum scsi_disposition
+scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
{
- if (!blk_rq_is_passthrough(scmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_action)
rtn = sdrv->eh_action(scmd, rtn);
@@ -1218,7 +1267,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
{
struct scsi_cmnd *scmd, *next;
struct Scsi_Host *shost;
- int rtn;
+ enum scsi_disposition rtn;
/*
* If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO,
@@ -1237,7 +1286,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
current->comm));
break;
}
- if (status_byte(scmd->result) != CHECK_CONDITION)
+ if (!scsi_status_is_check_condition(scmd->result))
/*
* don't request sense if there's no check condition
* status because the error we're processing isn't one
@@ -1264,11 +1313,18 @@ int scsi_eh_get_sense(struct list_head *work_q,
* upper level.
*/
if (rtn == SUCCESS)
- /* we don't want this command reissued, just
- * finished with the sense data, so set
- * retries to the max allowed to ensure it
- * won't get reissued */
- scmd->retries = scmd->allowed;
+ /*
+ * We don't want this command reissued, just finished
+ * with the sense data, so set retries to the max
+ * allowed to ensure it won't get reissued. If the user
+ * has requested infinite retries, we also want to
+ * finish this command, so force completion by setting
+ * retries and allowed to the same value.
+ */
+ if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ scmd->retries = scmd->allowed = 1;
+ else
+ scmd->retries = scmd->allowed;
else if (rtn != NEEDS_RETRY)
continue;
@@ -1289,7 +1345,8 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
- int retry_cnt = 1, rtn;
+ int retry_cnt = 1;
+ enum scsi_disposition rtn;
retry_tur:
rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
@@ -1302,7 +1359,7 @@ retry_tur:
case NEEDS_RETRY:
if (retry_cnt--)
goto retry_tur;
- /*FALLTHRU*/
+ fallthrough;
case SUCCESS:
return 0;
default:
@@ -1377,10 +1434,12 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
if (scmd->device->allow_restart) {
- int i, rtn = NEEDS_RETRY;
+ int i;
+ enum scsi_disposition rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
- rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+ scmd->device->eh_timeout, 0);
if (rtn == SUCCESS)
return 0;
@@ -1412,6 +1471,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip START_UNIT, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
stu_scmd = NULL;
@@ -1470,7 +1530,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *bdr_scmd, *next;
struct scsi_device *sdev;
- int rtn;
+ enum scsi_disposition rtn;
shost_for_each_device(sdev, shost) {
if (scsi_host_eh_past_deadline(shost)) {
@@ -1478,6 +1538,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip BDR, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
bdr_scmd = NULL;
@@ -1536,7 +1597,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
while (!list_empty(&tmp_list)) {
struct scsi_cmnd *next, *scmd;
- int rtn;
+ enum scsi_disposition rtn;
unsigned int id;
if (scsi_host_eh_past_deadline(shost)) {
@@ -1594,7 +1655,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct scsi_cmnd *scmd, *chan_scmd, *next;
LIST_HEAD(check_list);
unsigned int channel;
- int rtn;
+ enum scsi_disposition rtn;
/*
* we really want to loop over the various channels, and do this on
@@ -1665,7 +1726,7 @@ static int scsi_eh_host_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *next;
LIST_HEAD(check_list);
- int rtn;
+ enum scsi_disposition rtn;
if (!list_empty(work_q)) {
scmd = list_entry(work_q->next,
@@ -1722,39 +1783,39 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
* scsi_noretry_cmd - determine if command should be failed fast
* @scmd: SCSI cmd to examine.
*/
-int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
+ struct request *req = scsi_cmd_to_rq(scmd);
+
switch (host_byte(scmd->result)) {
case DID_OK:
break;
case DID_TIME_OUT:
goto check_type;
case DID_BUS_BUSY:
- return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
+ return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
case DID_PARITY:
- return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
+ return !!(req->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
- if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
- status_byte(scmd->result) == RESERVATION_CONFLICT)
- return 0;
- /* fall through */
+ if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
+ return false;
+ fallthrough;
case DID_SOFT_ERROR:
- return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
+ return !!(req->cmd_flags & REQ_FAILFAST_DRIVER);
}
- if (status_byte(scmd->result) != CHECK_CONDITION)
- return 0;
+ if (!scsi_status_is_check_condition(scmd->result))
+ return false;
check_type:
/*
* assume caller has checked sense and determined
* the check condition was retryable.
*/
- if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- blk_rq_is_passthrough(scmd->request))
- return 1;
- else
- return 0;
+ if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
+ return true;
+
+ return false;
}
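
scsi_noretry_cmd() now returns bool, and each flag test gains a !! so that a masked flag word collapses to a strict true/false. A standalone sketch of the idiom (the flag bit positions here are illustrative; the kernel's REQ_FAILFAST_* layout lives in include/linux/blk_types.h):

#include <assert.h>
#include <stdbool.h>

#define REQ_FAILFAST_DEV	(1u << 0)	/* illustrative values */
#define REQ_FAILFAST_TRANSPORT	(1u << 1)
#define REQ_FAILFAST_DRIVER	(1u << 2)

/* !! collapses the masked flag word to 0 or 1 explicitly; a C99 bool
 * return would normalize implicitly, but the explicit form keeps the
 * result well-defined even if the return type is ever an int again. */
static bool failfast(unsigned int cmd_flags, unsigned int bit)
{
	return !!(cmd_flags & bit);
}

int main(void)
{
	unsigned int flags = REQ_FAILFAST_TRANSPORT;

	assert(failfast(flags, REQ_FAILFAST_TRANSPORT));
	assert(!failfast(flags, REQ_FAILFAST_DEV));
	return 0;
}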
/**
@@ -1771,9 +1832,9 @@ check_type:
* doesn't require the error handler read (i.e. we don't need to
* abort/reset), this function should return SUCCESS.
*/
-int scsi_decide_disposition(struct scsi_cmnd *scmd)
+enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
{
- int rtn;
+ enum scsi_disposition rtn;
/*
* if the device is offline, then we clearly just pass the result back
@@ -1808,7 +1869,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
set_host_byte(scmd, DID_TIME_OUT);
return SUCCESS;
}
- /* FALLTHROUGH */
+ fallthrough;
case DID_NO_CONNECT:
case DID_BAD_TARGET:
/*
@@ -1844,15 +1905,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* the fast io fail tmo fired), so send IO directly upwards.
*/
return SUCCESS;
+ case DID_TRANSPORT_MARGINAL:
+ /*
+ * caller has decided not to do retries on
+ * abort success, so send IO directly upwards
+ */
+ return SUCCESS;
case DID_ERROR:
- if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
- status_byte(scmd->result) == RESERVATION_CONFLICT)
+ if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
/*
* execute reservation conflict processing code
* lower down
*/
break;
- /* fallthrough */
+ fallthrough;
case DID_BUS_BUSY:
case DID_PARITY:
goto maybe_retry;
@@ -1875,23 +1941,17 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
}
/*
- * next, check the message byte.
- */
- if (msg_byte(scmd->result) != COMMAND_COMPLETE)
- return FAILED;
-
- /*
* check the status byte to see if this indicates anything special.
*/
- switch (status_byte(scmd->result)) {
- case QUEUE_FULL:
+ switch (get_status_byte(scmd)) {
+ case SAM_STAT_TASK_SET_FULL:
scsi_handle_queue_full(scmd->device);
/*
* the case of trying to send too many commands to a
* tagged queueing device.
*/
- /* FALLTHROUGH */
- case BUSY:
+ fallthrough;
+ case SAM_STAT_BUSY:
/*
* device can't talk to us at the moment. Should only
* occur (SAM-3) when the task queue is empty, so will cause
@@ -1899,16 +1959,16 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* device.
*/
return ADD_TO_MLQUEUE;
- case GOOD:
+ case SAM_STAT_GOOD:
if (scmd->cmnd[0] == REPORT_LUNS)
scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
- /* FALLTHROUGH */
- case COMMAND_TERMINATED:
+ fallthrough;
+ case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;
- case TASK_ABORTED:
+ case SAM_STAT_TASK_ABORTED:
goto maybe_retry;
- case CHECK_CONDITION:
+ case SAM_STAT_CHECK_CONDITION:
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
@@ -1917,22 +1977,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* to collect the sense and redo the decide
* disposition */
return rtn;
- case CONDITION_GOOD:
- case INTERMEDIATE_GOOD:
- case INTERMEDIATE_C_GOOD:
- case ACA_ACTIVE:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_INTERMEDIATE:
+ case SAM_STAT_INTERMEDIATE_CONDITION_MET:
+ case SAM_STAT_ACA_ACTIVE:
/*
* who knows? FIXME(eric)
*/
return SUCCESS;
- case RESERVATION_CONFLICT:
+ case SAM_STAT_RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
- set_host_byte(scmd, DID_NEXUS_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT);
return SUCCESS; /* causes immediate i/o error */
- default:
- return FAILED;
}
return FAILED;
@@ -1942,8 +2000,7 @@ maybe_retry:
* the request was not marked fast fail. Note that above,
* even if the request is marked fast fail, we still requeue
* for queue congestion conditions (QUEUE_FULL or BUSY) */
- if ((++scmd->retries) <= scmd->allowed
- && !scsi_noretry_cmd(scmd)) {
+ if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) {
return NEEDS_RETRY;
} else {
/*
@@ -1953,9 +2010,11 @@ maybe_retry:
}
}
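
maybe_retry now defers the open-coded "(++scmd->retries) <= scmd->allowed" test to scsi_cmd_retry_allowed(), which also honours the SCSI_CMD_RETRIES_NO_LIMIT sentinel seen in scsi_eh_get_sense() above. A standalone rendering of what such a helper does (the kernel's version lives in scsi_priv.h; this one is an illustration, not a copy):

#include <stdio.h>

#define SCSI_CMD_RETRIES_NO_LIMIT	-1

struct cmd { int retries, allowed; };

/* Unlimited retries short-circuit the counter; otherwise the counter is
 * bumped and compared against the per-command budget, matching the
 * open-coded test this replaces. */
static int scsi_cmd_retry_allowed(struct cmd *cmd)
{
	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return 1;
	return ++cmd->retries <= cmd->allowed;
}

int main(void)
{
	struct cmd c = { .retries = 0, .allowed = 2 };

	while (scsi_cmd_retry_allowed(&c))
		;	/* two retries granted, third attempt denied */
	printf("retries counter ends at %d\n", c.retries);	/* 3 */
	return 0;
}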
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+ blk_status_t status)
{
- blk_put_request(req);
+ blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
/**
@@ -1971,27 +2030,28 @@ static void eh_lock_door_done(struct request *req, blk_status_t status)
*/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
+ struct scsi_cmnd *scmd;
struct request *req;
- struct scsi_request *rq;
- req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
+ req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return;
- rq = scsi_req(req);
+ scmd = blk_mq_rq_to_pdu(req);
- rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
- rq->cmd[1] = 0;
- rq->cmd[2] = 0;
- rq->cmd[3] = 0;
- rq->cmd[4] = SCSI_REMOVAL_PREVENT;
- rq->cmd[5] = 0;
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL;
+ scmd->cmnd[1] = 0;
+ scmd->cmnd[2] = 0;
+ scmd->cmnd[3] = 0;
+ scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
+ scmd->cmnd[5] = 0;
+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ scmd->allowed = 5;
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
- rq->retries = 5;
+ req->end_io = eh_lock_door_done;
- blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
+ blk_execute_rq_nowait(req, true);
}
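
scsi_eh_lock_door() now shows the reworked passthrough pattern in miniature: the CDB and retry count move from struct scsi_request into the scsi_cmnd PDU embedded in the request, and the completion callback frees the request itself, telling the block layer so by returning RQ_END_IO_NONE. A condensed kernel-side sketch of the same fire-and-forget shape (not standalone; the opcode and timeout are illustrative):

static enum rq_end_io_ret example_done(struct request *req,
				       blk_status_t status)
{
	blk_mq_free_request(req);	/* we own the request's lifetime */
	return RQ_END_IO_NONE;		/* block layer must not touch it */
}

static void example_send_nowait(struct scsi_device *sdev, u8 opcode)
{
	struct scsi_cmnd *scmd;
	struct request *req;

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return;
	scmd = blk_mq_rq_to_pdu(req);	/* the PDU is the scsi_cmnd */
	scmd->cmnd[0] = opcode;
	scmd->cmd_len = COMMAND_SIZE(opcode);
	scmd->allowed = 0;
	req->timeout = 10 * HZ;
	req->end_io = example_done;
	blk_execute_rq_nowait(req, true);	/* at head, don't wait */
}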
/**
@@ -2089,8 +2149,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
- !scsi_noretry_cmd(scmd) &&
- (++scmd->retries <= scmd->allowed)) {
+ !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush retry cmd\n",
@@ -2100,10 +2160,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
/*
			 * If we just got sense for the device (called
* scsi_eh_get_sense), scmd->result is already
- * set, do not set DRIVER_TIMEOUT.
+ * set, do not set DID_TIME_OUT.
*/
if (!scmd->result)
- scmd->result |= (DRIVER_TIMEOUT << 24);
+ scmd->result |= (DID_TIME_OUT << 16);
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush finish cmd\n",
@@ -2314,11 +2374,6 @@ void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
}
EXPORT_SYMBOL(scsi_report_device_reset);
-static void
-scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
-{
-}
-
/**
* scsi_ioctl_reset: explicitly reset a host/bus/target/device
* @dev: scsi_device to operate on
@@ -2331,7 +2386,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
struct Scsi_Host *shost = dev->host;
struct request *rq;
unsigned long flags;
- int error = 0, rtn, val;
+ int error = 0, val;
+ enum scsi_disposition rtn;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
@@ -2352,10 +2408,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scmd = (struct scsi_cmnd *)(rq + 1);
scsi_init_command(dev, scmd);
- scmd->request = rq;
- scmd->cmnd = scsi_req(rq)->cmd;
- scmd->scsi_done = scsi_reset_provider_done_command;
+ scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->cmd_len = 0;
@@ -2374,22 +2428,22 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
rtn = scsi_try_bus_device_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_TARGET:
rtn = scsi_try_target_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_BUS:
rtn = scsi_try_bus_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_HOST:
rtn = scsi_try_host_reset(scmd);
if (rtn == SUCCESS)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
rtn = FAILED;
break;
@@ -2412,7 +2466,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
wake_up(&shost->host_wait);
scsi_run_host_queues(shost);
- scsi_put_command(scmd);
kfree(rq);
out_put_autopm_host:
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 8f3af87b6bb0..2d20da55fb64 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/cdrom.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -63,29 +64,6 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
return 1;
}
-/*
-
- * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
- * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
- *
- * dev is the SCSI device struct ptr, *(int *) arg is the length of the
- * input data, if any, not including the command string & counts,
- * *((int *)arg + 1) is the output buffer size in bytes.
- *
- * *(char *) ((int *) arg)[2] the actual command byte.
- *
- * Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL.
- *
- * This size *does not* include the initial lengths that were passed.
- *
- * The SCSI command is read from the memory location immediately after the
- * length words, and the input data is right after the command. The SCSI
- * routines know the command size based on the opcode decode.
- *
- * The output area is then filled in starting from the command byte.
- */
-
static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
int timeout, int retries)
{
@@ -101,8 +79,9 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr)) {
+ if (result < 0)
+ goto out;
+ if (scsi_sense_valid(&sshdr)) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
@@ -117,14 +96,14 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
case NOT_READY: /* This happens if there is no disc in drive */
if (sdev->removable)
break;
- /* FALLTHROUGH */
+ fallthrough;
case UNIT_ATTENTION:
if (sdev->removable) {
sdev->changed = 1;
result = 0; /* This is no longer considered an error */
break;
}
- /* FALLTHROUGH -- for non-removable media */
+ fallthrough; /* for non-removable media */
default:
sdev_printk(KERN_INFO, sdev,
"ioctl_internal_command return code = %x\n",
@@ -133,7 +112,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
break;
}
}
-
+out:
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"IOCTL Releasing command\n"));
return result;
@@ -188,10 +167,700 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
? -EFAULT: 0;
}
+static int sg_get_version(int __user *p)
+{
+ static const int sg_version_num = 30527;
+ return put_user(sg_version_num, p);
+}
+
+static int sg_set_timeout(struct scsi_device *sdev, int __user *p)
+{
+ int timeout, err = get_user(timeout, p);
+
+ if (!err)
+ sdev->sg_timeout = clock_t_to_jiffies(timeout);
-static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
+ return err;
+}
+
+static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p)
{
- char scsi_cmd[MAX_COMMAND_SIZE];
+ int val = min(sdev->sg_reserved_size,
+ queue_max_bytes(sdev->request_queue));
+
+ return put_user(val, p);
+}
+
+static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+ int size, err = get_user(size, p);
+
+ if (err)
+ return err;
+
+ if (size < 0)
+ return -EINVAL;
+
+ sdev->sg_reserved_size = min_t(unsigned int, size,
+ queue_max_bytes(sdev->request_queue));
+ return 0;
+}
+
+/*
+ * will always return that we are ATAPI even for a real SCSI drive; I'm not
+ * so sure this is worth doing anything about (why would you care??)
+ */
+static int sg_emulated_host(struct request_queue *q, int __user *p)
+{
+ return put_user(1, p);
+}
+
+static int scsi_get_idlun(struct scsi_device *sdev, void __user *argp)
+{
+ struct scsi_idlun v = {
+ .dev_id = (sdev->id & 0xff) +
+ ((sdev->lun & 0xff) << 8) +
+ ((sdev->channel & 0xff) << 16) +
+ ((sdev->host->host_no & 0xff) << 24),
+ .host_unique_id = sdev->host->unique_id
+ };
+ if (copy_to_user(argp, &v, sizeof(struct scsi_idlun)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scsi_send_start_stop(struct scsi_device *sdev, int data)
+{
+ u8 cdb[MAX_COMMAND_SIZE] = { };
+
+ cdb[0] = START_STOP;
+ cdb[4] = data;
+ return ioctl_internal_command(sdev, cdb, START_STOP_TIMEOUT,
+ NORMAL_RETRIES);
+}
+
+/*
+ * Check if the given command is allowed.
+ *
+ * Only a subset of commands are allowed for unprivileged users. Commands used
+ * to format the media, update the firmware, etc. are not permitted.
+ */
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
+{
+ /* root can do any command. */
+ if (capable(CAP_SYS_RAWIO))
+ return true;
+
+ /* Anybody who can open the device can do a read-safe command */
+ switch (cmd[0]) {
+ /* Basic read-only commands */
+ case TEST_UNIT_READY:
+ case REQUEST_SENSE:
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_BUFFER:
+ case READ_DEFECT_DATA:
+ case READ_CAPACITY: /* also GPCMD_READ_CDVD_CAPACITY */
+ case READ_LONG:
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case LOG_SENSE:
+ case START_STOP:
+ case GPCMD_VERIFY_10:
+ case VERIFY_16:
+ case REPORT_LUNS:
+ case SERVICE_ACTION_IN_16:
+ case RECEIVE_DIAGNOSTIC:
+ case MAINTENANCE_IN: /* also GPCMD_SEND_KEY, which is a write command */
+ case GPCMD_READ_BUFFER_CAPACITY:
+ /* Audio CD commands */
+ case GPCMD_PLAY_CD:
+ case GPCMD_PLAY_AUDIO_10:
+ case GPCMD_PLAY_AUDIO_MSF:
+ case GPCMD_PLAY_AUDIO_TI:
+ case GPCMD_PAUSE_RESUME:
+ /* CD/DVD data reading */
+ case GPCMD_READ_CD:
+ case GPCMD_READ_CD_MSF:
+ case GPCMD_READ_DISC_INFO:
+ case GPCMD_READ_DVD_STRUCTURE:
+ case GPCMD_READ_HEADER:
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ case GPCMD_READ_SUBCHANNEL:
+ case GPCMD_READ_TOC_PMA_ATIP:
+ case GPCMD_REPORT_KEY:
+ case GPCMD_SCAN:
+ case GPCMD_GET_CONFIGURATION:
+ case GPCMD_READ_FORMAT_CAPACITIES:
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ case GPCMD_GET_PERFORMANCE:
+ case GPCMD_SEEK:
+ case GPCMD_STOP_PLAY_SCAN:
+ /* ZBC */
+ case ZBC_IN:
+ return true;
+ /* Basic writing commands */
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_VERIFY:
+ case WRITE_12:
+ case WRITE_VERIFY_12:
+ case WRITE_16:
+ case WRITE_LONG:
+ case WRITE_LONG_2:
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ case WRITE_SAME_32:
+ case ERASE:
+ case GPCMD_MODE_SELECT_10:
+ case MODE_SELECT:
+ case LOG_SELECT:
+ case GPCMD_BLANK:
+ case GPCMD_CLOSE_TRACK:
+ case GPCMD_FLUSH_CACHE:
+ case GPCMD_FORMAT_UNIT:
+ case GPCMD_REPAIR_RZONE_TRACK:
+ case GPCMD_RESERVE_RZONE_TRACK:
+ case GPCMD_SEND_DVD_STRUCTURE:
+ case GPCMD_SEND_EVENT:
+ case GPCMD_SEND_OPC:
+ case GPCMD_SEND_CUE_SHEET:
+ case GPCMD_SET_SPEED:
+ case GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL:
+ case GPCMD_LOAD_UNLOAD:
+ case GPCMD_SET_STREAMING:
+ case GPCMD_SET_READ_AHEAD:
+ /* ZBC */
+ case ZBC_OUT:
+ return (mode & FMODE_WRITE);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(scsi_cmd_allowed);
+
+static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+
+ if (hdr->cmd_len < 6)
+ return -EMSGSIZE;
+ if (copy_from_user(scmd->cmnd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ return -EPERM;
+ scmd->cmd_len = hdr->cmd_len;
+
+ rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = sdev->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+}
+
+static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+ struct bio *bio)
+{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ int r, ret = 0;
+
+ /*
+ * fill in all the output members
+ */
+ hdr->status = scmd->result & 0xff;
+ hdr->masked_status = status_byte(scmd->result);
+ hdr->msg_status = COMMAND_COMPLETE;
+ hdr->host_status = host_byte(scmd->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(hdr->status))
+ hdr->driver_status = DRIVER_SENSE;
+ hdr->info = 0;
+ if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->resid = scmd->resid_len;
+ hdr->sb_len_wr = 0;
+
+ if (scmd->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, scmd->sense_len);
+
+ if (!copy_to_user(hdr->sbp, scmd->sense_buffer, len))
+ hdr->sb_len_wr = len;
+ else
+ ret = -EFAULT;
+ }
+
+ r = blk_rq_unmap_user(bio);
+ if (!ret)
+ ret = r;
+
+ return ret;
+}
+
+static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
+{
+ unsigned long start_time;
+ ssize_t ret = 0;
+ int writing = 0;
+ int at_head = 0;
+ struct request *rq;
+ struct scsi_cmnd *scmd;
+ struct bio *bio;
+
+ if (hdr->interface_id != 'S')
+ return -EINVAL;
+
+ if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9))
+ return -EIO;
+
+ if (hdr->dxfer_len)
+ switch (hdr->dxfer_direction) {
+ default:
+ return -EINVAL;
+ case SG_DXFER_TO_DEV:
+ writing = 1;
+ break;
+ case SG_DXFER_TO_FROM_DEV:
+ case SG_DXFER_FROM_DEV:
+ break;
+ }
+ if (hdr->flags & SG_FLAG_Q_AT_HEAD)
+ at_head = 1;
+
+ rq = scsi_alloc_request(sdev->request_queue, writing ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ if (hdr->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_put_request;
+ }
+
+ ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
+ if (ret < 0)
+ goto out_put_request;
+
+ ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL, hdr->iovec_count && hdr->dxfer_len,
+ hdr->iovec_count, 0, rq_data_dir(rq));
+ if (ret)
+ goto out_put_request;
+
+ bio = rq->bio;
+ scmd->allowed = 0;
+
+ start_time = jiffies;
+
+ blk_execute_rq(rq, at_head);
+
+ hdr->duration = jiffies_to_msecs(jiffies - start_time);
+
+ ret = scsi_complete_sghdr_rq(rq, hdr, bio);
+
+out_put_request:
+ blk_mq_free_request(rq);
+ return ret;
+}
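
For reference, the sg_io() path above is what services the userspace SG_IO ioctl. A small userspace program exercising it with TEST UNIT READY (the device path and error handling are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDWR);	/* illustrative device */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';		/* checked by sg_io() above */
	hdr.dxfer_direction = SG_DXFER_NONE;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);	/* must be >= 6, see above */
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;		/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("status=0x%x host=0x%x driver=0x%x\n",
		       hdr.status, hdr.host_status, hdr.driver_status);
	close(fd);
	return 0;
}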
+
+/**
+ * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @q: request queue to send scsi commands down
+ * @mode: mode used to open the file through which the ioctl has been
+ * submitted
+ * @sic: userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q.
+ *
+ * Notes:
+ * - This interface is deprecated - users should use the SG_IO
+ * interface instead, as this is a more flexible approach to
+ * performing SCSI commands on a device.
+ * - The SCSI command length is determined by examining the 1st byte
+ * of the given command. There is no way to override this.
+ * - Data transfers are limited to PAGE_SIZE
+ * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ * accommodate the sense buffer when an error occurs.
+ * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ * old code will not be surprised.
+ * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ * a negative return and the Unix error code in 'errno'.
+ * If the SCSI command succeeds then 0 is returned.
+ * Positive numbers returned are the compacted SCSI error codes (4
+ * bytes in one int) where the lowest byte is the SCSI status.
+ */
+static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
+{
+ struct request *rq;
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ struct scsi_cmnd *scmd;
+ char *buffer = NULL;
+
+ if (!sic)
+ return -EINVAL;
+
+ /*
+	 * get the in and out lengths, and verify they don't exceed a page worth of data
+ */
+ if (get_user(in_len, &sic->inlen))
+ return -EFAULT;
+ if (get_user(out_len, &sic->outlen))
+ return -EFAULT;
+ if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+ return -EINVAL;
+ if (get_user(opcode, sic->data))
+ return -EFAULT;
+
+ bytes = max(in_len, out_len);
+ if (bytes) {
+ buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
+ if (!buffer)
+ return -ENOMEM;
+
+ }
+
+ rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto error_free_buffer;
+ }
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ cmdlen = COMMAND_SIZE(opcode);
+
+ /*
+ * get command and data to send to device, if any
+ */
+ err = -EFAULT;
+ scmd->cmd_len = cmdlen;
+ if (copy_from_user(scmd->cmnd, sic->data, cmdlen))
+ goto error;
+
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+ err = -EPERM;
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ goto error;
+
+	/* default; possibly overridden later */
+ scmd->allowed = 5;
+
+ switch (opcode) {
+ case SEND_DIAGNOSTIC:
+ case FORMAT_UNIT:
+ rq->timeout = FORMAT_UNIT_TIMEOUT;
+ scmd->allowed = 1;
+ break;
+ case START_STOP:
+ rq->timeout = START_STOP_TIMEOUT;
+ break;
+ case MOVE_MEDIUM:
+ rq->timeout = MOVE_MEDIUM_TIMEOUT;
+ break;
+ case READ_ELEMENT_STATUS:
+ rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ break;
+ case READ_DEFECT_DATA:
+ rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+ scmd->allowed = 1;
+ break;
+ default:
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ break;
+ }
+
+ if (bytes) {
+ err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+ if (err)
+ goto error;
+ }
+
+ blk_execute_rq(rq, false);
+
+ err = scmd->result & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (scmd->sense_len && scmd->sense_buffer) {
+ /* limit sense len for backward compatibility */
+ if (copy_to_user(sic->data, scmd->sense_buffer,
+ min(scmd->sense_len, 16U)))
+ err = -EFAULT;
+ }
+ } else {
+ if (copy_to_user(sic->data, buffer, out_len))
+ err = -EFAULT;
+ }
+
+error:
+ blk_mq_free_request(rq);
+
+error_free_buffer:
+ kfree(buffer);
+
+ return err;
+}
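
The layout the kerneldoc above describes is easiest to see from the caller's side. A userspace sketch of the deprecated interface (the ioctl number is copied from the kernel's scsi/scsi_ioctl.h since the header is not exported to uapi; new code should use SG_IO instead, as noted above):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define SCSI_IOCTL_SEND_COMMAND 1	/* from scsi/scsi_ioctl.h */

struct cmd_buf {
	unsigned int inlen;	/* bytes of data sent to the device */
	unsigned int outlen;	/* bytes of data expected back */
	unsigned char data[16];	/* CDB, then in-data; out-data lands here */
};

int main(void)
{
	struct cmd_buf buf;
	int fd = open("/dev/sda", O_RDONLY);	/* illustrative device */

	if (fd < 0)
		return 1;
	memset(&buf, 0, sizeof(buf));
	buf.data[0] = 0x00;	/* TEST UNIT READY; length inferred from opcode */
	if (ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &buf) < 0)
		perror("SCSI_IOCTL_SEND_COMMAND");
	close(fd);
	return 0;
}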
+
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_sg_io_hdr hdr32 = {
+ .interface_id = hdr->interface_id,
+ .dxfer_direction = hdr->dxfer_direction,
+ .cmd_len = hdr->cmd_len,
+ .mx_sb_len = hdr->mx_sb_len,
+ .iovec_count = hdr->iovec_count,
+ .dxfer_len = hdr->dxfer_len,
+ .dxferp = (uintptr_t)hdr->dxferp,
+ .cmdp = (uintptr_t)hdr->cmdp,
+ .sbp = (uintptr_t)hdr->sbp,
+ .timeout = hdr->timeout,
+ .flags = hdr->flags,
+ .pack_id = hdr->pack_id,
+ .usr_ptr = (uintptr_t)hdr->usr_ptr,
+ .status = hdr->status,
+ .masked_status = hdr->masked_status,
+ .msg_status = hdr->msg_status,
+ .sb_len_wr = hdr->sb_len_wr,
+ .host_status = hdr->host_status,
+ .driver_status = hdr->driver_status,
+ .resid = hdr->resid,
+ .duration = hdr->duration,
+ .info = hdr->info,
+ };
+
+ if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+
+ if (copy_to_user(argp, hdr, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(put_sg_io_hdr);
+
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ struct compat_sg_io_hdr hdr32;
+
+ if (in_compat_syscall()) {
+ if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
+ return -EFAULT;
+
+ *hdr = (struct sg_io_hdr) {
+ .interface_id = hdr32.interface_id,
+ .dxfer_direction = hdr32.dxfer_direction,
+ .cmd_len = hdr32.cmd_len,
+ .mx_sb_len = hdr32.mx_sb_len,
+ .iovec_count = hdr32.iovec_count,
+ .dxfer_len = hdr32.dxfer_len,
+ .dxferp = compat_ptr(hdr32.dxferp),
+ .cmdp = compat_ptr(hdr32.cmdp),
+ .sbp = compat_ptr(hdr32.sbp),
+ .timeout = hdr32.timeout,
+ .flags = hdr32.flags,
+ .pack_id = hdr32.pack_id,
+ .usr_ptr = compat_ptr(hdr32.usr_ptr),
+ .status = hdr32.status,
+ .masked_status = hdr32.masked_status,
+ .msg_status = hdr32.msg_status,
+ .sb_len_wr = hdr32.sb_len_wr,
+ .host_status = hdr32.host_status,
+ .driver_status = hdr32.driver_status,
+ .resid = hdr32.resid,
+ .duration = hdr32.duration,
+ .info = hdr32.info,
+ };
+
+ return 0;
+ }
+#endif
+
+ if (copy_from_user(hdr, argp, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(get_sg_io_hdr);
+
+#ifdef CONFIG_COMPAT
+struct compat_cdrom_generic_command {
+ unsigned char cmd[CDROM_PACKET_SIZE];
+ compat_caddr_t buffer;
+ compat_uint_t buflen;
+ compat_int_t stat;
+ compat_caddr_t sense;
+ unsigned char data_direction;
+ unsigned char pad[3];
+ compat_int_t quiet;
+ compat_int_t timeout;
+ compat_caddr_t unused;
+};
+#endif
+
+static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
+ const void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_generic_command cgc32;
+
+ if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
+ return -EFAULT;
+
+ *cgc = (struct cdrom_generic_command) {
+ .buffer = compat_ptr(cgc32.buffer),
+ .buflen = cgc32.buflen,
+ .stat = cgc32.stat,
+ .sense = compat_ptr(cgc32.sense),
+ .data_direction = cgc32.data_direction,
+ .quiet = cgc32.quiet,
+ .timeout = cgc32.timeout,
+ .unused = compat_ptr(cgc32.unused),
+ };
+ memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
+ return 0;
+ }
+#endif
+ if (copy_from_user(cgc, arg, sizeof(*cgc)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
+ void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_generic_command cgc32 = {
+ .buffer = (uintptr_t)(cgc->buffer),
+ .buflen = cgc->buflen,
+ .stat = cgc->stat,
+ .sense = (uintptr_t)(cgc->sense),
+ .data_direction = cgc->data_direction,
+ .quiet = cgc->quiet,
+ .timeout = cgc->timeout,
+ .unused = (uintptr_t)(cgc->unused),
+ };
+ memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
+
+ if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+ if (copy_to_user(arg, cgc, sizeof(*cgc)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
+ void __user *arg)
+{
+ struct cdrom_generic_command cgc;
+ struct sg_io_hdr hdr;
+ int err;
+
+ err = scsi_get_cdrom_generic_arg(&cgc, arg);
+ if (err)
+ return err;
+
+ cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.interface_id = 'S';
+ hdr.cmd_len = sizeof(cgc.cmd);
+ hdr.dxfer_len = cgc.buflen;
+ switch (cgc.data_direction) {
+ case CGC_DATA_UNKNOWN:
+ hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+ break;
+ case CGC_DATA_WRITE:
+ hdr.dxfer_direction = SG_DXFER_TO_DEV;
+ break;
+ case CGC_DATA_READ:
+ hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+ break;
+ case CGC_DATA_NONE:
+ hdr.dxfer_direction = SG_DXFER_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hdr.dxferp = cgc.buffer;
+ hdr.sbp = cgc.sense;
+ if (hdr.sbp)
+ hdr.mx_sb_len = sizeof(struct request_sense);
+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
+ hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
+ hdr.cmd_len = sizeof(cgc.cmd);
+
+ err = sg_io(sdev, &hdr, mode);
+ if (err == -EFAULT)
+ return -EFAULT;
+
+ if (hdr.status)
+ return -EIO;
+
+ cgc.stat = err;
+ cgc.buflen = hdr.resid;
+ if (scsi_put_cdrom_generic_arg(&cgc, arg))
+ return -EFAULT;
+
+ return err;
+}
+
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
+ void __user *argp)
+{
+ struct sg_io_hdr hdr;
+ int error;
+
+ error = get_sg_io_hdr(&hdr, argp);
+ if (error)
+ return error;
+ error = sg_io(sdev, &hdr, mode);
+ if (error == -EFAULT)
+ return error;
+ if (put_sg_io_hdr(&hdr, argp))
+ return -EFAULT;
+ return error;
+}
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @mode: mode the block/char device is opened with
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+ void __user *arg)
+{
+ struct request_queue *q = sdev->request_queue;
struct scsi_sense_hdr sense_hdr;
/* Check for deprecated ioctls ... all the ioctls which don't
@@ -211,26 +880,34 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
}
switch (cmd) {
+ case SG_GET_VERSION_NUM:
+ return sg_get_version(arg);
+ case SG_SET_TIMEOUT:
+ return sg_set_timeout(sdev, arg);
+ case SG_GET_TIMEOUT:
+ return jiffies_to_clock_t(sdev->sg_timeout);
+ case SG_GET_RESERVED_SIZE:
+ return sg_get_reserved_size(sdev, arg);
+ case SG_SET_RESERVED_SIZE:
+ return sg_set_reserved_size(sdev, arg);
+ case SG_EMULATED_HOST:
+ return sg_emulated_host(q, arg);
+ case SG_IO:
+ return scsi_ioctl_sg_io(sdev, mode, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ return sg_scsi_ioctl(q, mode, arg);
+ case CDROM_SEND_PACKET:
+ return scsi_cdrom_send_packet(sdev, mode, arg);
+ case CDROMCLOSETRAY:
+ return scsi_send_start_stop(sdev, 3);
+ case CDROMEJECT:
+ return scsi_send_start_stop(sdev, 2);
case SCSI_IOCTL_GET_IDLUN:
- if (!access_ok(arg, sizeof(struct scsi_idlun)))
- return -EFAULT;
-
- __put_user((sdev->id & 0xff)
- + ((sdev->lun & 0xff) << 8)
- + ((sdev->channel & 0xff) << 16)
- + ((sdev->host->host_no & 0xff) << 24),
- &((struct scsi_idlun __user *)arg)->dev_id);
- __put_user(sdev->host->unique_id,
- &((struct scsi_idlun __user *)arg)->host_unique_id);
- return 0;
+ return scsi_get_idlun(sdev, arg);
case SCSI_IOCTL_GET_BUS_NUMBER:
return put_user(sdev->host->host_no, (int __user *)arg);
case SCSI_IOCTL_PROBE_HOST:
return ioctl_probe(sdev->host, arg);
- case SCSI_IOCTL_SEND_COMMAND:
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- return -EACCES;
- return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK:
@@ -239,66 +916,27 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
NORMAL_RETRIES, &sense_hdr);
case SCSI_IOCTL_START_UNIT:
- scsi_cmd[0] = START_STOP;
- scsi_cmd[1] = 0;
- scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
- scsi_cmd[4] = 1;
- return ioctl_internal_command(sdev, scsi_cmd,
- START_STOP_TIMEOUT, NORMAL_RETRIES);
+ return scsi_send_start_stop(sdev, 1);
case SCSI_IOCTL_STOP_UNIT:
- scsi_cmd[0] = START_STOP;
- scsi_cmd[1] = 0;
- scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
- scsi_cmd[4] = 0;
- return ioctl_internal_command(sdev, scsi_cmd,
- START_STOP_TIMEOUT, NORMAL_RETRIES);
+ return scsi_send_start_stop(sdev, 0);
case SCSI_IOCTL_GET_PCI:
return scsi_ioctl_get_pci(sdev, arg);
case SG_SCSI_RESET:
return scsi_ioctl_reset(sdev, arg);
}
- return -ENOIOCTLCMD;
-}
-
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field. Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
- int ret = scsi_ioctl_common(sdev, cmd, arg);
-
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (sdev->host->hostt->ioctl)
- return sdev->host->hostt->ioctl(sdev, cmd, arg);
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(scsi_ioctl);
#ifdef CONFIG_COMPAT
-int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
- int ret = scsi_ioctl_common(sdev, cmd, arg);
-
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (sdev->host->hostt->compat_ioctl)
+ if (in_compat_syscall()) {
+ if (!sdev->host->hostt->compat_ioctl)
+ return -EINVAL;
return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
-
- return ret;
-}
-EXPORT_SYMBOL(scsi_compat_ioctl);
+ }
#endif
+ if (!sdev->host->hostt->ioctl)
+ return -EINVAL;
+ return sdev->host->hostt->ioctl(sdev, cmd, arg);
+}
+EXPORT_SYMBOL(scsi_ioctl);
/*
* We can process a reset even when a device isn't fully operable.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 610ee41fa54c..8b89fab7c420 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -21,6 +21,7 @@
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
@@ -52,51 +53,17 @@
#define SCSI_INLINE_SG_CNT 2
#endif
-static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
-static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
-static inline struct kmem_cache *
-scsi_select_sense_cache(bool unchecked_isa_dma)
-{
- return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
-}
-
-static void scsi_free_sense_buffer(bool unchecked_isa_dma,
- unsigned char *sense_buffer)
-{
- kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
- sense_buffer);
-}
-
-static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
- gfp_t gfp_mask, int numa_node)
-{
- return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
- gfp_mask, numa_node);
-}
-
int scsi_init_sense_cache(struct Scsi_Host *shost)
{
- struct kmem_cache *cache;
int ret = 0;
mutex_lock(&scsi_sense_cache_mutex);
- cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
- if (cache)
- goto exit;
-
- if (shost->unchecked_isa_dma) {
- scsi_sense_isadma_cache =
- kmem_cache_create("scsi_sense_cache(DMA)",
- SCSI_SENSE_BUFFERSIZE, 0,
- SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
- if (!scsi_sense_isadma_cache)
- ret = -ENOMEM;
- } else {
+ if (!scsi_sense_cache) {
scsi_sense_cache =
kmem_cache_create_usercopy("scsi_sense_cache",
SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
@@ -104,18 +71,10 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
if (!scsi_sense_cache)
ret = -ENOMEM;
}
- exit:
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
-/*
- * When to reinvoke queueing after a resource shortage. It's 3 msecs to
- * not change behaviour from the previous unplug mechanism, experimentation
- * may prove this needs changing.
- */
-#define SCSI_QUEUE_DELAY 3
-
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
@@ -152,15 +111,22 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
- if (cmd->request->rq_flags & RQF_DONTPREP) {
- cmd->request->rq_flags &= ~RQF_DONTPREP;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+
+ if (rq->rq_flags & RQF_DONTPREP) {
+ rq->rq_flags &= ~RQF_DONTPREP;
scsi_mq_uninit_cmd(cmd);
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(cmd->request, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
/**
@@ -195,31 +161,24 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* Requeue this command. It will go before all other commands
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
- * before blk_cleanup_queue() finishes.
+ * before blk_mq_destroy_queue() finishes.
*/
cmd->result = 0;
- blk_mq_requeue_request(cmd->request, true);
+ blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true);
}
-/*
- * Function: scsi_queue_insert()
- *
- * Purpose: Insert a command in the midlevel queue.
- *
- * Arguments: cmd - command that we are adding to queue.
- * reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
+/**
+ * scsi_queue_insert - Reinsert a command in the queue.
+ * @cmd: command that we are adding to queue.
+ * @reason: why we are inserting command to queue.
*
- * Returns: Nothing.
+ * We do this for one of two cases. Either the host is busy and it cannot accept
+ * any more commands for the time being, or the device returned QUEUE_FULL and
+ * can accept no more commands.
*
- * Notes: We do this for one of two cases. Either the host is busy
- * and it cannot accept any more commands for the time being,
- * or the device returned QUEUE_FULL and can accept no more
- * commands.
- * Notes: This could be called either from an interrupt context or a
- * normal process context.
+ * Context: This could be called either from an interrupt context or a normal
+ * process context.
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
@@ -236,7 +195,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @bufflen: len of buffer
* @sense: optional sense buffer
* @sshdr: optional decoded sense header
- * @timeout: request timeout in seconds
+ * @timeout: request timeout in HZ
* @retries: number of times to retry request
* @flags: flags for ->cmd_flags
* @rq_flags: flags for ->rq_flags
@@ -248,27 +207,30 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, u64 flags, req_flags_t rq_flags,
- int *resid)
+ int timeout, int retries, blk_opf_t flags,
+ req_flags_t rq_flags, int *resid)
{
struct request *req;
- struct scsi_request *rq;
- int ret = DRIVER_ERROR << 24;
+ struct scsi_cmnd *scmd;
+ int ret;
- req = blk_get_request(sdev->request_queue,
+ req = scsi_alloc_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
- return ret;
- rq = scsi_req(req);
-
- if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
- buffer, bufflen, GFP_NOIO))
- goto out;
-
- rq->cmd_len = COMMAND_SIZE(cmd[0]);
- memcpy(rq->cmd, cmd, rq->cmd_len);
- rq->retries = retries;
+ return PTR_ERR(req);
+
+ if (bufflen) {
+ ret = blk_rq_map_kern(sdev->request_queue, req,
+ buffer, bufflen, GFP_NOIO);
+ if (ret)
+ goto out;
+ }
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(scmd->cmnd, cmd, scmd->cmd_len);
+ scmd->allowed = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
req->rq_flags |= rq_flags | RQF_QUIET;
@@ -276,7 +238,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
- blk_execute_rq(req->q, NULL, req, 1);
+ blk_execute_rq(req, true);
/*
* Some devices (USB mass-storage in particular) may transfer
@@ -284,43 +246,25 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
- memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
+ if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
+ memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
if (resid)
- *resid = rq->resid_len;
- if (sense && rq->sense_len)
- memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ *resid = scmd->resid_len;
+ if (sense && scmd->sense_len)
+ memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
if (sshdr)
- scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
- ret = rq->result;
+ scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
+ sshdr);
+ ret = scmd->result;
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
EXPORT_SYMBOL(__scsi_execute);
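
Most in-kernel callers reach __scsi_execute() through wrappers such as scsi_execute_req(). A kernel-side sketch of a typical call (not standalone; the NOT_READY handling at the end is an illustrative policy, not kernel code):

static int example_tur(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* Timeout is in HZ units, per the corrected kerneldoc above. */
	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  10 * HZ, 3, NULL);
	if (result < 0)		/* request allocation or mapping failure */
		return result;
	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
		return -EAGAIN;
	return 0;
}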
/*
- * Function: scsi_init_cmd_errh()
- *
- * Purpose: Initialize cmd fields related to error handling.
- *
- * Arguments: cmd - command that is ready to be queued.
- *
- * Notes: This function has the job of initializing a number of
- * fields related to error handling. Typically this will
- * be called once for each command, as required.
- */
-static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
-{
- scsi_set_resid(cmd, 0);
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- if (cmd->cmd_len == 0)
- cmd->cmd_len = scsi_command_size(cmd->cmnd);
-}
-
-/*
* Wake up the error handler if necessary. Avoid as follows that the error
* handler is not woken up if host in-flight requests number ==
* shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
@@ -354,7 +298,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
- atomic_dec(&sdev->device_busy);
+ sbitmap_put(&sdev->budget_map, cmd->budget_token);
+ cmd->budget_token = -1;
}
static void scsi_kick_queue(struct request_queue *q)
@@ -401,7 +346,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
-
+
scsi_device_put(sdev);
}
out:
@@ -410,7 +355,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
- if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+ if (scsi_device_busy(sdev) >= sdev->queue_depth)
return true;
if (atomic_read(&sdev->device_blocked) > 0)
return true;
@@ -477,9 +422,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
* it and the queue. Mitigate by taking a reference to the
* queue and never touching the sdev again after we drop the
* host lock. Note: if __scsi_remove_device() invokes
- * blk_cleanup_queue() before the queue is run from this
+ * blk_mq_destroy_queue() before the queue is run from this
* function then blk_run_queue() will return immediately since
- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
*/
slq = sdev->request_queue;
if (!blk_get_queue(slq))
@@ -496,17 +441,11 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/*
- * Function: scsi_run_queue()
- *
- * Purpose: Select a proper request queue to serve next
- *
- * Arguments: q - last request's queue
- *
- * Returns: Nothing
+/**
+ * scsi_run_queue - Select a proper request queue to serve next.
+ * @q: last request's queue
*
- * Notes: The previous command was completely finished, start
- * a new one if possible.
+ * The previous command was completely finished, start a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
@@ -540,7 +479,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
- if (!blk_rq_is_passthrough(cmd->request)) {
+ if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
@@ -548,7 +487,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table,
@@ -557,12 +496,38 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
sg_free_table_chained(&cmd->prot_sdb->table,
SCSI_INLINE_PROT_SG_CNT);
}
+EXPORT_SYMBOL_GPL(scsi_free_sgtables);
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
- scsi_del_cmd_from_list(cmd);
+}
+
+static void scsi_run_queue_async(struct scsi_device *sdev)
+{
+ if (scsi_target(sdev)->single_lun ||
+ !list_empty(&sdev->host->starved_list)) {
+ kblockd_schedule_work(&sdev->requeue_work);
+ } else {
+ /*
+ * smp_mb() present in sbitmap_queue_clear() or implied in
+ * .end_io is for ordering writing .device_busy in
+ * scsi_device_unbusy() and reading sdev->restarts.
+ */
+ int old = atomic_read(&sdev->restarts);
+
+ /*
+ * ->restarts has to be kept as non-zero if new budget
+ * contention occurs.
+ *
+ * No need to run queue when either another re-run
+ * queue wins in updating ->restarts or a new budget
+ * contention occurs.
+ */
+ if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+ }
}
/* Returns false when no more bytes to process, true if there are more */
@@ -576,10 +541,11 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
+ // XXX:
if (blk_queue_add_random(q))
- add_disk_randomness(req->rq_disk);
+ add_disk_randomness(req->q->disk);
- if (!blk_rq_is_scsi(req)) {
+ if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
}
@@ -609,72 +575,127 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
__blk_mq_end_request(req, error);
- if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
- kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(q, true);
+ scsi_run_queue_async(sdev);
percpu_ref_put(&q->q_usage_counter);
return false;
}
+static inline u8 get_scsi_ml_byte(int result)
+{
+ return (result >> 8) & 0xff;
+}
+
/**
* scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
- * @cmd: SCSI command
* @result: scsi error code
*
- * Translate a SCSI result code into a blk_status_t value. May reset the host
- * byte of @cmd->result.
+ * Translate a SCSI result code into a blk_status_t value.
*/
-static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
+static blk_status_t scsi_result_to_blk_status(int result)
{
+ /*
+ * Check the scsi-ml byte first in case we converted a host or status
+ * byte.
+ */
+ switch (get_scsi_ml_byte(result)) {
+ case SCSIML_STAT_OK:
+ break;
+ case SCSIML_STAT_RESV_CONFLICT:
+ return BLK_STS_NEXUS;
+ case SCSIML_STAT_NOSPC:
+ return BLK_STS_NOSPC;
+ case SCSIML_STAT_MED_ERROR:
+ return BLK_STS_MEDIUM;
+ case SCSIML_STAT_TGT_FAILURE:
+ return BLK_STS_TARGET;
+ }
+
switch (host_byte(result)) {
case DID_OK:
- /*
- * Also check the other bytes than the status byte in result
- * to handle the case when a SCSI LLD sets result to
- * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
- */
- if (scsi_status_is_good(result) && (result & ~0xff) == 0)
+ if (scsi_status_is_good(result))
return BLK_STS_OK;
return BLK_STS_IOERR;
case DID_TRANSPORT_FAILFAST:
+ case DID_TRANSPORT_MARGINAL:
return BLK_STS_TRANSPORT;
- case DID_TARGET_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_TARGET;
- case DID_NEXUS_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NEXUS;
- case DID_ALLOC_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NOSPC;
- case DID_MEDIUM_ERROR:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_MEDIUM;
default:
return BLK_STS_IOERR;
}
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
+/**
+ * scsi_rq_err_bytes - determine the number of bytes until the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request could be a merge of I/Os which require different failure
+ * handling. This function determines the number of bytes which
+ * can be failed from the beginning of the request without
+ * crossing into an area which needs to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
+ blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->rq_flags & RQF_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_opf & ff) != ff)
+ break;
+ bytes += bio->bi_iter.bi_size;
+ }
+
+ /* this could lead to an infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
}
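For a concrete picture, consider a request merged from three 4 KiB bios where only the first two carry REQ_FAILFAST_TRANSPORT; the numbers below are illustrative assumptions, not values taken from this patch:

	/*
	 * rq: 12 KiB, RQF_MIXED_MERGE set, cmd_flags carry
	 * REQ_FAILFAST_TRANSPORT (inherited from the head bio).
	 *
	 * bio 1: 4 KiB, REQ_FAILFAST_TRANSPORT set  -> counted
	 * bio 2: 4 KiB, REQ_FAILFAST_TRANSPORT set  -> counted
	 * bio 3: 4 KiB, no failfast bits            -> loop stops
	 */
	unsigned int bytes = scsi_rq_err_bytes(rq);	/* 8192 */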
+static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
+{
+ struct request *req = scsi_cmd_to_rq(cmd);
+ unsigned long wait_for;
+
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return false;
+
+ wait_for = (cmd->allowed + 1) * req->timeout;
+ if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
+ wait_for/HZ);
+ return true;
+ }
+ return false;
+}
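As a worked example with assumed numbers: a command allocated with cmd->allowed == 5 and a 30 second request timeout is given six attempts' worth of wall-clock time before this check declares it dead:

	/* (cmd->allowed + 1) * req->timeout = (5 + 1) * 30 * HZ */
	unsigned long wait_for = (5 + 1) * 30 * HZ;	/* 180 s total budget */
	/* expired once jiffies passes cmd->jiffies_at_alloc + wait_for */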
+
+/*
+ * When an ALUA transition state is returned, reprep the cmd so that it
+ * uses the ALUA handler's transition timeout. Delay the reprep by
+ * 1 sec to avoid aggressively retrying a target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
- unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -684,7 +705,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
if (sense_valid)
sense_current = !scsi_sense_is_deferred(&sshdr);
- blk_stat = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -754,6 +775,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
+ case 0x11: /* notify (enable spinup) required */
case 0x14: /* space allocation in progress */
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
@@ -761,6 +783,9 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x24: /* depopulation in progress */
action = ACTION_DELAYED_RETRY;
break;
+ case 0x0a: /* ALUA state transition */
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -772,6 +797,15 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
/* See SSC3rXX or current. */
action = ACTION_FAIL;
break;
+ case DATA_PROTECT:
+ action = ACTION_FAIL;
+ if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
+ (sshdr.asc == 0x55 &&
+ (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
+ /* Insufficient zone resources */
+ blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
+ }
+ break;
default:
action = ACTION_FAIL;
break;
@@ -779,8 +813,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
} else
action = ACTION_FAIL;
- if (action != ACTION_FAIL &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
+ if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
action = ACTION_FAIL;
switch (action) {
@@ -802,16 +835,19 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
*/
if (!level && __ratelimit(&_rs)) {
scsi_print_result(cmd, NULL, FAILED);
- if (driver_byte(result) == DRIVER_SENSE)
+ if (sense_valid)
scsi_print_sense(cmd);
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+ if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
return;
- /*FALLTHRU*/
+ fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -834,7 +870,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
{
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
struct scsi_sense_hdr sshdr;
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -846,19 +882,18 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
/*
* SG_IO wants current and deferred errors
*/
- scsi_req(req)->sense_len =
- min(8 + cmd->sense_buffer[7],
- SCSI_SENSE_BUFFERSIZE);
+ cmd->sense_len = min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
}
if (sense_current)
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
} else if (blk_rq_bytes(req) == 0 && sense_current) {
/*
* Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets *blk_statp explicitly for the problem case.
*/
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
}
/*
* Recovered errors need reporting, but they're always treated as
@@ -890,59 +925,44 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
* intermediate statuses (both obsolete in SAM-4) as good.
*/
- if (status_byte(result) && scsi_status_is_good(result)) {
+ if ((result & 0xff) && scsi_status_is_good(result)) {
result = 0;
*blk_statp = BLK_STS_OK;
}
return result;
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with blk_stat other than
- * BLK_STS_OK, to fail the remainder of the request.
+/**
+ * scsi_io_completion - Completion processing for SCSI commands.
+ * @cmd: command that is finished.
+ * @good_bytes: number of processed bytes.
+ *
+ * We will finish off the specified number of sectors. If we are done, the
+ * command block will be released and the queue function will be goosed. If we
+ * are not done then we have to figure out what to do next:
+ *
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
+ * unprepared and put back on the queue. Then a new command will
+ * be created for it. This should be used if we made forward
+ * progress, or if we want to switch from READ(10) to READ(6) for
+ * example.
+ *
+ * b) We can call scsi_io_completion_action(). The request will be
+ * put back on the queue and retried using the same command as
+ * before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
+ struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
if (unlikely(result)) /* a nz result may or may not be an error */
result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
- if (unlikely(blk_rq_is_passthrough(req))) {
- /*
- * scsi_result_to_blk_status may have reset the host_byte
- */
- scsi_req(req)->result = cmd->result;
- }
-
/*
* Next deal with any sectors which we were able to correctly
* handle.
@@ -952,8 +972,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
blk_rq_sectors(req), good_bytes));
/*
- * Next deal with any sectors which we were able to correctly
- * handle. Failed, zero length commands always need to drop down
+ * Failed, zero-length commands always need to drop down
* to retry code. Fast path should return in this block.
*/
if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
@@ -971,64 +990,92 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
/*
* If there had been no error, but we have leftover bytes in the
- * requeues just queue the command up again.
+ * request, just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
-static blk_status_t scsi_init_sgtable(struct request *req,
- struct scsi_data_buffer *sdb)
+static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+ struct request *rq)
+{
+ return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
+ !op_is_write(req_op(rq)) &&
+ sdev->host->hostt->dma_need_drain(rq);
+}
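Only hosts that set the optional dma_need_drain() hook in their scsi_host_template (and fill in sdev->dma_drain_buf and sdev->dma_drain_len) ever see a drain segment appended below. A minimal sketch of the driver side, with an invented driver name and an assumed drain policy:

	static bool my_lld_dma_need_drain(struct request *rq)
	{
		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

		/* assumed policy: drain vendor-specific passthrough CDBs */
		return cmd->cmnd[0] >= 0xc0;
	}

	static struct scsi_host_template my_lld_template = {
		/* ... */
		.dma_need_drain	= my_lld_dma_need_drain,
	};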
+
+/**
+ * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
+ * @cmd: SCSI command data structure to initialize.
+ *
+ * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
+ * for @cmd.
+ *
+ * Returns:
+ * * BLK_STS_OK - on success
+ * * BLK_STS_RESOURCE - if the failure is retryable
+ * * BLK_STS_IOERR - if the failure is fatal
+ */
+blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
+ struct scsi_device *sdev = cmd->device;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
+ struct scatterlist *last_sg = NULL;
+ blk_status_t ret;
+ bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
int count;
+ if (WARN_ON_ONCE(!nr_segs))
+ return BLK_STS_IOERR;
+
+ /*
+ * Make sure there is space for the drain. The driver must adjust
+ * max_hw_segments to be prepared for this.
+ */
+ if (need_drain)
+ nr_segs++;
+
/*
* If sg table allocation fails, requeue request later.
*/
- if (unlikely(sg_alloc_table_chained(&sdb->table,
- blk_rq_nr_phys_segments(req), sdb->table.sgl,
- SCSI_INLINE_SG_CNT)))
+ if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
+ cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
return BLK_STS_RESOURCE;
- /*
+ /*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
- count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
- BUG_ON(count > sdb->table.nents);
- sdb->table.nents = count;
- sdb->length = blk_rq_payload_bytes(req);
- return BLK_STS_OK;
-}
+ count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
-/*
- * Function: scsi_init_io()
- *
- * Purpose: SCSI I/O initialize function.
- *
- * Arguments: cmd - Command descriptor we wish to initialize
- *
- * Returns: BLK_STS_OK on success
- * BLK_STS_RESOURCE if the failure is retryable
- * BLK_STS_IOERR if the failure is fatal
- */
-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- blk_status_t ret;
+ if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+ unsigned int pad_len =
+ (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
- if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
- return BLK_STS_IOERR;
+ last_sg->length += pad_len;
+ cmd->extra_len += pad_len;
+ }
+
+ if (need_drain) {
+ sg_unmark_end(last_sg);
+ last_sg = sg_next(last_sg);
+ sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
+ sg_mark_end(last_sg);
+
+ cmd->extra_len += sdev->dma_drain_len;
+ count++;
+ }
- ret = scsi_init_sgtable(rq, &cmd->sdb);
- if (ret)
- return ret;
+ BUG_ON(count > cmd->sdb.table.nents);
+ cmd->sdb.table.nents = count;
+ cmd->sdb.length = blk_rq_payload_bytes(rq);
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
- int ivecs, count;
+ int ivecs;
if (WARN_ON_ONCE(!prot_sdb)) {
/*
@@ -1060,10 +1107,10 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
return BLK_STS_OK;
out_free_sgtables:
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
return ret;
}
-EXPORT_SYMBOL(scsi_init_io);
+EXPORT_SYMBOL(scsi_alloc_sgtables);
/**
* scsi_initialize_rq - initialize struct scsi_cmnd partially
@@ -1072,20 +1119,31 @@ EXPORT_SYMBOL(scsi_init_io);
* This function initializes the members of struct scsi_cmnd that must be
* initialized before request processing starts and that won't be
* reinitialized if a SCSI command is requeued.
- *
- * Called from inside blk_get_request() for pass-through requests and from
- * inside scsi_init_command() for filesystem requests.
*/
static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- scsi_req_init(&cmd->req);
+ memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
+ cmd->cmd_len = MAX_COMMAND_SIZE;
+ cmd->sense_len = 0;
init_rcu_head(&cmd->rcu);
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
+struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags)
+{
+ struct request *rq;
+
+ rq = blk_mq_alloc_request(q, opf, flags);
+ if (!IS_ERR(rq))
+ scsi_initialize_rq(rq);
+ return rq;
+}
+EXPORT_SYMBOL_GPL(scsi_alloc_request);
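A hedged sketch of a caller, modeled loosely on how internal commands are issued in this era; the timeout and retry values are assumptions:

	struct request *rq;
	struct scsi_cmnd *scmd;

	rq = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	scmd = blk_mq_rq_to_pdu(rq);
	scmd->cmd_len = 6;
	scmd->cmnd[0] = TEST_UNIT_READY;
	scmd->allowed = 3;
	rq->timeout = 10 * HZ;

	blk_execute_rq(rq, false);
	blk_mq_free_request(rq);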
+
/*
* Only called when the request isn't completed by SCSI, and not freed by
* SCSI
@@ -1098,69 +1156,19 @@ static void scsi_cleanup_rq(struct request *rq)
}
}
-/* Add a command to the list used by the aacraid and dpt_i2o drivers */
-void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
- if (shost->use_cmd_list) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_add_tail(&cmd->list, &sdev->cmd_list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
-}
-
-/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
-void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
- if (shost->use_cmd_list) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- BUG_ON(list_empty(&cmd->list));
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
-}
-
-/* Called after a request has been started. */
+/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
- void *buf = cmd->sense_buffer;
- void *prot = cmd->prot_sdb;
- struct request *rq = blk_mq_rq_from_pdu(cmd);
- unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
- unsigned long jiffies_at_alloc;
- int retries;
- bool in_flight;
+ struct request *rq = scsi_cmd_to_rq(cmd);
- if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
- flags |= SCMD_INITIALIZED;
+ if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
+ cmd->flags |= SCMD_INITIALIZED;
scsi_initialize_rq(rq);
}
- jiffies_at_alloc = cmd->jiffies_at_alloc;
- retries = cmd->retries;
- in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /* zero out the cmd, except for the embedded scsi_request */
- memset((char *)cmd + sizeof(cmd->req), 0,
- sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
-
cmd->device = dev;
- cmd->sense_buffer = buf;
- cmd->prot_sdb = prot;
- cmd->flags = flags;
+ INIT_LIST_HEAD(&cmd->eh_entry);
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- cmd->jiffies_at_alloc = jiffies_at_alloc;
- cmd->retries = retries;
- if (in_flight)
- __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
-
- scsi_add_cmd_to_list(cmd);
}
static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
@@ -1175,7 +1183,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
* submit a request without an attached bio.
*/
if (req->bio) {
- blk_status_t ret = scsi_init_io(cmd);
+ blk_status_t ret = scsi_alloc_sgtables(cmd);
if (unlikely(ret != BLK_STS_OK))
return ret;
} else {
@@ -1184,55 +1192,16 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
- cmd->cmd_len = scsi_req(req)->cmd_len;
- cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
- cmd->allowed = scsi_req(req)->retries;
return BLK_STS_OK;
}
-/*
- * Setup a normal block command. These are simple request from filesystems
- * that still need to be translated to SCSI CDBs from the ULD.
- */
-static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
- struct request *req)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-
- if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
- blk_status_t ret = sdev->handler->prep_fn(sdev, req);
- if (ret != BLK_STS_OK)
- return ret;
- }
-
- cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
- memset(cmd->cmnd, 0, BLK_MAX_CDB);
- return scsi_cmd_to_driver(cmd)->init_command(cmd);
-}
-
-static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
- struct request *req)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-
- if (!blk_rq_bytes(req))
- cmd->sc_data_direction = DMA_NONE;
- else if (rq_data_dir(req) == WRITE)
- cmd->sc_data_direction = DMA_TO_DEVICE;
- else
- cmd->sc_data_direction = DMA_FROM_DEVICE;
-
- if (blk_rq_is_scsi(req))
- return scsi_setup_scsi_cmnd(sdev, req);
- else
- return scsi_setup_fs_cmnd(sdev, req);
-}
-
static blk_status_t
-scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
+ case SDEV_CREATED:
+ return BLK_STS_OK;
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
/*
@@ -1240,8 +1209,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* commands. The device must be brought online
* before trying any recovery commands.
*/
- sdev_printk(KERN_ERR, sdev,
- "rejecting I/O to offline device\n");
+ if (!sdev->offline_already) {
+ sdev->offline_already = true;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ }
return BLK_STS_IOERR;
case SDEV_DEL:
/*
@@ -1256,37 +1228,38 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
return BLK_STS_RESOURCE;
case SDEV_QUIESCE:
/*
- * If the devices is blocked we defer normal commands.
+ * If the device is blocked we only accept power management
+ * commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
return BLK_STS_RESOURCE;
return BLK_STS_OK;
default:
/*
* For any other not fully online state we only allow
- * special commands. In particular any user initiated
- * command is not allowed.
+ * power management commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
- return BLK_STS_IOERR;
+ if (req && !(req->rq_flags & RQF_PM))
+ return BLK_STS_OFFLINE;
return BLK_STS_OK;
}
}
/*
- * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
- * return 0.
- *
- * Called with the queue_lock held.
+ * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
+ * and return the token else return -1.
*/
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
- unsigned int busy;
+ int token;
- busy = atomic_inc_return(&sdev->device_busy) - 1;
+ token = sbitmap_get(&sdev->budget_map);
if (atomic_read(&sdev->device_blocked)) {
- if (busy)
+ if (token < 0)
+ goto out;
+
+ if (scsi_device_busy(sdev) > 1)
goto out_dec;
/*
@@ -1298,13 +1271,12 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
"unblocking device at zero depth\n"));
}
- if (busy >= sdev->queue_depth)
- goto out_dec;
-
- return 1;
+ return token;
out_dec:
- atomic_dec(&sdev->device_busy);
- return 0;
+ if (token >= 0)
+ sbitmap_put(&sdev->budget_map, token);
+out:
+ return -1;
}
/*
@@ -1448,11 +1420,14 @@ static bool scsi_mq_lld_busy(struct request_queue *q)
return false;
}
-static void scsi_softirq_done(struct request *rq)
+/*
+ * Block layer request completion callback. May be called from interrupt
+ * context.
+ */
+static void scsi_complete(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
- int disposition;
+ enum scsi_disposition disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
@@ -1461,34 +1436,29 @@ static void scsi_softirq_done(struct request *rq)
atomic_inc(&cmd->device->ioerr_cnt);
disposition = scsi_decide_disposition(cmd);
- if (disposition != SUCCESS &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- scmd_printk(KERN_ERR, cmd,
- "timing out command, waited %lus\n",
- wait_for/HZ);
+ if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
disposition = SUCCESS;
- }
scsi_log_completion(cmd, disposition);
switch (disposition) {
- case SUCCESS:
- scsi_finish_command(cmd);
- break;
- case NEEDS_RETRY:
- scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
- break;
- case ADD_TO_MLQUEUE:
- scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
- break;
- default:
- scsi_eh_scmd_add(cmd);
- break;
+ case SUCCESS:
+ scsi_finish_command(cmd);
+ break;
+ case NEEDS_RETRY:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+ break;
+ case ADD_TO_MLQUEUE:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
+ default:
+ scsi_eh_scmd_add(cmd);
+ break;
}
}
/**
- * scsi_dispatch_command - Dispatch a command to the low-level driver.
+ * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
* @cmd: command block we are dispatching.
*
* Return: nonzero return request was rejected and device's queue needs to be
@@ -1564,7 +1534,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
return rtn;
done:
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -1575,18 +1545,42 @@ static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
sizeof(struct scatterlist);
}
-static blk_status_t scsi_mq_prep_fn(struct request *req)
+static blk_status_t scsi_prepare_cmd(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
+ bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
struct scatterlist *sg;
scsi_init_command(sdev, cmd);
- cmd->request = req;
- cmd->tag = req->tag;
+ cmd->eh_eflags = 0;
+ cmd->prot_type = 0;
+ cmd->prot_flags = 0;
+ cmd->submitter = 0;
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+ cmd->underflow = 0;
+ cmd->transfersize = 0;
+ cmd->host_scribble = NULL;
+ cmd->result = 0;
+ cmd->extra_len = 0;
+ cmd->state = 0;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (!shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
cmd->prot_op = SCSI_PROT_NORMAL;
+ if (blk_rq_bytes(req))
+ cmd->sc_data_direction = rq_dma_dir(req);
+ else
+ cmd->sc_data_direction = DMA_NONE;
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
@@ -1598,46 +1592,120 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
(struct scatterlist *)(cmd->prot_sdb + 1);
}
- blk_mq_start_request(req);
+ /*
+ * Special handling for passthrough commands, which don't go to the ULP
+ * at all:
+ */
+ if (blk_rq_is_passthrough(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+
+ if (sdev->handler && sdev->handler->prep_fn) {
+ blk_status_t ret = sdev->handler->prep_fn(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ return ret;
+ }
- return scsi_setup_cmnd(sdev, req);
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
+ memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
+ return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
-static void scsi_mq_done(struct scsi_cmnd *cmd)
+static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
{
+ struct request *req = scsi_cmd_to_rq(cmd);
+
+ switch (cmd->submitter) {
+ case SUBMITTED_BY_BLOCK_LAYER:
+ break;
+ case SUBMITTED_BY_SCSI_ERROR_HANDLER:
+ return scsi_eh_done(cmd);
+ case SUBMITTED_BY_SCSI_RESET_IOCTL:
+ return;
+ }
+
+ if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
+ return;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
- /*
- * If the block layer didn't complete the request due to a timeout
- * injection, scsi must clear its internal completed state so that the
- * timeout handler will see it needs to escalate its own error
- * recovery.
- */
- if (unlikely(!blk_mq_complete_request(cmd->request)))
- clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
+ if (complete_directly)
+ blk_mq_complete_request_direct(req, scsi_complete);
+ else
+ blk_mq_complete_request(req);
+}
+
+void scsi_done(struct scsi_cmnd *cmd)
+{
+ scsi_done_internal(cmd, false);
}
+EXPORT_SYMBOL(scsi_done);
-static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+void scsi_done_direct(struct scsi_cmnd *cmd)
+{
+ scsi_done_internal(cmd, true);
+}
+EXPORT_SYMBOL(scsi_done_direct);
+
+static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
- struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
- atomic_dec(&sdev->device_busy);
+ sbitmap_put(&sdev->budget_map, budget_token);
}
-static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
+ * not to change behaviour from the previous unplug mechanism; experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
+static int scsi_mq_get_budget(struct request_queue *q)
{
- struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
+ int token = scsi_dev_queue_ready(q, sdev);
- if (scsi_dev_queue_ready(q, sdev))
- return true;
+ if (token >= 0)
+ return token;
- if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
- return false;
+ atomic_inc(&sdev->restarts);
+
+ /*
+ * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
+ * .restarts must be incremented before .device_busy is read because the
+ * code in scsi_run_queue_async() depends on the order of these operations.
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * If all in-flight requests originated from this LUN are completed
+ * before reading .device_busy, sdev->device_busy will be observed as
+ * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
+ * soon. Otherwise, completion of one of these requests will observe
+ * the .restarts flag, and the request queue will be run for handling
+ * this request, see scsi_end_request().
+ */
+ if (unlikely(scsi_device_busy(sdev) == 0 &&
+ !scsi_device_blocked(sdev)))
+ blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
+ return -1;
+}
+
+static void scsi_mq_set_rq_budget_token(struct request *req, int token)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+ cmd->budget_token = token;
+}
+
+static int scsi_mq_get_rq_budget_token(struct request *req)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+ return cmd->budget_token;
}
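Putting the four budget callbacks together, the token's lifecycle through blk-mq looks roughly like this; a simplified sketch, not verbatim block layer code:

	token = ops->get_budget(q);		/* scsi_mq_get_budget(): sbitmap_get() */
	ops->set_rq_budget_token(rq, token);	/* remembered in cmd->budget_token */
	ops->queue_rq(hctx, bd);		/* scsi_queue_rq() dispatches cmd */
	/* ... on completion or on dispatch failure ... */
	ops->put_budget(q, token);		/* scsi_mq_put_budget(): sbitmap_put() */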
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1651,12 +1719,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
int reason;
+ WARN_ON_ONCE(cmd->budget_token < 0);
+
/*
* If the device is not in running state we will reject some or all
* commands.
*/
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- ret = scsi_prep_state_check(sdev, req);
+ ret = scsi_device_state_check(sdev, req);
if (ret != BLK_STS_OK)
goto out_put_budget;
}
@@ -1668,13 +1738,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
- ret = scsi_mq_prep_fn(req);
+ ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
} else {
clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
- blk_mq_start_request(req);
}
cmd->flags &= SCMD_PRESERVED_FLAGS;
@@ -1683,9 +1752,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (bd->last)
cmd->flags |= SCMD_LAST;
- scsi_init_cmd_errh(cmd);
- cmd->scsi_done = scsi_mq_done;
+ scsi_set_resid(cmd, 0);
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
+ blk_mq_start_request(req);
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
@@ -1701,20 +1772,26 @@ out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
- scsi_mq_put_budget(hctx);
+ scsi_mq_put_budget(q, cmd->budget_token);
+ cmd->budget_token = -1;
switch (ret) {
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
- if (atomic_read(&sdev->device_busy) ||
- scsi_device_blocked(sdev))
+ case BLK_STS_ZONE_RESOURCE:
+ if (scsi_device_blocked(sdev))
ret = BLK_STS_DEV_RESOURCE;
break;
+ case BLK_STS_AGAIN:
+ cmd->result = DID_BUS_BUSY << 16;
+ if (req->rq_flags & RQF_DONTPREP)
+ scsi_mq_uninit_cmd(cmd);
+ break;
default:
if (unlikely(!scsi_device_online(sdev)))
- scsi_req(req)->result = DID_NO_CONNECT << 16;
+ cmd->result = DID_NO_CONNECT << 16;
else
- scsi_req(req)->result = DID_ERROR << 16;
+ cmd->result = DID_ERROR << 16;
/*
* Make sure to release all allocated resources when
* we hit an error, as we will never see this command
@@ -1722,34 +1799,24 @@ out_put_budget:
*/
if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
+ scsi_run_queue_async(sdev);
break;
}
return ret;
}
-static enum blk_eh_timer_return scsi_timeout(struct request *req,
- bool reserved)
-{
- if (reserved)
- return BLK_EH_RESET_TIMER;
- return scsi_times_out(req);
-}
-
static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct Scsi_Host *shost = set->driver_data;
- const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
struct scatterlist *sg;
+ int ret = 0;
- if (unchecked_isa_dma)
- cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
- cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
- GFP_KERNEL, numa_node);
+ cmd->sense_buffer =
+ kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
- cmd->req.sense = cmd->sense_buffer;
if (scsi_host_get_prot(shost)) {
sg = (void *)cmd + sizeof(struct scsi_cmnd) +
@@ -1757,25 +1824,53 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
}
- return 0;
+ if (shost->hostt->init_cmd_priv) {
+ ret = shost->hostt->init_cmd_priv(shost, cmd);
+ if (ret < 0)
+ kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
+ }
+
+ return ret;
}
static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
+ struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
- cmd->sense_buffer);
+ if (shost->hostt->exit_cmd_priv)
+ shost->hostt->exit_cmd_priv(shost, cmd);
+ kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
+}
+
+
+static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct Scsi_Host *shost = hctx->driver_data;
+
+ if (shost->hostt->mq_poll)
+ return shost->hostt->mq_poll(shost, hctx->queue_num);
+
+ return 0;
+}
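A host that exposes polled queues would implement the hook along these lines; the driver name and completion helper are invented:

	static int my_lld_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
	{
		struct my_lld *hba = shost_priv(shost);

		/* reap completions from this hardware queue without an IRQ */
		return my_lld_reap_cq(hba, queue_num);
	}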
+
+static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct Scsi_Host *shost = data;
+
+ hctx->driver_data = shost;
+ return 0;
}
-static int scsi_map_queues(struct blk_mq_tag_set *set)
+static void scsi_map_queues(struct blk_mq_tag_set *set)
{
struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -1796,13 +1891,7 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- if (dev->dma_mask) {
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) >> SECTOR_SHIFT);
- }
blk_queue_max_hw_sectors(q, shost->max_sectors);
- if (shost->unchecked_isa_dma)
- blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
@@ -1825,25 +1914,26 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.get_budget = scsi_mq_get_budget,
.put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
- .complete = scsi_softirq_done,
+ .complete = scsi_complete,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
.show_rq = scsi_show_rq,
#endif
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
- .initialize_rq_fn = scsi_initialize_rq,
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
+ .init_hctx = scsi_init_hctx,
+ .poll = scsi_mq_poll,
+ .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+ .get_rq_budget_token = scsi_mq_get_rq_budget_token,
};
static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
- struct request_queue *q = hctx->queue;
- struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost = sdev->host;
+ struct Scsi_Host *shost = hctx->driver_data;
shost->hostt->commit_rqs(shost, hctx->queue_num);
}
@@ -1853,34 +1943,26 @@ static const struct blk_mq_ops scsi_mq_ops = {
.put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
.commit_rqs = scsi_commit_rqs,
- .complete = scsi_softirq_done,
+ .complete = scsi_complete,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
.show_rq = scsi_show_rq,
#endif
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
- .initialize_rq_fn = scsi_initialize_rq,
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
+ .init_hctx = scsi_init_hctx,
+ .poll = scsi_mq_poll,
+ .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+ .get_rq_budget_token = scsi_mq_get_rq_budget_token,
};
-struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
-{
- sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
- if (IS_ERR(sdev->request_queue))
- return NULL;
-
- sdev->request_queue->queuedata = sdev;
- __scsi_init_queue(sdev->host, sdev->request_queue);
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
- return sdev->request_queue;
-}
-
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
+ struct blk_mq_tag_set *tag_set = &shost->tag_set;
sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
scsi_mq_inline_sgl_size(shost));
@@ -1889,26 +1971,33 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
cmd_size += sizeof(struct scsi_data_buffer) +
sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
- memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+ memset(tag_set, 0, sizeof(*tag_set));
if (shost->hostt->commit_rqs)
- shost->tag_set.ops = &scsi_mq_ops;
+ tag_set->ops = &scsi_mq_ops;
else
- shost->tag_set.ops = &scsi_mq_ops_no_commit;
- shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
- shost->tag_set.queue_depth = shost->can_queue;
- shost->tag_set.cmd_size = cmd_size;
- shost->tag_set.numa_node = NUMA_NO_NODE;
- shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- shost->tag_set.flags |=
+ tag_set->ops = &scsi_mq_ops_no_commit;
+ tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+ tag_set->nr_maps = shost->nr_maps ? : 1;
+ tag_set->queue_depth = shost->can_queue;
+ tag_set->cmd_size = cmd_size;
+ tag_set->numa_node = dev_to_node(shost->dma_dev);
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
- shost->tag_set.driver_data = shost;
+ tag_set->driver_data = shost;
+ if (shost->host_tagset)
+ tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- return blk_mq_alloc_tag_set(&shost->tag_set);
+ return blk_mq_alloc_tag_set(tag_set);
}
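Everything copied into the tag_set here originates from fields the LLD fills in before calling scsi_add_host(); for example (assumed, driver-specific values):

	shost->can_queue	= 256;			/* -> tag_set->queue_depth */
	shost->nr_hw_queues	= num_online_cpus();	/* -> tag_set->nr_hw_queues */
	shost->host_tagset	= 1;			/* -> BLK_MQ_F_TAG_HCTX_SHARED */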
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
{
+ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+ tagset_refcnt);
+
blk_mq_free_tag_set(&shost->tag_set);
+ complete(&shost->tagset_freed);
}
/**
@@ -1930,23 +2019,22 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
return sdev;
}
+/*
+ * pktcdvd should have been integrated into the SCSI layers, but, like the
+ * old IDE driver, for historical reasons it isn't. This export allows it to
+ * safely probe whether a given device is a SCSI one and attach only to those.
+ */
+#ifdef CONFIG_CDROM_PKTCDVD_MODULE
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
+#endif
-/*
- * Function: scsi_block_requests()
- *
- * Purpose: Utility function used by low-level drivers to prevent further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
+/**
+ * scsi_block_requests - Utility function used by low-level drivers to prevent
+ * further commands from being queued to the device.
+ * @shost: host in question
*
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests().
*/
void scsi_block_requests(struct Scsi_Host *shost)
{
@@ -1954,25 +2042,15 @@ void scsi_block_requests(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_block_requests);
-/*
- * Function: scsi_unblock_requests()
- *
- * Purpose: Utility function used by low-level drivers to allow further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
- *
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
- *
- * This is done as an API function so that changes to the
- * internals of the scsi mid-layer won't require wholesale
- * changes to drivers that use this feature.
+/**
+ * scsi_unblock_requests - Utility function used by low-level drivers to allow
+ * further commands to be queued to the device.
+ * @shost: host in question
+ *
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests(). This is done
+ * as an API function so that changes to the internals of the scsi mid-layer
+ * won't require wholesale changes to drivers that use this feature.
*/
void scsi_unblock_requests(struct Scsi_Host *shost)
{
@@ -1981,24 +2059,9 @@ void scsi_unblock_requests(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_unblock_requests);
-int __init scsi_init_queue(void)
-{
- scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
- sizeof(struct scsi_data_buffer),
- 0, 0, NULL);
- if (!scsi_sdb_cache) {
- printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
void scsi_exit_queue(void)
{
kmem_cache_destroy(scsi_sense_cache);
- kmem_cache_destroy(scsi_sense_isadma_cache);
- kmem_cache_destroy(scsi_sdb_cache);
}
/**
@@ -2006,7 +2069,6 @@ void scsi_exit_queue(void)
* @sdev: SCSI device to be queried
* @pf: Page format bit (1 == standard, 0 == vendor specific)
* @sp: Save page bit (0 == don't save, 1 == save)
- * @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
* @timeout: command timeout
@@ -2019,10 +2081,9 @@ void scsi_exit_queue(void)
* status on error
*
*/
-int
-scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
- unsigned char *buffer, int len, int timeout, int retries,
- struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
+int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
+ unsigned char *buffer, int len, int timeout, int retries,
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
unsigned char cmd[10];
unsigned char *real_buffer;
@@ -2031,8 +2092,15 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
memset(cmd, 0, sizeof(cmd));
cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
- if (sdev->use_10_for_ms) {
- if (len > 65535)
+ /*
+ * Use MODE SELECT(10) if the device asked for it or if the mode page
+ * and the mode select header cannot fit within the maximum 255 bytes
+ * of the MODE SELECT(6) command.
+ */
+ if (sdev->use_10_for_ms ||
+ len + 4 > 255 ||
+ data->block_descriptor_length > 255) {
+ if (len > 65535 - 8)
return -EINVAL;
real_buffer = kmalloc(8 + len, GFP_KERNEL);
if (!real_buffer)
@@ -2045,15 +2113,13 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
real_buffer[3] = data->device_specific;
real_buffer[4] = data->longlba ? 0x01 : 0;
real_buffer[5] = 0;
- real_buffer[6] = data->block_descriptor_length >> 8;
- real_buffer[7] = data->block_descriptor_length;
+ put_unaligned_be16(data->block_descriptor_length,
+ &real_buffer[6]);
cmd[0] = MODE_SELECT_10;
- cmd[7] = len >> 8;
- cmd[8] = len;
+ put_unaligned_be16(len, &cmd[7]);
} else {
- if (len > 255 || data->block_descriptor_length > 255 ||
- data->longlba)
+ if (data->longlba)
return -EINVAL;
real_buffer = kmalloc(4 + len, GFP_KERNEL);
@@ -2065,7 +2131,6 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
real_buffer[1] = data->medium_type;
real_buffer[2] = data->device_specific;
real_buffer[3] = data->block_descriptor_length;
-
cmd[0] = MODE_SELECT;
cmd[4] = len;
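The open-coded shift/mask pairs above are replaced by put_unaligned_be16(), which stores the value big-endian regardless of host byte order; with an assumed parameter list length of 300 bytes:

	u8 cdb[10] = { MODE_SELECT_10, 0x10 /* PF */ };

	put_unaligned_be16(300, &cdb[7]);	/* cdb[7] = 0x01, cdb[8] = 0x2c */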
@@ -2081,7 +2146,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
/**
* scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
* @sdev: SCSI device to be queried
- * @dbd: set if mode sense will allow block descriptors to be returned
+ * @dbd: set to prevent mode sense from returning block descriptors
* @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
@@ -2091,9 +2156,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
* @sshdr: place to put sense data (or NULL if no sense to be collected).
* must be SCSI_SENSE_BUFFERSIZE big.
*
- * Returns zero if unsuccessful, or the header offset (either 4
- * or 8 depending on whether a six or ten byte command was
- * issued) if successful.
+ * Returns zero if successful, or a negative error number on failure
*/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
@@ -2118,18 +2181,18 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
sshdr = &my_sshdr;
retry:
- use_10_for_ms = sdev->use_10_for_ms;
+ use_10_for_ms = sdev->use_10_for_ms || len > 255;
if (use_10_for_ms) {
- if (len < 8)
- len = 8;
+ if (len < 8 || len > 65535)
+ return -EINVAL;
cmd[0] = MODE_SENSE_10;
- cmd[8] = len;
+ put_unaligned_be16(len, &cmd[7]);
header_length = 8;
} else {
if (len < 4)
- len = 4;
+ return -EINVAL;
cmd[0] = MODE_SENSE;
cmd[4] = len;
@@ -2140,58 +2203,65 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
sshdr, timeout, retries, NULL);
+ if (result < 0)
+ return result;
/* This code looks awful: what it's doing is making sure an
* ILLEGAL REQUEST sense return identifies the actual command
* byte as the problem. MODE_SENSE commands can return
* ILLEGAL REQUEST if the code page isn't supported */
- if (use_10_for_ms && !scsi_status_is_good(result) &&
- driver_byte(result) == DRIVER_SENSE) {
+ if (!scsi_status_is_good(result)) {
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
- /*
- * Invalid command operation code
+ /*
+ * Invalid command operation code: retry using
+ * MODE SENSE(6) if this was a MODE SENSE(10)
+ * request, except if the requested mode page is
+ * too large for the MODE SENSE single-byte
+ * allocation length field.
*/
- sdev->use_10_for_ms = 0;
+ if (use_10_for_ms) {
+ if (len > 255)
+ return -EIO;
+ sdev->use_10_for_ms = 0;
+ goto retry;
+ }
+ }
+ if (scsi_status_is_check_condition(result) &&
+ sshdr->sense_key == UNIT_ATTENTION &&
+ retry_count) {
+ retry_count--;
goto retry;
}
}
+ return -EIO;
+ }
+ if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
+ (modepage == 6 || modepage == 8))) {
+ /* Initio breakage? */
+ header_length = 0;
+ data->length = 13;
+ data->medium_type = 0;
+ data->device_specific = 0;
+ data->longlba = 0;
+ data->block_descriptor_length = 0;
+ } else if (use_10_for_ms) {
+ data->length = get_unaligned_be16(&buffer[0]) + 2;
+ data->medium_type = buffer[2];
+ data->device_specific = buffer[3];
+ data->longlba = buffer[4] & 0x01;
+ data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
+ } else {
+ data->length = buffer[0] + 1;
+ data->medium_type = buffer[1];
+ data->device_specific = buffer[2];
+ data->block_descriptor_length = buffer[3];
}
+ data->header_length = header_length;
- if(scsi_status_is_good(result)) {
- if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
- (modepage == 6 || modepage == 8))) {
- /* Initio breakage? */
- header_length = 0;
- data->length = 13;
- data->medium_type = 0;
- data->device_specific = 0;
- data->longlba = 0;
- data->block_descriptor_length = 0;
- } else if(use_10_for_ms) {
- data->length = buffer[0]*256 + buffer[1] + 2;
- data->medium_type = buffer[2];
- data->device_specific = buffer[3];
- data->longlba = buffer[4] & 0x01;
- data->block_descriptor_length = buffer[6]*256
- + buffer[7];
- } else {
- data->length = buffer[0] + 1;
- data->medium_type = buffer[1];
- data->device_specific = buffer[2];
- data->block_descriptor_length = buffer[3];
- }
- data->header_length = header_length;
- } else if ((status_byte(result) == CHECK_CONDITION) &&
- scsi_sense_valid(sshdr) &&
- sshdr->sense_key == UNIT_ATTENTION && retry_count) {
- retry_count--;
- goto retry;
- }
-
- return result;
+ return 0;
}
EXPORT_SYMBOL(scsi_mode_sense);
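With the new convention (zero on success, negative errno on failure), a caller reading the Caching mode page might look like this; the buffer size, timeout, and retry count are assumptions:

	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	int ret;

	ret = scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* Caching page */, buf,
			      sizeof(buf), 30 * HZ, 3, &data, &sshdr);
	if (!ret) {
		unsigned char *page = buf + data.header_length +
				      data.block_descriptor_length;
		bool wce = page[2] & 0x04;	/* WCE bit */
	}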
@@ -2253,7 +2323,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
goto illegal;
}
break;
-
+
case SDEV_RUNNING:
switch (oldstate) {
case SDEV_CREATED:
@@ -2295,6 +2365,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_CREATED_BLOCK:
+ case SDEV_QUIESCE:
case SDEV_OFFLINE:
break;
default:
@@ -2340,6 +2411,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
break;
}
+ sdev->offline_already = false;
sdev->sdev_state = state;
return 0;
@@ -2355,7 +2427,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
EXPORT_SYMBOL(scsi_device_set_state);
/**
- * sdev_evt_emit - emit a single SCSI device uevent
+ * scsi_evt_emit - emit a single SCSI device uevent
* @sdev: associated SCSI device
* @evt: event to emit
*
@@ -2403,7 +2475,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
}
/**
- * sdev_evt_thread - send a uevent for each scsi event
+ * scsi_evt_thread - send a uevent for each scsi event
* @work: work struct for scsi_device
*
* Dispatch queued events to their associated scsi_device kobjects
@@ -2529,15 +2601,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
- * scsi_device_quiesce - Block user issued commands.
+ * scsi_device_quiesce - Block all commands except power management.
* @sdev: scsi device to quiesce.
*
* This works by trying to transition to the SDEV_QUIESCE state
* (which must be a legal transition). When the device is in this
- * state, only special requests will be accepted, all others will
- * be deferred. Since special requests may also be requeued requests,
- * a successful return doesn't guarantee the device will be
- * totally quiescent.
+ * state, only power management requests will be accepted, all others will
+ * be deferred.
*
* Must be called with user context, may sleep.
*
@@ -2599,12 +2669,12 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
if (sdev->quiesced_by) {
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
}
- if (sdev->sdev_state == SDEV_QUIESCE)
- scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
@@ -2635,6 +2705,40 @@ scsi_target_resume(struct scsi_target *starget)
}
EXPORT_SYMBOL(scsi_target_resume);
+static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
+{
+ if (scsi_device_set_state(sdev, SDEV_BLOCK))
+ return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+ return 0;
+}
+
+void scsi_start_queue(struct scsi_device *sdev)
+{
+ if (cmpxchg(&sdev->queue_stopped, 1, 0))
+ blk_mq_unquiesce_queue(sdev->request_queue);
+}
+
+static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
+{
+ /*
+ * The atomic variable ->queue_stopped ensures that calls to
+ * blk_mq_quiesce_queue*() are balanced with blk_mq_unquiesce_queue().
+ *
+ * However, we still need to wait until the quiesce is done
+ * in case the queue had already been stopped.
+ */
+ if (!cmpxchg(&sdev->queue_stopped, 0, 1)) {
+ if (nowait)
+ blk_mq_quiesce_queue_nowait(sdev->request_queue);
+ else
+ blk_mq_quiesce_queue(sdev->request_queue);
+ } else {
+ if (!nowait)
+ blk_mq_wait_quiesce_done(sdev->request_queue);
+ }
+}
+
/**
* scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
* @sdev: device to block
@@ -2651,24 +2755,16 @@ EXPORT_SYMBOL(scsi_target_resume);
*/
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
- struct request_queue *q = sdev->request_queue;
- int err = 0;
-
- err = scsi_device_set_state(sdev, SDEV_BLOCK);
- if (err) {
- err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
-
- if (err)
- return err;
- }
+ int ret = __scsi_internal_device_block_nowait(sdev);
- /*
+ /*
* The device has transitioned to SDEV_BLOCK. Stop the
* block layer from calling the midlayer with this device's
- * request queue.
+ * request queue.
*/
- blk_mq_quiesce_queue_nowait(q);
- return 0;
+ if (!ret)
+ scsi_stop_queue(sdev, true);
+ return ret;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -2689,24 +2785,16 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
*/
static int scsi_internal_device_block(struct scsi_device *sdev)
{
- struct request_queue *q = sdev->request_queue;
int err;
mutex_lock(&sdev->state_mutex);
- err = scsi_internal_device_block_nowait(sdev);
+ err = __scsi_internal_device_block_nowait(sdev);
if (err == 0)
- blk_mq_quiesce_queue(q);
+ scsi_stop_queue(sdev, false);
mutex_unlock(&sdev->state_mutex);
return err;
}
-
-void scsi_start_queue(struct scsi_device *sdev)
-{
- struct request_queue *q = sdev->request_queue;
-
- blk_mq_unquiesce_queue(q);
-}
/**
* scsi_internal_device_unblock_nowait - resume a device after a block request
@@ -2845,6 +2933,56 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
+int
+scsi_host_block(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ int ret = 0;
+
+ /*
+ * Call scsi_internal_device_block_nowait so we can avoid
+ * calling synchronize_rcu() for each LUN.
+ */
+ shost_for_each_device(sdev, shost) {
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_internal_device_block_nowait(sdev);
+ mutex_unlock(&sdev->state_mutex);
+ if (ret) {
+ scsi_device_put(sdev);
+ break;
+ }
+ }
+
+ /*
+ * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
+ * calling synchronize_rcu() once is enough.
+ */
+ WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
+
+ if (!ret)
+ synchronize_rcu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_host_block);
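A sketch of the intended pairing from an LLD's reset path; my_lld_hard_reset() and hba are invented placeholders:

	ret = scsi_host_block(shost);
	if (!ret) {
		my_lld_hard_reset(hba);
		ret = scsi_host_unblock(shost, SDEV_RUNNING);
	}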
+
+int
+scsi_host_unblock(struct Scsi_Host *shost, int new_state)
+{
+ struct scsi_device *sdev;
+ int ret = 0;
+
+ shost_for_each_device(sdev, shost) {
+ ret = scsi_internal_device_unblock(sdev, new_state);
+ if (ret) {
+ scsi_device_put(sdev);
+ break;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_host_unblock);
+
/**
* scsi_kmap_atomic_sg - find and atomically map an sg-element
* @sgl: scatter-gather list
@@ -2919,6 +3057,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev)
}
EXPORT_SYMBOL(sdev_enable_disk_events);
+static unsigned char designator_prio(const unsigned char *d)
+{
+ if (d[1] & 0x30)
+ /* not associated with LUN */
+ return 0;
+
+ if (d[3] == 0)
+ /* invalid length */
+ return 0;
+
+ /*
+ * Order of preference for lun descriptor:
+ * - SCSI name string
+ * - NAA IEEE Registered Extended
+ * - EUI-64 based 16-byte
+ * - EUI-64 based 12-byte
+ * - NAA IEEE Registered
+ * - NAA IEEE Extended
+ * - EUI-64 based 8-byte
+ * - SCSI name string (truncated)
+ * - T10 Vendor ID
+ * as longer descriptors reduce the likelihood
+ * of identification clashes.
+ */
+
+ switch (d[1] & 0xf) {
+ case 8:
+ /* SCSI name string, variable-length UTF-8 */
+ return 9;
+ case 3:
+ switch (d[4] >> 4) {
+ case 6:
+ /* NAA registered extended */
+ return 8;
+ case 5:
+ /* NAA registered */
+ return 5;
+ case 4:
+ /* NAA extended */
+ return 4;
+ case 3:
+ /* NAA locally assigned */
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (d[3]) {
+ case 16:
+ /* EUI64-based, 16 byte */
+ return 7;
+ case 12:
+ /* EUI64-based, 12 byte */
+ return 6;
+ case 8:
+ /* EUI64-based, 8 byte */
+ return 3;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ /* T10 vendor ID */
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
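To make the scoring concrete, here is an assumed 8-byte NAA Registered designator as it could appear in VPD page 0x83:

	static const unsigned char naa_desig[] = {
		0x01,	/* code set: binary */
		0x03,	/* PIV/association: LUN, designator type 3 (NAA) */
		0x00,
		0x08,	/* designator length */
		0x50, 0x0a, 0x09, 0x87,	/* NAA nibble 5: IEEE Registered */
		0x65, 0x43, 0x21, 0x00,
	};

	/* designator_prio(naa_desig) == 5; a 16-byte NAA Registered
	 * Extended body (nibble 6) would score 8 and win instead. */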
+
/**
* scsi_vpd_lun_id - return a unique device identification
* @sdev: SCSI device
@@ -2935,7 +3145,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events);
*/
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
- u8 cur_id_type = 0xff;
+ u8 cur_id_prio = 0;
u8 cur_id_size = 0;
const unsigned char *d, *cur_id_str;
const struct scsi_vpd *vpd_pg83;
@@ -2948,20 +3158,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
return -ENXIO;
}
- /*
- * Look for the correct descriptor.
- * Order of preference for lun descriptor:
- * - SCSI name string
- * - NAA IEEE Registered Extended
- * - EUI-64 based 16-byte
- * - EUI-64 based 12-byte
- * - NAA IEEE Registered
- * - NAA IEEE Extended
- * - T10 Vendor ID
- * as longer descriptors reduce the likelyhood
- * of identification clashes.
- */
-
/* The id string must be at least 20 bytes + terminating NULL byte */
if (id_len < 21) {
rcu_read_unlock();
@@ -2969,39 +3165,32 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
}
memset(id, 0, id_len);
- d = vpd_pg83->data + 4;
- while (d < vpd_pg83->data + vpd_pg83->len) {
- /* Skip designators not referring to the LUN */
- if ((d[1] & 0x30) != 0x00)
- goto next_desig;
+ for (d = vpd_pg83->data + 4;
+ d < vpd_pg83->data + vpd_pg83->len;
+ d += d[3] + 4) {
+ u8 prio = designator_prio(d);
+
+ if (prio == 0 || cur_id_prio > prio)
+ continue;
switch (d[1] & 0xf) {
case 0x1:
/* T10 Vendor ID */
if (cur_id_size > d[3])
break;
- /* Prefer anything */
- if (cur_id_type > 0x01 && cur_id_type != 0xff)
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
if (cur_id_size + 4 > id_len)
cur_id_size = id_len - 4;
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
id_size = snprintf(id, id_len, "t10.%*pE",
cur_id_size, cur_id_str);
break;
case 0x2:
/* EUI-64 */
- if (cur_id_size > d[3])
- break;
- /* Prefer NAA IEEE Registered Extended */
- if (cur_id_type == 0x3 &&
- cur_id_size == d[3])
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
switch (cur_id_size) {
case 8:
id_size = snprintf(id, id_len,
@@ -3019,17 +3208,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
cur_id_str);
break;
default:
- cur_id_size = 0;
break;
}
break;
case 0x3:
/* NAA */
- if (cur_id_size > d[3])
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
switch (cur_id_size) {
case 8:
id_size = snprintf(id, id_len,
@@ -3042,32 +3228,29 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
cur_id_str);
break;
default:
- cur_id_size = 0;
break;
}
break;
case 0x8:
/* SCSI name string */
- if (cur_id_size + 4 > d[3])
+ if (cur_id_size > d[3])
break;
/* Prefer others for truncated descriptor */
- if (cur_id_size && d[3] > id_len)
- break;
+ if (d[3] > id_len) {
+ prio = 2;
+ if (cur_id_prio > prio)
+ break;
+ }
+ cur_id_prio = prio;
cur_id_size = id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
if (cur_id_size >= id_len)
cur_id_size = id_len - 1;
memcpy(id, cur_id_str, cur_id_size);
- /* Decrease priority for truncated descriptor */
- if (cur_id_size != id_size)
- cur_id_size = 6;
break;
default:
break;
}
-next_desig:
- d += d[3] + 4;
}
rcu_read_unlock();
@@ -3121,3 +3304,20 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
+
+/**
+ * scsi_build_sense - build sense data for a command
+ * @scmd: scsi command for which the sense should be formatted
+ * @desc: Sense format (non-zero == descriptor format,
+ * 0 == fixed format)
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
+{
+ scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+}
+EXPORT_SYMBOL_GPL(scsi_build_sense);
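As a usage sketch, a hypothetical error path that rejects an unsupported opcode could look like this; example_reject_opcode is made up, and the caller is still responsible for completing the command:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_proto.h>

/* Report ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE (asc 0x20,
 * ascq 0x00) in fixed sense format. scsi_build_sense() also sets
 * scmd->result to SAM_STAT_CHECK_CONDITION. */
static void example_reject_opcode(struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0 /* fixed format */,
			 ILLEGAL_REQUEST, 0x20, 0x00);
}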
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index c91fa3feb930..b02af340c2d3 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -28,8 +28,11 @@ static void scsi_log_release_buffer(char *bufptr)
static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
- return scmd->request->rq_disk ?
- scmd->request->rq_disk->disk_name : NULL;
+ struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+
+ if (!rq->q || !rq->q->disk)
+ return NULL;
+ return rq->q->disk->disk_name;
}
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
@@ -84,14 +87,14 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
char *logbuf;
size_t off = 0, logbuf_len;
- if (!scmd || !scmd->cmnd)
+ if (!scmd)
return;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
- scmd->request->tag);
+ scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
if (off < logbuf_len) {
va_start(args, fmt);
off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -180,15 +183,12 @@ void scsi_print_command(struct scsi_cmnd *cmd)
char *logbuf;
size_t off, logbuf_len;
- if (!cmd->cmnd)
- return;
-
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len,
- scmd_name(cmd), cmd->request->tag);
+ scmd_name(cmd), scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
@@ -205,16 +205,12 @@ void scsi_print_command(struct scsi_cmnd *cmd)
/* Print opcode in one line and use separate lines for CDB */
off += scnprintf(logbuf + off, logbuf_len - off, "\n");
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
- scsi_log_release_buffer(logbuf);
for (k = 0; k < cmd->cmd_len; k += 16) {
size_t linelen = min(cmd->cmd_len - k, 16);
- logbuf = scsi_log_reserve_buffer(&logbuf_len);
- if (!logbuf)
- break;
off = sdev_format_header(logbuf, logbuf_len,
scmd_name(cmd),
- cmd->request->tag);
+ scsi_cmd_to_rq(cmd)->tag);
if (!WARN_ON(off > logbuf_len - 58)) {
off += scnprintf(logbuf + off, logbuf_len - off,
"CDB[%02x]: ", k);
@@ -224,9 +220,8 @@ void scsi_print_command(struct scsi_cmnd *cmd)
}
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s",
logbuf);
- scsi_log_release_buffer(logbuf);
}
- return;
+ goto out;
}
if (!WARN_ON(off > logbuf_len - 49)) {
off += scnprintf(logbuf + off, logbuf_len - off, " ");
@@ -236,6 +231,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
}
out_printk:
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+out:
scsi_log_release_buffer(logbuf);
}
EXPORT_SYMBOL(scsi_print_command);
@@ -377,7 +373,8 @@ EXPORT_SYMBOL(__scsi_print_sense);
/* Normalize and print sense buffer in SCSI command */
void scsi_print_sense(const struct scsi_cmnd *cmd)
{
- scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+ scsi_log_print_sense(cmd->device, scmd_name(cmd),
+ scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);
@@ -389,15 +386,14 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
size_t off, logbuf_len;
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
- const char *db_string = scsi_driverbyte_string(cmd->result);
unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
return;
- off = sdev_format_header(logbuf, logbuf_len,
- scmd_name(cmd), cmd->request->tag);
+ off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
+ scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
@@ -430,13 +426,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (WARN_ON(off >= logbuf_len))
goto out_printk;
- if (db_string)
- off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s ", db_string);
- else
- off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x ",
- driver_byte(cmd->result));
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=DRIVER_OK ");
off += scnprintf(logbuf + off, logbuf_len - off,
"cmd_age=%lus", cmd_age);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 3717eea37ecb..d581613d87c7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,7 +8,6 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/async.h>
#include <linux/blk-pm.h>
#include <scsi/scsi.h>
@@ -56,9 +55,6 @@ static int scsi_dev_type_suspend(struct device *dev,
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err;
- /* flush pending in-flight resume operations, suspend is synchronous */
- async_synchronize_full_domain(&scsi_sd_pm_domain);
-
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
err = cb(dev, pm);
@@ -69,108 +65,30 @@ static int scsi_dev_type_suspend(struct device *dev,
return err;
}
-static int scsi_dev_type_resume(struct device *dev,
- int (*cb)(struct device *, const struct dev_pm_ops *))
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int err = 0;
-
- err = cb(dev, pm);
- scsi_device_resume(to_scsi_device(dev));
- dev_dbg(dev, "scsi resume: %d\n", err);
-
- if (err == 0) {
- pm_runtime_disable(dev);
- err = pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- /*
- * Forcibly set runtime PM status of request queue to "active"
- * to make sure we can again get requests from the queue
- * (see also blk_pm_peek_request()).
- *
- * The resume hook will correct runtime PM status of the disk.
- */
- if (!err && scsi_is_sdev_device(dev)) {
- struct scsi_device *sdev = to_scsi_device(dev);
-
- blk_set_runtime_active(sdev->request_queue);
- }
- }
-
- return err;
-}
-
static int
scsi_bus_suspend_common(struct device *dev,
int (*cb)(struct device *, const struct dev_pm_ops *))
{
- int err = 0;
-
- if (scsi_is_sdev_device(dev)) {
- /*
- * All the high-level SCSI drivers that implement runtime
- * PM treat runtime suspend, system suspend, and system
- * hibernate nearly identically. In all cases the requirements
- * for runtime suspension are stricter.
- */
- if (pm_runtime_suspended(dev))
- return 0;
-
- err = scsi_dev_type_suspend(dev, cb);
- }
-
- return err;
-}
-
-static void async_sdev_resume(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_resume);
-}
-
-static void async_sdev_thaw(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_thaw);
-}
+ if (!scsi_is_sdev_device(dev))
+ return 0;
-static void async_sdev_restore(void *dev, async_cookie_t cookie)
-{
- scsi_dev_type_resume(dev, do_scsi_restore);
+ return scsi_dev_type_suspend(dev, cb);
}
static int scsi_bus_resume_common(struct device *dev,
int (*cb)(struct device *, const struct dev_pm_ops *))
{
- async_func_t fn;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int err;
if (!scsi_is_sdev_device(dev))
- fn = NULL;
- else if (cb == do_scsi_resume)
- fn = async_sdev_resume;
- else if (cb == do_scsi_thaw)
- fn = async_sdev_thaw;
- else if (cb == do_scsi_restore)
- fn = async_sdev_restore;
- else
- fn = NULL;
-
- if (fn) {
- async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
-
- /*
- * If a user has disabled async probing a likely reason
- * is due to a storage enclosure that does not inject
- * staggered spin-ups. For safety, make resume
- * synchronous as well in that case.
- */
- if (strncmp(scsi_scan_type, "async", 5) != 0)
- async_synchronize_full_domain(&scsi_sd_pm_domain);
- } else {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
- return 0;
+ return 0;
+
+ err = cb(dev, pm);
+ scsi_device_resume(to_scsi_device(dev));
+ dev_dbg(dev, "scsi resume: %d\n", err);
+
+ return err;
}
static int scsi_bus_prepare(struct device *dev)
@@ -262,7 +180,7 @@ static int sdev_runtime_resume(struct device *dev)
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
+ blk_post_runtime_resume(sdev->request_queue);
return err;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3bff9f7aa684..c52de9a973e4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -3,9 +3,10 @@
#define _SCSI_PRIV_H
#include <linux/device.h>
-#include <linux/async.h>
#include <scsi/scsi_device.h>
+#include <linux/sbitmap.h>
+struct bsg_device;
struct request_queue;
struct request;
struct scsi_cmnd;
@@ -15,6 +16,18 @@ struct scsi_host_template;
struct Scsi_Host;
struct scsi_nl_hdr;
+#define SCSI_CMD_RETRIES_NO_LIMIT -1
+
+/*
+ * Error codes used by scsi-ml internally. These must not be used by drivers.
+ */
+enum scsi_ml_status {
+ SCSIML_STAT_OK = 0x00,
+ SCSIML_STAT_RESV_CONFLICT = 0x01, /* Reservation conflict */
+ SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */
+ SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */
+ SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */
+};
/*
* Scsi Error Handler Flags
@@ -29,7 +42,6 @@ extern int scsi_init_hosts(void);
extern void scsi_exit_hosts(void);
/* scsi.c */
-extern bool scsi_use_blk_mq;
int scsi_init_sense_cache(struct Scsi_Host *shost);
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
@@ -71,9 +83,9 @@ extern void scsi_exit_devinfo(void);
/* scsi_error.c */
extern void scmd_eh_abort_handler(struct work_struct *work);
-extern enum blk_eh_timer_return scsi_times_out(struct request *req);
+extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
-extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
+extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
extern void scsi_eh_scmd_add(struct scsi_cmnd *);
void scsi_eh_ready_devs(struct Scsi_Host *shost,
@@ -81,26 +93,21 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
-int scsi_noretry_cmd(struct scsi_cmnd *scmd);
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
+void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
-extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
-extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
-extern int scsi_init_queue(void);
+extern void scsi_mq_free_tags(struct kref *kref);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
-struct request_queue;
-struct request;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
@@ -120,7 +127,7 @@ extern void scsi_exit_procfs(void);
#endif /* CONFIG_PROC_FS */
/* scsi_scan.c */
-extern char scsi_scan_type[];
+void scsi_enable_async_suspend(struct device *dev);
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
@@ -147,7 +154,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
-extern const struct attribute_group *scsi_sysfs_shost_attr_groups[];
+extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
@@ -174,8 +181,6 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM */
-extern struct async_domain scsi_sd_pm_domain;
-
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
void scsi_dh_add_device(struct scsi_device *sdev);
@@ -185,6 +190,10 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev);
+
+extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
+
/*
* internal scsi timeout functions: for use by mid-layer and transport
* classes.
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index d6982d355739..95aee1ad1383 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -49,7 +49,7 @@ static DEFINE_MUTEX(global_host_template_mutex);
static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct Scsi_Host *shost = PDE_DATA(file_inode(file));
+ struct Scsi_Host *shost = pde_data(file_inode(file));
ssize_t ret = -ENOMEM;
char *page;
@@ -79,7 +79,7 @@ static int proc_scsi_show(struct seq_file *m, void *v)
static int proc_scsi_host_open(struct inode *inode, struct file *file)
{
- return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+ return single_open_size(file, proc_scsi_show, pde_data(inode),
4 * PAGE_SIZE);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 058079f915f1..5d27f5196de6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
+static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
S_IRUGO|S_IWUSR);
@@ -122,6 +122,22 @@ struct async_scan_data {
struct completion prev_finished;
};
+/*
+ * scsi_enable_async_suspend - Enable async suspend and resume
+ */
+void scsi_enable_async_suspend(struct device *dev)
+{
+ /*
+ * If a user has disabled async probing, a likely reason is a
+ * storage enclosure that does not inject staggered spin-ups. For
+ * safety, make resume synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ return;
+ /* Enable asynchronous suspend and resume. */
+ device_enable_async_suspend(dev);
+}
+
/**
* scsi_complete_async_scans - Wait for asynchronous scans to complete
*
@@ -198,6 +214,53 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ int ret;
+ struct sbitmap sb_backup;
+
+ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
+ /*
+ * Reallocate if a new shift is calculated, which happens when a
+ * new default queue depth is set up after calling ->slave_configure
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+ * The request queue has to be frozen while the budget map is
+ * reallocated; the disk has not been added yet, so freezing is fast
+ */
+ if (need_free) {
+ blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
+ if (!ret)
+ sbitmap_resize(&sdev->budget_map, depth);
+
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue);
+ }
+ return ret;
+}
+
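To see why a different default queue depth can change the shift and force a reallocation, here is a userspace model of sbitmap_calculate_shift() as it reads in include/linux/sbitmap.h around this series; the starting shift of 6 assumes 64-bit BITS_PER_LONG:

#include <stdio.h>

/* Shallow maps get a smaller per-word shift so the bits spread over a
 * few cachelines instead of packing into one word. */
static int calc_shift(unsigned int depth)
{
	int shift = 6;                       /* SBITMAP_DEF_SHIFT on 64-bit */

	if (depth >= 4)
		while ((4U << shift) > depth)
			shift--;
	return shift;
}

int main(void)
{
	unsigned int depths[] = { 1, 16, 64, 256, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("depth %4u -> shift %d (%d bits per word)\n",
		       depths[i], calc_shift(depths[i]),
		       1 << calc_shift(depths[i]));
	return 0;
}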
/**
* scsi_alloc_sdev - allocate and set up a scsi_Device
* @starget: which target to allocate a &scsi_device for
@@ -215,7 +278,9 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
u64 lun, void *hostdata)
{
+ unsigned int depth;
struct scsi_device *sdev;
+ struct request_queue *q;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -236,7 +301,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
- INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
INIT_LIST_HEAD(&sdev->event_list);
spin_lock_init(&sdev->list_lock);
@@ -266,19 +330,37 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
- sdev->request_queue = scsi_mq_alloc_queue(sdev);
- if (!sdev->request_queue) {
+ sdev->sg_reserved_size = INT_MAX;
+
+ q = blk_mq_init_queue(&sdev->host->tag_set);
+ if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
put_device(&starget->dev);
kfree(sdev);
goto out;
}
- WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
- sdev->request_queue->queuedata = sdev;
+ kref_get(&sdev->host->tagset_refcnt);
+ sdev->request_queue = q;
+ q->queuedata = sdev;
+ __scsi_init_queue(sdev->host, q);
+ WARN_ON_ONCE(!blk_get_queue(q));
- scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
- sdev->host->cmd_per_lun : 1);
+ depth = sdev->host->cmd_per_lun ?: 1;
+
+ /*
+ * Use .can_queue as the budget map's depth because we have to
+ * support adjusting the queue depth from sysfs. Meanwhile, use
+ * the default device queue depth to figure out the sbitmap shift,
+ * since that is the queue depth used most of the time.
+ */
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
+ put_device(&starget->dev);
+ kfree(sdev);
+ goto out;
+ }
+
+ scsi_change_queue_depth(sdev, depth);
scsi_sysfs_device_initialize(sdev);
@@ -431,6 +513,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
dev->type = &scsi_target_type;
+ scsi_enable_async_suspend(dev);
starget->id = id;
starget->channel = channel;
starget->can_queue = 0;
@@ -454,7 +537,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget);
if(error) {
- dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+ if (error != -ENXIO)
+ dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
scsi_target_destroy(starget);
@@ -599,14 +683,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
"scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
- if (result) {
+ if (result > 0) {
/*
* not-ready to ready transition [asc/ascq=0x28/0x0]
* or power-on, reset [asc/ascq=0x29/0x0], continue.
* INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway.
*/
- if (driver_byte(result) == DRIVER_SENSE &&
+ if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) ||
@@ -614,7 +698,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
(sshdr.ascq == 0))
continue;
}
- } else {
+ } else if (result == 0) {
/*
* if nothing was transferred, we try
* again. It's a workaround for some USB
@@ -650,7 +734,17 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
- else if (sdev->inquiry_len)
+ /*
+ * LLD specified a maximum sdev->inquiry_len
+ * but the device claims it has more data. Capping
+ * the length only makes sense for legacy
+ * devices. If a device supports SPC-4 (2014)
+ * or newer, assume that it is safe to ask for
+ * as much as the device says it supports.
+ */
+ else if (sdev->inquiry_len &&
+ response_len > sdev->inquiry_len &&
+ (inq_result[2] & 0x7) < 6) /* SPC-4 */
next_inquiry_len = sdev->inquiry_len;
else
next_inquiry_len = response_len;
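For reference, the low three bits of standard INQUIRY byte 2 carry the version the device claims to implement (0x06 is SPC-4, 0x07 is SPC-5); a tiny standalone version of the check above, with a made-up byte value:

#include <stdio.h>

int main(void)
{
	unsigned char inq_byte2 = 0x07;      /* sample: device claims SPC-5 */
	unsigned int version = inq_byte2 & 0x7;

	printf("version=%u -> %s\n", version,
	       version < 6 ? "legacy: cap at sdev->inquiry_len"
			   : "SPC-4+: trust response_len");
	return 0;
}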
@@ -952,6 +1046,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_UNMAP_LIMIT_WS)
sdev->unmap_limit_for_ws = 1;
+ if (*bflags & BLIST_IGN_MEDIA_CHANGE)
+ sdev->ignore_media_change = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
@@ -974,12 +1071,20 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
return SCSI_SCAN_NO_RESPONSE;
}
+
+ /*
+ * The queue_depth is often changed in ->slave_configure.
+ * Set up budget map again since memory consumption of
+ * the map depends on actual queue depth.
+ */
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
}
if (sdev->scsi_level >= SCSI_3)
scsi_attach_vpd(sdev);
sdev->max_queue_depth = sdev->queue_depth;
+ WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
sdev->sdev_bflags = *bflags;
/*
@@ -1079,8 +1184,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
- result = kmalloc(result_len, GFP_KERNEL |
- ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
+ result = kmalloc(result_len, GFP_KERNEL);
if (!result)
goto out_free_sdev;
@@ -1337,8 +1441,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
*/
length = (511 + 1) * sizeof(struct scsi_lun);
retry:
- lun_data = kmalloc(length, GFP_KERNEL |
- (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
+ lun_data = kmalloc(length, GFP_KERNEL);
if (!lun_data) {
printk(ALLOC_FAILURE_MSG, __func__);
goto out;
@@ -1715,15 +1818,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
- struct async_scan_data *data;
+ struct async_scan_data *data = NULL;
unsigned long flags;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
+ mutex_lock(&shost->scan_mutex);
if (shost->async_scan) {
shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
- return NULL;
+ goto err;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -1734,7 +1838,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
goto err;
init_completion(&data->prev_finished);
- mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
shost->async_scan = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1749,6 +1852,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return data;
err:
+ mutex_unlock(&shost->scan_mutex);
kfree(data);
return NULL;
}
@@ -1875,60 +1979,3 @@ void scsi_forget_host(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/**
- * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
- * @shost: Host that needs a scsi_device
- *
- * Lock status: None assumed.
- *
- * Returns: The scsi_device or NULL
- *
- * Notes:
- * Attach a single scsi_device to the Scsi_Host - this should
- * be made to look like a "pseudo-device" that points to the
- * HA itself.
- *
- * Note - this device is not accessible from any high-level
- * drivers (including generics), which is probably not
- * optimal. We can add hooks later to attach.
- */
-struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev = NULL;
- struct scsi_target *starget;
-
- mutex_lock(&shost->scan_mutex);
- if (!scsi_host_scan_allowed(shost))
- goto out;
- starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
- if (!starget)
- goto out;
-
- sdev = scsi_alloc_sdev(starget, 0, NULL);
- if (sdev)
- sdev->borken = 0;
- else
- scsi_target_reap(starget);
- put_device(&starget->dev);
- out:
- mutex_unlock(&shost->scan_mutex);
- return sdev;
-}
-EXPORT_SYMBOL(scsi_get_host_dev);
-
-/**
- * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
- * @sdev: Host device to be freed
- *
- * Lock status: None assumed.
- *
- * Returns: Nothing
- */
-void scsi_free_host_dev(struct scsi_device *sdev)
-{
- BUG_ON(sdev->id != sdev->host->this_id);
-
- __scsi_remove_device(sdev);
-}
-EXPORT_SYMBOL(scsi_free_host_dev);
-
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 677b5c5403d2..cac7c902cf70 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
+#include <linux/bsg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -370,10 +371,9 @@ static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
-shost_rd_attr(can_queue, "%hd\n");
+shost_rd_attr(can_queue, "%d\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(sg_prot_tablesize, "%hu\n");
-shost_rd_attr(unchecked_isa_dma, "%d\n");
shost_rd_attr(prot_capabilities, "%u\n");
shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
@@ -393,6 +393,16 @@ show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL);
+static ssize_t
+show_nr_hw_queues(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct blk_mq_tag_set *tag_set = &shost->tag_set;
+
+ return snprintf(buf, 20, "%d\n", tag_set->nr_hw_queues);
+}
+static DEVICE_ATTR(nr_hw_queues, S_IRUGO, show_nr_hw_queues, NULL);
+
static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_use_blk_mq.attr,
&dev_attr_unique_id.attr,
@@ -401,7 +411,6 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_can_queue.attr,
&dev_attr_sg_tablesize.attr,
&dev_attr_sg_prot_tablesize.attr,
- &dev_attr_unchecked_isa_dma.attr,
&dev_attr_proc_name.attr,
&dev_attr_scan.attr,
&dev_attr_hstate.attr,
@@ -411,14 +420,15 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_prot_guard_type.attr,
&dev_attr_host_reset.attr,
&dev_attr_eh_deadline.attr,
+ &dev_attr_nr_hw_queues.attr,
NULL
};
-static struct attribute_group scsi_shost_attr_group = {
+static const struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
-const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
+const struct attribute_group *scsi_shost_groups[] = {
&scsi_shost_attr_group,
NULL
};
@@ -438,10 +448,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
+ struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ struct module *mod;
sdev = container_of(work, struct scsi_device, ew.work);
+ mod = sdev->host->hostt->module;
+
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
@@ -466,6 +480,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
+ sbitmap_free(&sdev->budget_map);
+
mutex_lock(&sdev->inquiry_mutex);
vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
lockdep_is_held(&sdev->inquiry_mutex));
@@ -475,6 +491,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0,
+ lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1,
+ lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg0)
@@ -485,16 +507,28 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree_rcu(vpd_pg80, rcu);
if (vpd_pg89)
kfree_rcu(vpd_pg89, rcu);
+ if (vpd_pgb0)
+ kfree_rcu(vpd_pgb0, rcu);
+ if (vpd_pgb1)
+ kfree_rcu(vpd_pgb1, rcu);
+ if (vpd_pgb2)
+ kfree_rcu(vpd_pgb2, rcu);
kfree(sdev->inquiry);
kfree(sdev);
if (parent)
put_device(parent);
+ module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+
+ /* Set the module pointer to NULL if the module is being unloaded */
+ if (!try_module_get(sdp->host->hostt->module))
+ sdp->host->hostt->module = NULL;
+
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -539,7 +573,6 @@ struct bus_type scsi_bus_type = {
.pm = &scsi_bus_pm_ops,
#endif
};
-EXPORT_SYMBOL_GPL(scsi_bus_type);
int scsi_sysfs_register(void)
{
@@ -659,7 +692,7 @@ sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
- return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+ return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev));
}
static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
@@ -776,6 +809,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
int i, ret;
struct scsi_device *sdev = to_scsi_device(dev);
enum scsi_device_state state = 0;
+ bool rescan_dev = false;
for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
const int len = strlen(sdev_states[i].name);
@@ -794,15 +828,36 @@ store_state_field(struct device *dev, struct device_attribute *attr,
}
mutex_lock(&sdev->state_mutex);
- ret = scsi_device_set_state(sdev, state);
- /*
- * If the device state changes to SDEV_RUNNING, we need to run
- * the queue to avoid I/O hang.
- */
- if (ret == 0 && state == SDEV_RUNNING)
- blk_mq_run_hw_queues(sdev->request_queue, true);
+ switch (sdev->sdev_state) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ break;
+ default:
+ mutex_unlock(&sdev->state_mutex);
+ return -EINVAL;
+ }
+ if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
+ ret = 0;
+ } else {
+ ret = scsi_device_set_state(sdev, state);
+ if (ret == 0 && state == SDEV_RUNNING)
+ rescan_dev = true;
+ }
mutex_unlock(&sdev->state_mutex);
+ if (rescan_dev) {
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to
+ * run the queue to avoid an I/O hang, and rescan the device
+ * to revalidate it. Running the queue first is necessary
+ * because another thread may be waiting inside
+ * blk_mq_freeze_queue_wait() and because that call may be
+ * waiting for pending I/O to finish.
+ */
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+ scsi_rescan_device(dev);
+ }
+
return ret == 0 ? count : -EINVAL;
}
@@ -856,7 +911,7 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \
struct bin_attribute *bin_attr, \
char *buf, loff_t off, size_t count) \
{ \
- struct device *dev = container_of(kobj, struct device, kobj); \
+ struct device *dev = kobj_to_dev(kobj); \
struct scsi_device *sdev = to_scsi_device(dev); \
struct scsi_vpd *vpd_page; \
int ret = -EINVAL; \
@@ -878,13 +933,16 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pgb0);
+sdev_vpd_pg_attr(pgb1);
+sdev_vpd_pg_attr(pgb2);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
if (!sdev->inquiry)
@@ -926,6 +984,7 @@ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
+show_sdev_iostat(iotmo_cnt);
static ssize_t
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1045,14 +1104,14 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
name = sdev_bflags_name[i];
if (name)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%s", len ? " " : "", name);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%s%s", len ? " " : "", name);
else
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%sINVALID_BIT(%d)", len ? " " : "", i);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%sINVALID_BIT(%d)", len ? " " : "", i);
}
if (len)
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
@@ -1181,7 +1240,7 @@ static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1193,21 +1252,13 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
!sdev->host->hostt->change_queue_depth)
return 0;
-#ifdef CONFIG_SCSI_DH
- if (attr == &dev_attr_access_state.attr &&
- !sdev->handler)
- return 0;
- if (attr == &dev_attr_preferred_path.attr &&
- !sdev->handler)
- return 0;
-#endif
return attr->mode;
}
static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1223,6 +1274,15 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
return 0;
+ if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0)
+ return 0;
+
+ if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1)
+ return 0;
+
+ if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
+ return 0;
+
return S_IRUGO;
}
@@ -1244,6 +1304,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_iorequest_cnt.attr,
&dev_attr_iodone_cnt.attr,
&dev_attr_ioerr_cnt.attr,
+ &dev_attr_iotmo_cnt.attr,
&dev_attr_modalias.attr,
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
@@ -1269,6 +1330,9 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
&dev_attr_vpd_pg89,
+ &dev_attr_vpd_pgb0,
+ &dev_attr_vpd_pgb1,
+ &dev_attr_vpd_pgb2,
&dev_attr_inquiry,
NULL
};
@@ -1315,8 +1379,7 @@ static int scsi_target_add(struct scsi_target *starget)
**/
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
- int error, i;
- struct request_queue *rq = sdev->request_queue;
+ int error;
struct scsi_target *starget = sdev->sdev_target;
error = scsi_target_add(starget);
@@ -1355,30 +1418,17 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
- error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
- if (error)
- /* we're treating error on bsg register as non-fatal,
- * so pretend nothing went wrong */
- sdev_printk(KERN_INFO, sdev,
- "Failed to register bsg queue, errno=%d\n", error);
-
- /* add additional host specific attributes */
- if (sdev->host->hostt->sdev_attrs) {
- for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
- error = device_create_file(&sdev->sdev_gendev,
- sdev->host->hostt->sdev_attrs[i]);
- if (error)
- return error;
+ if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
+ sdev->bsg_dev = scsi_bsg_register_queue(sdev);
+ if (IS_ERR(sdev->bsg_dev)) {
+ error = PTR_ERR(sdev->bsg_dev);
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to register bsg queue, errno=%d\n",
+ error);
+ sdev->bsg_dev = NULL;
}
}
- if (sdev->host->hostt->sdev_groups) {
- error = sysfs_create_groups(&sdev->sdev_gendev.kobj,
- sdev->host->hostt->sdev_groups);
- if (error)
- return error;
- }
-
scsi_autopm_put_device(sdev);
return error;
}
@@ -1418,11 +1468,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
if (res != 0)
return;
- if (sdev->host->hostt->sdev_groups)
- sysfs_remove_groups(&sdev->sdev_gendev.kobj,
- sdev->host->hostt->sdev_groups);
-
- bsg_unregister_queue(sdev->request_queue);
+ if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev)
+ bsg_unregister_queue(sdev->bsg_dev);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
device_del(dev);
@@ -1438,7 +1485,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
scsi_device_set_state(sdev, SDEV_DEL);
mutex_unlock(&sdev->state_mutex);
- blk_cleanup_queue(sdev->request_queue);
+ blk_mq_destroy_queue(sdev->request_queue);
+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
@@ -1447,7 +1495,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
/*
* Paired with the kref_get() in scsi_sysfs_initialize(). We have
- * remoed sysfs visibility from the device, so make the target
+ * removed sysfs visibility from the device, so make the target
* invisible if this was the last device underneath it.
*/
scsi_target_reap(scsi_target(sdev));
@@ -1559,18 +1607,6 @@ EXPORT_SYMBOL(scsi_register_interface);
**/
int scsi_sysfs_add_host(struct Scsi_Host *shost)
{
- int error, i;
-
- /* add host specific attributes */
- if (shost->hostt->shost_attrs) {
- for (i = 0; shost->hostt->shost_attrs[i]; i++) {
- error = device_create_file(&shost->shost_dev,
- shost->hostt->shost_attrs[i]);
- if (error)
- return error;
- }
- }
-
transport_register_device(&shost->shost_gendev);
transport_configure_device(&shost->shost_gendev);
return 0;
@@ -1586,13 +1622,16 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
unsigned long flags;
struct Scsi_Host *shost = sdev->host;
+ struct scsi_host_template *hostt = shost->hostt;
struct scsi_target *starget = sdev->sdev_target;
device_initialize(&sdev->sdev_gendev);
sdev->sdev_gendev.bus = &scsi_bus_type;
sdev->sdev_gendev.type = &scsi_dev_type;
+ scsi_enable_async_suspend(&sdev->sdev_gendev);
dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+ sdev->sdev_gendev.groups = hostt->sdev_groups;
device_initialize(&sdev->sdev_dev);
sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index ac35c301c792..41a950075913 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -18,11 +18,9 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- u32 lba = 0, txlen;
+ u32 lba, txlen;
- lba |= ((cdb[1] & 0x1F) << 16);
- lba |= (cdb[2] << 8);
- lba |= cdb[3];
+ lba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
/*
* From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be read (READ(6)) or written (WRITE(6)).
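A standalone model of that decode, using a fabricated READ(6) CDB carrying the maximum 21-bit LBA and a zero transfer length:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cdb[6] = { 0x08, 0x1f, 0xff, 0xff, 0x00, 0x00 };
	/* low 21 bits of bytes 1..3, big endian; this is what
	 * get_unaligned_be24(&cdb[1]) & 0x1fffff computes */
	uint32_t lba = ((uint32_t)(cdb[1] & 0x1f) << 16) |
		       ((uint32_t)cdb[2] << 8) | cdb[3];
	uint32_t txlen = cdb[4] ? cdb[4] : 256;  /* 0 means 256 per SBC-2 */

	printf("lba=%u txlen=%u\n", lba, txlen); /* lba=2097151 txlen=256 */
	return 0;
}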
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2732fa65119c..8934160c4a33 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -22,6 +22,7 @@
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
+#include <uapi/scsi/fc/fc_els.h>
#include "scsi_priv.h"
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
@@ -33,6 +34,11 @@ static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
+static void fc_li_stats_update(u16 event_type,
+ struct fc_fpin_stats *stats);
+static void fc_delivery_stats_update(u32 reason_code,
+ struct fc_fpin_stats *stats);
+static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);
/*
* Module Parameters
@@ -142,20 +148,23 @@ fc_enum_name_search(host_event_code, fc_host_event_code,
static struct {
enum fc_port_state value;
char *name;
+ int matchlen;
} fc_port_state_names[] = {
- { FC_PORTSTATE_UNKNOWN, "Unknown" },
- { FC_PORTSTATE_NOTPRESENT, "Not Present" },
- { FC_PORTSTATE_ONLINE, "Online" },
- { FC_PORTSTATE_OFFLINE, "Offline" },
- { FC_PORTSTATE_BLOCKED, "Blocked" },
- { FC_PORTSTATE_BYPASSED, "Bypassed" },
- { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" },
- { FC_PORTSTATE_LINKDOWN, "Linkdown" },
- { FC_PORTSTATE_ERROR, "Error" },
- { FC_PORTSTATE_LOOPBACK, "Loopback" },
- { FC_PORTSTATE_DELETED, "Deleted" },
+ { FC_PORTSTATE_UNKNOWN, "Unknown", 7},
+ { FC_PORTSTATE_NOTPRESENT, "Not Present", 11 },
+ { FC_PORTSTATE_ONLINE, "Online", 6 },
+ { FC_PORTSTATE_OFFLINE, "Offline", 7 },
+ { FC_PORTSTATE_BLOCKED, "Blocked", 7 },
+ { FC_PORTSTATE_BYPASSED, "Bypassed", 8 },
+ { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics", 11 },
+ { FC_PORTSTATE_LINKDOWN, "Linkdown", 8 },
+ { FC_PORTSTATE_ERROR, "Error", 5 },
+ { FC_PORTSTATE_LOOPBACK, "Loopback", 8 },
+ { FC_PORTSTATE_DELETED, "Deleted", 7 },
+ { FC_PORTSTATE_MARGINAL, "Marginal", 8 },
};
fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
+fc_enum_name_match(port_state, fc_port_state, fc_port_state_names)
#define FC_PORTSTATE_MAX_NAMELEN 20
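The new matchlen column feeds the matcher generated by the fc_enum_name_match() macro; its expansion is roughly the following (sketched from the macro in this file, so treat the details as approximate). Matching on a fixed prefix length lets a sysfs write such as "Marginal\n" match without the trailing newline defeating a full strcmp():

static int get_fc_port_state_match(const char *table_key,
				   enum fc_port_state *value)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_port_state_names); i++) {
		if (strncmp(table_key, fc_port_state_names[i].name,
			    fc_port_state_names[i].matchlen) == 0) {
			*value = fc_port_state_names[i].value;
			return 0;	/* match */
		}
	}
	return 1;	/* no match */
}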
@@ -253,6 +262,7 @@ static const struct {
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
{ FC_PORTSPEED_64GBIT, "64 Gbit" },
{ FC_PORTSPEED_128GBIT, "128 Gbit" },
+ { FC_PORTSPEED_256GBIT, "256 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
@@ -418,6 +428,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->fabric_name = -1;
memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
+ memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats));
fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
@@ -532,7 +543,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
struct nlmsghdr *nlh;
struct fc_nl_event *event;
const char *name;
- u32 len;
+ size_t len, padding;
int err;
if (!data_buf || data_len < 4)
@@ -543,7 +554,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
goto send_fail;
}
- len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
+ len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
@@ -567,7 +578,9 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
event->event_num = event_number;
event->event_code = event_code;
if (data_len)
- memcpy(&event->event_data, data_buf, data_len);
+ memcpy(event->event_data_flex, data_buf, data_len);
+ padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
+ memset(event->event_data_flex + data_len, 0, padding);
nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
GFP_KERNEL);
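A worked example of the sizing arithmetic, under illustrative assumptions: a 40-byte struct fc_nl_event whose flexible event_data_flex array replaces a 4-byte legacy event_data tail, 8-byte FC_NL_MSGALIGN alignment, and a 6-byte payload:

#include <stdio.h>

int main(void)
{
	size_t ev = 40, legacy_tail = 4, data_len = 6;
	size_t need = ev - legacy_tail + data_len;            /* 42 */
	size_t len = (need + 7) & ~(size_t)7;                 /* align to 8 -> 48 */
	size_t padding = len - (ev - legacy_tail) - data_len; /* 6 zeroed bytes */

	printf("len=%zu padding=%zu\n", len, padding);
	return 0;
}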
@@ -628,7 +641,256 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
EXPORT_SYMBOL(fc_host_post_vendor_event);
/**
- * fc_host_rcv_fpin - routine to process a received FPIN.
+ * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn
+ * @shost: host the fc_rport is associated with
+ * @wwpn: wwpn of the fc_rport device
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_rport *
+fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
+{
+ struct fc_rport *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ list_for_each_entry(rport, &fc_host_rports(shost), peers) {
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ continue;
+
+ if (rport->port_name == wwpn) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return rport;
+ }
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_find_rport_by_wwpn);
+
+static void
+fc_li_stats_update(u16 event_type,
+ struct fc_fpin_stats *stats)
+{
+ stats->li++;
+ switch (event_type) {
+ case FPIN_LI_UNKNOWN:
+ stats->li_failure_unknown++;
+ break;
+ case FPIN_LI_LINK_FAILURE:
+ stats->li_link_failure_count++;
+ break;
+ case FPIN_LI_LOSS_OF_SYNC:
+ stats->li_loss_of_sync_count++;
+ break;
+ case FPIN_LI_LOSS_OF_SIG:
+ stats->li_loss_of_signals_count++;
+ break;
+ case FPIN_LI_PRIM_SEQ_ERR:
+ stats->li_prim_seq_err_count++;
+ break;
+ case FPIN_LI_INVALID_TX_WD:
+ stats->li_invalid_tx_word_count++;
+ break;
+ case FPIN_LI_INVALID_CRC:
+ stats->li_invalid_crc_count++;
+ break;
+ case FPIN_LI_DEVICE_SPEC:
+ stats->li_device_specific++;
+ break;
+ }
+}
+
+static void
+fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats)
+{
+ stats->dn++;
+ switch (reason_code) {
+ case FPIN_DELI_UNKNOWN:
+ stats->dn_unknown++;
+ break;
+ case FPIN_DELI_TIMEOUT:
+ stats->dn_timeout++;
+ break;
+ case FPIN_DELI_UNABLE_TO_ROUTE:
+ stats->dn_unable_to_route++;
+ break;
+ case FPIN_DELI_DEVICE_SPEC:
+ stats->dn_device_specific++;
+ break;
+ }
+}
+
+static void
+fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats)
+{
+ stats->cn++;
+ switch (event_type) {
+ case FPIN_CONGN_CLEAR:
+ stats->cn_clear++;
+ break;
+ case FPIN_CONGN_LOST_CREDIT:
+ stats->cn_lost_credit++;
+ break;
+ case FPIN_CONGN_CREDIT_STALL:
+ stats->cn_credit_stall++;
+ break;
+ case FPIN_CONGN_OVERSUBSCRIPTION:
+ stats->cn_oversubscription++;
+ break;
+ case FPIN_CONGN_DEVICE_SPEC:
+ stats->cn_device_specific++;
+ }
+}
+
+/*
+ * fc_fpin_li_stats_update - routine to update Link Integrity
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to link integrity descriptor
+ *
+ */
+static void
+fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
+{
+ u8 i;
+ struct fc_rport *rport = NULL;
+ struct fc_rport *attach_rport = NULL;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
+ u16 event_type = be16_to_cpu(li_desc->event_type);
+ u64 wwpn;
+
+ rport = fc_find_rport_by_wwpn(shost,
+ be64_to_cpu(li_desc->attached_wwpn));
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ attach_rport = rport;
+ fc_li_stats_update(event_type, &attach_rport->fpin_stats);
+ }
+
+ if (be32_to_cpu(li_desc->pname_count) > 0) {
+ for (i = 0;
+ i < be32_to_cpu(li_desc->pname_count);
+ i++) {
+ wwpn = be64_to_cpu(li_desc->pname_list[i]);
+ rport = fc_find_rport_by_wwpn(shost, wwpn);
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ if (rport == attach_rport)
+ continue;
+ fc_li_stats_update(event_type,
+ &rport->fpin_stats);
+ }
+ }
+ }
+
+ if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
+ fc_li_stats_update(event_type, &fc_host->fpin_stats);
+}
+
+/*
+ * fc_fpin_delivery_stats_update - routine to update Delivery Notification
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to delivery descriptor
+ *
+ */
+static void
+fc_fpin_delivery_stats_update(struct Scsi_Host *shost,
+ struct fc_tlv_desc *tlv)
+{
+ struct fc_rport *rport = NULL;
+ struct fc_rport *attach_rport = NULL;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv;
+ u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code);
+
+ rport = fc_find_rport_by_wwpn(shost,
+ be64_to_cpu(dn_desc->attached_wwpn));
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ attach_rport = rport;
+ fc_delivery_stats_update(reason_code,
+ &attach_rport->fpin_stats);
+ }
+
+ if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn))
+ fc_delivery_stats_update(reason_code, &fc_host->fpin_stats);
+}
+
+/*
+ * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to peer congestion descriptor
+ *
+ */
+static void
+fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost,
+ struct fc_tlv_desc *tlv)
+{
+ u8 i;
+ struct fc_rport *rport = NULL;
+ struct fc_rport *attach_rport = NULL;
+ struct fc_fn_peer_congn_desc *pc_desc =
+ (struct fc_fn_peer_congn_desc *)tlv;
+ u16 event_type = be16_to_cpu(pc_desc->event_type);
+ u64 wwpn;
+
+ rport = fc_find_rport_by_wwpn(shost,
+ be64_to_cpu(pc_desc->attached_wwpn));
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ attach_rport = rport;
+ fc_cn_stats_update(event_type, &attach_rport->fpin_stats);
+ }
+
+ if (be32_to_cpu(pc_desc->pname_count) > 0) {
+ for (i = 0;
+ i < be32_to_cpu(pc_desc->pname_count);
+ i++) {
+ wwpn = be64_to_cpu(pc_desc->pname_list[i]);
+ rport = fc_find_rport_by_wwpn(shost, wwpn);
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ if (rport == attach_rport)
+ continue;
+ fc_cn_stats_update(event_type,
+ &rport->fpin_stats);
+ }
+ }
+ }
+}
+
+/*
+ * fc_fpin_congn_stats_update - routine to update Congestion
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to congestion descriptor
+ *
+ */
+static void
+fc_fpin_congn_stats_update(struct Scsi_Host *shost,
+ struct fc_tlv_desc *tlv)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv;
+
+ fc_cn_stats_update(be16_to_cpu(congn->event_type),
+ &fc_host->fpin_stats);
+}
+
+/**
+ * fc_host_fpin_rcv - routine to process a received FPIN.
* @shost: host the FPIN was received on
* @fpin_len: length of FPIN payload, in bytes
* @fpin_buf: pointer to FPIN payload
@@ -639,6 +901,38 @@ EXPORT_SYMBOL(fc_host_post_vendor_event);
void
fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
{
+ struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
+ struct fc_tlv_desc *tlv;
+ u32 desc_cnt = 0, bytes_remain;
+ u32 dtag;
+
+ /* Update Statistics */
+ tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+ bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc);
+ bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
+
+ while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+ bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ fc_fpin_li_stats_update(shost, tlv);
+ break;
+ case ELS_DTAG_DELIVERY:
+ fc_fpin_delivery_stats_update(shost, tlv);
+ break;
+ case ELS_DTAG_PEER_CONGEST:
+ fc_fpin_peer_congn_stats_update(shost, tlv);
+ break;
+ case ELS_DTAG_CONGESTION:
+ fc_fpin_congn_stats_update(shost, tlv);
+ }
+
+ desc_cnt++;
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ }
+
fc_host_post_fc_event(shost, fc_get_event_number(),
FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
}
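A userspace model of the walk: each descriptor is an 8-byte big-endian tag/length header followed by its value bytes, mirroring FC_TLV_DESC_HDR_SZ, FC_TLV_DESC_SZ_FROM_LENGTH() and fc_tlv_next_desc(); the two descriptors below are fabricated:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct tlv_hdr {
	uint32_t tag;	/* big endian on the wire */
	uint32_t len;	/* big endian, value bytes after the header */
};

int main(void)
{
	uint8_t buf[28] = { 0 };
	struct tlv_hdr a = { htonl(0x20001), htonl(8) };  /* 8-byte value */
	struct tlv_hdr b = { htonl(0x20002), htonl(4) };  /* 4-byte value */
	uint32_t remain = sizeof(buf), hdr = 8;
	uint8_t *p = buf;

	memcpy(buf, &a, hdr);
	memcpy(buf + 16, &b, hdr);

	while (remain >= hdr) {
		struct tlv_hdr t;
		uint32_t sz;

		memcpy(&t, p, hdr);
		sz = hdr + ntohl(t.len);	/* FC_TLV_DESC_SZ_FROM_LENGTH */
		if (remain < sz)
			break;
		printf("tag=0x%x size=%u\n", ntohl(t.tag), sz);
		remain -= sz;
		p += sz;			/* fc_tlv_next_desc */
	}
	return 0;
}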
@@ -878,7 +1172,7 @@ static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
return 0;
}
-fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
+fc_rport_show_function(dev_loss_tmo, "%u\n", 20, )
static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -939,7 +1233,59 @@ show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
show_fc_rport_roles, NULL);
-fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
+static ssize_t fc_rport_set_marginal_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fc_rport *rport = transport_class_to_rport(dev);
+ enum fc_port_state port_state;
+ int ret = 0;
+
+ ret = get_fc_port_state_match(buf, &port_state);
+ if (ret)
+ return -EINVAL;
+ if (port_state == FC_PORTSTATE_MARGINAL) {
+ /*
+ * Change the state to Marginal only if the
+ * current rport state is Online.
+ * Allow only the Online -> Marginal transition.
+ */
+ if (rport->port_state == FC_PORTSTATE_ONLINE)
+ rport->port_state = port_state;
+ else
+ return -EINVAL;
+ } else if (port_state == FC_PORTSTATE_ONLINE) {
+ /*
+ * Change the state to Online only if the
+ * current rport state is Marginal.
+ * Allow only the Marginal -> Online transition.
+ */
+ if (rport->port_state == FC_PORTSTATE_MARGINAL)
+ rport->port_state = port_state;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t
+show_fc_rport_port_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *name;
+ struct fc_rport *rport = transport_class_to_rport(dev);
+
+ name = get_fc_port_state_name(rport->port_state);
+ if (!name)
+ return -EINVAL;
+
+ return snprintf(buf, 20, "%s\n", name);
+}
+
+static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200,
+ show_fc_rport_port_state, fc_rport_set_marginal_state);
+
fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
/*
@@ -990,6 +1336,67 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev,
static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
+#define fc_rport_fpin_statistic(name) \
+static ssize_t fc_rport_fpinstat_##name(struct device *cd, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(cd); \
+ \
+ return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name); \
+} \
+static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL)
+
+fc_rport_fpin_statistic(dn);
+fc_rport_fpin_statistic(dn_unknown);
+fc_rport_fpin_statistic(dn_timeout);
+fc_rport_fpin_statistic(dn_unable_to_route);
+fc_rport_fpin_statistic(dn_device_specific);
+fc_rport_fpin_statistic(cn);
+fc_rport_fpin_statistic(cn_clear);
+fc_rport_fpin_statistic(cn_lost_credit);
+fc_rport_fpin_statistic(cn_credit_stall);
+fc_rport_fpin_statistic(cn_oversubscription);
+fc_rport_fpin_statistic(cn_device_specific);
+fc_rport_fpin_statistic(li);
+fc_rport_fpin_statistic(li_failure_unknown);
+fc_rport_fpin_statistic(li_link_failure_count);
+fc_rport_fpin_statistic(li_loss_of_sync_count);
+fc_rport_fpin_statistic(li_loss_of_signals_count);
+fc_rport_fpin_statistic(li_prim_seq_err_count);
+fc_rport_fpin_statistic(li_invalid_tx_word_count);
+fc_rport_fpin_statistic(li_invalid_crc_count);
+fc_rport_fpin_statistic(li_device_specific);
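
For readability, roughly what one invocation of the macro above expands to (whitespace adjusted):

/* Approximate expansion of fc_rport_fpin_statistic(dn). */
static ssize_t fc_rport_fpinstat_dn(struct device *cd,
				    struct device_attribute *attr,
				    char *buf)
{
	struct fc_rport *rport = transport_class_to_rport(cd);

	return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.dn);
}
static FC_DEVICE_ATTR(rport, fpin_dn, 0444, fc_rport_fpinstat_dn, NULL);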
+
+static struct attribute *fc_rport_statistics_attrs[] = {
+ &device_attr_rport_fpin_dn.attr,
+ &device_attr_rport_fpin_dn_unknown.attr,
+ &device_attr_rport_fpin_dn_timeout.attr,
+ &device_attr_rport_fpin_dn_unable_to_route.attr,
+ &device_attr_rport_fpin_dn_device_specific.attr,
+ &device_attr_rport_fpin_li.attr,
+ &device_attr_rport_fpin_li_failure_unknown.attr,
+ &device_attr_rport_fpin_li_link_failure_count.attr,
+ &device_attr_rport_fpin_li_loss_of_sync_count.attr,
+ &device_attr_rport_fpin_li_loss_of_signals_count.attr,
+ &device_attr_rport_fpin_li_prim_seq_err_count.attr,
+ &device_attr_rport_fpin_li_invalid_tx_word_count.attr,
+ &device_attr_rport_fpin_li_invalid_crc_count.attr,
+ &device_attr_rport_fpin_li_device_specific.attr,
+ &device_attr_rport_fpin_cn.attr,
+ &device_attr_rport_fpin_cn_clear.attr,
+ &device_attr_rport_fpin_cn_lost_credit.attr,
+ &device_attr_rport_fpin_cn_credit_stall.attr,
+ &device_attr_rport_fpin_cn_oversubscription.attr,
+ &device_attr_rport_fpin_cn_device_specific.attr,
+ NULL
+};
+
+static struct attribute_group fc_rport_statistics_group = {
+ .name = "statistics",
+ .attrs = fc_rport_statistics_attrs,
+};
+
/*
* FC SCSI Target Attribute Management
@@ -1274,7 +1681,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return -EBUSY;
}
@@ -1743,6 +2150,42 @@ fc_host_statistic(fc_xid_not_found);
fc_host_statistic(fc_xid_busy);
fc_host_statistic(fc_seq_not_found);
fc_host_statistic(fc_non_bls_resp);
+fc_host_statistic(cn_sig_warn);
+fc_host_statistic(cn_sig_alarm);
+
+#define fc_host_fpin_statistic(name) \
+static ssize_t fc_host_fpinstat_##name(struct device *cd, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(cd); \
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost); \
+ \
+ return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name); \
+} \
+static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL)
+
+fc_host_fpin_statistic(dn);
+fc_host_fpin_statistic(dn_unknown);
+fc_host_fpin_statistic(dn_timeout);
+fc_host_fpin_statistic(dn_unable_to_route);
+fc_host_fpin_statistic(dn_device_specific);
+fc_host_fpin_statistic(cn);
+fc_host_fpin_statistic(cn_clear);
+fc_host_fpin_statistic(cn_lost_credit);
+fc_host_fpin_statistic(cn_credit_stall);
+fc_host_fpin_statistic(cn_oversubscription);
+fc_host_fpin_statistic(cn_device_specific);
+fc_host_fpin_statistic(li);
+fc_host_fpin_statistic(li_failure_unknown);
+fc_host_fpin_statistic(li_link_failure_count);
+fc_host_fpin_statistic(li_loss_of_sync_count);
+fc_host_fpin_statistic(li_loss_of_signals_count);
+fc_host_fpin_statistic(li_prim_seq_err_count);
+fc_host_fpin_statistic(li_invalid_tx_word_count);
+fc_host_fpin_statistic(li_invalid_crc_count);
+fc_host_fpin_statistic(li_device_specific);
static ssize_t
fc_reset_statistics(struct device *dev, struct device_attribute *attr,
@@ -1792,7 +2235,29 @@ static struct attribute *fc_statistics_attrs[] = {
&device_attr_host_fc_xid_busy.attr,
&device_attr_host_fc_seq_not_found.attr,
&device_attr_host_fc_non_bls_resp.attr,
+ &device_attr_host_cn_sig_warn.attr,
+ &device_attr_host_cn_sig_alarm.attr,
&device_attr_host_reset_statistics.attr,
+ &device_attr_host_fpin_dn.attr,
+ &device_attr_host_fpin_dn_unknown.attr,
+ &device_attr_host_fpin_dn_timeout.attr,
+ &device_attr_host_fpin_dn_unable_to_route.attr,
+ &device_attr_host_fpin_dn_device_specific.attr,
+ &device_attr_host_fpin_li.attr,
+ &device_attr_host_fpin_li_failure_unknown.attr,
+ &device_attr_host_fpin_li_link_failure_count.attr,
+ &device_attr_host_fpin_li_loss_of_sync_count.attr,
+ &device_attr_host_fpin_li_loss_of_signals_count.attr,
+ &device_attr_host_fpin_li_prim_seq_err_count.attr,
+ &device_attr_host_fpin_li_invalid_tx_word_count.attr,
+ &device_attr_host_fpin_li_invalid_crc_count.attr,
+ &device_attr_host_fpin_li_device_specific.attr,
+ &device_attr_host_fpin_cn.attr,
+ &device_attr_host_fpin_cn_clear.attr,
+ &device_attr_host_fpin_cn_lost_credit.attr,
+ &device_attr_host_fpin_cn_credit_stall.attr,
+ &device_attr_host_fpin_cn_oversubscription.attr,
+ &device_attr_host_fpin_cn_device_specific.attr,
NULL
};
@@ -2094,7 +2559,8 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
if (rport->scsi_target_id == -1)
continue;
- if (rport->port_state != FC_PORTSTATE_ONLINE)
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (rport->port_state != FC_PORTSTATE_MARGINAL))
continue;
if ((channel == rport->channel) &&
@@ -2176,6 +2642,7 @@ fc_attach_transport(struct fc_function_template *ft)
i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
i->rport_attr_cont.ac.class = &fc_rport_class.class;
i->rport_attr_cont.ac.match = fc_rport_match;
+ i->rport_attr_cont.statistics = &fc_rport_statistics_group;
transport_container_register(&i->rport_attr_cont);
i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
@@ -2261,7 +2728,7 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
- SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
@@ -2957,7 +3424,8 @@ fc_remote_port_delete(struct fc_rport *rport)
spin_lock_irqsave(shost->host_lock, flags);
- if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (rport->port_state != FC_PORTSTATE_MARGINAL)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@@ -3099,7 +3567,8 @@ fc_timeout_deleted_rport(struct work_struct *work)
* target, validate it still is. If not, tear down the
* scsi_target on it.
*/
- if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
+ if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
+ (rport->port_state == FC_PORTSTATE_MARGINAL)) &&
(rport->scsi_target_id != -1) &&
!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
dev_printk(KERN_ERR, &rport->dev,
@@ -3242,7 +3711,8 @@ fc_scsi_scan_rport(struct work_struct *work)
struct fc_internal *i = to_fc_internal(shost->transportt);
unsigned long flags;
- if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
+ if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
+ (rport->port_state == FC_PORTSTATE_MARGINAL)) &&
(rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
!(i->f->disable_target_scan)) {
scsi_scan_target(&rport->dev, rport->channel,
@@ -3315,6 +3785,28 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
}
EXPORT_SYMBOL(fc_block_scsi_eh);
+/**
+ * fc_eh_should_retry_cmd - Checks if the cmd should be retried
+ * @scmd: The SCSI command to be checked
+ *
+ * This checks the rport state to decide if a cmd is
+ * retryable.
+ *
+ * Returns: true if the cmd may be retried; false if the rport is not
+ * online and the cmd has REQ_FAILFAST_TRANSPORT set, in which case the
+ * host byte is set to DID_TRANSPORT_MARGINAL.
+ */
+bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
+
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
+ set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd);
+
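
A hedged sketch of how an FC LLD might wire the new helper into its host template; the template itself is illustrative, not taken from any in-tree driver:

/* Illustrative only: hooking fc_eh_should_retry_cmd() into a
 * hypothetical LLD's scsi_host_template. */
static struct scsi_host_template example_fc_template = {
	.module			= THIS_MODULE,
	.name			= "example_fc",
	.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
	/* ... remaining LLD callbacks elided ... */
};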
/**
* fc_vport_setup - allocates and creates a FC virtual port.
* @shost: scsi host the virtual port is connected to.
@@ -3746,7 +4238,8 @@ static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
!(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
return BLK_STS_RESOURCE;
- if (rport->port_state != FC_PORTSTATE_ONLINE)
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (rport->port_state != FC_PORTSTATE_MARGINAL))
return BLK_STS_IOERR;
return BLK_STS_OK;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index dfc726fa34e3..cd3db9684e52 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,8 +86,12 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
-static struct workqueue_struct *iscsi_eh_timer_workq;
+
+static struct workqueue_struct *iscsi_conn_cleanup_workq;
static DEFINE_IDA(iscsi_sess_ida);
/*
@@ -124,7 +128,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
- return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return sysfs_emit(buf, "%llu\n",
+ (unsigned long long)iscsi_handle(priv->iscsi_transport));
}
static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
@@ -134,7 +142,7 @@ show_transport_##name(struct device *dev, \
struct device_attribute *attr,char *buf) \
{ \
struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
- return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\
} \
static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
@@ -163,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
static void iscsi_endpoint_release(struct device *dev)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, ep->id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
kfree(ep);
}
@@ -175,7 +188,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -188,48 +201,37 @@ static struct attribute_group iscsi_endpoint_group = {
.attrs = iscsi_endpoint_attrs,
};
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- const uint64_t *epid = data;
-
- return *epid == ep->id;
-}
-
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
- struct device *dev;
struct iscsi_endpoint *ep;
- uint64_t id;
- int err;
-
- for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
- iscsi_match_epid);
- if (!dev)
- break;
- else
- put_device(dev);
- }
- if (id == ISCSI_MAX_EPID) {
- printk(KERN_ERR "Too many connections. Max supported %u\n",
- ISCSI_MAX_EPID - 1);
- return NULL;
- }
+ int err, id;
ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
if (!ep)
return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+
+ /*
+ * First endpoint id should be 1 to comply with user space
+ * applications (iscsid).
+ */
+ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+ id);
+ goto free_ep;
+ }
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_ep;
+ goto free_id;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -243,6 +245,10 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
+free_id:
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
free_ep:
kfree(ep);
return NULL;
@@ -256,22 +262,30 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+void iscsi_put_endpoint(struct iscsi_endpoint *ep)
+{
+ put_device(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
+
+/**
+ * iscsi_lookup_endpoint - get ep from handle
+ * @handle: endpoint handle
+ *
+ * Caller must do an iscsi_put_endpoint() when done with the endpoint.
+ */
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
struct iscsi_endpoint *ep;
- struct device *dev;
- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
- iscsi_match_epid);
- if (!dev)
- return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ ep = idr_find(&iscsi_ep_idr, handle);
+ if (!ep)
+ goto unlock;
- ep = iscsi_dev_to_endpoint(dev);
- /*
- * we can drop this now because the interface will prevent
- * removals and lookups from racing.
- */
- put_device(dev);
+ get_device(&ep->dev);
+unlock:
+ mutex_unlock(&iscsi_ep_idr_mutex);
return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
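
Because the lookup now takes a device reference, a hedged sketch of the get/put pairing callers are expected to follow; do_work_on_ep() is a hypothetical placeholder:

/* Illustrative caller pattern for the idr-based lookup above. */
static int example_use_ep(u64 handle)
{
	struct iscsi_endpoint *ep = iscsi_lookup_endpoint(handle);
	int rc;

	if (!ep)
		return -EINVAL;
	rc = do_work_on_ep(ep);		/* hypothetical */
	iscsi_put_endpoint(ep);		/* drop the ref the lookup took */
	return rc;
}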
@@ -428,40 +442,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
struct iscsi_transport *t = iface->transport;
- int param;
- int param_type;
+ int param = -1;
- if (attr == &dev_attr_iface_enabled.attr)
- param = ISCSI_NET_PARAM_IFACE_ENABLE;
- else if (attr == &dev_attr_iface_vlan_id.attr)
- param = ISCSI_NET_PARAM_VLAN_ID;
- else if (attr == &dev_attr_iface_vlan_priority.attr)
- param = ISCSI_NET_PARAM_VLAN_PRIORITY;
- else if (attr == &dev_attr_iface_vlan_enabled.attr)
- param = ISCSI_NET_PARAM_VLAN_ENABLED;
- else if (attr == &dev_attr_iface_mtu.attr)
- param = ISCSI_NET_PARAM_MTU;
- else if (attr == &dev_attr_iface_port.attr)
- param = ISCSI_NET_PARAM_PORT;
- else if (attr == &dev_attr_iface_ipaddress_state.attr)
- param = ISCSI_NET_PARAM_IPADDR_STATE;
- else if (attr == &dev_attr_iface_delayed_ack_en.attr)
- param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
- else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
- param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
- else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
- param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
- else if (attr == &dev_attr_iface_tcp_wsf.attr)
- param = ISCSI_NET_PARAM_TCP_WSF;
- else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
- param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
- else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
- param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
- else if (attr == &dev_attr_iface_cache_id.attr)
- param = ISCSI_NET_PARAM_CACHE_ID;
- else if (attr == &dev_attr_iface_redirect_en.attr)
- param = ISCSI_NET_PARAM_REDIRECT_EN;
- else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr)
param = ISCSI_IFACE_PARAM_HDRDGST_EN;
@@ -497,6 +480,40 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
else if (attr == &dev_attr_iface_initiator_name.attr)
param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+
+ if (param != -1)
+ return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+ if (attr == &dev_attr_iface_enabled.attr)
+ param = ISCSI_NET_PARAM_IFACE_ENABLE;
+ else if (attr == &dev_attr_iface_vlan_id.attr)
+ param = ISCSI_NET_PARAM_VLAN_ID;
+ else if (attr == &dev_attr_iface_vlan_priority.attr)
+ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+ else if (attr == &dev_attr_iface_vlan_enabled.attr)
+ param = ISCSI_NET_PARAM_VLAN_ENABLED;
+ else if (attr == &dev_attr_iface_mtu.attr)
+ param = ISCSI_NET_PARAM_MTU;
+ else if (attr == &dev_attr_iface_port.attr)
+ param = ISCSI_NET_PARAM_PORT;
+ else if (attr == &dev_attr_iface_ipaddress_state.attr)
+ param = ISCSI_NET_PARAM_IPADDR_STATE;
+ else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+ param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF;
+ else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_iface_cache_id.attr)
+ param = ISCSI_NET_PARAM_CACHE_ID;
+ else if (attr == &dev_attr_iface_redirect_en.attr)
+ param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -587,32 +604,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
return 0;
}
- switch (param) {
- case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
- case ISCSI_IFACE_PARAM_HDRDGST_EN:
- case ISCSI_IFACE_PARAM_DATADGST_EN:
- case ISCSI_IFACE_PARAM_IMM_DATA_EN:
- case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
- case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
- case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
- case ISCSI_IFACE_PARAM_ERL:
- case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
- case ISCSI_IFACE_PARAM_FIRST_BURST:
- case ISCSI_IFACE_PARAM_MAX_R2T:
- case ISCSI_IFACE_PARAM_MAX_BURST:
- case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
- case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
- case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
- case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
- case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
- case ISCSI_IFACE_PARAM_INITIATOR_NAME:
- param_type = ISCSI_IFACE_PARAM;
- break;
- default:
- param_type = ISCSI_NET_PARAM;
- }
-
- return t->attr_is_visible(param_type, param);
+ return t->attr_is_visible(ISCSI_NET_PARAM, param);
}
static struct attribute *iscsi_iface_attrs[] = {
@@ -1568,7 +1560,6 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
struct iscsi_cls_host *ihost = shost->shost_data;
memset(ihost, 0, sizeof(*ihost));
- atomic_set(&ihost->nr_scans, 0);
mutex_init(&ihost->mutex);
iscsi_bsg_host_add(shost, ihost);
@@ -1611,6 +1602,7 @@ static DEFINE_MUTEX(rx_queue_mutex);
static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
+static LIST_HEAD(connlist_err);
static DEFINE_SPINLOCK(connlock);
static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
@@ -1686,10 +1678,8 @@ static const char *iscsi_session_state_name(int state)
int iscsi_session_chkready(struct iscsi_cls_session *session)
{
- unsigned long flags;
int err;
- spin_lock_irqsave(&session->lock, flags);
switch (session->state) {
case ISCSI_SESSION_LOGGED_IN:
err = 0;
@@ -1704,7 +1694,6 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
err = DID_NO_CONNECT << 16;
break;
}
- spin_unlock_irqrestore(&session->lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_session_chkready);
@@ -1757,25 +1746,6 @@ void iscsi_host_for_each_session(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
-/**
- * iscsi_scan_finished - helper to report when running scans are done
- * @shost: scsi host
- * @time: scan run time
- *
- * This function can be used by drives like qla4xxx to report to the scsi
- * layer when the scans it kicked off at module load time are done.
- */
-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
- struct iscsi_cls_host *ihost = shost->shost_data;
- /*
- * qla4xxx will have kicked off some session unblocks before calling
- * scsi_scan_host, so just wait for them to complete.
- */
- return !atomic_read(&ihost->nr_scans);
-}
-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
-
struct iscsi_scan_data {
unsigned int channel;
unsigned int id;
@@ -1844,8 +1814,6 @@ static void iscsi_scan_session(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session, scan_work);
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost = shost->shost_data;
struct iscsi_scan_data scan_data;
scan_data.channel = 0;
@@ -1854,7 +1822,6 @@ static void iscsi_scan_session(struct work_struct *work)
scan_data.rescan = SCSI_SCAN_RESCAN;
iscsi_user_scan_session(&session->dev, &scan_data);
- atomic_dec(&ihost->nr_scans);
}
/**
@@ -1912,12 +1879,12 @@ static void session_recovery_timedout(struct work_struct *work)
}
spin_unlock_irqrestore(&session->lock, flags);
- if (session->transport->session_recovery_timedout)
- session->transport->session_recovery_timedout(session);
-
ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
}
static void __iscsi_unblock_session(struct work_struct *work)
@@ -1925,30 +1892,16 @@ static void __iscsi_unblock_session(struct work_struct *work)
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
unblock_work);
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
- /*
- * The recovery and unblock work get run from the same workqueue,
- * so try to cancel it if it was going to run after this unblock.
- */
- cancel_delayed_work(&session->recovery_work);
+
+ cancel_delayed_work_sync(&session->recovery_work);
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_LOGGED_IN;
spin_unlock_irqrestore(&session->lock, flags);
/* start IO */
scsi_target_unblock(&session->dev, SDEV_RUNNING);
- /*
- * Only do kernel scanning if the driver is properly hooked into
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
- if (shost->hostt->scan_finished) {
- if (scsi_queue_work(shost, &session->scan_work))
- atomic_inc(&ihost->nr_scans);
- }
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
}
@@ -1960,12 +1913,16 @@ static void __iscsi_unblock_session(struct work_struct *work)
*/
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+ if (!cancel_work_sync(&session->block_work))
+ cancel_delayed_work_sync(&session->recovery_work);
+
+ queue_work(session->workq, &session->unblock_work);
/*
- * make sure all the events have completed before tell the driver
- * it is safe
+ * Blocking the session can be done from any context so we only
+ * queue the block work. Make sure the unblock work has completed
+ * because it flushes/cancels the other works and updates the state.
*/
- flush_workqueue(iscsi_eh_timer_workq);
+ flush_work(&session->unblock_work);
}
EXPORT_SYMBOL_GPL(iscsi_unblock_session);
@@ -1983,14 +1940,14 @@ static void __iscsi_block_session(struct work_struct *work)
scsi_target_block(&session->dev);
ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
if (session->recovery_tmo >= 0)
- queue_delayed_work(iscsi_eh_timer_workq,
+ queue_delayed_work(session->workq,
&session->recovery_work,
session->recovery_tmo * HZ);
}
void iscsi_block_session(struct iscsi_cls_session *session)
{
- queue_work(iscsi_eh_timer_workq, &session->block_work);
+ queue_work(session->workq, &session->block_work);
}
EXPORT_SYMBOL_GPL(iscsi_block_session);
@@ -2012,7 +1969,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- return;
+ goto unbind_session_exit;
}
target_id = session->target_id;
@@ -2020,14 +1977,24 @@ static void __iscsi_unbind_session(struct work_struct *work)
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
+ scsi_remove_target(&session->dev);
+
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, target_id);
+ ida_free(&iscsi_sess_ida, target_id);
- scsi_remove_target(&session->dev);
+unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
+static void __iscsi_destroy_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, destroy_work);
+
+ session->transport->destroy_session(session);
+}
+
struct iscsi_cls_session *
iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
int dd_size)
@@ -2050,6 +2017,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
INIT_WORK(&session->block_work, __iscsi_block_session);
INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
INIT_WORK(&session->scan_work, iscsi_scan_session);
+ INIT_WORK(&session->destroy_work, __iscsi_destroy_session);
spin_lock_init(&session->lock);
/* this is released in the dev's release function */
@@ -2067,19 +2035,27 @@ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
unsigned long flags;
int id = 0;
int err;
session->sid = atomic_add_return(1, &iscsi_session_nr);
+ session->workq = alloc_workqueue("iscsi_ctrl_%d:%d",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ shost->host_no, session->sid);
+ if (!session->workq)
+ return -ENOMEM;
+
if (target_id == ISCSI_MAX_TARGET) {
- id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL);
if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
"Failure in Target ID Allocation\n");
- return id;
+ err = id;
+ goto destroy_wq;
}
session->target_id = (unsigned int)id;
session->ida_used = true;
@@ -2112,8 +2088,9 @@ release_dev:
device_del(&session->dev);
release_ida:
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, session->target_id);
-
+ ida_free(&iscsi_sess_ida, session->target_id);
+destroy_wq:
+ destroy_workqueue(session->workq);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
@@ -2164,7 +2141,9 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
{
if (!iscsi_is_conn_dev(dev))
return 0;
- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+
+ iscsi_remove_conn(iscsi_dev_to_conn(dev));
+ return 0;
}
void iscsi_remove_session(struct iscsi_cls_session *session)
@@ -2175,14 +2154,13 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
ISCSI_DBG_TRANS_SESSION(session, "Removing session\n");
spin_lock_irqsave(&sesslock, flags);
- list_del(&session->sess_list);
+ if (!list_empty(&session->sess_list))
+ list_del(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- /* make sure there are no blocks/unblocks queued */
- flush_workqueue(iscsi_eh_timer_workq);
- /* make sure the timedout callout is not running */
- if (!cancel_delayed_work(&session->recovery_work))
- flush_workqueue(iscsi_eh_timer_workq);
+ if (!cancel_work_sync(&session->block_work))
+ cancel_delayed_work_sync(&session->recovery_work);
+ cancel_work_sync(&session->unblock_work);
/*
* If we are blocked let commands flow again. The lld or iscsi
* layer should set up the queuecommand to fail commands.
@@ -2194,7 +2172,10 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
spin_unlock_irqrestore(&session->lock, flags);
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
- /* flush running scans then delete devices */
+ /*
+	 * qla4xxx can perform its own scans when it runs in kernel-only
+	 * mode. Make sure to flush those scans.
+ */
flush_work(&session->scan_work);
/* flush running unbind operations */
flush_work(&session->unbind_work);
@@ -2210,11 +2191,203 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
transport_unregister_device(&session->dev);
+ destroy_workqueue(session->workq);
+
ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
device_del(&session->dev);
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);
+static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
+
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+ break;
+ case STOP_CONN_TERM:
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
+ flag);
+ return;
+ }
+
+ conn->transport->stop_conn(conn, flag);
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
+}
+
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
+static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
+ /*
+	 * For offload, iscsid may not know about the ep, such as when iscsid
+	 * is restarted, or for kernel-based session shutdown where iscsid is
+	 * not even up. For these cases, we do the disconnect now.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
+	 * If this is a termination, we have to call stop_conn with that flag
+	 * so the correct states get set. If we haven't run the work yet, try
+	 * to avoid the extra run.
+ */
+ if (flag == STOP_CONN_TERM) {
+ cancel_work_sync(&conn->cleanup_work);
+ iscsi_stop_conn(conn, flag);
+ } else {
+ /*
+ * Figure out if it was the kernel or userspace initiating this.
+ */
+ spin_lock_irq(&conn->lock);
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_stop_conn(conn, flag);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn,
+ "flush kernel conn cleanup.\n");
+ flush_work(&conn->cleanup_work);
+ }
+ /*
+ * Only clear for recovery to avoid extra cleanup runs during
+ * termination.
+ */
+ spin_lock_irq(&conn->lock);
+ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
+ }
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
+ return 0;
+}
+
+static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
+{
+ struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
+ cleanup_work);
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+
+ mutex_lock(&conn->ep_mutex);
+ /*
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
+ */
+ if (conn->ep)
+ get_device(&conn->ep->dev);
+ iscsi_ep_disconnect(conn, false);
+
+ if (system_state != SYSTEM_RUNNING) {
+ /*
+		 * If the user has configured the session to never time out,
+		 * then hang like they wanted. For all other cases fail right
+		 * away since userspace is not going to relogin.
+ */
+ if (session->recovery_tmo > 0)
+ session->recovery_tmo = 0;
+ }
+
+ iscsi_stop_conn(conn, STOP_CONN_RECOVER);
+ mutex_unlock(&conn->ep_mutex);
+ ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
+}
+
+static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data)
+{
+ struct iscsi_transport *transport;
+ struct iscsi_cls_conn *conn;
+
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+
+ conn = iscsi_dev_to_conn(dev);
+ transport = conn->transport;
+
+ if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN)
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
+
+ transport->destroy_conn(conn);
+ return 0;
+}
+
+/**
+ * iscsi_force_destroy_session - destroy a session from the kernel
+ * @session: session to destroy
+ *
+ * Force the destruction of a session from the kernel. This should only be
+ * used when userspace is no longer running during system shutdown.
+ */
+void iscsi_force_destroy_session(struct iscsi_cls_session *session)
+{
+ struct iscsi_transport *transport = session->transport;
+ unsigned long flags;
+
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
+
+ spin_lock_irqsave(&sesslock, flags);
+ if (list_empty(&session->sess_list)) {
+ spin_unlock_irqrestore(&sesslock, flags);
+ /*
+ * Conn/ep is already freed. Session is being torn down via
+ * async path. For shutdown we don't care about it so return.
+ */
+ return;
+ }
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ device_for_each_child(&session->dev, NULL,
+ iscsi_iter_force_destroy_conn_fn);
+ transport->destroy_session(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_force_destroy_session);
+
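
A hedged sketch of shutdown-time usage; the PCI wiring is illustrative and not taken from an in-tree driver:

/* Illustrative only: forcing session teardown from an LLD shutdown
 * path, where userspace (iscsid) can no longer run. */
static void example_shutdown_session(struct iscsi_cls_session *session)
{
	iscsi_force_destroy_session(session);
}

static void example_pci_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	iscsi_host_for_each_session(shost, example_shutdown_session);
}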
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -2224,27 +2397,16 @@ void iscsi_free_session(struct iscsi_cls_session *session)
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
- * iscsi_create_conn - create iscsi class connection
+ * iscsi_alloc_conn - alloc iscsi class connection
* @session: iscsi cls session
* @dd_size: private driver data size
* @cid: connection id
- *
- * This can be called from a LLD or iscsi_transport. The connection
- * is child of the session so cid must be unique for all connections
- * on the session.
- *
- * Since we do not support MCS, cid will normally be zero. In some cases
- * for software iscsi we could be trying to preallocate a connection struct
- * in which case there could be two connection structs and cid would be
- * non-zero.
*/
struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
{
struct iscsi_transport *transport = session->transport;
struct iscsi_cls_conn *conn;
- unsigned long flags;
- int err;
conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
if (!conn)
@@ -2253,55 +2415,73 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
+ INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
goto free_conn;
dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
+ device_initialize(&conn->dev);
conn->dev.parent = &session->dev;
conn->dev.release = iscsi_conn_release;
- err = device_register(&conn->dev);
+
+ return conn;
+
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_alloc_conn);
+
+/**
+ * iscsi_add_conn - add iscsi class connection
+ * @conn: iscsi cls connection
+ *
+ * This will expose iscsi_cls_conn to sysfs, so make sure the related
+ * resources for sysfs attributes are initialized before calling this.
+ */
+int iscsi_add_conn(struct iscsi_cls_conn *conn)
+{
+ int err;
+ unsigned long flags;
+ struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent);
+
+ err = device_add(&conn->dev);
if (err) {
- iscsi_cls_session_printk(KERN_ERR, session, "could not "
- "register connection's dev\n");
- goto release_parent_ref;
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register connection's dev\n");
+ return err;
}
err = transport_register_device(&conn->dev);
if (err) {
- iscsi_cls_session_printk(KERN_ERR, session, "could not "
- "register transport's dev\n");
- goto release_conn_ref;
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register transport's dev\n");
+ device_del(&conn->dev);
+ return err;
}
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
spin_unlock_irqrestore(&connlock, flags);
- ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
- return conn;
-
-release_conn_ref:
- put_device(&conn->dev);
-release_parent_ref:
- put_device(&session->dev);
-free_conn:
- kfree(conn);
- return NULL;
+ return 0;
}
-
-EXPORT_SYMBOL_GPL(iscsi_create_conn);
+EXPORT_SYMBOL_GPL(iscsi_add_conn);
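
The old iscsi_create_conn() is effectively split into an allocation step and a sysfs-visibility step. A hedged sketch of the converted LLD flow; struct example_conn is a hypothetical private-data type:

/* Illustrative conversion to the split alloc/add API. */
static struct iscsi_cls_conn *
example_conn_create(struct iscsi_cls_session *sess, uint32_t cid)
{
	struct iscsi_cls_conn *conn;

	conn = iscsi_alloc_conn(sess, sizeof(struct example_conn), cid);
	if (!conn)
		return NULL;
	/* initialize dd_data and anything the sysfs attrs read ... */
	if (iscsi_add_conn(conn)) {
		iscsi_put_conn(conn);	/* drops the ref from device_initialize() */
		return NULL;
	}
	return conn;
}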
/**
- * iscsi_destroy_conn - destroy iscsi class connection
- * @conn: iscsi cls session
+ * iscsi_remove_conn - remove iscsi class connection from sysfs
+ * @conn: iscsi cls connection
*
- * This can be called from a LLD or iscsi_transport.
+ * Remove iscsi_cls_conn from sysfs, and wait for previous
+ * read/write of iscsi_cls_conn's attributes in sysfs to finish.
*/
-int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+void iscsi_remove_conn(struct iscsi_cls_conn *conn)
{
unsigned long flags;
@@ -2310,11 +2490,21 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
- ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
- device_unregister(&conn->dev);
- return 0;
+ device_del(&conn->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_remove_conn);
+
+void iscsi_put_conn(struct iscsi_cls_conn *conn)
+{
+ put_device(&conn->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_conn);
+
+void iscsi_get_conn(struct iscsi_cls_conn *conn)
+{
+ get_device(&conn->dev);
}
-EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+EXPORT_SYMBOL_GPL(iscsi_get_conn);
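
And the matching teardown, again as a hedged sketch:

/* Illustrative teardown for a conn created via the sketch above. */
static void example_conn_destroy(struct iscsi_cls_conn *conn)
{
	iscsi_remove_conn(conn);	/* del from sysfs; waits for readers */
	/* release LLD resources the sysfs attributes referenced ... */
	iscsi_put_conn(conn);		/* final ref; frees via release fn */
}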
/*
* iscsi interface functions
@@ -2428,6 +2618,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+ int state;
+
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+	 * Userspace will only do a stop call if we are at least bound, and we
+	 * only need to do the in-kernel cleanup if in the UP state so cmds can
+	 * be released to upper layers. In other states, just wait for
+	 * userspace to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2762,10 +2978,12 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
if (!conn)
return -EINVAL;
+ ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
+ flush_work(&conn->cleanup_work);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+
if (transport->destroy_conn)
transport->destroy_conn(conn);
-
return 0;
}
@@ -2775,7 +2993,10 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
+
+ if (ev->u.set_param.len > PAGE_SIZE)
+ return -EINVAL;
session = iscsi_session_lookup(ev->u.set_param.sid);
conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
@@ -2789,8 +3010,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- err = transport->set_param(conn, ev->u.set_param.param,
- data, ev->u.set_param.len);
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
+ err = transport->set_param(conn, ev->u.set_param.param,
+ data, ev->u.set_param.len);
+ } else {
+ return -ENOTCONN;
+ }
}
return err;
@@ -2845,14 +3071,22 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
return -EINVAL;
+
conn = ep->conn;
- if (conn) {
- mutex_lock(&conn->ep_mutex);
- conn->ep = NULL;
- mutex_unlock(&conn->ep_mutex);
+ if (!conn) {
+ /*
+ * conn was not even bound yet, so we can't get iscsi conn
+ * failures yet.
+ */
+ transport->ep_disconnect(ep);
+ goto put_ep;
}
- transport->ep_disconnect(ep);
+ mutex_lock(&conn->ep_mutex);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
+ mutex_unlock(&conn->ep_mutex);
+put_ep:
+ iscsi_put_endpoint(ep);
return 0;
}
@@ -2878,6 +3112,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
rc = iscsi_if_ep_disconnect(transport,
@@ -2924,6 +3159,9 @@ iscsi_set_host_param(struct iscsi_transport *transport,
if (!transport->set_host_param)
return -ENOSYS;
+ if (ev->u.set_host_param.len > PAGE_SIZE)
+ return -EINVAL;
+
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
if (!shost) {
printk(KERN_ERR "set_host_param could not find host no %u\n",
@@ -3186,7 +3424,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.set_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_set_fnode;
}
idx = ev->u.set_flashnode.flashnode_idx;
@@ -3505,6 +3743,118 @@ exit_host_stats:
return err;
}
+static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn = NULL;
+ struct iscsi_endpoint *ep;
+ uint32_t pdu_len;
+ int err = 0;
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_CONN:
+ return iscsi_if_create_conn(transport, ev);
+ case ISCSI_UEVENT_DESTROY_CONN:
+ return iscsi_if_destroy_conn(transport, ev);
+ case ISCSI_UEVENT_STOP_CONN:
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid,
+ ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
+ }
+
+ /*
+	 * The following cmds need to be run under the ep_mutex so the
+	 * in-kernel conn cleanup (ep_disconnect plus conn unbind) is not
+	 * done while these are running. They also must not run if we have
+	 * just run a conn cleanup, because they would set the state in a
+	 * way that might allow IO or send IO themselves.
+ */
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_START_CONN:
+ conn = iscsi_conn_lookup(ev->u.start_conn.sid,
+ ev->u.start_conn.cid);
+ break;
+ case ISCSI_UEVENT_BIND_CONN:
+ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+ break;
+ }
+
+ if (!conn)
+ return -EINVAL;
+
+ mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
+ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ mutex_unlock(&conn->ep_mutex);
+ ev->r.retcode = -ENOTCONN;
+ return 0;
+ }
+ spin_unlock_irq(&conn->lock);
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_BIND_CONN:
+ session = iscsi_session_lookup(ev->u.b_conn.sid);
+ if (!session) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (!ev->r.retcode)
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
+
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+ conn->ep = ep;
+ iscsi_put_endpoint(ep);
+ } else {
+ err = -ENOTCONN;
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn binding\n");
+ }
+ break;
+ case ISCSI_UEVENT_START_CONN:
+ ev->r.retcode = transport->start_conn(conn);
+ if (!ev->r.retcode)
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+
+ if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->send_pdu(conn,
+ (struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
+ (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+ ev->u.send_pdu.data_size);
+ break;
+ default:
+ err = -ENOSYS;
+ }
+
+ mutex_unlock(&conn->ep_mutex);
+ return err;
+}
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
@@ -3515,9 +3865,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
- struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
+ if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ return -EPERM;
+
if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
*group = ISCSI_NL_GRP_UIP;
else
@@ -3553,6 +3905,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3563,76 +3916,40 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
else
transport->destroy_session(session);
break;
- case ISCSI_UEVENT_UNBIND_SESSION:
+ case ISCSI_UEVENT_DESTROY_SESSION_ASYNC:
session = iscsi_session_lookup(ev->u.d_session.sid);
- if (session)
- scsi_queue_work(iscsi_session_to_shost(session),
- &session->unbind_work);
- else
+ if (!session)
err = -EINVAL;
- break;
- case ISCSI_UEVENT_CREATE_CONN:
- err = iscsi_if_create_conn(transport, ev);
- break;
- case ISCSI_UEVENT_DESTROY_CONN:
- err = iscsi_if_destroy_conn(transport, ev);
- break;
- case ISCSI_UEVENT_BIND_CONN:
- session = iscsi_session_lookup(ev->u.b_conn.sid);
- conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else {
+ unsigned long flags;
- if (conn && conn->ep)
- iscsi_if_ep_disconnect(transport, conn->ep->id);
+ /* Prevent this session from being found again */
+ spin_lock_irqsave(&sesslock, flags);
+ list_del_init(&session->sess_list);
+ spin_unlock_irqrestore(&sesslock, flags);
- if (!session || !conn) {
- err = -EINVAL;
- break;
+ queue_work(system_unbound_wq, &session->destroy_work);
}
-
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- if (ev->r.retcode || !transport->ep_connect)
- break;
-
- ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
- if (ep) {
- ep->conn = conn;
-
- mutex_lock(&conn->ep_mutex);
- conn->ep = ep;
- mutex_unlock(&conn->ep_mutex);
- } else
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "Could not set ep conn "
- "binding\n");
- break;
- case ISCSI_UEVENT_SET_PARAM:
- err = iscsi_set_param(transport, ev);
break;
- case ISCSI_UEVENT_START_CONN:
- conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
- if (conn)
- ev->r.retcode = transport->start_conn(conn);
+ case ISCSI_UEVENT_UNBIND_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+ if (session)
+ queue_work(session->workq, &session->unbind_work);
else
err = -EINVAL;
break;
- case ISCSI_UEVENT_STOP_CONN:
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (conn)
- transport->stop_conn(conn, ev->u.stop_conn.flag);
- else
- err = -EINVAL;
+ case ISCSI_UEVENT_SET_PARAM:
+ err = iscsi_set_param(transport, ev);
break;
+ case ISCSI_UEVENT_CREATE_CONN:
+ case ISCSI_UEVENT_DESTROY_CONN:
+ case ISCSI_UEVENT_STOP_CONN:
+ case ISCSI_UEVENT_START_CONN:
+ case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn)
- ev->r.retcode = transport->send_pdu(conn,
- (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
- (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
- ev->u.send_pdu.data_size);
- else
- err = -EINVAL;
+ err = iscsi_if_transport_conn(transport, nlh);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -3810,6 +4127,28 @@ iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
+static const char *const connection_state_names[] = {
+ [ISCSI_CONN_UP] = "up",
+ [ISCSI_CONN_DOWN] = "down",
+ [ISCSI_CONN_FAILED] = "failed",
+ [ISCSI_CONN_BOUND] = "bound"
+};
+
+static ssize_t show_conn_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
+ const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
+
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
+
+ return sysfs_emit(buf, "%s\n", state);
+}
+static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state,
+ NULL);
#define iscsi_conn_ep_attr_show(param) \
static ssize_t show_conn_ep_param_##param(struct device *dev, \
@@ -3879,6 +4218,7 @@ static struct attribute *iscsi_conn_attrs[] = {
&dev_attr_conn_tcp_xmit_wsf.attr,
&dev_attr_conn_tcp_recv_wsf.attr,
&dev_attr_conn_local_ipaddr.attr,
+ &dev_attr_conn_state.attr,
NULL,
};
@@ -3950,6 +4290,8 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_TCP_RECV_WSF;
else if (attr == &dev_attr_conn_local_ipaddr.attr)
param = ISCSI_PARAM_LOCAL_IPADDR;
+ else if (attr == &dev_attr_conn_state.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid conn attr");
return 0;
@@ -4031,7 +4373,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state));
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
NULL);
@@ -4040,7 +4382,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
- return sprintf(buf, "%d\n", session->creator);
+ return sysfs_emit(buf, "%d\n", session->creator);
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
NULL);
@@ -4049,7 +4391,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
- return sprintf(buf, "%d\n", session->target_id);
+ return sysfs_emit(buf, "%d\n", session->target_id);
}
static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
show_priv_session_target_id, NULL);
@@ -4062,8 +4404,8 @@ show_priv_session_##field(struct device *dev, \
struct iscsi_cls_session *session = \
iscsi_dev_to_session(dev->parent); \
if (session->field == -1) \
- return sprintf(buf, "off\n"); \
- return sprintf(buf, format"\n", session->field); \
+ return sysfs_emit(buf, "off\n"); \
+ return sysfs_emit(buf, format"\n", session->field); \
}
#define iscsi_priv_session_attr_store(field) \
@@ -4456,6 +4798,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
int err;
BUG_ON(!tt);
+ WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
priv = iscsi_if_transport_lookup(tt);
if (priv)
@@ -4467,7 +4810,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
INIT_LIST_HEAD(&priv->list);
priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan;
- priv->t.create_work_queue = 1;
priv->dev.class = &iscsi_transport_class;
dev_set_name(&priv->dev, "%s", tt->name);
@@ -4514,7 +4856,7 @@ free_priv:
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
-int iscsi_unregister_transport(struct iscsi_transport *tt)
+void iscsi_unregister_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
@@ -4537,8 +4879,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
device_unregister(&priv->dev);
mutex_unlock(&rx_queue_mutex);
-
- return 0;
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
@@ -4602,8 +4942,10 @@ static __init int iscsi_transport_init(void)
goto unregister_flashnode_bus;
}
- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
- if (!iscsi_eh_timer_workq) {
+ iscsi_conn_cleanup_workq = alloc_workqueue("%s",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ "iscsi_conn_cleanup");
+ if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
goto release_nls;
}
@@ -4631,7 +4973,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
- destroy_workqueue(iscsi_eh_timer_workq);
+ destroy_workqueue(iscsi_conn_cleanup_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
transport_class_unregister(&iscsi_connection_class);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 182fd25c7c43..74b99f2b0b74 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -34,7 +34,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -154,6 +153,7 @@ static struct {
{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+ { SAS_LINK_RATE_22_5_GBPS, "22.5 Gbit" },
};
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
@@ -225,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct device *dma_dev = shost->dma_dev;
INIT_LIST_HEAD(&sas_host->rphy_list);
mutex_init(&sas_host->lock);
@@ -236,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
		dev_printk(KERN_ERR, dev, "failed to add a bsg device %d\n",
shost->host_no);
+ if (dma_dev->dma_mask) {
+ shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
return 0;
}
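
To make the clamp concrete, a worked example with hypothetical values:

/* Worked example, hypothetical numbers only: if dma_opt_mapping_size()
 * reported 128 KiB, then with SECTOR_SHIFT == 9: */
unsigned int max_sectors = 2048;	/* assumed shost->max_sectors */
size_t opt_map = 128 * 1024;		/* assumed dma_opt_mapping_size() */
unsigned int opt = min_t(unsigned int, max_sectors, opt_map >> 9);
/* opt == 256 sectors (128 KiB): the queue's soft limit now favors
 * I/O sizes that map efficiently through the IOMMU */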
@@ -563,7 +569,7 @@ show_sas_phy_enable(struct device *dev, struct device_attribute *attr,
{
struct sas_phy *phy = transport_class_to_phy(dev);
- return snprintf(buf, 20, "%d", phy->enabled);
+ return snprintf(buf, 20, "%d\n", phy->enabled);
}
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable,
@@ -716,12 +722,17 @@ int sas_phy_add(struct sas_phy *phy)
int error;
error = device_add(&phy->dev);
- if (!error) {
- transport_add_device(&phy->dev);
- transport_configure_device(&phy->dev);
+ if (error)
+ return error;
+
+ error = transport_add_device(&phy->dev);
+ if (error) {
+ device_del(&phy->dev);
+ return error;
}
+ transport_configure_device(&phy->dev);
- return error;
+ return 0;
}
EXPORT_SYMBOL(sas_phy_add);
@@ -1229,16 +1240,15 @@ int sas_read_port_mode_page(struct scsi_device *sdev)
char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
struct scsi_mode_data mode_data;
- int res, error;
+ int error;
if (!buffer)
return -ENOMEM;
- res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
- &mode_data, NULL);
+ error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
+ &mode_data, NULL);
- error = -EINVAL;
- if (!scsi_status_is_good(res))
+ if (error)
goto out;
msdata = buffer + mode_data.header_length +
@@ -1526,7 +1536,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
list_add_tail(&rphy->list, &sas_host->rphy_list);
if (identify->device_type == SAS_END_DEVICE &&
(identify->target_port_protocols &
- (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
+ (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA)))
rphy->scsi_target_id = sas_host->next_target_id++;
else if (identify->device_type == SAS_END_DEVICE)
rphy->scsi_target_id = -1;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f8661062ef95..f569cf0095c2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -117,13 +117,17 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
+ /*
+ * The purpose of the RQF_PM flag below is to bypass the
+ * SDEV_QUIESCE state.
+ */
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- 0, NULL);
- if (driver_byte(result) != DRIVER_SENSE ||
+ RQF_PM, NULL);
+ if (result < 0 || !scsi_sense_valid(sshdr) ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
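
The new exit condition leans on the convention that scsi_execute() returns a negative errno when submission fails and a non-negative SCSI status otherwise, so the sense header is consulted only when the result is non-negative and scsi_sense_valid() confirms it. A small standalone model of that three-way classification; the status constants mirror SAM, the rest is invented:

/* Model of the result convention assumed by the new spi_execute() check. */
#include <stdio.h>

#define SAM_STAT_GOOD                   0x00
#define SAM_STAT_CHECK_CONDITION        0x02

static const char *classify(int result, int sense_valid)
{
        if (result < 0)
                return "submission error (negative errno)";
        if (result == SAM_STAT_CHECK_CONDITION && sense_valid)
                return "check condition with valid sense: inspect sense key";
        if (result == SAM_STAT_GOOD)
                return "good status";
        return "other SCSI status";
}

int main(void)
{
        printf("%s\n", classify(-5, 0));        /* e.g. -EIO */
        printf("%s\n", classify(SAM_STAT_GOOD, 0));
        printf("%s\n", classify(SAM_STAT_CHECK_CONDITION, 1));
        return 0;
}
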
@@ -339,7 +343,7 @@ store_spi_transport_##field(struct device *dev, \
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
- if (i->f->set_##field) \
+ if (!i->f->set_##field) \
return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \
@@ -994,8 +998,9 @@ void
spi_dv_device(struct scsi_device *sdev)
{
struct scsi_target *starget = sdev->sdev_target;
- u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ unsigned int sleep_flags;
+ u8 *buffer;
/*
* Because this function and the power management code both call
@@ -1003,25 +1008,28 @@ spi_dv_device(struct scsi_device *sdev)
* while suspend or resume is in progress. Hence the
* lock/unlock_system_sleep() calls.
*/
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
+
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
+ goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
+ goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
+ goto free_buffer;
scsi_target_quiesce(starget);
@@ -1041,13 +1049,17 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
- out_free:
+free_buffer:
kfree(buffer);
- out_put:
+
+put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
-unlock:
- unlock_system_sleep();
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL(spi_dv_device);
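
The other change in this hunk is the updated system-sleep lock API: lock_system_sleep() now returns the caller's saved flags, which must be handed back to unlock_system_sleep(), and the cleanup labels are renamed after the resource they release so the unwind reads top to bottom. A condensed sketch of the lock pairing, assuming kernel context (not compilable standalone):

/* Sketch of the flag-passing sleep-lock pairing used above (kernel
 * context assumed; the work in the middle is a placeholder). */
unsigned int sleep_flags;

sleep_flags = lock_system_sleep();      /* saves and returns process flags */
/* ... domain validation that must not race suspend/resume ... */
unlock_system_sleep(sleep_flags);       /* restores the saved state */
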
@@ -1219,7 +1231,7 @@ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
{
if (cmd->flags & SCMD_TAGGED) {
*msg++ = SIMPLE_QUEUE_TAG;
- *msg++ = cmd->request->tag;
+ *msg++ = scsi_cmd_to_rq(cmd)->tag;
return 2;
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index d4d1104fac99..98a34ed10f1a 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -395,6 +395,10 @@ static void srp_reconnect_work(struct work_struct *work)
}
}
+/*
+ * scsi_target_block() must have been called before this function is
+ * called to guarantee that no .queuecommand() calls are in progress.
+ */
static void __rport_fail_io_fast(struct srp_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
@@ -404,11 +408,7 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
return;
- /*
- * Call scsi_target_block() to wait for ongoing shost->queuecommand()
- * calls before invoking i->f->terminate_rport_io().
- */
- scsi_target_block(rport->dev.parent);
+
scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
/* Involve the LLD if possible to terminate all I/O on the rport. */
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
+ /*
+ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+ * later is ok though, scsi_internal_device_unblock_nowait()
+ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
@@ -570,8 +577,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
* failure timers if these had not yet been started.
*/
__rport_fail_io_fast(rport);
- scsi_target_unblock(&shost->shost_gendev,
- SDEV_TRANSPORT_OFFLINE);
__srp_start_tl_fail_timers(rport);
} else if (rport->state != SRP_RPORT_BLOCKED) {
scsi_target_unblock(&shost->shost_gendev,
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index e969138051c7..e2c7d8ef205f 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -14,17 +14,14 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <linux/msdos_partition.h>
#include <asm/unaligned.h>
#include <scsi/scsicam.h>
-
-static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds,
- unsigned int *secs);
-
/**
* scsi_bios_ptable - Read PC partition table out of first sector of device.
* @dev: from this device
@@ -35,105 +32,47 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
*/
unsigned char *scsi_bios_ptable(struct block_device *dev)
{
- unsigned char *res = kmalloc(66, GFP_KERNEL);
- if (res) {
- struct block_device *bdev = dev->bd_contains;
- Sector sect;
- void *data = read_dev_sector(bdev, 0, &sect);
- if (data) {
- memcpy(res, data + 0x1be, 66);
- put_dev_sector(sect);
- } else {
- kfree(res);
- res = NULL;
- }
- }
- return res;
-}
-EXPORT_SYMBOL(scsi_bios_ptable);
-
-/**
- * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
- * @bdev: which device
- * @capacity: size of the disk in sectors
- * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
- *
- * Description : determine the BIOS mapping/geometry used for a drive in a
- * SCSI-CAM system, storing the results in ip as required
- * by the HDIO_GETGEO ioctl().
- *
- * Returns : -1 on failure, 0 on success.
- */
-
-int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
-{
- unsigned char *p;
- u64 capacity64 = capacity; /* Suppress gcc warning */
- int ret;
-
- p = scsi_bios_ptable(bdev);
- if (!p)
- return -1;
-
- /* try to infer mapping from partition table */
- ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2,
- (unsigned int *)ip + 0, (unsigned int *)ip + 1);
- kfree(p);
-
- if (ret == -1 && capacity64 < (1ULL << 32)) {
- /* pick some standard mapping with at most 1024 cylinders,
- and at most 62 sectors per track - this works up to
- 7905 MB */
- ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2,
- (unsigned int *)ip + 0, (unsigned int *)ip + 1);
- }
-
- /* if something went wrong, then apparently we have to return
- a geometry with more than 1024 cylinders */
- if (ret || ip[0] > 255 || ip[1] > 63) {
- if ((capacity >> 11) > 65534) {
- ip[0] = 255;
- ip[1] = 63;
- } else {
- ip[0] = 64;
- ip[1] = 32;
- }
+ struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
+ unsigned char *res = NULL;
+ struct folio *folio;
- if (capacity > 65535*63*255)
- ip[2] = 65535;
- else
- ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
- }
+ folio = read_mapping_folio(mapping, 0, NULL);
+ if (IS_ERR(folio))
+ return NULL;
- return 0;
+ res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL);
+ folio_put(folio);
+ return res;
}
-EXPORT_SYMBOL(scsicam_bios_param);
+EXPORT_SYMBOL(scsi_bios_ptable);
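
For reference, the 66 bytes copied from offset 0x1be are the four 16-byte MBR partition entries plus the two-byte boot signature, which is why scsi_partsize() later reads a 16-bit value at offset 64 of the copy and compares it with 0xAA55 (an endian-naive test that matches on little-endian hosts). A standalone sketch of that layout over a synthetic sector; the sample values are invented:

/* Layout of the 66-byte copy made by scsi_bios_ptable(); synthetic data. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        unsigned char sector[512] = {0};
        unsigned char ptable[66];

        sector[0x1be + 4] = 0x83;       /* entry 0: sys_ind (Linux, assumed) */
        sector[510] = 0x55;             /* boot signature, byte 0 */
        sector[511] = 0xAA;             /* boot signature, byte 1 */

        memcpy(ptable, sector + 0x1be, 66);     /* 4 x 16-byte entries + 2 */

        /* Same unaligned 16-bit read the kernel performs; little-endian
         * hosts see 0xAA55 here. */
        uint16_t sig;
        memcpy(&sig, ptable + 64, 2);
        printf("signature 0x%04X, entry0 sys_ind 0x%02X\n", sig, ptable[4]);
        return 0;
}
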
/**
* scsi_partsize - Parse cylinders/heads/sectors from PC partition table
- * @buf: partition table, see scsi_bios_ptable()
+ * @bdev: block device to parse
* @capacity: size of the disk in sectors
- * @cyls: put cylinders here
- * @hds: put heads here
- * @secs: put sectors here
+ * @geom: output in form of [hds, cylinders, sectors]
*
* Determine the BIOS mapping/geometry used to create the partition
- * table, storing the results in @cyls, @hds, and @secs
+ * table, storing the results in @geom.
*
- * Returns: -1 on failure, 0 on success.
+ * Returns: %false on failure, %true on success.
*/
-
-int scsi_partsize(unsigned char *buf, unsigned long capacity,
- unsigned int *cyls, unsigned int *hds, unsigned int *secs)
+bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3])
{
- struct partition *p = (struct partition *)buf, *largest = NULL;
- int i, largest_cyl;
int cyl, ext_cyl, end_head, end_cyl, end_sector;
unsigned int logical_end, physical_end, ext_physical_end;
+ struct msdos_partition *p, *largest = NULL;
+ void *buf;
+ int ret = false;
+ buf = scsi_bios_ptable(bdev);
+ if (!buf)
+ return false;
if (*(unsigned short *) (buf + 64) == 0xAA55) {
- for (largest_cyl = -1, i = 0; i < 4; ++i, ++p) {
+ int largest_cyl = -1, i;
+
+ for (i = 0, p = buf; i < 4; i++, p++) {
if (!p->sys_ind)
continue;
#ifdef DEBUG
@@ -153,7 +92,7 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity,
end_sector = largest->end_sector & 0x3f;
if (end_head + 1 == 0 || end_sector == 0)
- return -1;
+ goto out_free_buf;
#ifdef DEBUG
printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n",
@@ -178,19 +117,24 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity,
,logical_end, physical_end, ext_physical_end, ext_cyl);
#endif
- if ((logical_end == physical_end) ||
- (end_cyl == 1023 && ext_physical_end == logical_end)) {
- *secs = end_sector;
- *hds = end_head + 1;
- *cyls = capacity / ((end_head + 1) * end_sector);
- return 0;
+ if (logical_end == physical_end ||
+ (end_cyl == 1023 && ext_physical_end == logical_end)) {
+ geom[0] = end_head + 1;
+ geom[1] = end_sector;
+ geom[2] = (unsigned long)capacity /
+ ((end_head + 1) * end_sector);
+ ret = true;
+ goto out_free_buf;
}
#ifdef DEBUG
printk("scsicam_bios_param : logical (%u) != physical (%u)\n",
logical_end, physical_end);
#endif
}
- return -1;
+
+out_free_buf:
+ kfree(buf);
+ return ret;
}
EXPORT_SYMBOL(scsi_partsize);
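
The masking in this function decodes the packed CHS end fields: the low six bits of the sector byte are the sector number, and its top two bits extend the eight-bit cylinder byte to ten bits, which is what the (end_sector & 0xc0) << 2 term recovers. A worked decode with invented bytes:

/* Decoding a packed CHS sector/cylinder byte pair; values are invented. */
#include <stdio.h>

int main(void)
{
        unsigned char end_sector_byte = 0xEF;   /* top 2 bits set, sector 47 */
        unsigned char end_cyl_byte = 0xFF;      /* low 8 cylinder bits */

        unsigned int sector = end_sector_byte & 0x3f;           /* 47 */
        unsigned int cyl = end_cyl_byte +
                           ((end_sector_byte & 0xc0) << 2);     /* 1023 */

        printf("sector %u, cylinder %u\n", sector, cyl);
        return 0;
}
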
@@ -258,3 +202,56 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
*hds = (unsigned int) heads;
return (rv);
}
+
+/**
+ * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
+ * @bdev: which device
+ * @capacity: size of the disk in sectors
+ * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
+ *
+ * Description : determine the BIOS mapping/geometry used for a drive in a
+ * SCSI-CAM system, storing the results in ip as required
+ * by the HDIO_GETGEO ioctl().
+ *
+ * Returns : -1 on failure, 0 on success.
+ */
+int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
+{
+ u64 capacity64 = capacity; /* Suppress gcc warning */
+ int ret = 0;
+
+ /* try to infer mapping from partition table */
+ if (scsi_partsize(bdev, capacity, ip))
+ return 0;
+
+ if (capacity64 < (1ULL << 32)) {
+ /*
+ * Pick some standard mapping with at most 1024 cylinders, and
+ * at most 62 sectors per track - this works up to 7905 MB.
+ */
+ ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2,
+ (unsigned int *)ip + 0, (unsigned int *)ip + 1);
+ }
+
+ /*
+ * If something went wrong, then apparently we have to return a geometry
+ * with more than 1024 cylinders.
+ */
+ if (ret || ip[0] > 255 || ip[1] > 63) {
+ if ((capacity >> 11) > 65534) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+
+ if (capacity > 65535*63*255)
+ ip[2] = 65535;
+ else
+ ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(scsicam_bios_param);
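
Two thresholds in the fallback are worth spelling out: capacity >> 11 converts 512-byte sectors into MiB (2048 sectors per MiB), so the 255-head/63-sector geometry kicks in just under the 64 GiB mark, and 65535*63*255 sectors, roughly 502 GiB, is the largest capacity the capped cylinder count can express. A standalone check of both numbers:

/* The two fallback thresholds in scsicam_bios_param(), worked out. */
#include <stdio.h>

int main(void)
{
        unsigned long long mib_limit = 65534ULL << 11;  /* sectors */
        unsigned long long chs_limit = 65535ULL * 63 * 255;

        /* capacity >> 11 is capacity in MiB: 2048 x 512-byte sectors */
        printf("255/63 geometry above %llu sectors (~%llu GiB)\n",
               mib_limit, (mib_limit << 9) >> 30);
        printf("cylinders cap at 65535 above %llu sectors (~%llu GiB)\n",
               chs_limit, (chs_limit << 9) >> 30);
        return 0;
}
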
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8ca9299ffd36..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -48,9 +47,9 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
-#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
@@ -98,11 +97,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS 16
-#else
-#define SD_MINORS 0
-#endif
static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
@@ -113,7 +108,8 @@ static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
-static int sd_resume(struct device *);
+static int sd_resume_system(struct device *);
+static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
@@ -125,14 +121,9 @@ static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sd_ref_mutex);
-
static struct kmem_cache *sd_cdb_cache;
-static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
+static struct lock_class_key sd_bio_compl_lkclass;
static const char *sd_cache_types[] = {
"write through", "none", "write back",
@@ -194,7 +185,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
}
if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
- SD_MAX_RETRIES, &data, NULL))
+ sdkp->max_retries, &data, NULL))
return -EINVAL;
len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
data.block_descriptor_length);
@@ -211,13 +202,13 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
*/
data.device_specific = 0;
- if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
- SD_MAX_RETRIES, &data, &sshdr)) {
+ if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
+ sdkp->max_retries, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
}
- revalidate_disk(sdkp->disk);
+ sd_revalidate_disk(sdkp->disk);
return count;
}
@@ -528,6 +519,54 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(max_write_same_blocks);
+static ssize_t
+zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ if (sdkp->device->type == TYPE_ZBC)
+ return sprintf(buf, "host-managed\n");
+ if (sdkp->zoned == 1)
+ return sprintf(buf, "host-aware\n");
+ if (sdkp->zoned == 2)
+ return sprintf(buf, "drive-managed\n");
+ return sprintf(buf, "none\n");
+}
+static DEVICE_ATTR_RO(zoned_cap);
+
+static ssize_t
+max_retries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdev = sdkp->device;
+ int retries, err;
+
+ err = kstrtoint(buf, 10, &retries);
+ if (err)
+ return err;
+
+ if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
+ sdkp->max_retries = retries;
+ return count;
+ }
+
+ sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
+ SD_MAX_RETRIES);
+ return -EINVAL;
+}
+
+static ssize_t
+max_retries_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return sprintf(buf, "%d\n", sdkp->max_retries);
+}
+
+static DEVICE_ATTR_RW(max_retries);
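
The store handler accepts the SCSI_CMD_RETRIES_NO_LIMIT sentinel (-1, retry without bound) or a count up to SD_MAX_RETRIES; the printed message suggests the intended domain is -1 through SD_MAX_RETRIES, so the standalone mirror below checks the lower bound explicitly. SD_MAX_RETRIES is assumed to be 5 here, matching sd.h around this change:

/* Userspace mirror of the max_retries validation; constants assumed. */
#include <stdio.h>

#define SCSI_CMD_RETRIES_NO_LIMIT       (-1)
#define SD_MAX_RETRIES                  5

static int validate(int retries)
{
        if (retries == SCSI_CMD_RETRIES_NO_LIMIT ||
            (retries >= 0 && retries <= SD_MAX_RETRIES))
                return 0;
        return -1;
}

int main(void)
{
        int samples[] = { -1, 0, 5, 6, -2 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%2d -> %s\n", samples[i],
                       validate(samples[i]) ? "rejected" : "accepted");
        return 0;
}
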
+
static struct attribute *sd_disk_attrs[] = {
&dev_attr_cache_type.attr,
&dev_attr_FUA.attr,
@@ -541,6 +580,8 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_zeroing_mode.attr,
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
+ &dev_attr_zoned_cap.attr,
+ &dev_attr_max_retries.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
@@ -554,11 +595,11 @@ static struct class sd_disk_class = {
static const struct dev_pm_ops sd_pm_ops = {
.suspend = sd_suspend_system,
- .resume = sd_resume,
+ .resume = sd_resume_system,
.poweroff = sd_suspend_system,
- .restore = sd_resume,
+ .restore = sd_resume_system,
.runtime_suspend = sd_suspend_runtime,
- .runtime_resume = sd_resume,
+ .runtime_resume = sd_resume_runtime,
};
static struct scsi_driver sd_template = {
@@ -580,13 +621,11 @@ static struct scsi_driver sd_template = {
};
/*
- * Dummy kobj_map->probe function.
- * The default ->probe function will call modprobe, which is
- * pointless as this module is already loaded.
+ * Don't request a new module, as that could deadlock in multipath
+ * environment.
*/
-static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
+static void sd_default_probe(dev_t devt)
{
- return NULL;
}
/*
@@ -618,38 +657,12 @@ static int sd_major(int major_idx)
}
}
-static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
-{
- struct scsi_disk *sdkp = NULL;
-
- mutex_lock(&sd_ref_mutex);
-
- if (disk->private_data) {
- sdkp = scsi_disk(disk);
- if (scsi_device_get(sdkp->device) == 0)
- get_device(&sdkp->dev);
- else
- sdkp = NULL;
- }
- mutex_unlock(&sd_ref_mutex);
- return sdkp;
-}
-
-static void scsi_disk_put(struct scsi_disk *sdkp)
-{
- struct scsi_device *sdev = sdkp->device;
-
- mutex_lock(&sd_ref_mutex);
- put_device(&sdkp->dev);
- scsi_device_put(sdev);
- mutex_unlock(&sd_ref_mutex);
-}
-
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send)
{
- struct scsi_device *sdev = data;
+ struct scsi_disk *sdkp = data;
+ struct scsi_device *sdev = sdkp->device;
u8 cdb[12] = { 0, };
int ret;
@@ -658,9 +671,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
put_unaligned_be16(spsp, &cdb[2]);
put_unaligned_be32(len, &cdb[6]);
- ret = scsi_execute_req(sdev, cdb,
- send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
- buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
+ RQF_PM, NULL);
return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
@@ -730,8 +743,9 @@ static unsigned int sd_prot_flag_mask(unsigned int prot_op)
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
- struct bio *bio = scmd->request->bio;
- unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ struct bio *bio = rq->bio;
+ unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
unsigned int protect = 0;
if (dix) { /* DIX Type 0, 1, 2, 3 */
@@ -783,7 +797,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
case SD_LBP_FULL:
case SD_LBP_DISABLE:
blk_queue_max_discard_sectors(q, 0);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
return;
case SD_LBP_UNMAP:
@@ -816,13 +829,13 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
}
blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
@@ -840,24 +853,25 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
cmd->cmnd[0] = UNMAP;
cmd->cmnd[8] = 24;
- buf = page_address(rq->special_vec.bv_page);
+ buf = bvec_virt(&rq->special_vec);
put_unaligned_be16(6 + 16, &buf[0]);
put_unaligned_be16(16, &buf[2]);
put_unaligned_be64(lba, &buf[8]);
put_unaligned_be32(nr_blocks, &buf[16]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = SD_TIMEOUT;
- return scsi_init_io(cmd);
+ return scsi_alloc_sgtables(cmd);
}
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -877,18 +891,19 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
- return scsi_init_io(cmd);
+ return scsi_alloc_sgtables(cmd);
}
static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
struct scsi_device *sdp = cmd->device;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -908,18 +923,18 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
- return scsi_init_io(cmd);
+ return scsi_alloc_sgtables(cmd);
}
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -932,8 +947,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
}
}
- if (sdp->no_write_same)
+ if (sdp->no_write_same) {
+ rq->rq_flags |= RQF_QUIET;
return BLK_STS_TARGET;
+ }
if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
@@ -982,13 +999,13 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
* Reporting a maximum number of blocks that is not aligned
* on the device physical size would cause a large write same
* request to be split into physically unaligned chunks by
- * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
- * even if the caller of these functions took care to align the
- * large request. So make sure the maximum reported is aligned
- * to the device physical block size. This is only an optional
- * optimization for regular disks, but this is mandatory to
- * avoid failure of large write same requests directed at
- * sequential write required zones of host-managed ZBC disks.
+ * __blkdev_issue_write_zeroes() even if the caller of this
+ * function took care to align the large request. So make sure
+ * the maximum reported is aligned to the device physical block
+ * size. This is only an optional optimization for regular
+ * disks, but this is mandatory to avoid failure of large write
+ * same requests directed at sequential write required zones of
+ * host-managed ZBC disks.
*/
sdkp->max_ws_blocks =
round_down(sdkp->max_ws_blocks,
@@ -997,71 +1014,14 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
}
out:
- blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
- (logical_block_size >> 9));
blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
}
-/**
- * sd_setup_write_same_cmnd - write the same data to multiple blocks
- * @cmd: command to prepare
- *
- * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
- * the preference indicated by the target device.
- **/
-static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct bio *bio = rq->bio;
- u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
- u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
- blk_status_t ret;
-
- if (sdkp->device->no_write_same)
- return BLK_STS_TARGET;
-
- BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
-
- rq->timeout = SD_WRITE_SAME_TIMEOUT;
-
- if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
- cmd->cmd_len = 16;
- cmd->cmnd[0] = WRITE_SAME_16;
- put_unaligned_be64(lba, &cmd->cmnd[2]);
- put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
- } else {
- cmd->cmd_len = 10;
- cmd->cmnd[0] = WRITE_SAME;
- put_unaligned_be32(lba, &cmd->cmnd[2]);
- put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
- }
-
- cmd->transfersize = sdp->sector_size;
- cmd->allowed = SD_MAX_RETRIES;
-
- /*
- * For WRITE SAME the data transferred via the DATA OUT buffer is
- * different from the amount of data actually written to the target.
- *
- * We set up __data_len to the amount of data transferred via the
- * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
- * to transfer a single sector of data first, but then reset it to
- * the amount of data to be written right after so that the I/O path
- * knows how much to actually write.
- */
- rq->__data_len = sdp->sector_size;
- ret = scsi_init_io(cmd);
- rq->__data_len = blk_rq_bytes(rq);
-
- return ret;
-}
-
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
/* flush requests don't perform I/O, zero the S/G table */
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
@@ -1069,7 +1029,7 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
cmd->cmnd[0] = SYNCHRONIZE_CACHE;
cmd->cmd_len = 10;
cmd->transfersize = 0;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
return BLK_STS_OK;
@@ -1079,13 +1039,7 @@ static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags)
{
- cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
- if (unlikely(cmd->cmnd == NULL))
- return BLK_STS_RESOURCE;
-
cmd->cmd_len = SD_EXT_CDB_SIZE;
- memset(cmd->cmnd, 0, cmd->cmd_len);
-
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
@@ -1158,9 +1112,9 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1171,23 +1125,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
unsigned int dif;
bool dix;
- ret = scsi_init_io(cmd);
+ ret = scsi_alloc_sgtables(cmd);
if (ret != BLK_STS_OK)
return ret;
+ ret = BLK_STS_IOERR;
if (!scsi_device_online(sdp) || sdp->changed) {
scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
- return BLK_STS_IOERR;
+ goto fail;
}
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
- return BLK_STS_IOERR;
+ goto fail;
}
if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
- return BLK_STS_IOERR;
+ goto fail;
}
/*
@@ -1206,6 +1161,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
}
}
+ if (req_op(rq) == REQ_OP_ZONE_APPEND) {
+ ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
+ if (ret)
+ goto fail;
+ }
+
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
dix = scsi_prot_sg_count(cmd);
dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
@@ -1231,7 +1192,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
}
if (unlikely(ret != BLK_STS_OK))
- return ret;
+ goto fail;
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -1240,7 +1201,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
*/
cmd->transfersize = sdp->sector_size;
cmd->underflow = nr_blocks << 9;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->sdb.length = nr_blocks * sdp->sector_size;
SCSI_LOG_HLQUEUE(1,
@@ -1255,19 +1216,21 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
blk_rq_sectors(rq)));
/*
- * This indicates that the command is ready from our end to be
- * queued.
+ * This indicates that the command is ready from our end to be queued.
*/
return BLK_STS_OK;
+fail:
+ scsi_free_sgtables(cmd);
+ return ret;
}
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
switch (req_op(rq)) {
case REQ_OP_DISCARD:
- switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
+ switch (scsi_disk(rq->q->disk)->provisioning_mode) {
case SD_LBP_UNMAP:
return sd_setup_unmap_cmnd(cmd);
case SD_LBP_WS16:
@@ -1281,12 +1244,11 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
}
case REQ_OP_WRITE_ZEROES:
return sd_setup_write_zeroes_cmnd(cmd);
- case REQ_OP_WRITE_SAME:
- return sd_setup_write_same_cmnd(cmd);
case REQ_OP_FLUSH:
return sd_setup_flush_cmnd(cmd);
case REQ_OP_READ:
case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
@@ -1308,18 +1270,26 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
- struct request *rq = SCpnt->request;
- u8 *cmnd;
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
mempool_free(rq->special_vec.bv_page, sd_page_pool);
+}
- if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- cmnd = SCpnt->cmnd;
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- mempool_free(cmnd, sd_cdb_pool);
+static bool sd_need_revalidate(struct block_device *bdev,
+ struct scsi_disk *sdkp)
+{
+ if (sdkp->device->removable || sdkp->write_prot) {
+ if (bdev_check_media_change(bdev))
+ return true;
}
+
+ /*
+ * Force a full rescan after ioctl(BLKRRPART). While the disk state has
+ * nothing to do with partitions, BLKRRPART is used to force a full
+ * revalidate after things like a format for historical reasons.
+ */
+ return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}
/**
@@ -1335,21 +1305,19 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
*
- * Locking: called with bdev->bd_mutex held.
+ * Locking: called with bdev->bd_disk->open_mutex held.
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
- struct scsi_device *sdev;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
int retval;
- if (!sdkp)
+ if (scsi_device_get(sdev))
return -ENXIO;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
- sdev = sdkp->device;
-
/*
* If the device is in error recovery, wait until it is done.
* If the device is offline, then disallow any access to it.
@@ -1358,8 +1326,8 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
if (!scsi_block_when_processing_errors(sdev))
goto error_out;
- if (sdev->removable || sdkp->write_prot)
- check_disk_change(bdev);
+ if (sd_need_revalidate(bdev, sdkp))
+ sd_revalidate_disk(bdev->bd_disk);
/*
* If the drive is empty, just let the open fail.
@@ -1394,7 +1362,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
return 0;
error_out:
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
return retval;
}
@@ -1409,7 +1377,7 @@ error_out:
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
*
- * Locking: called with bdev->bd_mutex held.
+ * Locking: called with bdev->bd_disk->open_mutex held.
**/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
@@ -1423,7 +1391,7 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
}
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1465,20 +1433,20 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forward onto the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, void __user *p)
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
+ void __user *p = (void __user *)arg;
int error;
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
"cmd=0x%x\n", disk->disk_name, cmd));
- error = scsi_verify_blk_ioctl(bdev, cmd);
- if (error < 0)
- return error;
+ if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+ return -ENOIOCTLCMD;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -1489,27 +1457,11 @@ static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
(mode & FMODE_NDELAY) != 0);
if (error)
- goto out;
+ return error;
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
-
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to block level and then onto mid level if they can't be
- * resolved.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- error = scsi_ioctl(sdp, cmd, p);
- break;
- default:
- error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
- break;
- }
-out:
- return error;
+ return scsi_ioctl(sdp, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1553,9 +1505,10 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_disk *sdkp = disk->private_data;
struct scsi_device *sdp;
int retval;
+ bool disk_changed;
if (!sdkp)
return 0;
@@ -1586,11 +1539,11 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
if (scsi_block_when_processing_errors(sdp)) {
struct scsi_sense_hdr sshdr = { 0, };
- retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
+ retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
&sshdr);
/* failed to execute TUR, assume media not present */
- if (host_byte(retval)) {
+ if (retval < 0 || host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
@@ -1613,10 +1566,9 @@ out:
* Medium present state has changed in either direction.
* Device has indicated UNIT_ATTENTION.
*/
- retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ disk_changed = sdp->changed;
sdp->changed = 0;
- scsi_disk_put(sdkp);
- return retval;
+ return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
@@ -1643,7 +1595,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
* flush everything.
*/
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
- timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
+ timeout, sdkp->max_retries, 0, RQF_PM, NULL);
if (res == 0)
break;
}
@@ -1651,16 +1603,20 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
- if (driver_byte(res) == DRIVER_SENSE)
+ if (res < 0)
+ return res;
+
+ if (scsi_status_is_check_condition(res) &&
+ scsi_sense_valid(sshdr)) {
sd_print_sense_hdr(sdkp, sshdr);
- /* we need to evaluate the error return */
- if (scsi_sense_valid(sshdr) &&
- (sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20 || /* invalid command */
- (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
+ /* we need to evaluate the error return */
+ if (sshdr->asc == 0x3a || /* medium not present */
+ sshdr->asc == 0x20 || /* invalid command */
+ (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
/* this is no error here */
return 0;
+ }
switch (host_byte(res)) {
/* ignore errors due to racing a disconnection */
@@ -1684,36 +1640,46 @@ static void sd_rescan(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- revalidate_disk(sdkp->disk);
-}
-
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- void __user *p = (void __user *)arg;
- int ret;
-
- ret = sd_ioctl_common(bdev, mode, cmd, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
+ sd_revalidate_disk(sdkp->disk);
}
-#ifdef CONFIG_COMPAT
-static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
{
- void __user *p = compat_ptr(arg);
- int ret;
-
- ret = sd_ioctl_common(bdev, mode, cmd, p);
- if (ret != -ENOTTY)
- return ret;
+ struct scsi_device *sdev = scsi_disk(disk)->device;
+ const struct scsi_vpd *vpd;
+ const unsigned char *d;
+ int ret = -ENXIO, len;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg83);
+ if (!vpd)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
+ /* we only care about designators with LU association */
+ if (((d[1] >> 4) & 0x3) != 0x00)
+ continue;
+ if ((d[1] & 0xf) != type)
+ continue;
- return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
+ /*
+ * Only exit early if a 16-byte descriptor was found. Otherwise
+ * keep looking as one with more entropy might still show up.
+ */
+ len = d[3];
+ if (len != 8 && len != 12 && len != 16)
+ continue;
+ ret = len;
+ memcpy(id, d + 4, len);
+ if (len == 16)
+ break;
+ }
+out_unlock:
+ rcu_read_unlock();
+ return ret;
}
-#endif
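
sd_get_unique_id() walks the page 83h designation descriptor list: byte 0 carries the code set, byte 1 packs the association (bits 5:4) and designator type (bits 3:0), byte 3 is the payload length, the payload starts at byte 4, and the next descriptor begins d[3] + 4 bytes later. A standalone walk over a synthetic page; the identifier bytes are invented:

/* Walking VPD page 0x83 designation descriptors; synthetic page data. */
#include <stdio.h>

int main(void)
{
        /* 4-byte page header + one 8-byte NAA designator (type 3, LU assoc) */
        unsigned char page[] = {
                0x00, 0x83, 0x00, 0x0c,         /* header, payload length 12 */
                0x01, 0x03, 0x00, 0x08,         /* binary, assoc 0, type 3 */
                0x60, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        };
        unsigned len = ((page[2] << 8) | page[3]) + 4;

        for (unsigned char *d = page + 4; d < page + len; d += d[3] + 4) {
                if (((d[1] >> 4) & 0x3) != 0)   /* LU association only */
                        continue;
                printf("type %u, %u-byte id starting 0x%02x\n",
                       d[1] & 0xf, d[3], d[4]);
        }
        return 0;
}
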
static char sd_pr_type(enum pr_type type)
{
@@ -1738,7 +1704,8 @@ static char sd_pr_type(enum pr_type type)
static int sd_pr_command(struct block_device *bdev, u8 sa,
u64 key, u64 sa_key, u8 type, u8 flags)
{
- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
int result;
u8 cmd[16] = { 0, };
@@ -1754,9 +1721,9 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
data[20] = flags;
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
- &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
- if (driver_byte(result) == DRIVER_SENSE &&
+ if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
scsi_print_sense_hdr(sdev, NULL, &sshdr);
@@ -1808,19 +1775,25 @@ static const struct pr_ops sd_pr_ops = {
.pr_clear = sd_pr_clear,
};
+static void scsi_disk_free_disk(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+
+ put_device(&sdkp->disk_dev);
+}
+
static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
.release = sd_release,
.ioctl = sd_ioctl,
.getgeo = sd_getgeo,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sd_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sd_check_events,
- .revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
+ .get_unique_id = sd_get_unique_id,
+ .free_disk = scsi_disk_free_disk,
.pr_ops = &sd_pr_ops,
};
@@ -1838,7 +1811,7 @@ static const struct block_device_operations sd_fops = {
**/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
- struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
/* New SCSI EH run, reset gate variable */
sdkp->ignore_medium_access_errors = false;
@@ -1858,7 +1831,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
**/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
- struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
struct scsi_device *sdev = scmd->device;
if (!scsi_device_online(sdev) ||
@@ -1899,7 +1872,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- struct request *req = scmd->request;
+ struct request *req = scsi_cmd_to_rq(scmd);
struct scsi_device *sdev = scmd->device;
unsigned int transferred, good_bytes;
u64 start_lba, end_lba, bad_lba;
@@ -1954,15 +1927,14 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int sector_size = SCpnt->device->sector_size;
unsigned int resid;
struct scsi_sense_hdr sshdr;
- struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
- struct request *req = SCpnt->request;
+ struct request *req = scsi_cmd_to_rq(SCpnt);
+ struct scsi_disk *sdkp = scsi_disk(req->q->disk);
int sense_valid = 0;
int sense_deferred = 0;
switch (req_op(req)) {
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
@@ -2001,7 +1973,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
}
sdkp->medium_access_timed_out = 0;
- if (driver_byte(result) != DRIVER_SENSE &&
+ if (!scsi_status_is_check_condition(result) &&
(!sense_valid || sense_deferred))
goto out;
@@ -2055,7 +2027,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
out:
if (sd_is_zoned(sdkp))
- sd_zbc_complete(SCpnt, good_bytes, &sshdr);
+ good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
"sd_done: completed %d of %d bytes\n",
@@ -2085,31 +2057,36 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries = 0;
do {
+ bool media_was_present = sdkp->media_present;
+
cmd[0] = TEST_UNIT_READY;
memset((void *) &cmd[1], 0, 9);
the_result = scsi_execute_req(sdkp->device, cmd,
DMA_NONE, NULL, 0,
&sshdr, SD_TIMEOUT,
- SD_MAX_RETRIES, NULL);
+ sdkp->max_retries, NULL);
/*
* If the drive has indicated to us that it
* doesn't have any media in it, don't bother
* with any more polling.
*/
- if (media_not_present(sdkp, &sshdr))
+ if (media_not_present(sdkp, &sshdr)) {
+ if (media_was_present)
+ sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
return;
+ }
if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
retries++;
- } while (retries < 3 &&
+ } while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
- ((driver_byte(the_result) == DRIVER_SENSE) &&
+ (scsi_status_is_check_condition(the_result) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
- if (driver_byte(the_result) != DRIVER_SENSE) {
+ if (!scsi_status_is_check_condition(the_result)) {
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result)) {
@@ -2147,7 +2124,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[4] |= 1 << 4;
scsi_execute_req(sdkp->device, cmd, DMA_NONE,
NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES,
+ SD_TIMEOUT, sdkp->max_retries,
NULL);
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
@@ -2197,47 +2174,55 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
{
struct scsi_device *sdp = sdkp->device;
u8 type;
- int ret = 0;
if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
sdkp->protection_type = 0;
- return ret;
+ return 0;
}
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
- if (type > T10_PI_TYPE3_PROTECTION)
- ret = -ENODEV;
- else if (scsi_host_dif_capable(sdp->host, type))
- ret = 1;
-
- if (sdkp->first_scan || type != sdkp->protection_type)
- switch (ret) {
- case -ENODEV:
- sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
- " protection type %u. Disabling disk!\n",
- type);
- break;
- case 1:
- sd_printk(KERN_NOTICE, sdkp,
- "Enabling DIF Type %u protection\n", type);
- break;
- case 0:
- sd_printk(KERN_NOTICE, sdkp,
- "Disabling DIF Type %u protection\n", type);
- break;
- }
+ if (type > T10_PI_TYPE3_PROTECTION) {
+ sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
+ " protection type %u. Disabling disk!\n",
+ type);
+ sdkp->protection_type = 0;
+ return -ENODEV;
+ }
sdkp->protection_type = type;
- return ret;
+ return 0;
+}
+
+static void sd_config_protection(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!sdkp->first_scan)
+ return;
+
+ sd_dif_config_host(sdkp);
+
+ if (!sdkp->protection_type)
+ return;
+
+ if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %u protection\n",
+ sdkp->protection_type);
+ sdkp->protection_type = 0;
+ }
+
+ sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
+ sdkp->protection_type);
}
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr *sshdr, int sense_valid,
int the_result)
{
- if (driver_byte(the_result) == DRIVER_SENSE)
+ if (sense_valid)
sd_print_sense_hdr(sdkp, sshdr);
else
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
@@ -2289,12 +2274,12 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, RC16_LEN, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ SD_TIMEOUT, sdkp->max_retries, NULL);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result) {
+ if (the_result > 0) {
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == ILLEGAL_REQUEST &&
@@ -2374,12 +2359,12 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, 8, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ SD_TIMEOUT, sdkp->max_retries, NULL);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result) {
+ if (the_result > 0) {
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == UNIT_ATTENTION &&
@@ -2555,18 +2540,23 @@ sd_print_capacity(struct scsi_disk *sdkp,
sd_printk(KERN_NOTICE, sdkp,
"%u-byte physical blocks\n",
sdkp->physical_block_size);
-
- sd_zbc_print_zones(sdkp);
}
/* called with buffer of length 512 */
static inline int
-sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
+sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
unsigned char *buffer, int len, struct scsi_mode_data *data,
struct scsi_sense_hdr *sshdr)
{
- return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
- SD_TIMEOUT, SD_MAX_RETRIES, data,
+ /*
+ * If we must use MODE SENSE(10), make sure that the buffer length
+ * is at least 8 bytes so that the mode sense header fits.
+ */
+ if (sdkp->device->use_10_for_ms && len < 8)
+ len = 8;
+
+ return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
+ SD_TIMEOUT, sdkp->max_retries, data,
sshdr);
}
@@ -2589,14 +2579,14 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (sdp->use_192_bytes_for_3f) {
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
} else {
/*
* First attempt: ask for all pages (0x3F), but only 4 bytes.
* We have to start carefully: some devices hang if we ask
* for more than is available.
*/
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
/*
* Second attempt: ask for page 0. When only page 0 is
@@ -2604,18 +2594,18 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
* 5: Illegal Request, Sense Code 24: Invalid field in
* CDB.
*/
- if (!scsi_status_is_good(res))
- res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
+ if (res < 0)
+ res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
/*
* Third attempt: ask 255 bytes, as we did earlier.
*/
- if (!scsi_status_is_good(res))
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
+ if (res < 0)
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
&data, NULL);
}
- if (!scsi_status_is_good(res)) {
+ if (res < 0) {
sd_first_printk(KERN_WARNING, sdkp,
"Test WP failed, assume Write Enabled\n");
} else {
@@ -2673,10 +2663,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
&data, &sshdr);
- if (!scsi_status_is_good(res))
+ if (res < 0)
goto bad_sense;
if (!data.header_length) {
@@ -2705,10 +2695,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
/* Get the data */
if (len > first_len)
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
&data, &sshdr);
- if (scsi_status_is_good(res)) {
+ if (!res) {
int offset = data.header_length + data.block_descriptor_length;
while (offset < len) {
@@ -2743,7 +2733,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
}
- sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ sd_first_printk(KERN_WARNING, sdkp,
+ "No Caching mode page found\n");
goto defaults;
Page_found:
@@ -2798,7 +2789,7 @@ defaults:
"Assuming drive cache: write back\n");
sdkp->WCE = 1;
} else {
- sd_first_printk(KERN_ERR, sdkp,
+ sd_first_printk(KERN_WARNING, sdkp,
"Assuming drive cache: write through\n");
sdkp->WCE = 0;
}
@@ -2824,9 +2815,9 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
return;
res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
- SD_MAX_RETRIES, &data, &sshdr);
+ sdkp->max_retries, &data, &sshdr);
- if (!scsi_status_is_good(res) || !data.header_length ||
+ if (res < 0 || !data.header_length ||
data.length < 6) {
sd_first_printk(KERN_WARNING, sdkp,
"getting Control mode page failed, assume no ATO\n");
@@ -2858,40 +2849,37 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
- unsigned int sector_sz = sdkp->device->sector_size;
- const int vpd_len = 64;
- unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+ struct scsi_vpd *vpd;
- if (!buffer ||
- /* Block Limits VPD */
- scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
- goto out;
+ rcu_read_lock();
- blk_queue_io_min(sdkp->disk->queue,
- get_unaligned_be16(&buffer[6]) * sector_sz);
+ vpd = rcu_dereference(sdkp->device->vpd_pgb0);
+ if (!vpd || vpd->len < 16)
+ goto out;
- sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
- sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
+ sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
+ sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
+ sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
- if (buffer[3] == 0x3c) {
+ if (vpd->len >= 64) {
unsigned int lba_count, desc_count;
- sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+ sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
if (!sdkp->lbpme)
goto out;
- lba_count = get_unaligned_be32(&buffer[20]);
- desc_count = get_unaligned_be32(&buffer[24]);
+ lba_count = get_unaligned_be32(&vpd->data[20]);
+ desc_count = get_unaligned_be32(&vpd->data[24]);
if (lba_count && desc_count)
sdkp->max_unmap_blocks = lba_count;
- sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+ sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
- if (buffer[32] & 0x80)
+ if (vpd->data[32] & 0x80)
sdkp->unmap_alignment =
- get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+ get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
@@ -2913,7 +2901,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
}
out:
- kfree(buffer);
+ rcu_read_unlock();
}
/**
@@ -2923,18 +2911,21 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
struct request_queue *q = sdkp->disk->queue;
- unsigned char *buffer;
+ struct scsi_vpd *vpd;
u16 rot;
- const int vpd_len = 64;
+ u8 zoned;
- buffer = kmalloc(vpd_len, GFP_KERNEL);
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!buffer ||
- /* Block Device Characteristics VPD */
- scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
- goto out;
+ if (!vpd || vpd->len < 8) {
+ rcu_read_unlock();
+ return;
+ }
- rot = get_unaligned_be16(&buffer[4]);
+ rot = get_unaligned_be16(&vpd->data[4]);
+ zoned = (vpd->data[8] >> 4) & 3;
+ rcu_read_unlock();
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -2943,26 +2934,32 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (sdkp->device->type == TYPE_ZBC) {
/* Host-managed */
- q->limits.zoned = BLK_ZONED_HM;
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
} else {
- sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
+ sdkp->zoned = zoned;
+ if (sdkp->zoned == 1) {
/* Host-aware */
- q->limits.zoned = BLK_ZONED_HA;
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
} else {
- /*
- * Treat drive-managed devices and host-aware devices
- * with partitions as regular block devices.
- */
- q->limits.zoned = BLK_ZONED_NONE;
+ /* Regular disk or drive managed disk */
+ disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
}
}
- if (blk_queue_is_zoned(q) && sdkp->first_scan)
+
+ if (!sdkp->first_scan)
+ return;
+
+ if (blk_queue_is_zoned(q)) {
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
-
- out:
- kfree(buffer);
+ } else {
+ if (sdkp->zoned == 1)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Host-aware SMR disk used as regular disk\n");
+ else if (sdkp->zoned == 2)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Drive-managed SMR disk\n");
+ }
}
/**
@@ -2971,24 +2968,24 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
*/
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
- unsigned char *buffer;
- const int vpd_len = 8;
+ struct scsi_vpd *vpd;
if (sdkp->lbpme == 0)
return;
- buffer = kmalloc(vpd_len, GFP_KERNEL);
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb2);
- if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
- goto out;
+ if (!vpd || vpd->len < 8) {
+ rcu_read_unlock();
+ return;
+ }
sdkp->lbpvpd = 1;
- sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
- sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
- sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
-
- out:
- kfree(buffer);
+ sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
+ sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
+ rcu_read_unlock();
}
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
@@ -3002,8 +2999,7 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
- /* too large values might cause issues with arcmsr */
- int vpd_buf_len = 64;
+ struct scsi_vpd *vpd;
sdev->no_report_opcodes = 1;
@@ -3011,8 +3007,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
* CODES is unsupported and the device has an ATA
* Information VPD page (SAT).
*/
- if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd)
sdev->no_write_same = 1;
+ rcu_read_unlock();
}
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
@@ -3036,6 +3035,109 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->security = 1;
}
+static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
+{
+ return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
+}
+
+/**
+ * sd_read_cpr - Query concurrent positioning ranges
+ * @sdkp: disk to query
+ */
+static void sd_read_cpr(struct scsi_disk *sdkp)
+{
+ struct blk_independent_access_ranges *iars = NULL;
+ unsigned char *buffer = NULL;
+ unsigned int nr_cpr = 0;
+ int i, vpd_len, buf_len = SD_BUF_SIZE;
+ u8 *desc;
+
+ /*
+ * We need to have the capacity set first for the block layer to be
+ * able to check the ranges.
+ */
+ if (sdkp->first_scan)
+ return;
+
+ if (!sdkp->capacity)
+ goto out;
+
+ /*
+ * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
+ * leading to a maximum page size of 64 + 256*32 bytes.
+ */
+ buf_len = 64 + 256*32;
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
+ goto out;
+
+ /* We must have at least a 64B header and one 32B range descriptor */
+ vpd_len = get_unaligned_be16(&buffer[2]) + 4;
+ if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Ranges VPD page\n");
+ goto out;
+ }
+
+ nr_cpr = (vpd_len - 64) / 32;
+ if (nr_cpr == 1) {
+ nr_cpr = 0;
+ goto out;
+ }
+
+ iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
+ if (!iars) {
+ nr_cpr = 0;
+ goto out;
+ }
+
+ desc = &buffer[64];
+ for (i = 0; i < nr_cpr; i++, desc += 32) {
+ if (desc[0] != i) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Range number\n");
+ nr_cpr = 0;
+ break;
+ }
+
+ iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
+ iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
+ }
+
+out:
+ disk_set_independent_access_ranges(sdkp->disk, iars);
+ if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u concurrent positioning ranges\n", nr_cpr);
+ sdkp->nr_actuators = nr_cpr;
+ }
+
+ kfree(buffer);
+}
+
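
A worked instance of the B9h page length arithmetic above, as a hypothetical standalone helper: the page is a 64-byte header followed by 32-byte range descriptors, and the PAGE LENGTH field at bytes 2-3 excludes the 4-byte page header.

static int cpr_range_count(unsigned int page_length_field)
{
        unsigned int vpd_len = page_length_field + 4;   /* add the 4-byte header */

        /* At least one descriptor, and a whole number of descriptors. */
        if (vpd_len < 64 + 32 || (vpd_len & 31))
                return -1;
        return (vpd_len - 64) / 32;    /* e.g. 124 -> (128 - 64) / 32 = 2 */
}
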
+static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+ unsigned int min_xfer_bytes =
+ logical_to_bytes(sdp, sdkp->min_xfer_blocks);
+
+ if (sdkp->min_xfer_blocks == 0)
+ return false;
+
+ if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Preferred minimum I/O size %u bytes not a " \
+ "multiple of physical block size (%u bytes)\n",
+ min_xfer_bytes, sdkp->physical_block_size);
+ sdkp->min_xfer_blocks = 0;
+ return false;
+ }
+
+ sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
+ min_xfer_bytes);
+ return true;
+}
+
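
The bitmask test above is the usual power-of-two alignment idiom; it holds here because sd accepts only power-of-two sector sizes (512 to 4096 bytes), so physical_block_size - 1 is a mask of the low-order bits. A minimal sketch with a hypothetical helper name:

#include <stdbool.h>

static bool aligned_to(unsigned int bytes, unsigned int pow2_size)
{
        /* e.g. aligned_to(8192, 4096) is true; aligned_to(6144, 4096) is not */
        return (bytes & (pow2_size - 1)) == 0;
}
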
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a
@@ -3047,6 +3149,8 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
struct scsi_device *sdp = sdkp->device;
unsigned int opt_xfer_bytes =
logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ unsigned int min_xfer_bytes =
+ logical_to_bytes(sdp, sdkp->min_xfer_blocks);
if (sdkp->opt_xfer_blocks == 0)
return false;
@@ -3075,6 +3179,15 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return false;
}
+ if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Optimal transfer size %u bytes not a " \
+ "multiple of preferred minimum block " \
+ "size (%u bytes)\n",
+ opt_xfer_bytes, min_xfer_bytes);
+ return false;
+ }
+
if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u bytes not a " \
@@ -3142,6 +3255,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
sd_print_capacity(sdkp, old_capacity);
@@ -3151,6 +3265,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
+ sd_config_protection(sdkp);
}
/*
@@ -3166,12 +3281,27 @@ static int sd_revalidate_disk(struct gendisk *disk)
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ if (sd_validate_min_xfer_size(sdkp))
+ blk_queue_io_min(sdkp->disk->queue,
+ logical_to_bytes(sdp, sdkp->min_xfer_blocks));
+ else
+ blk_queue_io_min(sdkp->disk->queue, 0);
+
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else
+ } else {
+ q->limits.io_opt = 0;
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
+ }
+
+ /*
+ * Limit the default to the SCSI host optimal sector limit if set. There
+ * may be an impact on performance when the size of a request exceeds
+ * this host limit.
+ */
+ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
@@ -3187,10 +3317,18 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->first_scan = 0;
- set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
+ set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);
+ /*
+ * For a zoned drive, revalidating the zones can be done only once
+ * the gendisk capacity is set. So if this fails, set the gendisk
+ * capacity back to 0.
+ */
+ if (sd_zbc_revalidate_zones(sdkp))
+ set_capacity_and_notify(disk, 0);
+
out:
return 0;
}
@@ -3295,10 +3433,12 @@ static int sd_probe(struct device *dev)
sdp->type != TYPE_RBC)
goto out;
-#ifndef CONFIG_BLK_DEV_ZONED
- if (sdp->type == TYPE_ZBC)
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
+ sdev_printk(KERN_WARNING, sdp,
+ "Unsupported ZBC host-managed device.\n");
goto out;
-#endif
+ }
+
SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
"sd_probe\n"));
@@ -3307,7 +3447,8 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = alloc_disk(SD_MINORS);
+ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
+ &sd_bio_compl_lkclass);
if (!gd)
goto out_free;
@@ -3324,9 +3465,9 @@ static int sd_probe(struct device *dev)
}
sdkp->device = sdp;
- sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
+ sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
@@ -3338,24 +3479,25 @@ static int sd_probe(struct device *dev)
SD_MOD_TIMEOUT);
}
- device_initialize(&sdkp->dev);
- sdkp->dev.parent = dev;
- sdkp->dev.class = &sd_disk_class;
- dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+ device_initialize(&sdkp->disk_dev);
+ sdkp->disk_dev.parent = get_device(dev);
+ sdkp->disk_dev.class = &sd_disk_class;
+ dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
- error = device_add(&sdkp->dev);
- if (error)
- goto out_free_index;
+ error = device_add(&sdkp->disk_dev);
+ if (error) {
+ put_device(&sdkp->disk_dev);
+ goto out;
+ }
- get_device(dev);
dev_set_drvdata(dev, sdkp);
gd->major = sd_major((index & 0xf0) >> 4);
gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = SD_MINORS;
gd->fops = &sd_fops;
- gd->private_data = &sdkp->driver;
- gd->queue = sdkp->device->request_queue;
+ gd->private_data = sdkp;
/* defaults, until the device tells us otherwise */
sdp->sector_size = 512;
@@ -3371,7 +3513,6 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
- gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
@@ -3383,14 +3524,16 @@ static int sd_probe(struct device *dev)
pm_runtime_set_autosuspend_delay(dev,
sdp->host->hostt->rpm_autosuspend_delay);
}
- device_add_disk(dev, gd, NULL);
- if (sdkp->capacity)
- sd_dif_config_host(sdkp);
- sd_revalidate_disk(gd);
+ error = device_add_disk(dev, gd, NULL);
+ if (error) {
+ put_device(&sdkp->disk_dev);
+ put_disk(gd);
+ goto out;
+ }
if (sdkp->security) {
- sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+ sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
if (sdkp->opal_dev)
sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
}
@@ -3425,62 +3568,26 @@ static int sd_probe(struct device *dev)
**/
static int sd_remove(struct device *dev)
{
- struct scsi_disk *sdkp;
- dev_t devt;
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
- sdkp = dev_get_drvdata(dev);
- devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
- async_synchronize_full_domain(&scsi_sd_pm_domain);
- device_del(&sdkp->dev);
+ device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- free_opal_dev(sdkp->opal_dev);
-
- blk_register_region(devt, SD_MINORS, NULL,
- sd_default_probe, NULL, NULL);
-
- mutex_lock(&sd_ref_mutex);
- dev_set_drvdata(dev, NULL);
- put_device(&sdkp->dev);
- mutex_unlock(&sd_ref_mutex);
-
+ put_disk(sdkp->disk);
return 0;
}
-/**
- * scsi_disk_release - Called to free the scsi_disk structure
- * @dev: pointer to embedded class device
- *
- * sd_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_disk_get()
- * scsi_disk_put() helpers which manipulate the semaphore directly
- * and never do a direct put_device.
- **/
static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- struct gendisk *disk = sdkp->disk;
- struct request_queue *q = disk->queue;
ida_free(&sd_index_ida, sdkp->index);
-
- /*
- * Wait until all requests that are in progress have completed.
- * This is necessary to avoid that e.g. scsi_end_request() crashes
- * due to clearing the disk->private_data pointer. Wait from inside
- * scsi_disk_release() instead of from sd_release() to avoid that
- * freezing and unfreezing the request queue affects user space I/O
- * in case multiple processes open a /dev/sd... node concurrently.
- */
- blk_mq_freeze_queue(q);
- blk_mq_unfreeze_queue(q);
-
- disk->private_data = NULL;
- put_disk(disk);
+ sd_zbc_free_zone_info(sdkp);
put_device(&sdkp->device->sdev_gendev);
+ free_opal_dev(sdkp->opal_dev);
kfree(sdkp);
}
@@ -3502,15 +3609,15 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
return -ENODEV;
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
+ SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (driver_byte(res) == DRIVER_SENSE)
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
sd_print_sense_hdr(sdkp, &sshdr);
- if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
- sshdr.asc == 0x3a)
- res = 0;
+ if (sshdr.asc == 0x3a)
+ res = 0;
+ }
}
/* SCSI error codes must not go to the generic layer */
@@ -3556,7 +3663,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
return 0;
if (sdkp->WCE && sdkp->media_present) {
- sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp, &sshdr);
if (ret) {
@@ -3578,7 +3686,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
if (sdkp->device->manage_start_stop) {
- sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
if (ignore_stop_errors)
@@ -3590,6 +3699,9 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
static int sd_suspend_system(struct device *dev)
{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
return sd_suspend_common(dev, true);
}
@@ -3616,6 +3728,38 @@ static int sd_resume(struct device *dev)
return ret;
}
+static int sd_resume_system(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return sd_resume(dev);
+}
+
+static int sd_resume_runtime(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdp;
+
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+
+ sdp = sdkp->device;
+
+ if (sdp->ignore_media_change) {
+ /* clear the device's sense data */
+ static const u8 cmd[10] = { REQUEST_SENSE };
+
+ if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
+ NULL, sdp->request_queue->rq_timeout, 1, 0,
+ RQF_PM, NULL))
+ sd_printk(KERN_NOTICE, sdkp,
+ "Failed to clear sense data\n");
+ }
+
+ return sd_resume(dev);
+}
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
@@ -3629,11 +3773,9 @@ static int __init init_sd(void)
SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
for (i = 0; i < SD_MAJORS; i++) {
- if (register_blkdev(sd_major(i), "sd") != 0)
+ if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
continue;
majors++;
- blk_register_region(sd_major(i), SD_MINORS, NULL,
- sd_default_probe, NULL, NULL);
}
if (!majors)
@@ -3651,18 +3793,11 @@ static int __init init_sd(void)
goto err_out_class;
}
- sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
- if (!sd_cdb_pool) {
- printk(KERN_ERR "sd: can't init extended cdb pool\n");
- err = -ENOMEM;
- goto err_out_cache;
- }
-
sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
if (!sd_page_pool) {
printk(KERN_ERR "sd: can't init discard page pool\n");
err = -ENOMEM;
- goto err_out_ppool;
+ goto err_out_cache;
}
err = scsi_register_driver(&sd_template.gendrv);
@@ -3674,9 +3809,6 @@ static int __init init_sd(void)
err_out_driver:
mempool_destroy(sd_page_pool);
-err_out_ppool:
- mempool_destroy(sd_cdb_pool);
-
err_out_cache:
kmem_cache_destroy(sd_cdb_cache);
@@ -3700,16 +3832,13 @@ static void __exit exit_sd(void)
SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
scsi_unregister_driver(&sd_template.gendrv);
- mempool_destroy(sd_cdb_pool);
mempool_destroy(sd_page_pool);
kmem_cache_destroy(sd_cdb_cache);
class_unregister(&sd_disk_class);
- for (i = 0; i < SD_MAJORS; i++) {
- blk_unregister_region(sd_major(i), SD_MINORS);
+ for (i = 0; i < SD_MAJORS; i++)
unregister_blkdev(sd_major(i), "sd");
- }
}
module_init(init_sd);
@@ -3724,15 +3853,14 @@ void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
{
const char *hb_string = scsi_hostbyte_string(result);
- const char *db_string = scsi_driverbyte_string(result);
- if (hb_string || db_string)
+ if (hb_string)
sd_printk(KERN_INFO, sdkp,
"%s: Result: hostbyte=%s driverbyte=%s\n", msg,
hb_string ? hb_string : "invalid",
- db_string ? db_string : "invalid");
+ "DRIVER_OK");
else
sd_printk(KERN_INFO, sdkp,
- "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
- msg, host_byte(result), driver_byte(result));
+ "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
+ msg, host_byte(result), "DRIVER_OK");
}
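
With the driver byte gone, only the host byte remains a distinct field of the SCSI result word; it occupies bits 16-23. A one-line sketch mirroring the kernel's host_byte() accessor:

static int host_byte_of(int result)
{
        return (result >> 16) & 0xff;   /* e.g. DID_OK == 0 */
}
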
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 50fff0bf8c8e..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,21 +67,54 @@ enum {
SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */
};
+/**
+ * struct zoned_disk_info - Specific properties of a ZBC SCSI device.
+ * @nr_zones: number of zones.
+ * @zone_blocks: number of logical blocks per zone.
+ *
+ * This data structure holds the ZBC SCSI device properties that are retrieved
+ * twice: a first time before the gendisk capacity is known and a second time
+ * after the gendisk capacity is known.
+ */
+struct zoned_disk_info {
+ u32 nr_zones;
+ u32 zone_blocks;
+};
+
struct scsi_disk {
- struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
- struct device dev;
+
+ /*
+ * disk_dev is used to show attributes in /sys/class/scsi_disk/,
+ * but otherwise not really needed. Do not use for refcounting.
+ */
+ struct device disk_dev;
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
- u32 nr_zones;
- u32 zone_blocks;
+ /* Updated during revalidation before the gendisk capacity is known. */
+ struct zoned_disk_info early_zone_info;
+ /* Updated during revalidation after the gendisk capacity is known. */
+ struct zoned_disk_info zone_info;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
+ /*
+ * Either zero or a power of two. If not zero it means that the offset
+ * between zone starting LBAs is constant.
+ */
+ u32 zone_starting_lba_gran;
+ u32 *zones_wp_offset;
+ spinlock_t zones_wp_offset_lock;
+ u32 *rev_wp_offset;
+ struct mutex rev_mutex;
+ struct work_struct zone_wp_offset_work;
+ char *zone_wp_update_buf;
#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
+ int max_retries;
+ u32 min_xfer_blocks;
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@@ -97,6 +130,7 @@ struct scsi_disk {
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
u8 zeroing_mode;
+ u8 nr_actuators; /* Number of actuators */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
@@ -117,11 +151,11 @@ struct scsi_disk {
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
};
-#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_disk, driver);
+ return disk->private_data;
}
#define sd_printk(prefix, sdsk, fmt, a...) \
@@ -207,24 +241,32 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
-extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp);
+int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
+int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);
-extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
- struct scsi_sense_hdr *sshdr);
+unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr);
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
+ unsigned int nr_blocks);
+
#else /* CONFIG_BLK_DEV_ZONED */
-static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
- unsigned char *buf)
+static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {}
+
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
return 0;
}
-static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
+static inline int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+{
+ return 0;
+}
static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op,
@@ -233,9 +275,18 @@ static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
return BLK_STS_TARGET;
}
-static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
- unsigned int good_bytes,
- struct scsi_sense_hdr *sshdr) {}
+static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd,
+ unsigned int good_bytes, struct scsi_sense_hdr *sshdr)
+{
+ return good_bytes;
+}
+
+static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd,
+ sector_t *lba,
+ unsigned int nr_blocks)
+{
+ return BLK_STS_TARGET;
+}
#define sd_zbc_report_zones NULL
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 4cadb26070a8..968993ee6d5d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -6,7 +6,7 @@
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*/
-#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>
#include <scsi/scsi.h>
@@ -59,8 +59,6 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.profile = &t10_pi_type1_crc;
bi.tuple_size = sizeof(struct t10_pi_tuple);
- sd_printk(KERN_NOTICE, sdkp,
- "Enabling DIX %s protection\n", bi.profile->name);
if (dif && type) {
bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
@@ -72,11 +70,11 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.tag_size = sizeof(u16) + sizeof(u32);
else
bi.tag_size = sizeof(u16);
-
- sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
- bi.tag_size);
}
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIX %s, application tag size %u bytes\n",
+ bi.profile->name, bi.tag_size);
out:
blk_integrity_register(disk, &bi);
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index f45c22b09726..bd15624c6322 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -11,6 +11,7 @@
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
+#include <linux/mutex.h>
#include <asm/unaligned.h>
@@ -19,11 +20,66 @@
#include "sd.h"
-static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+/**
+ * sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
+ * @zone: Zone for which to return the write pointer offset.
+ *
+ * Return: offset of the write pointer from the start of the zone.
+ */
+static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
+{
+ if (zone->type == ZBC_ZONE_TYPE_CONV)
+ return 0;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ return zone->wp - zone->start;
+ case BLK_ZONE_COND_FULL:
+ return zone->len;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_OFFLINE:
+ case BLK_ZONE_COND_READONLY:
+ default:
+ /*
+ * Offline and read-only zones do not have a valid
+ * write pointer. Use 0, as for an empty zone.
+ */
+ return 0;
+ }
+}
+
+/* Whether or not a SCSI zone descriptor describes a gap zone. */
+static bool sd_zbc_is_gap_zone(const u8 buf[64])
+{
+ return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP;
+}
+
+/**
+ * sd_zbc_parse_report - Parse a SCSI zone descriptor
+ * @sdkp: SCSI disk pointer.
+ * @buf: SCSI zone descriptor.
+ * @idx: Index of the zone relative to the first zone reported by the current
+ * sd_zbc_report_zones() call.
+ * @cb: Callback function pointer.
+ * @data: Second argument passed to @cb.
+ *
+ * Return: Value returned by @cb.
+ *
+ * Convert a SCSI zone descriptor into struct blk_zone format. Additionally,
+ * call @cb(blk_zone, @data).
+ */
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
struct blk_zone zone = { 0 };
+ sector_t start_lba, gran;
+ int ret;
+
+ if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
+ return -EINVAL;
zone.type = buf[0] & 0x0f;
zone.cond = (buf[1] >> 4) & 0xf;
@@ -32,14 +88,40 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
if (buf[1] & 0x02)
zone.non_seq = 1;
- zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
- zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone.type != ZBC_ZONE_TYPE_CONV &&
- zone.cond == ZBC_ZONE_COND_FULL)
+ start_lba = get_unaligned_be64(&buf[16]);
+ zone.start = logical_to_sectors(sdp, start_lba);
+ zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone.len = zone.capacity;
+ if (sdkp->zone_starting_lba_gran) {
+ gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran);
+ if (zone.len > gran) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n",
+ start_lba,
+ sectors_to_logical(sdp, zone.capacity),
+ sectors_to_logical(sdp, zone.len),
+ sectors_to_logical(sdp, gran));
+ return -EINVAL;
+ }
+ /*
+ * Use the starting LBA granularity instead of the zone length
+ * obtained from the REPORT ZONES command.
+ */
+ zone.len = gran;
+ }
+ if (zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
+ else
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- return cb(&zone, idx, data);
+ ret = cb(&zone, idx, data);
+ if (ret)
+ return ret;
+
+ if (sdkp->rev_wp_offset)
+ sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);
+
+ return 0;
}
/**
@@ -82,8 +164,7 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
sd_printk(KERN_ERR, sdkp,
"REPORT ZONES start lba %llu failed\n", lba);
sd_print_result(sdkp, "REPORT ZONES", result);
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr))
+ if (result > 0 && scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EIO;
}
@@ -100,7 +181,7 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * Allocate a buffer for report zones reply.
+ * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
* @sdkp: The target disk
* @nr_zones: Maximum number of zones to report
* @buflen: Size of the buffer allocated
@@ -121,14 +202,14 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
/*
* Report zone buffer size should be at most 64B times the number of
- * zones requested plus the 64B reply header, but should be at least
- * SECTOR_SIZE for ATA devices.
+ * zones requested plus the 64B reply header, but should be aligned
+ * to SECTOR_SIZE for ATA devices.
* Make sure that this size does not exceed the hardware capabilities.
* Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
- nr_zones = min(nr_zones, sdkp->nr_zones);
+ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
@@ -136,13 +217,12 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
while (bufsize >= SECTOR_SIZE) {
buf = __vmalloc(bufsize,
- GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
- PAGE_KERNEL);
+ GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
if (buf) {
*buflen = bufsize;
return buf;
}
- bufsize >>= 1;
+ bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
}
return NULL;
@@ -154,16 +234,28 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*/
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
- return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+ return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
}
+/**
+ * sd_zbc_report_zones - SCSI .report_zones() callback.
+ * @disk: Disk to report zones for.
+ * @sector: Start sector.
+ * @nr_zones: Maximum number of zones to report.
+ * @cb: Callback function called to report zone information.
+ * @data: Second argument passed to @cb.
+ *
+ * Called by the block layer to iterate over zone information. See also the
+ * disk->fops->report_zones() calls in block/blk-zoned.c.
+ */
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
- sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
+ sector_t lba = sectors_to_logical(sdkp->device, sector);
unsigned int nr, i;
unsigned char *buf;
+ u64 zone_length, start_lba;
size_t offset, buflen = 0;
int zone_idx = 0;
int ret;
@@ -172,7 +264,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
/* Not a zoned device */
return -EOPNOTSUPP;
- if (!capacity)
+ if (!sdkp->capacity)
/* Device gone or invalid */
return -ENODEV;
@@ -180,9 +272,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
if (!buf)
return -ENOMEM;
- while (zone_idx < nr_zones && sector < capacity) {
- ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
- sectors_to_logical(sdkp->device, sector), true);
+ while (zone_idx < nr_zones && lba < sdkp->capacity) {
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true);
if (ret)
goto out;
@@ -193,14 +284,36 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
for (i = 0; i < nr && zone_idx < nr_zones; i++) {
offset += 64;
+ start_lba = get_unaligned_be64(&buf[offset + 16]);
+ zone_length = get_unaligned_be64(&buf[offset + 8]);
+ if ((zone_idx == 0 &&
+ (lba < start_lba ||
+ lba >= start_lba + zone_length)) ||
+ (zone_idx > 0 && start_lba != lba) ||
+ start_lba + zone_length < start_lba) {
+ sd_printk(KERN_ERR, sdkp,
+ "Zone %d at LBA %llu is invalid: %llu + %llu\n",
+ zone_idx, lba, start_lba, zone_length);
+ ret = -EINVAL;
+ goto out;
+ }
+ lba = start_lba + zone_length;
+ if (sd_zbc_is_gap_zone(&buf[offset])) {
+ if (sdkp->zone_starting_lba_gran)
+ continue;
+ sd_printk(KERN_ERR, sdkp,
+ "Gap zone without constant LBA offsets\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
cb, data);
if (ret)
goto out;
+
zone_idx++;
}
-
- sector += sd_zbc_zone_sectors(sdkp) * i;
}
ret = zone_idx;
@@ -209,6 +322,142 @@ out:
return ret;
}
+static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
+{
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+ sector_t sector = blk_rq_pos(rq);
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return BLK_STS_IOERR;
+
+ if (sdkp->device->changed)
+ return BLK_STS_IOERR;
+
+ if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
+ /* Unaligned request */
+ return BLK_STS_IOERR;
+
+ return BLK_STS_OK;
+}
+
+#define SD_ZBC_INVALID_WP_OFST (~0u)
+#define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1)
+
+static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct scsi_disk *sdkp = data;
+
+ lockdep_assert_held(&sdkp->zones_wp_offset_lock);
+
+ sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);
+
+ return 0;
+}
+
+/*
+ * An attempt to append to a zone triggered an invalid write pointer error.
+ * Reread the write pointer of the zone(s) in which the append failed.
+ */
+static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
+{
+ struct scsi_disk *sdkp;
+ unsigned long flags;
+ sector_t zno;
+ int ret;
+
+ sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
+
+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
+ for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
+ if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
+ continue;
+
+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
+ ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
+ SD_BUF_SIZE,
+ zno * sdkp->zone_info.zone_blocks, true);
+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
+ if (!ret)
+ sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
+ zno, sd_zbc_update_wp_offset_cb,
+ sdkp);
+ }
+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
+
+ scsi_device_put(sdkp->device);
+}
+
+/**
+ * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
+ * @cmd: the command to setup
+ * @lba: the LBA to patch
+ * @nr_blocks: the number of LBAs to be written
+ *
+ * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
+ * @sd_zbc_prepare_zone_append() handles the necessary zone write locking and
+ * patching of the LBA for an emulated ZONE_APPEND command.
+ *
+ * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
+ * schedule a REPORT ZONES command and return BLK_STS_DEV_RESOURCE.
+ */
+blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
+ unsigned int nr_blocks)
+{
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+ unsigned int wp_offset, zno = blk_rq_zone_no(rq);
+ unsigned long flags;
+ blk_status_t ret;
+
+ ret = sd_zbc_cmnd_checks(cmd);
+ if (ret != BLK_STS_OK)
+ return ret;
+
+ if (!blk_rq_zone_is_seq(rq))
+ return BLK_STS_IOERR;
+
+ /* Unlock of the write lock will happen in sd_zbc_complete() */
+ if (!blk_req_zone_write_trylock(rq))
+ return BLK_STS_ZONE_RESOURCE;
+
+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
+ wp_offset = sdkp->zones_wp_offset[zno];
+ switch (wp_offset) {
+ case SD_ZBC_INVALID_WP_OFST:
+ /*
+ * We are about to schedule work to update a zone write pointer
+ * offset, which will cause the zone append command to be
+ * requeued. So make sure that the scsi device does not go away
+ * while the work is being processed.
+ */
+ if (scsi_device_get(sdkp->device)) {
+ ret = BLK_STS_IOERR;
+ break;
+ }
+ sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
+ schedule_work(&sdkp->zone_wp_offset_work);
+ fallthrough;
+ case SD_ZBC_UPDATING_WP_OFST:
+ ret = BLK_STS_DEV_RESOURCE;
+ break;
+ default:
+ wp_offset = sectors_to_logical(sdkp->device, wp_offset);
+ if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
+ ret = BLK_STS_IOERR;
+ break;
+ }
+
+ *lba += wp_offset;
+ }
+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
+ if (ret)
+ blk_req_zone_write_unlock(rq);
+ return ret;
+}
+
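
The cached per-zone write pointer consumed by the switch above has three kinds of values: a real sector offset, or one of two sentinels carved out of the top of the u32 range. A condensed kernel-context sketch of the encoding (hypothetical helper; the definitions mirror SD_ZBC_INVALID_WP_OFST and SD_ZBC_UPDATING_WP_OFST above):

#define WP_INVALID      (~0u)                   /* needs a REPORT ZONES refresh */
#define WP_UPDATING     (WP_INVALID - 1)        /* refresh already scheduled */

static bool wp_cache_usable(u32 wp)
{
        return wp != WP_INVALID && wp != WP_UPDATING;
}
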
/**
* sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
* can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
@@ -222,21 +471,15 @@ out:
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all)
{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct request *rq = scsi_cmd_to_rq(cmd);
sector_t sector = blk_rq_pos(rq);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
+ blk_status_t ret;
- if (!sd_is_zoned(sdkp))
- /* Not a zoned device */
- return BLK_STS_IOERR;
-
- if (sdkp->device->changed)
- return BLK_STS_IOERR;
-
- if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
- /* Unaligned request */
- return BLK_STS_IOERR;
+ ret = sd_zbc_cmnd_checks(cmd);
+ if (ret != BLK_STS_OK)
+ return ret;
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
@@ -255,20 +498,108 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
return BLK_STS_OK;
}
+static bool sd_zbc_need_zone_wp_update(struct request *rq)
+{
+ switch (req_op(rq)) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ return blk_rq_zone_is_seq(rq);
+ default:
+ return false;
+ }
+}
+
+/**
+ * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
+ * @cmd: Completed command
+ * @good_bytes: Command reply bytes
+ *
+ * Called from sd_zbc_complete() to handle the update of the cached zone write
+ * pointer value in case an update is needed.
+ */
+static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
+ unsigned int good_bytes)
+{
+ int result = cmd->result;
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+ unsigned int zno = blk_rq_zone_no(rq);
+ enum req_op op = req_op(rq);
+ unsigned long flags;
+
+ /*
+ * If we got an error for a command that requires updating the write
+ * pointer offset cache, we must mark the zone wp offset entry as
+ * invalid to force an update from disk the next time a zone append
+ * command is issued.
+ */
+ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
+
+ if (result && op != REQ_OP_ZONE_RESET_ALL) {
+ if (op == REQ_OP_ZONE_APPEND) {
+ /* Force complete completion (no retry) */
+ good_bytes = 0;
+ scsi_set_resid(cmd, blk_rq_bytes(rq));
+ }
+
+ /*
+ * Force an update of the zone write pointer offset on
+ * the next zone append access.
+ */
+ if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
+ sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
+ goto unlock_wp_offset;
+ }
+
+ switch (op) {
+ case REQ_OP_ZONE_APPEND:
+ rq->__sector += sdkp->zones_wp_offset[zno];
+ fallthrough;
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_WRITE:
+ if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
+ sdkp->zones_wp_offset[zno] +=
+ good_bytes >> SECTOR_SHIFT;
+ break;
+ case REQ_OP_ZONE_RESET:
+ sdkp->zones_wp_offset[zno] = 0;
+ break;
+ case REQ_OP_ZONE_FINISH:
+ sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ memset(sdkp->zones_wp_offset, 0,
+ sdkp->zone_info.nr_zones * sizeof(unsigned int));
+ break;
+ default:
+ break;
+ }
+
+unlock_wp_offset:
+ spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
+
+ return good_bytes;
+}
+
/**
* sd_zbc_complete - ZBC command post processing.
* @cmd: Completed command
* @good_bytes: Command reply bytes
* @sshdr: command sense header
*
- * Called from sd_done(). Process report zones reply and handle reset zone
- * and write commands errors.
+ * Called from sd_done() to handle zone command errors and updates to the
+ * device queue zone write pointer offset cache.
*/
-void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
{
int result = cmd->result;
- struct request *rq = cmd->request;
+ struct request *rq = scsi_cmd_to_rq(cmd);
if (op_is_zone_mgmt(req_op(rq)) &&
result &&
@@ -280,7 +611,13 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
* so be quiet about the error.
*/
rq->rq_flags |= RQF_QUIET;
- }
+ } else if (sd_zbc_need_zone_wp_update(rq))
+ good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);
+
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ blk_req_zone_write_unlock(rq);
+
+ return good_bytes;
}
/**
@@ -293,6 +630,7 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
unsigned char *buf)
{
+ u64 zone_starting_lba_gran;
if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
sd_printk(KERN_NOTICE, sdkp,
@@ -306,12 +644,36 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
sdkp->zones_max_open = 0;
- } else {
- /* Host-managed */
- sdkp->urswrz = buf[4] & 1;
- sdkp->zones_optimal_open = 0;
- sdkp->zones_optimal_nonseq = 0;
- sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+ return 0;
+ }
+
+ /* Host-managed */
+ sdkp->urswrz = buf[4] & 1;
+ sdkp->zones_optimal_open = 0;
+ sdkp->zones_optimal_nonseq = 0;
+ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+ /* Check zone alignment method */
+ switch (buf[23] & 0xf) {
+ case 0:
+ case ZBC_CONSTANT_ZONE_LENGTH:
+ /* Use zone length */
+ break;
+ case ZBC_CONSTANT_ZONE_START_OFFSET:
+ zone_starting_lba_gran = get_unaligned_be64(&buf[24]);
+ if (zone_starting_lba_gran == 0 ||
+ !is_power_of_2(zone_starting_lba_gran) ||
+ logical_to_sectors(sdkp->device, zone_starting_lba_gran) >
+ UINT_MAX) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid zone starting LBA granularity %llu\n",
+ zone_starting_lba_gran);
+ return -ENODEV;
+ }
+ sdkp->zone_starting_lba_gran = zone_starting_lba_gran;
+ break;
+ default:
+ sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n");
+ return -ENODEV;
}
/*
@@ -333,7 +695,7 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
* sd_zbc_check_capacity - Check the device capacity
* @sdkp: Target disk
* @buf: command buffer
- * @zblock: zone size in number of blocks
+ * @zblocks: zone size in logical blocks
*
* Get the device zone size and check that the device capacity as reported
* by READ CAPACITY matches the max_lba value (plus one) of the report zones
@@ -367,14 +729,25 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
}
}
- /* Get the size of the first reported zone */
- rec = buf + 64;
- zone_blocks = get_unaligned_be64(&rec[8]);
- if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Zone size too large\n");
- return -EFBIG;
+ if (sdkp->zone_starting_lba_gran == 0) {
+ /* Get the size of the first reported zone */
+ rec = buf + 64;
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Zone size too large\n");
+ return -EFBIG;
+ }
+ } else {
+ zone_blocks = sdkp->zone_starting_lba_gran;
+ }
+
+ if (!is_power_of_2(zone_blocks)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Zone size %llu is not a power of two.\n",
+ zone_blocks);
+ return -EINVAL;
}
*zblocks = zone_blocks;
@@ -382,19 +755,186 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
-int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
+static void sd_zbc_print_zones(struct scsi_disk *sdkp)
+{
+ if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+ return;
+
+ if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks + 1 runt zone\n",
+ sdkp->zone_info.nr_zones - 1,
+ sdkp->zone_info.zone_blocks);
+ else
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks\n",
+ sdkp->zone_info.nr_zones,
+ sdkp->zone_info.zone_blocks);
+}
+
+static int sd_zbc_init_disk(struct scsi_disk *sdkp)
+{
+ sdkp->zones_wp_offset = NULL;
+ spin_lock_init(&sdkp->zones_wp_offset_lock);
+ sdkp->rev_wp_offset = NULL;
+ mutex_init(&sdkp->rev_mutex);
+ INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
+ sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
+ if (!sdkp->zone_wp_update_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
+{
+ if (!sdkp->zone_wp_update_buf)
+ return;
+
+ /* Serialize against revalidate zones */
+ mutex_lock(&sdkp->rev_mutex);
+
+ kvfree(sdkp->zones_wp_offset);
+ sdkp->zones_wp_offset = NULL;
+ kfree(sdkp->zone_wp_update_buf);
+ sdkp->zone_wp_update_buf = NULL;
+
+ sdkp->early_zone_info = (struct zoned_disk_info){ };
+ sdkp->zone_info = (struct zoned_disk_info){ };
+
+ mutex_unlock(&sdkp->rev_mutex);
+}
+
+static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+
+ swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
+}
+
+/*
+ * Call blk_revalidate_disk_zones() if any of the zoned disk properties has
+ * changed in a way that makes calling that function necessary. Called by
+ * sd_revalidate_disk() after the gendisk capacity has been set.
+ */
+int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
struct gendisk *disk = sdkp->disk;
+ struct request_queue *q = disk->queue;
+ u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
+ unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
+ u32 max_append;
+ int ret = 0;
+ unsigned int flags;
+
+ /*
+ * For all zoned disks, initialize zone append emulation data if not
+ * already done. This is also necessary for host-aware disks used as
+ * regular disks because of the presence of partitions, since the
+ * partitions may be deleted and the disk zoned model changed back from
+ * BLK_ZONED_NONE to BLK_ZONED_HA.
+ */
+ if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
+ ret = sd_zbc_init_disk(sdkp);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * There is nothing to do for regular disks, including host-aware disks
+ * that have partitions.
+ */
+ if (!blk_queue_is_zoned(q))
+ return 0;
+
+ /*
+ * Make sure revalidate zones are serialized to ensure exclusive
+ * updates of the scsi disk data.
+ */
+ mutex_lock(&sdkp->rev_mutex);
+
+ if (sdkp->zone_info.zone_blocks == zone_blocks &&
+ sdkp->zone_info.nr_zones == nr_zones &&
+ disk->nr_zones == nr_zones)
+ goto unlock;
+
+ flags = memalloc_noio_save();
+ sdkp->zone_info.zone_blocks = zone_blocks;
+ sdkp->zone_info.nr_zones = nr_zones;
+ sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
+ if (!sdkp->rev_wp_offset) {
+ ret = -ENOMEM;
+ memalloc_noio_restore(flags);
+ goto unlock;
+ }
+
+ ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
+
+ memalloc_noio_restore(flags);
+ kvfree(sdkp->rev_wp_offset);
+ sdkp->rev_wp_offset = NULL;
+
+ if (ret) {
+ sdkp->zone_info = (struct zoned_disk_info){ };
+ sdkp->capacity = 0;
+ goto unlock;
+ }
+
+ max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
+ q->limits.max_segments << (PAGE_SHIFT - 9));
+ max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
+
+ blk_queue_max_zone_append_sectors(q, max_append);
+
+ sd_zbc_print_zones(sdkp);
+
+unlock:
+ mutex_unlock(&sdkp->rev_mutex);
+
+ return ret;
+}
+
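
The memalloc_noio_save()/memalloc_noio_restore() pair above scopes GFP_NOIO onto every allocation made in between, so revalidation cannot recurse into I/O on the very disk being revalidated. A minimal kernel-context sketch of the idiom (hypothetical helper):

static u32 *alloc_wp_array_noio(unsigned int nr_zones)
{
        unsigned int flags = memalloc_noio_save();
        /* Behaves as a GFP_NOIO allocation inside the noio scope. */
        u32 *wp = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);

        memalloc_noio_restore(flags);
        return wp;
}
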
+/**
+ * sd_zbc_read_zones - Read zone information and update the request queue
+ * @sdkp: SCSI disk pointer.
+ * @buf: 512 byte buffer used for storing SCSI command output.
+ *
+ * Read zone information and update the request queue zone characteristics and
+ * also the zoned device information in *sdkp. Called by sd_revalidate_disk()
+ * before the gendisk capacity has been set.
+ */
+int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
+{
+ struct gendisk *disk = sdkp->disk;
+ struct request_queue *q = disk->queue;
unsigned int nr_zones;
u32 zone_blocks = 0;
int ret;
- if (!sd_is_zoned(sdkp))
+ if (!sd_is_zoned(sdkp)) {
/*
- * Device managed or normal SCSI disk,
- * no special handling required
+ * Device managed or normal SCSI disk, no special handling
+ * required. Nevertheless, free the disk zone information in
+ * case the device type changed.
*/
+ sd_zbc_free_zone_info(sdkp);
return 0;
+ }
+
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ if (!blk_queue_is_zoned(q)) {
+ /*
+ * This can happen for a host-aware disk with partitions.
+ * The block device zone model was already cleared by
+ * disk_set_zoned(). Only free the scsi disk zone
+ * information and exit early.
+ */
+ sd_zbc_free_zone_info(sdkp);
+ return 0;
+ }
/* Check zoned block device characteristics (unconstrained reads) */
ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
@@ -407,35 +947,25 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
- blk_queue_required_elevator_features(sdkp->disk->queue,
- ELEVATOR_F_ZBD_SEQ_WRITE);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+ if (sdkp->zones_max_open == U32_MAX)
+ disk_set_max_open_zones(disk, 0);
+ else
+ disk_set_max_open_zones(disk, sdkp->zones_max_open);
+ disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
/*
- * Revalidate the disk zone bitmaps once the block device capacity is
- * set on the second revalidate execution during disk scan and if
- * something changed when executing a normal revalidate.
+ * Per ZBC and ZAC specifications, writes in sequential write required
+ * zones of host-managed devices must be aligned to the device physical
+ * block size.
*/
- if (sdkp->first_scan) {
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
- return 0;
- }
+ if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
+ blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
- if (sdkp->zone_blocks != zone_blocks ||
- sdkp->nr_zones != nr_zones ||
- disk->queue->nr_zones != nr_zones) {
- ret = blk_revalidate_disk_zones(disk);
- if (ret != 0)
- goto err;
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
- }
+ sdkp->early_zone_info.nr_zones = nr_zones;
+ sdkp->early_zone_info.zone_blocks = zone_blocks;
return 0;
@@ -444,20 +974,3 @@ err:
return ret;
}
-
-void sd_zbc_print_zones(struct scsi_disk *sdkp)
-{
- if (!sd_is_zoned(sdkp) || !sdkp->capacity)
- return;
-
- if (sdkp->capacity & (sdkp->zone_blocks - 1))
- sd_printk(KERN_NOTICE, sdkp,
- "%u zones of %u logical blocks + 1 runt zone\n",
- sdkp->nr_zones - 1,
- sdkp->zone_blocks);
- else
- sd_printk(KERN_NOTICE, sdkp,
- "%u zones of %u logical blocks\n",
- sdkp->nr_zones,
- sdkp->zone_blocks);
-}
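
The nr_zones value computed in sd_zbc_read_zones() rounds the capacity up to the zone size, which is why sd_zbc_print_zones() can report a trailing partial ("runt") zone. A worked example as a hypothetical standalone helper, using plain division in place of the kernel's round_up()/ilog2() pair:

#include <stdint.h>

static uint32_t zbc_nr_zones(uint64_t capacity_blocks, uint32_t zone_blocks)
{
        /* e.g. 1,000,000 blocks in 65,536-block zones -> 16 zones,
         * the last one a runt of 16,960 blocks */
        return (capacity_blocks + zone_blocks - 1) / zone_blocks;
}
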
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h
index 201a536688de..805d4c13d070 100644
--- a/drivers/scsi/sense_codes.h
+++ b/drivers/scsi/sense_codes.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20200817]
*/
SENSE_CODE(0x0000, "No additional sense information")
@@ -29,6 +29,7 @@ SENSE_CODE(0x001E, "Conflicting SA creation request")
SENSE_CODE(0x001F, "Logical unit transitioning to another power condition")
SENSE_CODE(0x0020, "Extended copy information available")
SENSE_CODE(0x0021, "Atomic command aborted due to ACA")
+SENSE_CODE(0x0022, "Deferred microcode is pending")
SENSE_CODE(0x0100, "No index/sector signal")
@@ -72,6 +73,9 @@ SENSE_CODE(0x041F, "Logical unit not ready, microcode download required")
SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required")
SENSE_CODE(0x0421, "Logical unit not ready, hard reset required")
SENSE_CODE(0x0422, "Logical unit not ready, power cycle required")
+SENSE_CODE(0x0423, "Logical unit not ready, affiliation required")
+SENSE_CODE(0x0424, "Depopulation in progress")
+SENSE_CODE(0x0425, "Depopulation restoration in progress")
SENSE_CODE(0x0500, "Logical unit does not respond to selection")
@@ -104,6 +108,17 @@ SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile")
SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache")
SENSE_CODE(0x0B08, "Warning - power loss expected")
SENSE_CODE(0x0B09, "Warning - device statistics notification active")
+SENSE_CODE(0x0B0A, "Warning - high critical temperature limit exceeded")
+SENSE_CODE(0x0B0B, "Warning - low critical temperature limit exceeded")
+SENSE_CODE(0x0B0C, "Warning - high operating temperature limit exceeded")
+SENSE_CODE(0x0B0D, "Warning - low operating temperature limit exceeded")
+SENSE_CODE(0x0B0E, "Warning - high critical humidity limit exceeded")
+SENSE_CODE(0x0B0F, "Warning - low critical humidity limit exceeded")
+SENSE_CODE(0x0B10, "Warning - high operating humidity limit exceeded")
+SENSE_CODE(0x0B11, "Warning - low operating humidity limit exceeded")
+SENSE_CODE(0x0B12, "Warning - microcode security at risk")
+SENSE_CODE(0x0B13, "Warning - microcode digital signature validation failure")
+SENSE_CODE(0x0B14, "Warning - physical element status change")
SENSE_CODE(0x0C00, "Write error")
SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation")
@@ -122,6 +137,8 @@ SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data")
SENSE_CODE(0x0C0E, "Multiple write errors")
SENSE_CODE(0x0C0F, "Defects in error window")
SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations")
+SENSE_CODE(0x0C11, "Write error - recovery scan needed")
+SENSE_CODE(0x0C12, "Write error - insufficient zone resources")
SENSE_CODE(0x0D00, "Error detected by third party temporary initiator")
SENSE_CODE(0x0D01, "Third party device failure")
@@ -242,6 +259,9 @@ SENSE_CODE(0x2009, "Access denied - invalid LU identifier")
SENSE_CODE(0x200A, "Access denied - invalid proxy token")
SENSE_CODE(0x200B, "Access denied - ACL LUN conflict")
SENSE_CODE(0x200C, "Illegal command when not in append-only mode")
+SENSE_CODE(0x200D, "Not an administrative logical unit")
+SENSE_CODE(0x200E, "Not a subsidiary logical unit")
+SENSE_CODE(0x200F, "Not a conglomerate logical unit")
SENSE_CODE(0x2100, "Logical block address out of range")
SENSE_CODE(0x2101, "Invalid element address")
@@ -251,6 +271,8 @@ SENSE_CODE(0x2104, "Unaligned write command")
SENSE_CODE(0x2105, "Write boundary violation")
SENSE_CODE(0x2106, "Attempt to read invalid data")
SENSE_CODE(0x2107, "Read boundary violation")
+SENSE_CODE(0x2108, "Misaligned write command")
+SENSE_CODE(0x2109, "Attempt to access gap zone")
SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)")
@@ -275,6 +297,7 @@ SENSE_CODE(0x2405, "Security working key frozen")
SENSE_CODE(0x2406, "Nonce not unique")
SENSE_CODE(0x2407, "Nonce timestamp out of range")
SENSE_CODE(0x2408, "Invalid XCDB")
+SENSE_CODE(0x2409, "Invalid fast format")
SENSE_CODE(0x2500, "Logical unit not supported")
@@ -297,6 +320,10 @@ SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value")
SENSE_CODE(0x2610, "Data decryption key fail limit reached")
SENSE_CODE(0x2611, "Incomplete key-associated data set")
SENSE_CODE(0x2612, "Vendor specific key reference not found")
+SENSE_CODE(0x2613, "Application tag mode page is invalid")
+SENSE_CODE(0x2614, "Tape stream mirroring prevented")
+SENSE_CODE(0x2615, "Copy source or copy destination not authorized")
+SENSE_CODE(0x2616, "Fast copy not possible")
SENSE_CODE(0x2700, "Write protected")
SENSE_CODE(0x2701, "Hardware write protected")
@@ -342,6 +369,7 @@ SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event"
SENSE_CODE(0x2A13, "Data encryption key instance counter has changed")
SENSE_CODE(0x2A14, "SA creation capabilities data has changed")
SENSE_CODE(0x2A15, "Medium removal prevention preempted")
+SENSE_CODE(0x2A16, "Zone reset write pointer recommended")
SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect")
@@ -360,6 +388,11 @@ SENSE_CODE(0x2C0B, "Not reserved")
SENSE_CODE(0x2C0C, "Orwrite generation does not match")
SENSE_CODE(0x2C0D, "Reset write pointer not allowed")
SENSE_CODE(0x2C0E, "Zone is offline")
+SENSE_CODE(0x2C0F, "Stream not open")
+SENSE_CODE(0x2C10, "Unwritten data in zone")
+SENSE_CODE(0x2C11, "Descriptor format sense data required")
+SENSE_CODE(0x2C12, "Zone is inactive")
+SENSE_CODE(0x2C13, "Well known logical unit access required")
SENSE_CODE(0x2D00, "Overwrite error on update in place")
@@ -395,6 +428,8 @@ SENSE_CODE(0x3100, "Medium format corrupted")
SENSE_CODE(0x3101, "Format command failed")
SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking")
SENSE_CODE(0x3103, "Sanitize command failed")
+SENSE_CODE(0x3104, "Depopulation failed")
+SENSE_CODE(0x3105, "Depopulation restoration failed")
SENSE_CODE(0x3200, "No defect spare location available")
SENSE_CODE(0x3201, "Defect list update failure")
@@ -419,6 +454,7 @@ SENSE_CODE(0x3802, "Esn - power management class event")
SENSE_CODE(0x3804, "Esn - media class event")
SENSE_CODE(0x3806, "Esn - device busy class event")
SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached")
+SENSE_CODE(0x3808, "Depopulation interrupted")
SENSE_CODE(0x3900, "Saving parameters not supported")
@@ -456,6 +492,7 @@ SENSE_CODE(0x3B19, "Element enabled")
SENSE_CODE(0x3B1A, "Data transfer device removed")
SENSE_CODE(0x3B1B, "Data transfer device inserted")
SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation")
+SENSE_CODE(0x3B20, "Element static information changed")
SENSE_CODE(0x3D00, "Invalid bits in identify message")
@@ -488,6 +525,11 @@ SENSE_CODE(0x3F13, "iSCSI IP address removed")
SENSE_CODE(0x3F14, "iSCSI IP address changed")
SENSE_CODE(0x3F15, "Inspect referrals sense descriptors")
SENSE_CODE(0x3F16, "Microcode has been changed without reset")
+SENSE_CODE(0x3F17, "Zone transition to full")
+SENSE_CODE(0x3F18, "Bind completed")
+SENSE_CODE(0x3F19, "Bind redirected")
+SENSE_CODE(0x3F1A, "Subsidiary binding changed")
+
/*
* SENSE_CODE(0x40NN, "Ram failure")
* SENSE_CODE(0x40NN, "Diagnostic failure on component nn")
@@ -589,6 +631,9 @@ SENSE_CODE(0x550B, "Insufficient power for operation")
SENSE_CODE(0x550C, "Insufficient resources to create rod")
SENSE_CODE(0x550D, "Insufficient resources to create rod token")
SENSE_CODE(0x550E, "Insufficient zone resources")
+SENSE_CODE(0x550F, "Insufficient zone resources to complete write")
+SENSE_CODE(0x5510, "Maximum number of streams open")
+SENSE_CODE(0x5511, "Insufficient resources to bind")
SENSE_CODE(0x5700, "Unable to recover table-of-contents")
@@ -692,6 +737,7 @@ SENSE_CODE(0x5D69, "Firmware impending failure throughput performance")
SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance")
SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count")
SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count")
+SENSE_CODE(0x5D73, "Media impending failure endurance limit met")
SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)")
SENSE_CODE(0x5E00, "Low power condition on")
@@ -744,6 +790,8 @@ SENSE_CODE(0x6708, "Assign failure occurred")
SENSE_CODE(0x6709, "Multiply assigned logical unit")
SENSE_CODE(0x670A, "Set target port groups command failed")
SENSE_CODE(0x670B, "ATA device feature not enabled")
+SENSE_CODE(0x670C, "Command rejected")
+SENSE_CODE(0x670D, "Explicit bind not allowed")
SENSE_CODE(0x6800, "Logical unit not configured")
SENSE_CODE(0x6801, "Subsidiary logical unit not configured")
@@ -772,6 +820,10 @@ SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region")
SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error")
SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording")
SENSE_CODE(0x6F07, "Conflict in binding nonce recording")
+SENSE_CODE(0x6F08, "Insufficient permission")
+SENSE_CODE(0x6F09, "Invalid drive-host pairing server")
+SENSE_CODE(0x6F0A, "Drive-host pairing suspended")
+
/*
* SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn")
*/
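
The SENSE_CODE() lines added above extend an X-macro list of ASC/ASCQ descriptions. A minimal standalone sketch of how such a list is typically consumed; the table contents, key layout, and helper names below are illustrative only, not the kernel's actual implementation:

#include <stdio.h>

struct sense_entry {
	unsigned short code;	/* (ASC << 8) | ASCQ */
	const char *text;
};

#define SENSE_CODE(c, s) { c, s },
static const struct sense_entry sense_table[] = {
	SENSE_CODE(0x2C12, "Zone is inactive")
	SENSE_CODE(0x3104, "Depopulation failed")
	SENSE_CODE(0x3808, "Depopulation interrupted")
};
#undef SENSE_CODE

static const char *sense_text(unsigned char asc, unsigned char ascq)
{
	unsigned short key = (asc << 8) | ascq;
	size_t i;

	for (i = 0; i < sizeof(sense_table) / sizeof(sense_table[0]); i++)
		if (sense_table[i].code == key)
			return sense_table[i].text;
	return "Unknown additional sense code";
}

int main(void)
{
	printf("%s\n", sense_text(0x31, 0x04)); /* "Depopulation failed" */
	return 0;
}
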
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index c2afba2a5414..0a1734f34587 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -87,9 +87,16 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
0
};
unsigned char recv_page_code;
+ unsigned int retries = SES_RETRIES;
+ struct scsi_sense_hdr sshdr;
+
+ do {
+ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ &sshdr, SES_TIMEOUT, 1, NULL);
+ } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
+ (sshdr.sense_key == NOT_READY ||
+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
- ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (unlikely(ret))
return ret;
@@ -111,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
static int ses_send_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
- u32 result;
+ int result;
unsigned char cmd[] = {
SEND_DIAGNOSTIC,
@@ -121,9 +128,16 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff,
0
};
+ struct scsi_sense_hdr sshdr;
+ unsigned int retries = SES_RETRIES;
+
+ do {
+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+ &sshdr, SES_TIMEOUT, 1, NULL);
+ } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
+ (sshdr.sense_key == NOT_READY ||
+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
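
Both ses_recv_diag() and ses_send_diag() now issue the command once per loop iteration (retries argument 1) and re-drive it only while the device reports NOT READY or a power-on/reset UNIT ATTENTION (ASC 0x29), instead of letting the midlayer blindly retry SES_RETRIES times on any failure. A minimal sketch of that retry predicate, with simplified userspace types (constants mirror the sense keys used above):

#include <stdbool.h>

#define NOT_READY      0x02
#define UNIT_ATTENTION 0x06

static bool ses_should_retry(int result, bool sense_valid,
			     unsigned char sense_key, unsigned char asc,
			     unsigned int retries_left)
{
	if (result <= 0 || retries_left == 0 || !sense_valid)
		return false;
	return sense_key == NOT_READY ||
	       (sense_key == UNIT_ATTENTION && asc == 0x29);
}
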
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4e6af592f018..ce34a8ad53b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -31,6 +31,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
+#include <linux/major.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
@@ -48,11 +49,15 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/uio.h>
#include <linux/cred.h> /* for sg_check_file_access() */
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
@@ -76,7 +81,7 @@ static int sg_proc_init(void);
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
-int sg_big_buff = SG_DEF_RESERVED_SIZE;
+static int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size. Each time sg_open() is called a buffer
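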
of this size (or less if there is not enough memory) will be reserved
@@ -166,13 +171,13 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
bool exclude; /* 1->open(O_EXCL) succeeded and is active */
int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
- struct gendisk *disk;
+ char name[DISK_NAME_LEN];
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
struct kref d_ref;
} Sg_device;
/* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -190,7 +195,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -202,8 +207,7 @@ static void sg_device_destroy(struct kref *kref);
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
#define sg_printk(prefix, sdp, fmt, a...) \
- sdev_prefix_printk(prefix, (sdp)->device, \
- (sdp)->disk->disk_name, fmt, ##a)
+ sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a)
/*
* The SCSI interfaces that use read() and write() as an asynchronous variant of
@@ -224,11 +228,6 @@ static int sg_check_file_access(struct file *filp, const char *caller)
caller, task_tgid_vnr(current), current->comm);
return -EPERM;
}
- if (uaccess_kernel()) {
- pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
- caller, task_tgid_vnr(current), current->comm);
- return -EACCES;
- }
return 0;
}
@@ -238,8 +237,9 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
-
- return blk_verify_command(cmd, filp->f_mode);
+ if (!scsi_cmd_allowed(cmd, filp->f_mode))
+ return -EPERM;
+ return 0;
}
static int
@@ -444,6 +444,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ -466,20 +467,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
@@ -498,9 +495,11 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
- (DRIVER_SENSE & hp->driver_status))
+ (srp->sense_b[0] & 0x70) == 0x70) {
+ old_hdr->driver_status = DRIVER_SENSE;
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
+ }
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
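
This hunk and the sg_new_read() hunk below stop relying on the midlayer's removed DRIVER_SENSE flag and instead infer sense-data validity from the buffer itself: (sense_b[0] & 0x70) == 0x70 accepts every valid response code. A standalone sketch of the test:

#include <stdbool.h>

/* Response codes 0x70/0x71 are fixed format, 0x72/0x73 descriptor
 * format; bit 7 of byte 0 is the VALID bit, so masking with 0x70
 * accepts any of the four codes, with or without VALID set. */
static bool sense_data_present(const unsigned char *sense_b)
{
	return (sense_b[0] & 0x70) == 0x70;
}
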
@@ -574,7 +573,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
- (DRIVER_SENSE & hp->driver_status)) {
+ (srp->sense_b[0] & 0x70) == 0x70) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
@@ -583,6 +582,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
err = -EFAULT;
goto err_out;
}
+ hp->driver_status = DRIVER_SENSE;
hp->sb_len_wr = len;
}
}
@@ -685,8 +685,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (copy_from_user(cmnd, buf, cmd_size))
+ if (copy_from_user(cmnd, buf, cmd_size)) {
+ sg_remove_request(sfp, srp);
return -EFAULT;
+ }
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but it is possible that the app intended SG_DXFER_TO_DEV, because there
@@ -793,8 +795,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
@@ -806,8 +810,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
- scsi_req_free_cmd(scsi_req(srp->rq));
- blk_put_request(srp->rq);
+ blk_mq_free_request(srp->rq);
srp->rq = NULL;
}
@@ -825,8 +828,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
- srp->rq, at_head, sg_rq_end_io);
+ srp->rq->end_io = sg_rq_end_io;
+ blk_execute_rq_nowait(srp->rq, at_head);
return 0;
}
@@ -934,9 +937,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -971,7 +972,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
*/
return 0;
case SG_GET_LOW_DMA:
- return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
+ return put_user(0, ip);
case SG_GET_SCSI_ID:
{
sg_scsi_id_t v;
@@ -1102,7 +1103,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1113,8 +1114,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
- return blk_trace_setup(sdp->device->request_queue,
- sdp->disk->disk_name,
+ return blk_trace_setup(sdp->device->request_queue, sdp->name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL, p);
case BLKTRACESTART:
@@ -1159,29 +1159,9 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
-
- return scsi_ioctl(sdp->device, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
}
-#ifdef CONFIG_COMPAT
-static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
-{
- void __user *p = compat_ptr(arg);
- Sg_device *sdp;
- Sg_fd *sfp;
- int ret;
-
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
- return -ENXIO;
-
- ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- return scsi_compat_ioctl(sdp->device, cmd_in, p);
-}
-#endif
-
static __poll_t
sg_poll(struct file *filp, poll_table * wait)
{
@@ -1331,11 +1311,11 @@ sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void
+static enum rq_end_io_ret
sg_rq_end_io(struct request *rq, blk_status_t status)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
struct sg_request *srp = rq->end_io_data;
- struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
@@ -1344,19 +1324,19 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
- return;
+ return RQ_END_IO_NONE;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
- return;
+ return RQ_END_IO_NONE;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
- sense = req->sense;
- result = req->result;
- resid = req->resid_len;
+ sense = scmd->sense_buffer;
+ result = scmd->result;
+ resid = scmd->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
@@ -1370,7 +1350,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
- srp->header.msg_status = msg_byte(result);
+ srp->header.msg_status = COMMAND_COMPLETE;
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
@@ -1391,8 +1371,8 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
}
}
- if (req->sense_len)
- memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
+ if (scmd->sense_len)
+ memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
/* Rely on write phase to clean out srp status values, so no "else" */
@@ -1403,8 +1383,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
- scsi_req_free_cmd(scsi_req(rq));
- blk_put_request(rq);
+ blk_mq_free_request(rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
@@ -1427,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
+ return RQ_END_IO_NONE;
}
static const struct file_operations sg_fops = {
@@ -1435,9 +1415,7 @@ static const struct file_operations sg_fops = {
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sg_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
@@ -1450,7 +1428,7 @@ static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
static Sg_device *
-sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+sg_alloc(struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
@@ -1486,9 +1464,7 @@ sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
- sprintf(disk->disk_name, "sg%d", k);
- disk->first_minor = k;
- sdp->disk = disk;
+ sprintf(sdp->name, "sg%d", k);
sdp->device = scsidp;
mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
@@ -1515,19 +1491,11 @@ static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
- struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
unsigned long iflags;
- disk = alloc_disk(1);
- if (!disk) {
- pr_warn("%s: alloc_disk failed\n", __func__);
- return -ENOMEM;
- }
- disk->major = SCSI_GENERIC_MAJOR;
-
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
@@ -1537,7 +1505,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
- sdp = sg_alloc(disk, scsidp);
+ sdp = sg_alloc(scsidp);
if (IS_ERR(sdp)) {
pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
@@ -1555,7 +1523,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
- sdp, "%s", disk->disk_name);
+ sdp, "%s", sdp->name);
if (IS_ERR(sg_class_member)) {
pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
@@ -1583,7 +1551,6 @@ cdev_add_err:
kfree(sdp);
out:
- put_disk(disk);
if (cdev)
cdev_del(cdev);
return error;
@@ -1607,7 +1574,6 @@ sg_device_destroy(struct kref *kref)
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
- put_disk(sdp->disk);
kfree(sdp);
}
@@ -1662,6 +1628,37 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+static struct ctl_table sg_sysctls[] = {
+ {
+ .procname = "sg-big-buff",
+ .data = &sg_big_buff,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table_header *hdr;
+static void register_sg_sysctls(void)
+{
+ if (!hdr)
+ hdr = register_sysctl("kernel", sg_sysctls);
+}
+
+static void unregister_sg_sysctls(void)
+{
+ if (hdr)
+ unregister_sysctl_table(hdr);
+}
+#else
+#define register_sg_sysctls() do { } while (0)
+#define unregister_sg_sysctls() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
static int __init
init_sg(void)
{
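
With the ctl_table above registered under the "kernel" directory, the reserved-buffer size becomes readable (mode 0444) at /proc/sys/kernel/sg-big-buff. A small userspace reader, assuming that path:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sg-big-buff", "r");
	int val;

	if (!f) {
		perror("sg-big-buff");
		return 1;
	}
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	printf("sg reserved buffer size: %d bytes\n", val);
	return 0;
}
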
@@ -1694,6 +1691,7 @@ init_sg(void)
return 0;
}
class_destroy(sg_sysfs_class);
+ register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
@@ -1702,6 +1700,7 @@ err_out:
static void __exit
exit_sg(void)
{
+ unregister_sg_sysctls();
#ifdef CONFIG_SCSI_PROC_FS
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
@@ -1718,7 +1717,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
- struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
@@ -1729,52 +1727,46 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
- unsigned char *long_cmdp = NULL;
+ struct scsi_cmnd *scmd;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
- if (hp->cmd_len > BLK_MAX_CDB) {
- long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
- if (!long_cmdp)
- return -ENOMEM;
- }
-
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
- * preallocated requests are already in use, then blk_get_request()
+ * preallocated requests are already in use, then scsi_alloc_request()
* will sleep until an active command completes, freeing up a request.
* Although waiting in an asynchronous interface is less than ideal, we
* do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
* not expect an EWOULDBLOCK from this condition.
*/
- rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
- if (IS_ERR(rq)) {
- kfree(long_cmdp);
+ rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ if (hp->cmd_len > sizeof(scmd->cmnd)) {
+ blk_mq_free_request(rq);
+ return -EINVAL;
}
- req = scsi_req(rq);
- if (hp->cmd_len > BLK_MAX_CDB)
- req->cmd = long_cmdp;
- memcpy(req->cmd, cmd, hp->cmd_len);
- req->cmd_len = hp->cmd_len;
+ memcpy(scmd->cmnd, cmd, hp->cmd_len);
+ scmd->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
- req->retries = SG_DEFAULT_RETRIES;
+ scmd->allowed = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
- !sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
@@ -1812,33 +1804,8 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md->from_user = 0;
}
- if (iov_count) {
- struct iovec *iov = NULL;
- struct iov_iter i;
-
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall())
- res = compat_import_iovec(rw, hp->dxferp, iov_count,
- 0, &iov, &i);
- else
-#endif
- res = import_iovec(rw, hp->dxferp, iov_count,
- 0, &iov, &i);
- if (res < 0)
- return res;
-
- iov_iter_truncate(&i, hp->dxfer_len);
- if (!iov_iter_count(&i)) {
- kfree(iov);
- return -EINVAL;
- }
-
- res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
- kfree(iov);
- } else
- res = blk_rq_map_user(q, rq, md, hp->dxferp,
- hp->dxfer_len, GFP_ATOMIC);
-
+ res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
+ GFP_ATOMIC, iov_count, iov_count, 1, rw);
if (!res) {
srp->bio = rq->bio;
@@ -1864,10 +1831,8 @@ sg_finish_rem_req(Sg_request *srp)
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
- if (srp->rq) {
- scsi_req_free_cmd(scsi_req(srp->rq));
- blk_put_request(srp->rq);
- }
+ if (srp->rq)
+ blk_mq_free_request(srp->rq);
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
@@ -1897,7 +1862,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
- struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
return -EFAULT;
@@ -1923,9 +1887,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
scatter_elem_sz_prev = num;
}
- if (sdp->device->host->unchecked_isa_dma)
- gfp_mask |= GFP_DMA;
-
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
@@ -2096,19 +2057,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
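
The rewritten loop distinguishes three per-request states where the old code only looked for done == 1. A standalone model of the claim logic (names assumed; locking omitted), showing how the new busy out-parameter reports an in-flight match so sg_read() keeps waiting instead of returning -ENODEV early:

#include <stdbool.h>
#include <stddef.h>

enum rq_state { RQ_ACTIVE = 0, RQ_DONE = 1, RQ_CLAIMED = 2 };

struct req {
	int pack_id;
	int state;		/* one of enum rq_state */
	bool sg_io_owned;
};

static struct req *claim_request(struct req *list, size_t n, int pack_id,
				 bool *busy)
{
	size_t i;

	*busy = false;
	for (i = 0; i < n; i++) {
		struct req *r = &list[i];

		if (r->sg_io_owned ||
		    (pack_id != -1 && r->pack_id != pack_id))
			continue;
		switch (r->state) {
		case RQ_ACTIVE:		/* in flight: caller keeps waiting */
			*busy = true;
			break;
		case RQ_DONE:		/* ready: claim against other readers */
			r->state = RQ_CLAIMED;
			return r;
		case RQ_CLAIMED:	/* already being returned elsewhere */
			break;
		}
	}
	return NULL;
}
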
@@ -2162,6 +2132,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
@@ -2507,7 +2486,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
- (int) atomic_read(&scsidp->device_busy),
+ (int) scsi_device_busy(scsidp),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
@@ -2551,8 +2530,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
"(res)sgat=%d low_dma=%d\n", k,
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
- (int) fp->reserve.k_use_sg,
- (int) sdp->device->host->unchecked_isa_dma);
+ (int) fp->reserve.k_use_sg, 0);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
@@ -2613,7 +2591,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
goto skip;
read_lock(&sdp->sfd_lock);
if (!list_empty(&sdp->sfds)) {
- seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+ seq_printf(s, " >>> device=%s ", sdp->name);
if (atomic_read(&sdp->detaching))
seq_puts(s, "detaching pending close ");
else if (sdp->device) {
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 713bce998b0e..57d5dff62f63 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -28,7 +28,11 @@
#include <asm/sgi/ip22.h>
#include <asm/sgi/wd.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
struct ip22_hostdata {
@@ -65,14 +69,15 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
static inline
void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
{
- unsigned long len = cmd->SCp.this_residual;
- void *addr = cmd->SCp.ptr;
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
+ void *addr = scsi_pointer->ptr;
dma_addr_t physaddr;
unsigned long count;
struct hpc_chunk *hcp;
physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
- cmd->SCp.dma_handle = physaddr;
+ scsi_pointer->dma_handle = physaddr;
hcp = hd->cpu;
while (len) {
@@ -95,13 +100,14 @@ void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
*/
hcp->desc.pbuf = 0;
hcp->desc.cntinfo = HPCDMA_EOX;
- dma_cache_sync(hd->dev, hd->cpu,
+ dma_sync_single_for_device(hd->dev, hd->dma,
(unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
DMA_TO_DEVICE);
}
static int dma_setup(struct scsi_cmnd *cmd, int datainp)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
struct hpc3_scsiregs *hregs =
(struct hpc3_scsiregs *) cmd->device->host->base;
@@ -116,7 +122,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
* obvious). IMHO a better fix would be not to do these dma setups
* in the first place.
*/
- if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
+ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0)
return 1;
fill_hpc_entries(hdata, cmd, datainp);
@@ -136,13 +142,14 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct ip22_hostdata *hdata = host_to_hostdata(instance);
struct hpc3_scsiregs *hregs;
if (!SCpnt)
return;
- if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
+ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0)
return;
hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
@@ -156,8 +163,8 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
barrier();
}
hregs->ctrl = 0;
- dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
- SCpnt->SCp.this_residual,
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
DMA_DIR(hdata->wh.dma_dir));
pr_debug("\n");
@@ -187,7 +194,7 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
hcp++;
dma += sizeof(struct hpc_chunk);
start += sizeof(struct hpc_chunk);
- };
+ }
hcp--;
hcp->desc.pnext = hdata->dma;
}
@@ -209,6 +216,7 @@ static struct scsi_host_template sgiwd93_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 8,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int sgiwd93_probe(struct platform_device *pdev)
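
The .cmd_size member added to the template above is what makes WD33C93_scsi_pointer() work: the midlayer allocates that many extra bytes alongside each command, replacing the removed cmd->SCp field. A userspace model of the layout (illustrative names; the real scsi_cmd_priv() helper additionally guarantees suitable alignment):

#include <stdlib.h>

struct scsi_pointer_model {
	char *ptr;
	int this_residual;
};

struct cmd_model {
	void *generic_state;	/* stand-in for the midlayer's own fields */
	unsigned char priv[];	/* cmd_size extra bytes for the driver */
};

static struct scsi_pointer_model *cmd_priv(struct cmd_model *cmd)
{
	return (struct scsi_pointer_model *)cmd->priv;
}

static struct cmd_model *alloc_cmd(size_t cmd_size)
{
	return calloc(1, sizeof(struct cmd_model) + cmd_size);
}
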
@@ -234,8 +242,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
hdata = host_to_hostdata(host);
hdata->dev = &pdev->dev;
- hdata->cpu = dma_alloc_attrs(&pdev->dev, HPC_DMA_SIZE, &hdata->dma,
- GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+ hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
+ &hdata->dma, DMA_TO_DEVICE, GFP_KERNEL);
if (!hdata->cpu) {
printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
"host %d buffer.\n", unit);
@@ -274,8 +282,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
out_irq:
free_irq(irq, host);
out_free:
- dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_TO_DEVICE);
out_put:
scsi_host_put(host);
out:
@@ -291,8 +299,8 @@ static int sgiwd93_remove(struct platform_device *pdev)
scsi_remove_host(host);
free_irq(pd->irq, host);
- dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_TO_DEVICE);
scsi_host_put(host);
return 0;
}
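
The allocation hunks above replace dma_alloc_attrs(..., DMA_ATTR_NON_CONSISTENT) with the dma_alloc_noncoherent() API, where the transfer direction is fixed at allocation time and CPU-to-device handoff is explicit. A kernel-context sketch of the pattern (compiles only in-tree; the function name is illustrative):

#include <linux/dma-mapping.h>

static void *alloc_hpc_chain(struct device *dev, size_t size, dma_addr_t *dma)
{
	void *cpu = dma_alloc_noncoherent(dev, size, dma, DMA_TO_DEVICE,
					  GFP_KERNEL);

	if (!cpu)
		return NULL;
	/* ...CPU fills the descriptor chain, then hands it to the device: */
	dma_sync_single_for_device(dev, *dma, size, DMA_TO_DEVICE);
	return cpu;
}
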
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 22302612e032..e519df68d603 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -213,21 +213,19 @@ static struct eisa_driver sim710_eisa_driver = {
static int __init sim710_init(void)
{
- int err = -ENODEV;
-
#ifdef MODULE
if (sim710)
param_setup(sim710);
#endif
#ifdef CONFIG_EISA
- err = eisa_driver_register(&sim710_eisa_driver);
+ /*
+ * FIXME: We'd really like to return -ENODEV if no devices have actually
+ * been found. However eisa_driver_register() only reports problems
+ * with kobject_register() so simply return success for now.
+ */
+ eisa_driver_register(&sim710_eisa_driver);
#endif
- /* FIXME: what we'd really like to return here is -ENODEV if
- * no devices have actually been found. Instead, the err
- * above actually only reports problems with kobject_register,
- * so for the moment return success */
-
return 0;
}
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index bc6506884e3b..973d240649ab 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,11 +1,11 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
-# (mailto:esc.storagedev@microsemi.com)
+# (mailto:storagedev@microchip.com)
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -38,14 +38,14 @@
# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
config SCSI_SMARTPQI
- tristate "Microsemi PQI Driver"
+ tristate "Microchip PQI Driver"
depends on PCI && SCSI && !S390
select SCSI_SAS_ATTRS
select RAID_ATTRS
- ---help---
- This driver supports Microsemi PQI controllers.
+ help
+ This driver supports Microchip PQI controllers.
- <http://www.microsemi.com>
+ <http://www.microchip.com>
To compile this driver as a module, choose M here: the
module will be called smartpqi.
@@ -53,4 +53,4 @@ config SCSI_SMARTPQI
Note: the aacraid driver will not manage a smartpqi
controller. You need to enable smartpqi for smartpqi
controllers. For more information, please see
- Documentation/scsi/smartpqi.txt
+ Documentation/scsi/smartpqi.rst
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 1129fe7a27ed..e550b12e525a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -59,7 +59,7 @@ struct pqi_device_registers {
/*
* controller registers
*
- * These are defined by the Microsemi implementation.
+ * These are defined by the Microchip implementation.
*
* Some registers (those named sis_*) are only used when in
* legacy SIS mode before we transition the controller into
@@ -79,11 +79,14 @@ struct pqi_ctrl_registers {
__le32 sis_ctrl_to_host_doorbell_clear; /* A0h */
u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))];
__le32 sis_driver_scratch; /* B0h */
- u8 reserved5[0xbc - (0xb0 + sizeof(__le32))];
+ __le32 sis_product_identifier; /* B4h */
+ u8 reserved5[0xbc - (0xb4 + sizeof(__le32))];
__le32 sis_firmware_status; /* BCh */
- u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
+ u8 reserved6[0xcc - (0xbc + sizeof(__le32))];
+ __le32 sis_ctrl_shutdown_reason_code; /* CCh */
+ u8 reserved7[0x1000 - (0xcc + sizeof(__le32))];
__le32 sis_mailbox[8]; /* 1000h */
- u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
+ u8 reserved8[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
/*
* The PQI spec states that the PQI registers should be at
* offset 0 from the PCIe BAR 0. However, we can't map
@@ -93,14 +96,23 @@ struct pqi_ctrl_registers {
struct pqi_device_registers pqi_registers; /* 4000h */
};
-#if ((HZ) < 1000)
-#define PQI_HZ 1000
-#else
-#define PQI_HZ (HZ)
-#endif
-
#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
+/* shutdown reasons for taking the controller offline */
+enum pqi_ctrl_shutdown_reason {
+ PQI_IQ_NOT_DRAINED_TIMEOUT = 1,
+ PQI_LUN_RESET_TIMEOUT = 2,
+ PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT = 3,
+ PQI_NO_HEARTBEAT = 4,
+ PQI_FIRMWARE_KERNEL_NOT_UP = 5,
+ PQI_OFA_RESPONSE_TIMEOUT = 6,
+ PQI_INVALID_REQ_ID = 7,
+ PQI_UNMATCHED_REQ_ID = 8,
+ PQI_IO_PI_OUT_OF_RANGE = 9,
+ PQI_EVENT_PI_OUT_OF_RANGE = 10,
+ PQI_UNEXPECTED_IU_TYPE = 11
+};
+
enum pqi_io_path {
RAID_PATH = 0,
AIO_PATH = 1
@@ -128,10 +140,13 @@ struct pqi_iu_header {
__le16 iu_length; /* in bytes - does not include the length */
/* of this header */
__le16 response_queue_id; /* specifies the OQ where the */
- /* response IU is to be delivered */
- u8 work_area[2]; /* reserved for driver use */
+ /* response IU is to be delivered */
+ u16 driver_flags; /* reserved for driver use */
};
+/* manifest constants for pqi_iu_header.driver_flags */
+#define PQI_DRIVER_NONBLOCKABLE_REQUEST 0x1
+
/*
* According to the PQI spec, the IU header is only the first 4 bytes of our
* pqi_iu_header structure.
@@ -256,6 +271,7 @@ struct pqi_device_capability {
};
#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4
+#define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS 3
struct pqi_raid_path_request {
struct pqi_iu_header header;
@@ -277,10 +293,10 @@ struct pqi_raid_path_request {
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
u8 cdb[16];
- u8 reserved6[12];
+ u8 reserved6[11];
+ u8 ml_device_lun_number;
__le32 timeout;
- struct pqi_sg_descriptor
- sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
struct pqi_aio_path_request {
@@ -307,8 +323,73 @@ struct pqi_aio_path_request {
u8 cdb_length;
u8 lun_number[8];
u8 reserved4[4];
- struct pqi_sg_descriptor
- sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+#define PQI_RAID1_NVME_XFER_LIMIT (32 * 1024) /* 32 KiB */
+
+struct pqi_aio_r1_path_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 volume_id; /* ID of the RAID volume */
+ __le32 it_nexus_1; /* IT nexus of the 1st drive in the RAID volume */
+ __le32 it_nexus_2; /* IT nexus of the 2nd drive in the RAID volume */
+ __le32 it_nexus_3; /* IT nexus of the 3rd drive in the RAID volume */
+ __le32 data_length; /* total bytes to read/write */
+ u8 data_direction : 2;
+ u8 partial : 1;
+ u8 memory_type : 1;
+ u8 fence : 1;
+ u8 encryption_enable : 1;
+ u8 reserved : 2;
+ u8 task_attribute : 3;
+ u8 command_priority : 4;
+ u8 reserved2 : 1;
+ __le16 data_encryption_key_index;
+ u8 cdb[16];
+ __le16 error_index;
+ u8 num_sg_descriptors;
+ u8 cdb_length;
+ u8 num_drives; /* number of drives in the RAID volume (2 or 3) */
+ u8 reserved3[3];
+ __le32 encrypt_tweak_lower;
+ __le32 encrypt_tweak_upper;
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+#define PQI_DEFAULT_MAX_WRITE_RAID_5_6 (8 * 1024U)
+#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA (~0U)
+#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME (32 * 1024U)
+
+struct pqi_aio_r56_path_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 volume_id; /* ID of the RAID volume */
+ __le32 data_it_nexus; /* IT nexus for the data drive */
+ __le32 p_parity_it_nexus; /* IT nexus for the P parity drive */
+ __le32 q_parity_it_nexus; /* IT nexus for the Q parity drive */
+ __le32 data_length; /* total bytes to read/write */
+ u8 data_direction : 2;
+ u8 partial : 1;
+ u8 mem_type : 1; /* 0 = PCIe, 1 = DDR */
+ u8 fence : 1;
+ u8 encryption_enable : 1;
+ u8 reserved : 2;
+ u8 task_attribute : 3;
+ u8 command_priority : 4;
+ u8 reserved1 : 1;
+ __le16 data_encryption_key_index;
+ u8 cdb[16];
+ __le16 error_index;
+ u8 num_sg_descriptors;
+ u8 cdb_length;
+ u8 xor_multiplier;
+ u8 reserved2[3];
+ __le32 encrypt_tweak_lower;
+ __le32 encrypt_tweak_upper;
+ __le64 row; /* row = logical LBA/blocks per row */
+ u8 reserved3[8];
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS];
};
struct pqi_io_response {
@@ -346,20 +427,20 @@ struct pqi_event_config {
u8 reserved[2];
u8 num_event_descriptors;
u8 reserved1;
- struct pqi_event_descriptor descriptors[1];
+ struct pqi_event_descriptor descriptors[];
};
#define PQI_MAX_EVENT_DESCRIPTORS 255
#define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0
#define PQI_EVENT_OFA_QUIESCE 0x1
-#define PQI_EVENT_OFA_CANCELLED 0x2
+#define PQI_EVENT_OFA_CANCELED 0x2
struct pqi_event_response {
struct pqi_iu_header header;
u8 event_type;
u8 reserved2 : 7;
- u8 request_acknowlege : 1;
+ u8 request_acknowledge : 1;
__le16 event_id;
__le32 additional_event_id;
union {
@@ -387,7 +468,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[2];
+ u8 reserved;
+ u8 ml_device_lun_number;
__le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
@@ -442,10 +524,6 @@ struct pqi_vendor_general_response {
#define PQI_OFA_SIGNATURE "OFA_QRM"
#define PQI_OFA_MAX_SG_DESCRIPTORS 64
-#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
- (offsetof(struct pqi_ofa_memory, sg_descriptor) + \
- (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
-
struct pqi_ofa_memory {
__le64 signature; /* "OFA_QRM" */
__le16 version; /* version of this struct (1 = 1st version) */
@@ -453,7 +531,7 @@ struct pqi_ofa_memory {
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
u8 reserved1[2];
- struct pqi_sg_descriptor sg_descriptor[1];
+ struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
};
struct pqi_aio_error_info {
@@ -483,6 +561,9 @@ struct pqi_raid_error_info {
#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
#define PQI_REQUEST_IU_RAID_PATH_IO 0x14
#define PQI_REQUEST_IU_AIO_PATH_IO 0x15
+#define PQI_REQUEST_IU_AIO_PATH_RAID5_IO 0x18
+#define PQI_REQUEST_IU_AIO_PATH_RAID6_IO 0x19
+#define PQI_REQUEST_IU_AIO_PATH_RAID1_IO 0x1A
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
@@ -585,6 +666,7 @@ struct pqi_raid_error_info {
/* these values are defined by the PQI spec */
#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255
#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
+
#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64
#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16
#define PQI_ADMIN_INDEX_ALIGNMENT 64
@@ -628,6 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
+#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -654,7 +737,7 @@ struct pqi_admin_queues_aligned {
struct pqi_admin_queues {
void *iq_element_array;
void *oq_element_array;
- pqi_index_t *iq_ci;
+ pqi_index_t __iomem *iq_ci;
pqi_index_t __iomem *oq_pi;
dma_addr_t iq_element_array_bus_addr;
dma_addr_t oq_element_array_bus_addr;
@@ -679,8 +762,8 @@ struct pqi_queue_group {
dma_addr_t oq_element_array_bus_addr;
__le32 __iomem *iq_pi[2];
pqi_index_t iq_pi_copy[2];
- pqi_index_t __iomem *iq_ci[2];
- pqi_index_t __iomem *oq_pi;
+ pqi_index_t __iomem *iq_ci[2];
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_ci_bus_addr[2];
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -693,7 +776,7 @@ struct pqi_event_queue {
u16 oq_id;
u16 int_msg_num;
void *oq_element_array;
- pqi_index_t __iomem *oq_pi;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -759,13 +842,32 @@ struct pqi_config_table_firmware_features {
u8 features_supported[];
/* u8 features_requested_by_host[]; */
/* u8 features_enabled[]; */
-};
-
-#define PQI_FIRMWARE_FEATURE_OFA 0
-#define PQI_FIRMWARE_FEATURE_SMP 1
-#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
-#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
-#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
+/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
+/* __le16 firmware_max_known_feature; */
+/* __le16 host_max_known_feature; */
+};
+
+#define PQI_FIRMWARE_FEATURE_OFA 0
+#define PQI_FIRMWARE_FEATURE_SMP 1
+#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2
+#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3
+#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4
+#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5
+#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6
+#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7
+#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8
+#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9
+#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
+#define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME 15
+#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
+#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
+#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
+#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
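
features_supported[] above is a variable-length byte array of feature bits, so a feature number such as PQI_FIRMWARE_FEATURE_FW_TRIAGE (17) is tested byte-by-byte. A standalone sketch of the usual test (helper name assumed):

#include <stdbool.h>

static bool firmware_feature_supported(const unsigned char *features,
				       unsigned int bit_position)
{
	return features[bit_position / 8] & (1u << (bit_position % 8));
}
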
@@ -820,7 +922,8 @@ union pqi_reset_register {
#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
-#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_MAX_DATA_DISKS_PER_ROW 128
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
@@ -840,19 +943,21 @@ struct report_lun_header {
#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5)
#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6)
-#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1)
+#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2 0x2
+#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4 0x4
+#define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK 0xf
-struct report_log_lun_extended_entry {
+struct report_log_lun {
u8 lunid[8];
u8 volume_id[16];
};
-struct report_log_lun_extended {
+struct report_log_lun_list {
struct report_lun_header header;
- struct report_log_lun_extended_entry lun_entries[1];
+ struct report_log_lun lun_entries[1];
};
-struct report_phys_lun_extended_entry {
+struct report_phys_lun_8byte_wwid {
u8 lunid[8];
__be64 wwid;
u8 device_type;
@@ -862,12 +967,27 @@ struct report_phys_lun_extended_entry {
u32 aio_handle;
};
+struct report_phys_lun_16byte_wwid {
+ u8 lunid[8];
+ u8 wwid[16];
+ u8 device_type;
+ u8 device_flags;
+ u8 lun_count; /* number of LUNs in a multi-LUN device */
+ u8 redundant_paths;
+ u32 aio_handle;
+};
+
/* for device_flags field of struct report_phys_lun_8byte_wwid and report_phys_lun_16byte_wwid */
#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8
-struct report_phys_lun_extended {
+struct report_phys_lun_8byte_wwid_list {
+ struct report_lun_header header;
+ struct report_phys_lun_8byte_wwid lun_entries[1];
+};
+
+struct report_phys_lun_16byte_wwid_list {
struct report_lun_header header;
- struct report_phys_lun_extended_entry lun_entries[1];
+ struct report_phys_lun_16byte_wwid lun_entries[1];
};
struct raid_map_disk_data {
@@ -906,10 +1026,69 @@ struct raid_map {
#pragma pack()
+struct pqi_scsi_dev_raid_map_data {
+ bool is_write;
+ u8 raid_level;
+ u32 map_index;
+ u64 first_block;
+ u64 last_block;
+ u32 data_length;
+ u32 block_cnt;
+ u32 blocks_per_row;
+ u64 first_row;
+ u64 last_row;
+ u32 first_row_offset;
+ u32 last_row_offset;
+ u32 first_column;
+ u32 last_column;
+ u64 r5or6_first_row;
+ u64 r5or6_last_row;
+ u32 r5or6_first_row_offset;
+ u32 r5or6_last_row_offset;
+ u32 r5or6_first_column;
+ u32 r5or6_last_column;
+ u16 data_disks_per_row;
+ u32 total_disks_per_row;
+ u16 layout_map_count;
+ u32 stripesize;
+ u16 strip_size;
+ u32 first_group;
+ u32 last_group;
+ u32 map_row;
+ u32 aio_handle;
+ u64 disk_block;
+ u32 disk_block_cnt;
+ u8 cdb[16];
+ u8 cdb_length;
+
+ /* RAID 1 specific */
+#define NUM_RAID1_MAP_ENTRIES 3
+ u32 num_it_nexus_entries;
+ u32 it_nexus[NUM_RAID1_MAP_ENTRIES];
+
+ /* RAID 5 / RAID 6 specific */
+ u32 p_parity_it_nexus; /* aio_handle */
+ u32 q_parity_it_nexus; /* aio_handle */
+ u8 xor_mult;
+ u64 row;
+ u64 stripe_lba;
+ u32 p_index;
+ u32 q_index;
+};
+
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+#define NUM_STREAMS_PER_LUN 8
+
+struct pqi_stream_data {
+ u64 next_lba;
+ u32 last_accessed;
+};
+
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
struct pqi_scsi_dev {
- int devtype; /* as reported by INQUIRY commmand */
+ int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
/* only valid for devtype = TYPE_DISK */
@@ -917,7 +1096,7 @@ struct pqi_scsi_dev {
int target;
int lun;
u8 scsi3addr[8];
- __be64 wwid;
+ u8 wwid[16];
u8 volume_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
@@ -927,8 +1106,9 @@ struct pqi_scsi_dev {
u8 new_device : 1;
u8 keep_device : 1;
u8 volume_offline : 1;
+ u8 rescan : 1;
+ u8 ignore_device : 1;
bool aio_enabled; /* only valid for physical disks */
- bool in_reset;
bool in_remove;
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
@@ -947,11 +1127,15 @@ struct pqi_scsi_dev {
u8 phy_connected_dev_type;
u8 box[8];
u16 phys_connector[8];
+ u8 phy_id;
+ u8 ncq_prio_enable;
+ u8 ncq_prio_support;
+ u8 multi_lun_device_lun_count;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
- int offload_to_mirror; /* Send next RAID bypass request */
- /* to mirror drive. */
+ u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
struct raid_map *raid_map; /* RAID bypass map */
+ u32 max_transfer_encrypted;
struct pqi_sas_port *sas_port;
struct scsi_device *sdev;
@@ -961,7 +1145,9 @@ struct pqi_scsi_dev {
struct list_head add_list_entry;
struct list_head delete_list_entry;
- atomic_t scsi_cmds_outstanding;
+ struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
+ atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
+ atomic_t raid_bypass_cnt;
};
/* VPD inquiry pages */
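
The NUM_STREAMS_PER_LUN pqi_stream_data slots added to pqi_scsi_dev carry just enough state for sequential-write detection: a write starting at a tracked stream's next_lba continues that stream, anything else recycles the least recently used slot. A simplified standalone model of that logic (assumed; the driver itself uses jiffies-based timestamps with wrap-safe comparisons):

#include <stdbool.h>
#include <stdint.h>

#define NUM_STREAMS 8

struct stream {
	uint64_t next_lba;
	uint32_t last_accessed;
};

static bool is_sequential_write(struct stream *s, uint64_t lba,
				uint32_t nblocks, uint32_t now)
{
	unsigned int i, lru = 0;

	for (i = 0; i < NUM_STREAMS; i++) {
		if (s[i].next_lba == lba) {	/* continues a known stream */
			s[i].next_lba = lba + nblocks;
			s[i].last_accessed = now;
			return true;
		}
		if (s[i].last_accessed < s[lru].last_accessed)
			lru = i;
	}
	/* no match: start tracking a new stream in the LRU slot */
	s[lru].next_lba = lba + nblocks;
	s[lru].last_accessed = now;
	return false;
}
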
@@ -1067,10 +1253,8 @@ struct pqi_io_request {
struct pqi_event {
bool pending;
u8 event_type;
- __le16 event_id;
- __le32 additional_event_id;
- __le32 ofa_bytes_requested;
- __le16 ofa_cancel_reason;
+ u16 event_id;
+ u32 additional_event_id;
};
#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1
@@ -1080,13 +1264,26 @@ struct pqi_event {
(PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
+#define PQI_CTRL_PRODUCT_ID_GEN1 0
+#define PQI_CTRL_PRODUCT_ID_GEN2 7
+#define PQI_CTRL_PRODUCT_REVISION_A 0
+#define PQI_CTRL_PRODUCT_REVISION_B 1
+
+enum pqi_ctrl_removal_state {
+ PQI_CTRL_PRESENT = 0,
+ PQI_CTRL_GRACEFUL_REMOVAL,
+ PQI_CTRL_SURPRISE_REMOVAL
+};
+
struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
- char firmware_version[11];
+ char firmware_version[32];
char serial_number[17];
char model[17];
char vendor[9];
+ u8 product_id;
+ u8 product_revision;
void __iomem *iomem_base;
struct pqi_ctrl_registers __iomem *registers;
struct pqi_device_registers __iomem *pqi_registers;
@@ -1116,6 +1313,7 @@ struct pqi_ctrl_info {
u16 max_inbound_iu_length_per_firmware;
u16 max_inbound_iu_length;
unsigned int max_sg_per_iu;
+ unsigned int max_sg_per_r56_iu;
void *admin_queue_memory_base;
u32 admin_queue_memory_length;
dma_addr_t admin_queue_memory_base_dma_handle;
@@ -1134,19 +1332,32 @@ struct pqi_ctrl_info {
struct mutex scan_mutex;
struct mutex lun_reset_mutex;
- struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool block_device_reset;
- bool in_ofa;
- bool in_shutdown;
+ bool scan_blocked;
+ u8 logical_volume_rescan_needed : 1;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
u8 soft_reset_handshake_supported : 1;
- u8 raid_iu_timeout_supported: 1;
- u8 tmf_iu_timeout_supported: 1;
+ u8 raid_iu_timeout_supported : 1;
+ u8 tmf_iu_timeout_supported : 1;
+ u8 firmware_triage_supported : 1;
+ u8 rpl_extended_format_4_5_supported : 1;
+ u8 multi_lun_device_supported : 1;
+ u8 enable_r1_writes : 1;
+ u8 enable_r5_writes : 1;
+ u8 enable_r6_writes : 1;
+ u8 lv_drive_type_mix_valid : 1;
+ u8 enable_stream_detection : 1;
+ u8 disable_managed_interrupts : 1;
+ u8 ciss_report_log_flags;
+ u32 max_transfer_encrypted_sas_sata;
+ u32 max_transfer_encrypted_nvme;
+ u32 max_write_raid_5_6;
+ u32 max_write_raid_1_10_2drive;
+ u32 max_write_raid_1_10_3drive;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1176,14 +1387,15 @@ struct pqi_ctrl_info {
atomic_t num_blocked_threads;
wait_queue_head_t block_requests_wait;
- struct list_head raid_bypass_retry_list;
- spinlock_t raid_bypass_retry_list_lock;
- struct work_struct raid_bypass_retry_work;
-
+ struct mutex ofa_mutex;
struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
dma_addr_t pqi_ofa_mem_dma_handle;
void **pqi_ofa_chunk_virt_addr;
- atomic_t sync_cmds_outstanding;
+ struct work_struct ofa_memory_alloc_work;
+ struct work_struct ofa_quiesce_work;
+ u32 ofa_bytes_requested;
+ u16 ofa_cancel_reason;
+ enum pqi_ctrl_removal_state ctrl_removal_state;
};
enum pqi_ctrl_mode {
@@ -1207,6 +1419,7 @@ enum pqi_ctrl_mode {
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
+#define BMIC_SENSE_FEATURE 0x61
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
#define BMIC_CSMI_PASSTHRU 0x68
@@ -1226,6 +1439,19 @@ enum pqi_ctrl_mode {
(((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
CISS_GET_LEVEL_2_TARGET((lunid)))
+#define LV_GET_DRIVE_TYPE_MIX(lunid) ((lunid)[6])
+
+#define LV_DRIVE_TYPE_MIX_UNKNOWN 0
+#define LV_DRIVE_TYPE_MIX_NO_RESTRICTION 1
+#define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY 2
+#define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY 3
+#define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY 4
+#define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY 5
+#define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY 6
+#define LV_DRIVE_TYPE_MIX_SAS_ONLY 7
+#define LV_DRIVE_TYPE_MIX_SATA_ONLY 8
+#define LV_DRIVE_TYPE_MIX_NVME_ONLY 9
+
#define NO_TIMEOUT ((unsigned long) -1)
#pragma pack(1)
@@ -1233,7 +1459,7 @@ enum pqi_ctrl_mode {
struct bmic_identify_controller {
u8 configured_logical_drive_count;
__le32 configuration_signature;
- u8 firmware_version[4];
+ u8 firmware_version_short[4];
u8 reserved[145];
__le16 extended_logical_unit_count;
u8 reserved1[34];
@@ -1241,11 +1467,17 @@ struct bmic_identify_controller {
u8 reserved2[8];
u8 vendor_id[8];
u8 product_id[16];
- u8 reserved3[68];
+ u8 reserved3[62];
+ __le32 extra_controller_flags;
+ u8 reserved4[2];
u8 controller_mode;
- u8 reserved4[32];
+ u8 spare_part_number[32];
+ u8 firmware_version_long[32];
};
+/* constants for extra_controller_flags field of bmic_identify_controller */
+#define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED 0x20000000
+
struct bmic_sense_subsystem_info {
u8 reserved[44];
u8 ctrl_serial_number[16];
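
A plausible use of the new extra_controller_flags field (assumed helper, mirroring the short/long firmware_version split introduced above): when BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED is set, the 32-byte firmware_version_long is authoritative, which is why pqi_ctrl_info.firmware_version grows from 11 to 32 bytes later in this patch:

#include <stdint.h>

#define LONG_FW_VERSION_SUPPORTED 0x20000000u

static const char *pick_firmware_version(uint32_t extra_controller_flags,
					 const char *short_ver,
					 const char *long_ver)
{
	return (extra_controller_flags & LONG_FW_VERSION_SUPPORTED) ?
	       long_ver : short_ver;
}
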
@@ -1255,6 +1487,7 @@ struct bmic_sense_subsystem_info {
#define SA_DEVICE_TYPE_SATA 0x1
#define SA_DEVICE_TYPE_SAS 0x2
#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_SES 0x6
#define SA_DEVICE_TYPE_CONTROLLER 0x7
#define SA_DEVICE_TYPE_NVME 0x9
@@ -1338,6 +1571,34 @@ struct bmic_identify_physical_device {
u8 padding_to_multiple_of_512[9];
};
+#define BMIC_SENSE_FEATURE_IO_PAGE 0x8
+#define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE 0x2
+
+struct bmic_sense_feature_buffer_header {
+ u8 page_code;
+ u8 subpage_code;
+ __le16 buffer_length;
+};
+
+struct bmic_sense_feature_page_header {
+ u8 page_code;
+ u8 subpage_code;
+ __le16 page_length;
+};
+
+struct bmic_sense_feature_io_page_aio_subpage {
+ struct bmic_sense_feature_page_header header;
+ u8 firmware_read_support;
+ u8 driver_read_support;
+ u8 firmware_write_support;
+ u8 driver_write_support;
+ __le16 max_transfer_encrypted_sas_sata;
+ __le16 max_transfer_encrypted_nvme;
+ __le16 max_write_raid_5_6;
+ __le16 max_write_raid_1_10_2drive;
+ __le16 max_write_raid_1_10_3drive;
+};
+
struct bmic_smp_request {
u8 frame_type;
u8 function;
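
The __le16 limits in the AIO subpage above appear to follow the convention implied by the PQI_DEFAULT_* values earlier (~0U for unlimited, 32 * 1024U for a 32 KiB cap): zero means no limit and any other value is in KiB. A sketch of that decode, under that assumption:

#include <stdint.h>

static uint32_t aio_limit_to_bytes(uint16_t limit)
{
	return limit == 0 ? ~0u : (uint32_t)limit * 1024;
}
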
@@ -1405,16 +1666,6 @@ struct bmic_diag_options {
#pragma pack()
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
- atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
- atomic_dec(&ctrl_info->num_busy_threads);
-}
-
static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
void *hostdata = shost_priv(shost);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b7492568e02f..b971fbe3b3a1 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,28 +33,40 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.10-025"
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 10
-#define DRIVER_REVISION 25
+#define DRIVER_VERSION "2.1.18-045"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_RELEASE 18
+#define DRIVER_REVISION 45
-#define DRIVER_NAME "Microsemi PQI Driver (v" \
+#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"
#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
-MODULE_AUTHOR("Microsemi");
-MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+#define PQI_POST_RESET_DELAY_SECS 5
+#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
+
+MODULE_AUTHOR("Microchip");
+MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
-MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+struct pqi_cmd_priv {
+ int this_residual;
+};
+
+static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static void pqi_verify_structures(void);
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
@@ -62,20 +74,28 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+ struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass);
+ struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd);
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
- u32 bytes_requested);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_secs);
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -148,14 +168,24 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
pqi_expose_ld_first, int, 0644);
-MODULE_PARM_DESC(expose_ld_first,
- "Expose logical drives before physical drives.");
+MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
static int pqi_hide_vsep;
module_param_named(hide_vsep,
pqi_hide_vsep, int, 0644);
-MODULE_PARM_DESC(hide_vsep,
- "Hide the virtual SEP for direct attached drives.");
+MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
+
+static int pqi_disable_managed_interrupts;
+module_param_named(disable_managed_interrupts,
+ pqi_disable_managed_interrupts, int, 0644);
+MODULE_PARM_DESC(disable_managed_interrupts,
+ "Disable the kernel automatically assigning SMP affinity to IRQs.");
+
+static unsigned int pqi_ctrl_ready_timeout_secs;
+module_param_named(ctrl_ready_timeout,
+ pqi_ctrl_ready_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(ctrl_ready_timeout,
+ "Timeout in seconds for driver to wait for controller ready.");
static char *raid_levels[] = {
"RAID-0",
@@ -163,8 +193,8 @@ static char *raid_levels[] = {
"RAID-1(1+0)",
"RAID-5",
"RAID-5+1",
- "RAID-ADG",
- "RAID-1(ADM)",
+ "RAID-6",
+ "RAID-1(Triple)",
};
static char *pqi_raid_level_to_string(u8 raid_level)
@@ -181,14 +211,14 @@ static char *pqi_raid_level_to_string(u8 raid_level)
#define SA_RAID_5 3 /* also used for RAID 50 */
#define SA_RAID_51 4
#define SA_RAID_6 5 /* also used for RAID 60 */
-#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
-#define SA_RAID_MAX SA_RAID_ADM
+#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
+#define SA_RAID_MAX SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN 0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
pqi_prep_for_scsi_done(scmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static inline void pqi_disable_write_same(struct scsi_device *sdev)
@@ -220,7 +250,7 @@ static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
if (!sis_is_firmware_running(ctrl_info))
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}
static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
@@ -228,26 +258,108 @@ static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
-static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
- struct pqi_ctrl_info *ctrl_info)
+#define PQI_DRIVER_SCRATCH_PQI_MODE 0x1
+#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2
+
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
- return sis_read_driver_scratch(ctrl_info);
+ return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}
static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_mode mode)
{
- sis_write_driver_scratch(ctrl_info, mode);
+ u32 driver_scratch;
+
+ driver_scratch = sis_read_driver_scratch(ctrl_info);
+
+ if (mode == PQI_MODE)
+ driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
+ else
+ driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
+
+ sis_write_driver_scratch(ctrl_info, driver_scratch);
+}
+
+static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
+{
+ return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
+}
+
+static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
+{
+ u32 driver_scratch;
+
+ driver_scratch = sis_read_driver_scratch(ctrl_info);
+
+ if (is_supported)
+ driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
+ else
+ driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
+
+ sis_write_driver_scratch(ctrl_info, driver_scratch);
+}
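/*
 * A minimal standalone sketch of the read-modify-write pattern the
 * PQI_DRIVER_SCRATCH_* helpers above use so two flags can share one
 * scratch register; the plain variable below is a stand-in for
 * sis_read_driver_scratch()/sis_write_driver_scratch(), not the
 * driver's API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCRATCH_PQI_MODE		0x1u
#define SCRATCH_FW_TRIAGE_SUPPORTED	0x2u

static uint32_t scratch_reg;	/* stand-in for the controller register */

static void set_scratch_flag(uint32_t flag, bool on)
{
	uint32_t v = scratch_reg;	/* read */

	if (on)
		v |= flag;		/* set only this flag's bit */
	else
		v &= ~flag;		/* clear only this flag's bit */

	scratch_reg = v;		/* write back */
}

int main(void)
{
	set_scratch_flag(SCRATCH_PQI_MODE, true);
	set_scratch_flag(SCRATCH_FW_TRIAGE_SUPPORTED, true);
	set_scratch_flag(SCRATCH_PQI_MODE, false);
	printf("scratch = 0x%x\n", scratch_reg);	/* 0x2: triage bit survives */
	return 0;
}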
+
+static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->scan_blocked = true;
+ mutex_lock(&ctrl_info->scan_mutex);
+}
+
+static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->scan_blocked = false;
+ mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->scan_blocked;
}
static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->block_device_reset = true;
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
}
-static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
- return ctrl_info->block_device_reset;
+ struct Scsi_Host *shost;
+ unsigned int num_loops;
+ int msecs_sleep;
+
+ shost = ctrl_info->scsi_host;
+
+ scsi_block_requests(shost);
+
+ num_loops = 0;
+ msecs_sleep = 20;
+ while (scsi_host_busy(shost)) {
+ num_loops++;
+ if (num_loops == 10)
+ msecs_sleep = 500;
+ msleep(msecs_sleep);
+ }
+}
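/*
 * A standalone sketch of the escalating poll that
 * pqi_scsi_block_requests() performs above: poll quickly at first,
 * then back off to a longer sleep for long drains. host_busy() below
 * is a dummy stand-in for scsi_host_busy().
 */
#include <stdbool.h>
#include <unistd.h>

static bool host_busy(void)
{
	static int remaining = 25;	/* pretend 25 polls until idle */

	return remaining-- > 0;
}

static void wait_until_idle(void)
{
	unsigned int num_loops = 0;
	unsigned int msecs_sleep = 20;	/* short sleeps first */

	while (host_busy()) {
		if (++num_loops == 10)
			msecs_sleep = 500;	/* back off after 10 polls */
		usleep(msecs_sleep * 1000);
	}
}

int main(void)
{
	wait_until_idle();
	return 0;
}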
+
+static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ scsi_unblock_requests(ctrl_info->scsi_host);
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_dec(&ctrl_info->num_busy_threads);
}
static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
@@ -258,51 +370,53 @@ static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
- scsi_block_requests(ctrl_info->scsi_host);
}
static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = false;
wake_up_all(&ctrl_info->block_requests_wait);
- pqi_retry_raid_bypass_requests(ctrl_info);
- scsi_unblock_requests(ctrl_info->scsi_host);
}
-static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
- unsigned long timeout_msecs)
+static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
- unsigned long remaining_msecs;
-
if (!pqi_ctrl_blocked(ctrl_info))
- return timeout_msecs;
+ return;
atomic_inc(&ctrl_info->num_blocked_threads);
-
- if (timeout_msecs == NO_TIMEOUT) {
- wait_event(ctrl_info->block_requests_wait,
- !pqi_ctrl_blocked(ctrl_info));
- remaining_msecs = timeout_msecs;
- } else {
- unsigned long remaining_jiffies;
-
- remaining_jiffies =
- wait_event_timeout(ctrl_info->block_requests_wait,
- !pqi_ctrl_blocked(ctrl_info),
- msecs_to_jiffies(timeout_msecs));
- remaining_msecs = jiffies_to_msecs(remaining_jiffies);
- }
-
+ wait_event(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info));
atomic_dec(&ctrl_info->num_blocked_threads);
-
- return remaining_msecs;
}
+#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
+
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ bool displayed_warning;
+
+ displayed_warning = false;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
+
while (atomic_read(&ctrl_info->num_busy_threads) >
- atomic_read(&ctrl_info->num_blocked_threads))
+ atomic_read(&ctrl_info->num_blocked_threads)) {
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "waiting %u seconds for driver activity to quiesce\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+ displayed_warning = true;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
+ }
usleep_range(1000, 2000);
+ }
+
+ if (displayed_warning)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "driver activity quiesced after waiting for %u seconds\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
@@ -310,34 +424,25 @@ static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
return device->device_offline;
}
-static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
-{
- device->in_reset = true;
-}
-
-static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
-{
- device->in_reset = false;
-}
-
-static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
- return device->in_reset;
+ mutex_lock(&ctrl_info->ofa_mutex);
}
-static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->in_ofa = true;
+ mutex_unlock(&ctrl_info->ofa_mutex);
}
-static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->in_ofa = false;
+ mutex_lock(&ctrl_info->ofa_mutex);
+ mutex_unlock(&ctrl_info->ofa_mutex);
}
-static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
- return ctrl_info->in_ofa;
+ return mutex_is_locked(&ctrl_info->ofa_mutex);
}
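/*
 * A pthread sketch of the mutex-as-barrier idiom the OFA helpers above
 * switch to: the worker holds ofa_mutex for the whole operation, and a
 * waiter "waits until finished" by locking and immediately unlocking.
 * The names are illustrative, not the driver's.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t ofa_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *ofa_worker(void *arg)
{
	pthread_mutex_lock(&ofa_mutex);		/* "OFA start" */
	sleep(1);				/* the long-running update */
	pthread_mutex_unlock(&ofa_mutex);	/* "OFA done" */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, ofa_worker, NULL);
	usleep(100 * 1000);	/* best effort: let the worker lock first */

	/* wait until OFA finishes: the lock blocks, then release at once */
	pthread_mutex_lock(&ofa_mutex);
	pthread_mutex_unlock(&ofa_mutex);
	puts("OFA finished");

	pthread_join(t, NULL);
	return 0;
}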
static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
@@ -345,29 +450,32 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
device->in_remove = true;
}
-static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
- return device->in_remove && !ctrl_info->in_shutdown;
+ return device->in_remove;
}
-static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
- ctrl_info->in_shutdown = true;
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+ if (event_type == pqi_supported_event_types[index])
+ return index;
+
+ return -1;
}
-static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_is_supported_event(unsigned int event_type)
{
- return ctrl_info->in_shutdown;
+ return pqi_event_type_to_event_index(event_type) != -1;
}
-static inline void pqi_schedule_rescan_worker_with_delay(
- struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
+ unsigned long delay)
{
if (pqi_ctrl_offline(ctrl_info))
return;
- if (pqi_ctrl_in_ofa(ctrl_info))
- return;
schedule_delayed_work(&ctrl_info->rescan_work, delay);
}
@@ -377,10 +485,9 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}
-#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
+#define PQI_RESCAN_WORK_DELAY (10 * HZ)
-static inline void pqi_schedule_rescan_worker_delayed(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}
@@ -390,11 +497,6 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
-static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
-{
- cancel_work_sync(&ctrl_info->event_work);
-}
-
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -405,22 +507,15 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
- if (!ctrl_info->soft_reset_status)
- return 0;
-
return readb(ctrl_info->soft_reset_status);
}
-static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
u8 status;
- if (!ctrl_info->soft_reset_status)
- return;
-
status = pqi_read_soft_reset_status(ctrl_info);
- status &= ~clear;
+ status &= ~PQI_SOFT_RESET_ABORT;
writeb(status, ctrl_info->soft_reset_status);
}
@@ -483,6 +578,10 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb = request->cdb;
switch (cmd) {
+ case TEST_UNIT_READY:
+ request->data_direction = SOP_READ_FLAG;
+ cdb[0] = TEST_UNIT_READY;
+ break;
case INQUIRY:
request->data_direction = SOP_READ_FLAG;
cdb[0] = INQUIRY;
@@ -496,10 +595,14 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
case CISS_REPORT_PHYS:
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
- if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
- else
- cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+ if (cmd == CISS_REPORT_PHYS) {
+ if (ctrl_info->rpl_extended_format_4_5_supported)
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
+ else
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
+ } else {
+ cdb[1] = ctrl_info->ciss_report_log_flags;
+ }
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -509,6 +612,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case SA_FLUSH_CACHE:
+ request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
cdb[6] = BMIC_FLUSH_CACHE;
@@ -516,10 +620,11 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
break;
case BMIC_SENSE_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
case BMIC_SENSE_SUBSYSTEM_INFORMATION:
+ case BMIC_SENSE_FEATURE:
request->data_direction = SOP_READ_FLAG;
cdb[0] = BMIC_READ;
cdb[6] = cmd;
@@ -527,7 +632,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
break;
case BMIC_SET_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_WRITE_HOST_WELLNESS:
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
@@ -542,8 +647,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be16(cdb_length, &cdb[7]);
break;
default:
- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
- cmd);
+		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
break;
}
@@ -603,20 +707,18 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+ struct pqi_raid_error_info *error_info)
{
int rc;
struct pqi_raid_path_request request;
enum dma_data_direction dir;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, scsi3addr, buffer,
- buffer_length, vpd_page, &dir);
+ rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
+ buffer, buffer_length, vpd_page, &dir);
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
@@ -629,7 +731,7 @@ static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
@@ -637,7 +739,7 @@ static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info);
}
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
@@ -659,7 +761,7 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
- buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+ buffer, buffer_length, vpd_page, NULL);
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
@@ -681,27 +783,116 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
request.cdb[2] = (u8)bmic_device_index;
request.cdb[9] = (u8)(bmic_device_index >> 8);
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
+static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
+{
+ u32 bytes;
+
+ bytes = get_unaligned_le16(limit);
+ if (bytes == 0)
+ bytes = ~0;
+ else
+ bytes *= 1024;
+
+ return bytes;
+}
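/*
 * A standalone sketch of the BMIC limit decoding done by
 * pqi_aio_limit_to_bytes() above: a zero field encodes "no limit"
 * (all-ones), and any other value is a count of KiB.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t limit_to_bytes(uint16_t limit_kib)
{
	if (limit_kib == 0)
		return ~0u;			/* 0 means unlimited */

	return (uint32_t)limit_kib * 1024;	/* field is in KiB */
}

int main(void)
{
	printf("%u\n", limit_to_bytes(0));	/* 4294967295 */
	printf("%u\n", limit_to_bytes(256));	/* 262144 */
	return 0;
}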
+
+#pragma pack(1)
+
+struct bmic_sense_feature_buffer {
+ struct bmic_sense_feature_buffer_header header;
+ struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
+};
+
+#pragma pack()
+
+#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
+ offsetofend(struct bmic_sense_feature_buffer, \
+ aio_subpage.max_write_raid_1_10_3drive)
+
+#define MINIMUM_AIO_SUBPAGE_LENGTH \
+ (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
+ max_write_raid_1_10_3drive) - \
+ sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
+
+static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ enum dma_data_direction dir;
+ struct pqi_raid_path_request request;
+ struct bmic_sense_feature_buffer *buffer;
+
+ buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
+ buffer, sizeof(*buffer), 0, &dir);
+ if (rc)
+ goto error;
+
+ request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
+ request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
+ if (rc)
+ goto error;
+
+ if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
+ buffer->header.subpage_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+ get_unaligned_le16(&buffer->header.buffer_length) <
+ MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
+ buffer->aio_subpage.header.page_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE ||
+ buffer->aio_subpage.header.subpage_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+ get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
+ MINIMUM_AIO_SUBPAGE_LENGTH) {
+ goto error;
+ }
+
+ ctrl_info->max_transfer_encrypted_sas_sata =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
+
+ ctrl_info->max_transfer_encrypted_nvme =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_transfer_encrypted_nvme);
+
+ ctrl_info->max_write_raid_5_6 =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_5_6);
+
+ ctrl_info->max_write_raid_1_10_2drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_2drive);
+
+ ctrl_info->max_write_raid_1_10_3drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_3drive);
+
+error:
+ kfree(buffer);
+
+ return rc;
+}
+
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
enum bmic_flush_cache_shutdown_event shutdown_event)
{
int rc;
struct bmic_flush_cache *flush_cache;
- /*
- * Don't bother trying to flush the cache if the controller is
- * locked up.
- */
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
-
flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
if (!flush_cache)
return -ENOMEM;
@@ -870,7 +1061,7 @@ static int pqi_write_current_time_to_host_wellness(
return rc;
}
-#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
static void pqi_update_time_worker(struct work_struct *work)
{
@@ -880,9 +1071,6 @@ static void pqi_update_time_worker(struct work_struct *work)
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
update_time_work);
- if (pqi_ctrl_offline(ctrl_info))
- return;
-
rc = pqi_write_current_time_to_host_wellness(ctrl_info);
if (rc)
dev_warn(&ctrl_info->pci_dev->dev,
@@ -892,27 +1080,23 @@ static void pqi_update_time_worker(struct work_struct *work)
PQI_UPDATE_TIME_WORK_INTERVAL);
}
-static inline void pqi_schedule_update_time_worker(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
-static inline void pqi_cancel_update_time_worker(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
-static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- void *buffer, size_t buffer_length)
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
+ size_t buffer_length)
{
- return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}
-static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- void **buffer)
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
int rc;
size_t lun_list_length;
@@ -927,8 +1111,7 @@ static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
goto out;
}
- rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
- sizeof(*report_lun_header));
+ rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
if (rc)
goto out;
@@ -952,8 +1135,8 @@ again:
if (rc)
goto out;
- new_lun_list_length = get_unaligned_be32(
- &((struct report_lun_header *)lun_data)->list_length);
+ new_lun_list_length =
+ get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
if (new_lun_list_length > lun_list_length) {
lun_list_length = new_lun_list_length;
@@ -974,28 +1157,82 @@ out:
return rc;
}
-static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
- void **buffer)
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
- return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
- buffer);
+ int rc;
+ unsigned int i;
+ u8 rpl_response_format;
+ u32 num_physicals;
+ size_t rpl_16byte_wwid_list_length;
+ void *rpl_list;
+ struct report_lun_header *rpl_header;
+ struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
+ struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
+
+ rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
+ if (rc)
+ return rc;
+
+ if (ctrl_info->rpl_extended_format_4_5_supported) {
+ rpl_header = rpl_list;
+ rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
+ if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
+ *buffer = rpl_list;
+ return 0;
+ } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "RPL returned unsupported data format %u\n",
+ rpl_response_format);
+ return -EINVAL;
+ } else {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "RPL returned extended format 2 instead of 4\n");
+ }
+ }
+
+ rpl_8byte_wwid_list = rpl_list;
+	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) /
+		sizeof(rpl_8byte_wwid_list->lun_entries[0]);
+	rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) +
+		(num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
+
+ rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
+ if (!rpl_16byte_wwid_list)
+ return -ENOMEM;
+
+ put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
+ &rpl_16byte_wwid_list->header.list_length);
+ rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
+
+ for (i = 0; i < num_physicals; i++) {
+		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid,
+			&rpl_8byte_wwid_list->lun_entries[i].lunid,
+			sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
+		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0],
+			&rpl_8byte_wwid_list->lun_entries[i].wwid,
+			sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
+ rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
+ rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
+ rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
+ rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
+ rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
+ }
+
+ kfree(rpl_8byte_wwid_list);
+ *buffer = rpl_16byte_wwid_list;
+
+ return 0;
}
-static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
- void **buffer)
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
- struct report_phys_lun_extended **physdev_list,
- struct report_log_lun_extended **logdev_list)
+ struct report_phys_lun_16byte_wwid_list **physdev_list,
+ struct report_log_lun_list **logdev_list)
{
int rc;
size_t logdev_list_length;
size_t logdev_data_length;
- struct report_log_lun_extended *internal_logdev_list;
- struct report_log_lun_extended *logdev_data;
+ struct report_log_lun_list *internal_logdev_list;
+ struct report_log_lun_list *logdev_data;
struct report_lun_header report_lun_header;
rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
@@ -1020,7 +1257,7 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
} else {
memset(&report_lun_header, 0, sizeof(report_lun_header));
logdev_data =
- (struct report_log_lun_extended *)&report_lun_header;
+ (struct report_log_lun_list *)&report_lun_header;
logdev_list_length = 0;
}
@@ -1028,7 +1265,7 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
logdev_list_length;
internal_logdev_list = kmalloc(logdev_data_length +
- sizeof(struct report_log_lun_extended), GFP_KERNEL);
+ sizeof(struct report_log_lun), GFP_KERNEL);
if (!internal_logdev_list) {
kfree(*logdev_list);
*logdev_list = NULL;
@@ -1037,9 +1274,9 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
memcpy(internal_logdev_list, logdev_data, logdev_data_length);
memset((u8 *)internal_logdev_list + logdev_data_length, 0,
- sizeof(struct report_log_lun_extended_entry));
+ sizeof(struct report_log_lun));
put_unaligned_be32(logdev_list_length +
- sizeof(struct report_log_lun_extended_entry),
+ sizeof(struct report_log_lun),
&internal_logdev_list->header.list_length);
kfree(*logdev_list);
@@ -1139,9 +1376,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
err_msg = "invalid RAID-1 map";
goto bad_raid_map;
}
- } else if (device->raid_level == SA_RAID_ADM) {
+ } else if (device->raid_level == SA_RAID_TRIPLE) {
if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
- err_msg = "invalid RAID-1(ADM) map";
+ err_msg = "invalid RAID-1(Triple) map";
goto bad_raid_map;
}
} else if ((device->raid_level == SA_RAID_5 ||
@@ -1180,9 +1417,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
return -ENOMEM;
rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
- device->scsi3addr, raid_map, sizeof(*raid_map),
- 0, NULL, NO_TIMEOUT);
-
+ device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
if (rc)
goto error;
@@ -1197,17 +1432,17 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
return -ENOMEM;
rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
- device->scsi3addr, raid_map, raid_map_size,
- 0, NULL, NO_TIMEOUT);
+ device->scsi3addr, raid_map, raid_map_size, 0, NULL);
if (rc)
goto error;
if (get_unaligned_le32(&raid_map->structure_size)
!= raid_map_size) {
dev_warn(&ctrl_info->pci_dev->dev,
- "Requested %d bytes, received %d bytes",
+ "requested %u bytes, received %u bytes\n",
raid_map_size,
get_unaligned_le32(&raid_map->structure_size));
+ rc = -EINVAL;
goto error;
}
}
@@ -1226,6 +1461,39 @@ error:
return rc;
}
+static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ if (!ctrl_info->lv_drive_type_mix_valid) {
+ device->max_transfer_encrypted = ~0;
+ return;
+ }
+
+ switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
+ case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_ONLY:
+ device->max_transfer_encrypted =
+ ctrl_info->max_transfer_encrypted_sas_sata;
+ break;
+ case LV_DRIVE_TYPE_MIX_NVME_ONLY:
+ device->max_transfer_encrypted =
+ ctrl_info->max_transfer_encrypted_nvme;
+ break;
+ case LV_DRIVE_TYPE_MIX_UNKNOWN:
+ case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
+ default:
+ device->max_transfer_encrypted =
+ min(ctrl_info->max_transfer_encrypted_sas_sata,
+ ctrl_info->max_transfer_encrypted_nvme);
+ break;
+ }
+}
+
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
@@ -1251,8 +1519,12 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
if (device->raid_bypass_configured &&
(bypass_status & RAID_BYPASS_ENABLED) &&
- pqi_get_raid_map(ctrl_info, device) == 0)
+ pqi_get_raid_map(ctrl_info, device) == 0) {
device->raid_bypass_enabled = true;
+ if (get_unaligned_le16(&device->raid_map->flags) &
+ RAID_MAP_ENCRYPTION_ENABLED)
+ pqi_set_max_transfer_encrypted(ctrl_info, device);
+ }
out:
kfree(buffer);
@@ -1300,33 +1572,75 @@ no_buffer:
device->volume_offline = volume_offline;
}
-#define PQI_INQUIRY_PAGE0_RETRIES 3
+#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
+#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+
+ memset(id_phys, 0, sizeof(*id_phys));
+
+ rc = pqi_identify_physical_device(ctrl_info, device,
+ id_phys, sizeof(*id_phys));
+ if (rc) {
+ device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ return rc;
+ }
+
+ scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
+ scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
+
+ memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
+ memcpy(device->model, &id_phys->model[8], sizeof(device->model));
+
+ device->box_index = id_phys->box_index;
+ device->phys_box_on_bus = id_phys->phys_box_on_bus;
+ device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
+ device->queue_depth =
+ get_unaligned_le16(&id_phys->current_queue_depth_limit);
+ device->active_path_index = id_phys->active_path_number;
+ device->path_map = id_phys->redundant_path_present_map;
+ memcpy(&device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(device->box));
+ memcpy(&device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+ device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+ if (!device->multi_lun_device_lun_count)
+ device->multi_lun_device_lun_count = 1;
+ if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+ id_phys->phy_count)
+ device->phy_id =
+ id_phys->phy_to_phy_map[device->active_path_index];
+ else
+ device->phy_id = 0xFF;
+
+ device->ncq_prio_support =
+ ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
+ PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+
+ return 0;
+}
+
+static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
- unsigned int retries;
-
- if (device->is_expander_smp_device)
- return 0;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- for (retries = 0;;) {
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
- buffer, 64);
- if (rc == 0)
- break;
- if (pqi_is_logical_device(device) ||
- rc != PQI_CMD_STATUS_ABORTED ||
- ++retries > PQI_INQUIRY_PAGE0_RETRIES)
- goto out;
- }
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+ if (rc)
+ goto out;
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -1335,7 +1649,7 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
memcpy(device->model, &buffer[16], sizeof(device->model));
- if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+ if (device->devtype == TYPE_DISK) {
if (device->is_external_raid_device) {
device->raid_level = SA_RAID_UNKNOWN;
device->volume_status = CISS_LV_OK;
@@ -1353,36 +1667,100 @@ out:
return rc;
}
-static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+/*
+ * Prevent adding a drive to the OS in some corner cases, such as a
+ * drive undergoing a sanitize operation. Some OSes will continue to
+ * poll the drive until the sanitize completes, which can take hours,
+ * resulting in long boot delays. Commands such as TUR and READ_CAP
+ * are allowed, but READ/WRITE cause a check condition, so the OS
+ * cannot check or read the partition table.
+ * Note: devices that have completed sanitize must be re-enabled
+ * using the management utility.
+ */
+static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ u8 scsi_status;
+ int rc;
+ enum dma_data_direction dir;
+ char *buffer;
+ int buffer_length = 64;
+ size_t sense_data_length;
+ struct scsi_sense_hdr sshdr;
+ struct pqi_raid_path_request request;
+ struct pqi_raid_error_info error_info;
+ bool offline = false; /* Assume keep online */
+
+ /* Do not check controllers. */
+ if (pqi_is_hba_lunid(device->scsi3addr))
+ return false;
+
+ /* Do not check LVs. */
+ if (pqi_is_logical_device(device))
+ return false;
+
+ buffer = kmalloc(buffer_length, GFP_KERNEL);
+ if (!buffer)
+ return false; /* Assume not offline */
+
+ /* Check for SANITIZE in progress using TUR */
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
+ buffer_length, 0, &dir);
+ if (rc)
+ goto out; /* Assume not offline */
+
+ memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
+
+ if (rc)
+ goto out; /* Assume not offline */
+
+ scsi_status = error_info.status;
+ sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
+ if (sense_data_length == 0)
+ sense_data_length =
+ get_unaligned_le16(&error_info.response_data_length);
+ if (sense_data_length) {
+ if (sense_data_length > sizeof(error_info.data))
+ sense_data_length = sizeof(error_info.data);
+
+ /*
+		 * Check for sanitize in progress: asc: 0x04, ascq: 0x1b
+ */
+ if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+ scsi_normalize_sense(error_info.data,
+ sense_data_length, &sshdr) &&
+ sshdr.sense_key == NOT_READY &&
+ sshdr.asc == 0x04 &&
+ sshdr.ascq == 0x1b) {
+ device->device_offline = true;
+ offline = true;
+ goto out; /* Keep device offline */
+ }
+ }
+
+out:
+ kfree(buffer);
+ return offline;
+}
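/*
 * A standalone sketch of the sense-data test pqi_keep_device_offline()
 * above relies on: TEST UNIT READY returning CHECK CONDITION with sense
 * key NOT READY (0x2), asc 0x04, ascq 0x1b ("sanitize in progress").
 * The hand-rolled fixed-format parse below stands in for
 * scsi_normalize_sense().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool sanitize_in_progress(const uint8_t *sense, size_t len)
{
	/* fixed format only: response code 0x70/0x71 */
	if (len < 14 || (sense[0] & 0x7e) != 0x70)
		return false;

	return (sense[2] & 0x0f) == 0x02 &&	/* NOT READY */
	       sense[12] == 0x04 &&		/* asc: LUN not ready */
	       sense[13] == 0x1b;		/* ascq: sanitize in progress */
}

int main(void)
{
	uint8_t sense[18] = { 0x70, 0, 0x02, [12] = 0x04, [13] = 0x1b };

	printf("%s\n", sanitize_in_progress(sense, sizeof(sense)) ?
	       "keep offline" : "expose");
	return 0;
}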
+
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
{
int rc;
- memset(id_phys, 0, sizeof(*id_phys));
+ if (device->is_expander_smp_device)
+ return 0;
- rc = pqi_identify_physical_device(ctrl_info, device,
- id_phys, sizeof(*id_phys));
- if (rc) {
- device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
- return;
- }
+ if (pqi_is_logical_device(device))
+ rc = pqi_get_logical_device_info(ctrl_info, device);
+ else
+ rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
- device->box_index = id_phys->box_index;
- device->phys_box_on_bus = id_phys->phys_box_on_bus;
- device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
- device->queue_depth =
- get_unaligned_le16(&id_phys->current_queue_depth_limit);
- device->device_type = id_phys->device_type;
- device->active_path_index = id_phys->active_path_number;
- device->path_map = id_phys->redundant_path_present_map;
- memcpy(&device->box,
- &id_phys->alternate_paths_phys_box_on_port,
- sizeof(device->box));
- memcpy(&device->phys_connector,
- &id_phys->alternate_paths_phys_connector,
- sizeof(device->phys_connector));
- device->bay = id_phys->phys_bay_in_box;
+ return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
@@ -1512,28 +1890,30 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
return rc;
}
-#define PQI_PENDING_IO_TIMEOUT_SECS 20
+#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
-static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
+ int lun;
- pqi_device_remove_start(device);
-
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_PENDING_IO_TIMEOUT_SECS);
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun,
- atomic_read(&device->scsi_cmds_outstanding));
+ for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, lun,
+ atomic_read(&device->scsi_cmds_outstanding[lun]));
+ }
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
else
pqi_remove_sas_device(device);
+
+ pqi_device_remove_start(device);
}
/* Assumes the SCSI device list lock is held. */
@@ -1543,26 +1923,22 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
- if (device->bus == bus && device->target == target &&
- device->lun == lun)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->bus == bus && device->target == target && device->lun == lun)
return device;
return NULL;
}
-static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
- struct pqi_scsi_dev *dev2)
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
if (dev1->is_physical_device != dev2->is_physical_device)
return false;
if (dev1->is_physical_device)
- return dev1->wwid == dev2->wwid;
+ return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
- return memcmp(dev1->volume_id, dev2->volume_id,
- sizeof(dev1->volume_id)) == 0;
+ return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
@@ -1572,15 +1948,12 @@ enum pqi_find_result {
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device_to_find,
- struct pqi_scsi_dev **matching_device)
+ struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
- device->scsi3addr)) {
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
*matching_device = device;
if (pqi_device_equal(device_to_find, device)) {
if (device_to_find->volume_offline)
@@ -1610,32 +1983,34 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
ssize_t count;
char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
- count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+ count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
if (device->target_lun_valid)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"%d:%d",
device->target,
device->lun);
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"-:-");
if (pqi_is_logical_device(device))
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %08x%08x",
*((u32 *)&device->scsi3addr),
*((u32 *)&device->scsi3addr[4]));
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
- " %016llx", device->sas_address);
+ " %016llx%016llx",
+ get_unaligned_be64(&device->wwid[0]),
+ get_unaligned_be64(&device->wwid[8]));
- count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+ count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
pqi_device_type(device),
device->vendor,
@@ -1643,19 +2018,19 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
if (pqi_is_logical_device(device)) {
if (device->devtype == TYPE_DISK)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"SSDSmartPathCap%c En%c %-12s",
device->raid_bypass_configured ? '+' : '-',
device->raid_bypass_enabled ? '+' : '-',
pqi_raid_level_to_string(device->raid_level));
} else {
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"AIO%c", device->aio_enabled ? '+' : '-');
if (device->devtype == TYPE_DISK ||
device->devtype == TYPE_ZBC)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" qd=%-6d", device->queue_depth);
}
@@ -1663,12 +2038,28 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
+static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
+{
+ u32 raid_map1_size;
+ u32 raid_map2_size;
+
+ if (raid_map1 == NULL || raid_map2 == NULL)
+ return raid_map1 == raid_map2;
+
+ raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
+ raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
+
+ if (raid_map1_size != raid_map2_size)
+ return false;
+
+ return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
+}
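/*
 * A generic sketch of the size-then-memcmp() compare used by
 * pqi_raid_maps_equal() above: NULL pointers are equal only to each
 * other, and the embedded size field gives a cheap reject before the
 * byte-wise compare. The blob layout is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct blob {
	uint32_t size;		/* total bytes, including this header */
	uint8_t data[];
};

static bool blobs_equal(const struct blob *a, const struct blob *b)
{
	if (a == NULL || b == NULL)
		return a == b;		/* both NULL => equal */

	if (a->size != b->size)
		return false;		/* cheap reject before memcmp */

	return memcmp(a, b, a->size) == 0;
}

int main(void)
{
	struct blob *a = calloc(1, sizeof(*a) + 4);
	struct blob *b = calloc(1, sizeof(*b) + 4);
	int rc;

	a->size = b->size = sizeof(*a) + 4;
	memcpy(a->data, "abcd", 4);
	memcpy(b->data, "abcd", 4);

	rc = blobs_equal(a, b) ? 0 : 1;	/* expect 0: equal */

	free(a);
	free(b);
	return rc;
}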
+
/* Assumes the SCSI device list lock is held. */
-static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
- struct pqi_scsi_dev *new_device)
+static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
- existing_device->devtype = new_device->devtype;
existing_device->device_type = new_device->device_type;
existing_device->bus = new_device->bus;
if (new_device->target_lun_valid) {
@@ -1680,42 +2071,48 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->is_external_raid_device =
- new_device->is_external_raid_device;
- existing_device->is_expander_smp_device =
- new_device->is_expander_smp_device;
- existing_device->aio_enabled = new_device->aio_enabled;
- memcpy(existing_device->vendor, new_device->vendor,
- sizeof(existing_device->vendor));
- memcpy(existing_device->model, new_device->model,
- sizeof(existing_device->model));
+ memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
existing_device->sas_address = new_device->sas_address;
- existing_device->raid_level = new_device->raid_level;
existing_device->queue_depth = new_device->queue_depth;
- existing_device->aio_handle = new_device->aio_handle;
- existing_device->volume_status = new_device->volume_status;
- existing_device->active_path_index = new_device->active_path_index;
- existing_device->path_map = new_device->path_map;
- existing_device->bay = new_device->bay;
- existing_device->box_index = new_device->box_index;
- existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
- existing_device->phy_connected_dev_type =
- new_device->phy_connected_dev_type;
- memcpy(existing_device->box, new_device->box,
- sizeof(existing_device->box));
- memcpy(existing_device->phys_connector, new_device->phys_connector,
- sizeof(existing_device->phys_connector));
- existing_device->offload_to_mirror = 0;
- kfree(existing_device->raid_map);
- existing_device->raid_map = new_device->raid_map;
- existing_device->raid_bypass_configured =
- new_device->raid_bypass_configured;
- existing_device->raid_bypass_enabled =
- new_device->raid_bypass_enabled;
existing_device->device_offline = false;
- /* To prevent this from being freed later. */
- new_device->raid_map = NULL;
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+ if (ctrl_info->logical_volume_rescan_needed)
+ existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ }
+ existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
+ }
+ } else {
+ existing_device->aio_enabled = new_device->aio_enabled;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+
+ existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+ if (existing_device->multi_lun_device_lun_count == 0)
+ existing_device->multi_lun_device_lun_count = 1;
+ }
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
@@ -1775,8 +2172,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
/* Assume that all devices in the existing list have gone away. */
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
device->device_gone = true;
for (i = 0; i < num_new_devices; i++) {
@@ -1793,7 +2189,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
device->new_device = false;
matching_device->device_gone = false;
- pqi_scsi_update_device(matching_device, device);
+ pqi_scsi_update_device(ctrl_info, matching_device, device);
break;
case DEVICE_NOT_FOUND:
/*
@@ -1837,12 +2233,21 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- if (pqi_ctrl_in_ofa(ctrl_info))
- pqi_ctrl_ofa_done(ctrl_info);
+ /*
+ * If OFA is in progress and there are devices that need to be deleted,
+ * allow any pending reset operations to continue and unblock any SCSI
+ * requests before removal.
+ */
+ if (pqi_ofa_in_progress(ctrl_info)) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
+ if (pqi_is_device_added(device))
+ pqi_device_remove_start(device);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ }
/* Remove all devices that have gone away. */
- list_for_each_entry_safe(device, next, &delete_list,
- delete_list_entry) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
@@ -1856,25 +2261,27 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
/*
- * Notify the SCSI ML if the queue depth of any existing device has
- * changed.
+	 * Notify the SML of any existing device changes, such as
+	 * queue depth and device size.
*/
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->sdev && device->queue_depth !=
- device->advertised_queue_depth) {
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
- scsi_change_queue_depth(device->sdev,
- device->advertised_queue_depth);
+ scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
+ if (device->rescan) {
+ scsi_rescan_device(&device->sdev->sdev_gendev);
+ device->rescan = false;
+ }
}
}
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
if (!pqi_is_device_added(device)) {
- pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
- if (rc) {
+ if (rc == 0) {
+ pqi_dev_info(ctrl_info, "added", device);
+ } else {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d addition failed, device not added\n",
ctrl_info->scsi_host->host_no,
@@ -1884,38 +2291,24 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
}
-}
-
-static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
-{
- bool is_supported;
- if (device->is_expander_smp_device)
- return true;
+ ctrl_info->logical_volume_rescan_needed = false;
- is_supported = false;
+}
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_TAPE:
- case TYPE_MEDIUM_CHANGER:
- case TYPE_ENCLOSURE:
- is_supported = true;
- break;
- case TYPE_RAID:
- /*
- * Only support the HBA controller itself as a RAID
- * controller. If it's a RAID controller other than
- * the HBA itself (an external RAID controller, for
- * example), we don't support it.
- */
- if (pqi_is_hba_lunid(device->scsi3addr))
- is_supported = true;
- break;
- }
+static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+{
+ /*
+ * Only support the HBA controller itself as a RAID
+ * controller. If it's a RAID controller other than
+ * the HBA itself (an external RAID controller, for
+ * example), we don't support it.
+ */
+ if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
+ !pqi_is_hba_lunid(device->scsi3addr))
+ return false;
- return is_supported;
+ return true;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
@@ -1932,28 +2325,17 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0;
}
-static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{
- if (!device->is_physical_device)
+ if (pqi_is_logical_device(device))
return false;
- if (device->is_expander_smp_device)
- return true;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_ENCLOSURE:
- return true;
- }
-
- return false;
+ return (device->path_map & (device->path_map - 1)) != 0;
}
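/*
 * The pqi_is_multipath_device() test above uses the classic
 * x & (x - 1) trick: clearing the lowest set bit of path_map leaves a
 * nonzero value exactly when more than one path bit is set.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool multiple_paths(uint8_t map)
{
	return (map & (map - 1)) != 0;
}

int main(void)
{
	assert(!multiple_paths(0x00));	/* no paths */
	assert(!multiple_paths(0x04));	/* one path */
	assert(multiple_paths(0x06));	/* two paths => multipath */
	return 0;
}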
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
- return !device->is_physical_device ||
- !pqi_skip_device(device->scsi3addr);
+ return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -1961,10 +2343,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
int i;
int rc;
LIST_HEAD(new_device_list_head);
- struct report_phys_lun_extended *physdev_list = NULL;
- struct report_log_lun_extended *logdev_list = NULL;
- struct report_phys_lun_extended_entry *phys_lun_ext_entry;
- struct report_log_lun_extended_entry *log_lun_ext_entry;
+ struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
+ struct report_log_lun_list *logdev_list = NULL;
+ struct report_phys_lun_16byte_wwid *phys_lun;
+ struct report_log_lun *log_lun;
struct bmic_identify_physical_device *id_phys = NULL;
u32 num_physicals;
u32 num_logicals;
@@ -2015,19 +2397,19 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (pqi_hide_vsep) {
for (i = num_physicals - 1; i >= 0; i--) {
- phys_lun_ext_entry =
- &physdev_list->lun_entries[i];
- if (CISS_GET_DRIVE_NUMBER(
- phys_lun_ext_entry->lunid) ==
- PQI_VSEP_CISS_BTL) {
- pqi_mask_device(
- phys_lun_ext_entry->lunid);
+ phys_lun = &physdev_list->lun_entries[i];
+ if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
+ pqi_mask_device(phys_lun->lunid);
break;
}
}
}
}
+ if (num_logicals &&
+ (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
+ ctrl_info->lv_drive_type_mix_valid = true;
+
num_new_devices = num_physicals + num_logicals;
new_device_list = kmalloc_array(num_new_devices,
@@ -2061,16 +2443,14 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if ((!pqi_expose_ld_first && i < num_physicals) ||
(pqi_expose_ld_first && i >= num_logicals)) {
is_physical_device = true;
- phys_lun_ext_entry =
- &physdev_list->lun_entries[physical_index++];
- log_lun_ext_entry = NULL;
- scsi3addr = phys_lun_ext_entry->lunid;
+ phys_lun = &physdev_list->lun_entries[physical_index++];
+ log_lun = NULL;
+ scsi3addr = phys_lun->lunid;
} else {
is_physical_device = false;
- phys_lun_ext_entry = NULL;
- log_lun_ext_entry =
- &logdev_list->lun_entries[logical_index++];
- scsi3addr = log_lun_ext_entry->lunid;
+ phys_lun = NULL;
+ log_lun = &logdev_list->lun_entries[logical_index++];
+ scsi3addr = log_lun->lunid;
}
if (is_physical_device && pqi_skip_device(scsi3addr))
@@ -2085,16 +2465,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
if (is_physical_device) {
- if (phys_lun_ext_entry->device_type ==
- SA_DEVICE_TYPE_EXPANDER_SMP)
+ device->device_type = phys_lun->device_type;
+ if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
pqi_is_external_raid_addr(scsi3addr);
}
+ if (!pqi_is_supported_device(device))
+ continue;
+
+ /* Do not present disks that the OS cannot fully probe */
+ if (pqi_keep_device_offline(ctrl_info, device))
+ continue;
+
/* Gather information about the device. */
- rc = pqi_get_device_info(ctrl_info, device);
+ rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
out_of_memory_msg);
@@ -2103,9 +2490,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (rc) {
if (device->is_physical_device)
dev_warn(&ctrl_info->pci_dev->dev,
- "obtaining device info failed, skipping physical device %016llx\n",
- get_unaligned_be64(
- &phys_lun_ext_entry->wwid));
+ "obtaining device info failed, skipping physical device %016llx%016llx\n",
+ get_unaligned_be64(&phys_lun->wwid[0]),
+ get_unaligned_be64(&phys_lun->wwid[8]));
else
dev_warn(&ctrl_info->pci_dev->dev,
"obtaining device info failed, skipping logical device %08x%08x\n",
@@ -2115,28 +2502,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
- if (!pqi_is_supported_device(device))
- continue;
-
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
- device->wwid = phys_lun_ext_entry->wwid;
- if ((phys_lun_ext_entry->device_flags &
+ memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
+ if ((phys_lun->device_flags &
CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
- phys_lun_ext_entry->aio_handle) {
- device->aio_enabled = true;
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ phys_lun->aio_handle) {
+ device->aio_enabled = true;
+ device->aio_handle =
+ phys_lun->aio_handle;
}
- pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
- memcpy(device->volume_id, log_lun_ext_entry->volume_id,
+ memcpy(device->volume_id, log_lun->volume_id,
sizeof(device->volume_id));
}
- if (pqi_is_device_with_sas_address(device))
- device->sas_address = get_unaligned_be64(&device->wwid);
+ device->sas_address = get_unaligned_be64(&device->wwid[0]);
new_device_list[num_valid_devices++] = device;
}
@@ -2160,48 +2542,29 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
-
- while (1) {
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
- device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
- struct pqi_scsi_dev, scsi_device_list_entry);
- if (device)
- list_del(&device->scsi_device_list_entry);
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!device)
- break;
-
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- pqi_free_device(device);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
- int rc = 0;
+ int rc;
+ int mutex_acquired;
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
- if (!mutex_trylock(&ctrl_info->scan_mutex)) {
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+
+ if (!mutex_acquired) {
+ if (pqi_ctrl_scan_blocked(ctrl_info))
+ return -EBUSY;
pqi_schedule_rescan_worker_delayed(ctrl_info);
- rc = -EINPROGRESS;
- } else {
- rc = pqi_update_scsi_devices(ctrl_info);
- if (rc)
- pqi_schedule_rescan_worker_delayed(ctrl_info);
- mutex_unlock(&ctrl_info->scan_mutex);
+ return -EINPROGRESS;
}
+ rc = pqi_update_scsi_devices(ctrl_info);
+ if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
return rc;
}
@@ -2210,8 +2573,6 @@ static void pqi_scan_start(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info;
ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_in_ofa(ctrl_info))
- return;
pqi_scan_scsi_devices(ctrl_info);
}
@@ -2228,27 +2589,8 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
return !mutex_is_locked(&ctrl_info->scan_mutex);
}
-static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->scan_mutex);
- mutex_unlock(&ctrl_info->scan_mutex);
-}
-
-static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->lun_reset_mutex);
- mutex_unlock(&ctrl_info->lun_reset_mutex);
-}
-
-static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->ofa_mutex);
- mutex_unlock(&ctrl_info->ofa_mutex);
-}
-
-static inline void pqi_set_encryption_info(
- struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
- u64 first_block)
+static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
+ struct raid_map *raid_map, u64 first_block)
{
u32 volume_blk_size;
@@ -2271,333 +2613,419 @@ static inline void pqi_set_encryption_info(
* Attempt to perform RAID bypass mapping for a logical volume I/O.
*/
+static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ bool is_supported = true;
+
+ switch (rmd->raid_level) {
+ case SA_RAID_0:
+ break;
+ case SA_RAID_1:
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
+ is_supported = false;
+ break;
+ case SA_RAID_TRIPLE:
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
+ is_supported = false;
+ break;
+ case SA_RAID_5:
+ if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_5_6))
+ is_supported = false;
+ break;
+ case SA_RAID_6:
+ if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_5_6))
+ is_supported = false;
+ break;
+ default:
+ is_supported = false;
+ break;
+ }
+
+ return is_supported;
+}
+
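pqi_aio_raid_level_supported() gates write bypass on two independent conditions per RAID level: the controller feature flag (enable_r1_writes and friends) and a per-level maximum transfer length. A hedged sketch of that gate for the RAID 1 case, with a made-up limit standing in for ctrl_info->max_write_raid_1_10_2drive:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_R1_WRITE 4096u	/* hypothetical per-level write limit */

/* A RAID 1 write may bypass only if the feature is enabled and fits. */
static bool raid1_write_bypass_ok(bool enable_r1_writes, uint32_t data_length)
{
	return enable_r1_writes && data_length <= MAX_R1_WRITE;
}

int main(void)
{
	printf("%d\n", raid1_write_bypass_ok(true, 2048));	/* 1: eligible */
	printf("%d\n", raid1_write_bypass_ok(true, 8192));	/* 0: too large */
	printf("%d\n", raid1_write_bypass_ok(false, 2048));	/* 0: disabled */
	return 0;
}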
#define PQI_RAID_BYPASS_INELIGIBLE 1
-static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
+ struct pqi_scsi_dev_raid_map_data *rmd)
{
- struct raid_map *raid_map;
- bool is_write = false;
- u32 map_index;
- u64 first_block;
- u64 last_block;
- u32 block_cnt;
- u32 blocks_per_row;
- u64 first_row;
- u64 last_row;
- u32 first_row_offset;
- u32 last_row_offset;
- u32 first_column;
- u32 last_column;
- u64 r0_first_row;
- u64 r0_last_row;
- u32 r5or6_blocks_per_row;
- u64 r5or6_first_row;
- u64 r5or6_last_row;
- u32 r5or6_first_row_offset;
- u32 r5or6_last_row_offset;
- u32 r5or6_first_column;
- u32 r5or6_last_column;
- u16 data_disks_per_row;
- u32 total_disks_per_row;
- u16 layout_map_count;
- u32 stripesize;
- u16 strip_size;
- u32 first_group;
- u32 last_group;
- u32 current_group;
- u32 map_row;
- u32 aio_handle;
- u64 disk_block;
- u32 disk_block_cnt;
- u8 cdb[16];
- u8 cdb_length;
- int offload_to_mirror;
- struct pqi_encryption_info *encryption_info_ptr;
- struct pqi_encryption_info encryption_info;
-#if BITS_PER_LONG == 32
- u64 tmpdiv;
-#endif
-
/* Check for valid opcode, get LBA and block count. */
switch (scmd->cmnd[0]) {
case WRITE_6:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_6:
- first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+ rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
- block_cnt = (u32)scmd->cmnd[4];
- if (block_cnt == 0)
- block_cnt = 256;
+ rmd->block_cnt = (u32)scmd->cmnd[4];
+ if (rmd->block_cnt == 0)
+ rmd->block_cnt = 256;
break;
case WRITE_10:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_10:
- first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
- block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
break;
case WRITE_12:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_12:
- first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
- block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
break;
case WRITE_16:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_16:
- first_block = get_unaligned_be64(&scmd->cmnd[2]);
- block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+ rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
+ rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
break;
default:
/* Process via normal I/O path. */
return PQI_RAID_BYPASS_INELIGIBLE;
}
- /* Check for write to non-RAID-0. */
- if (is_write && device->raid_level != SA_RAID_0)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
- if (unlikely(block_cnt == 0))
- return PQI_RAID_BYPASS_INELIGIBLE;
+ return 0;
+}
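pqi_get_aio_lba_and_block_count() mirrors the SCSI CDB layouts: READ(6)/WRITE(6) pack a 21-bit LBA into bytes 1-3 (the top three bits of byte 1 are the obsolete LUN field, hence the & 0x1f) and a one-byte transfer length where 0 means 256 blocks. A standalone decode of a sample 6-byte CDB:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* READ(6): opcode, 21-bit LBA in bytes 1-3, transfer length, control */
	uint8_t cdb[6] = { 0x08, 0x12, 0x34, 0x56, 0x00, 0x00 };

	uint64_t lba = ((uint64_t)(cdb[1] & 0x1f) << 16) |
		       ((uint64_t)cdb[2] << 8) | cdb[3];
	uint32_t blocks = cdb[4] ? cdb[4] : 256;	/* 0 means 256 blocks */

	printf("lba=0x%llx blocks=%u\n", (unsigned long long)lba, blocks);
	/* prints: lba=0x123456 blocks=256 */
	return 0;
}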
- last_block = first_block + block_cnt - 1;
- raid_map = device->raid_map;
+static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
+{
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+
+ rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
/* Check for invalid block or wraparound. */
- if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
- last_block < first_block)
+ if (rmd->last_block >=
+ get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+ rmd->last_block < rmd->first_block)
return PQI_RAID_BYPASS_INELIGIBLE;
- data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
- strip_size = get_unaligned_le16(&raid_map->strip_size);
- layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+ rmd->data_disks_per_row =
+ get_unaligned_le16(&raid_map->data_disks_per_row);
+ rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
+ rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
/* Calculate stripe information for the request. */
- blocks_per_row = data_disks_per_row * strip_size;
+ rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
+ if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+ return PQI_RAID_BYPASS_INELIGIBLE;
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- do_div(tmpdiv, blocks_per_row);
- first_row = tmpdiv;
- tmpdiv = last_block;
- do_div(tmpdiv, blocks_per_row);
- last_row = tmpdiv;
- first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
- last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
- tmpdiv = first_row_offset;
- do_div(tmpdiv, strip_size);
- first_column = tmpdiv;
- tmpdiv = last_row_offset;
- do_div(tmpdiv, strip_size);
- last_column = tmpdiv;
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->first_row = tmpdiv;
+ tmpdiv = rmd->last_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->last_row = tmpdiv;
+ rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
+ rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
+ tmpdiv = rmd->first_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->first_column = tmpdiv;
+ tmpdiv = rmd->last_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->last_column = tmpdiv;
#else
- first_row = first_block / blocks_per_row;
- last_row = last_block / blocks_per_row;
- first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
- last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
- first_column = first_row_offset / strip_size;
- last_column = last_row_offset / strip_size;
+ rmd->first_row = rmd->first_block / rmd->blocks_per_row;
+ rmd->last_row = rmd->last_block / rmd->blocks_per_row;
+ rmd->first_row_offset = (u32)(rmd->first_block -
+ (rmd->first_row * rmd->blocks_per_row));
+ rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
+ rmd->blocks_per_row));
+ rmd->first_column = rmd->first_row_offset / rmd->strip_size;
+ rmd->last_column = rmd->last_row_offset / rmd->strip_size;
#endif
/* If this isn't a single row/column then give to the controller. */
- if (first_row != last_row || first_column != last_column)
+ if (rmd->first_row != rmd->last_row ||
+ rmd->first_column != rmd->last_column)
return PQI_RAID_BYPASS_INELIGIBLE;
/* Proceeding with driver mapping. */
- total_disks_per_row = data_disks_per_row +
+ rmd->total_disks_per_row = rmd->data_disks_per_row +
get_unaligned_le16(&raid_map->metadata_disks_per_row);
- map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+ rmd->map_row = ((u32)(rmd->first_row >>
+ raid_map->parity_rotation_shift)) %
get_unaligned_le16(&raid_map->row_cnt);
- map_index = (map_row * total_disks_per_row) + first_column;
+ rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
+ rmd->first_column;
- /* RAID 1 */
- if (device->raid_level == SA_RAID_1) {
- if (device->offload_to_mirror)
- map_index += data_disks_per_row;
- device->offload_to_mirror = !device->offload_to_mirror;
- } else if (device->raid_level == SA_RAID_ADM) {
- /* RAID ADM */
- /*
- * Handles N-way mirrors (R1-ADM) and R10 with # of drives
- * divisible by 3.
- */
- offload_to_mirror = device->offload_to_mirror;
- if (offload_to_mirror == 0) {
- /* use physical disk in the first mirrored group. */
- map_index %= data_disks_per_row;
- } else {
- do {
- /*
- * Determine mirror group that map_index
- * indicates.
- */
- current_group = map_index / data_disks_per_row;
-
- if (offload_to_mirror != current_group) {
- if (current_group <
- layout_map_count - 1) {
- /*
- * Select raid index from
- * next group.
- */
- map_index += data_disks_per_row;
- current_group++;
- } else {
- /*
- * Select raid index from first
- * group.
- */
- map_index %= data_disks_per_row;
- current_group = 0;
- }
- }
- } while (offload_to_mirror != current_group);
- }
+ return 0;
+}
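The row/column derivation above reduces to integer division by the row size: blocks_per_row = data_disks_per_row * strip_size, the row is first_block / blocks_per_row, the in-row offset is the remainder, and the column is offset / strip_size. Worked through with sample, made-up geometry:

#include <stdint.h>
#include <stdio.h>

/* Sample geometry: 4 data disks, 128-block strips => 512 blocks/row. */
int main(void)
{
	uint32_t strip_size = 128, data_disks_per_row = 4;
	uint32_t blocks_per_row = data_disks_per_row * strip_size;

	uint64_t first_block = 1000;
	uint64_t first_row = first_block / blocks_per_row;		/* 1 */
	uint32_t first_row_offset =
		(uint32_t)(first_block - first_row * blocks_per_row);	/* 488 */
	uint32_t first_column = first_row_offset / strip_size;		/* 3 */

	printf("row=%llu offset=%u column=%u\n",
	       (unsigned long long)first_row, first_row_offset, first_column);
	return 0;
}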
- /* Set mirror group to use next time. */
- offload_to_mirror =
- (offload_to_mirror >= layout_map_count - 1) ?
- 0 : offload_to_mirror + 1;
- WARN_ON(offload_to_mirror >= layout_map_count);
- device->offload_to_mirror = offload_to_mirror;
- /*
- * Avoid direct use of device->offload_to_mirror within this
- * function since multiple threads might simultaneously
- * increment it beyond the range of device->layout_map_count -1.
- */
- } else if ((device->raid_level == SA_RAID_5 ||
- device->raid_level == SA_RAID_6) && layout_map_count > 1) {
- /* RAID 50/60 */
- /* Verify first and last block are in same RAID group */
- r5or6_blocks_per_row = strip_size * data_disks_per_row;
- stripesize = r5or6_blocks_per_row * layout_map_count;
+static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
+ struct raid_map *raid_map)
+{
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- first_group = do_div(tmpdiv, stripesize);
- tmpdiv = first_group;
- do_div(tmpdiv, r5or6_blocks_per_row);
- first_group = tmpdiv;
- tmpdiv = last_block;
- last_group = do_div(tmpdiv, stripesize);
- tmpdiv = last_group;
- do_div(tmpdiv, r5or6_blocks_per_row);
- last_group = tmpdiv;
+ u64 tmpdiv;
+#endif
+
+ if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* RAID 50/60 */
+ /* Verify first and last block are in same RAID group. */
+ rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
+#if BITS_PER_LONG == 32
+ tmpdiv = rmd->first_block;
+ rmd->first_group = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->first_group;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->first_group = tmpdiv;
+ tmpdiv = rmd->last_block;
+ rmd->last_group = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->last_group;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->last_group = tmpdiv;
#else
- first_group = (first_block % stripesize) / r5or6_blocks_per_row;
- last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+ rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
+ rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
#endif
- if (first_group != last_group)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ if (rmd->first_group != rmd->last_group)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Verify request is in a single row of RAID 5/6 */
+ /* Verify request is in a single row of RAID 5/6. */
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- do_div(tmpdiv, stripesize);
- first_row = r5or6_first_row = r0_first_row = tmpdiv;
- tmpdiv = last_block;
- do_div(tmpdiv, stripesize);
- r5or6_last_row = r0_last_row = tmpdiv;
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->stripesize);
+ rmd->first_row = tmpdiv;
+ rmd->r5or6_first_row = tmpdiv;
+ tmpdiv = rmd->last_block;
+ do_div(tmpdiv, rmd->stripesize);
+ rmd->r5or6_last_row = tmpdiv;
#else
- first_row = r5or6_first_row = r0_first_row =
- first_block / stripesize;
- r5or6_last_row = r0_last_row = last_block / stripesize;
+ rmd->first_row = rmd->r5or6_first_row =
+ rmd->first_block / rmd->stripesize;
+ rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
#endif
- if (r5or6_first_row != r5or6_last_row)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ if (rmd->r5or6_first_row != rmd->r5or6_last_row)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Verify request is in a single column */
+ /* Verify request is in a single column. */
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- first_row_offset = do_div(tmpdiv, stripesize);
- tmpdiv = first_row_offset;
- first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
- r5or6_first_row_offset = first_row_offset;
- tmpdiv = last_block;
- r5or6_last_row_offset = do_div(tmpdiv, stripesize);
- tmpdiv = r5or6_last_row_offset;
- r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
- tmpdiv = r5or6_first_row_offset;
- do_div(tmpdiv, strip_size);
- first_column = r5or6_first_column = tmpdiv;
- tmpdiv = r5or6_last_row_offset;
- do_div(tmpdiv, strip_size);
- r5or6_last_column = tmpdiv;
+ tmpdiv = rmd->first_block;
+ rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->first_row_offset;
+ rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->r5or6_first_row_offset = rmd->first_row_offset;
+ tmpdiv = rmd->last_block;
+ rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->r5or6_last_row_offset;
+ rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
+ tmpdiv = rmd->r5or6_first_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->first_column = rmd->r5or6_first_column = tmpdiv;
+ tmpdiv = rmd->r5or6_last_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->r5or6_last_column = tmpdiv;
#else
- first_row_offset = r5or6_first_row_offset =
- (u32)((first_block % stripesize) %
- r5or6_blocks_per_row);
+ rmd->first_row_offset = rmd->r5or6_first_row_offset =
+ (u32)((rmd->first_block % rmd->stripesize) %
+ rmd->blocks_per_row);
+
+ rmd->r5or6_last_row_offset =
+ (u32)((rmd->last_block % rmd->stripesize) %
+ rmd->blocks_per_row);
+
+ rmd->first_column =
+ rmd->r5or6_first_row_offset / rmd->strip_size;
+ rmd->r5or6_first_column = rmd->first_column;
+ rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
+#endif
+ if (rmd->r5or6_first_column != rmd->r5or6_last_column)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Request is eligible. */
+ rmd->map_row =
+ ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
+ get_unaligned_le16(&raid_map->row_cnt);
+
+ rmd->map_index = (rmd->first_group *
+ (get_unaligned_le16(&raid_map->row_cnt) *
+ rmd->total_disks_per_row)) +
+ (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
- r5or6_last_row_offset =
- (u32)((last_block % stripesize) %
- r5or6_blocks_per_row);
+ if (rmd->is_write) {
+ u32 index;
- first_column = r5or6_first_row_offset / strip_size;
- r5or6_first_column = first_column;
- r5or6_last_column = r5or6_last_row_offset / strip_size;
+ /*
+ * p_parity_it_nexus and q_parity_it_nexus are the IT nexus
+ * handles taken from the parity entries inside the device's
+ * raid_map.
+ *
+ * A device's RAID map is bounded by the number of RAID disks
+ * squared; the map size is checked during device
+ * initialization.
+ */
+ index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
+ index *= rmd->total_disks_per_row;
+ index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
+
+ rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
+ if (rmd->raid_level == SA_RAID_6) {
+ rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
+ rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
+ }
+#if BITS_PER_LONG == 32
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->row = tmpdiv;
+#else
+ rmd->row = rmd->first_block / rmd->blocks_per_row;
#endif
- if (r5or6_first_column != r5or6_last_column)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ }
+
+ return 0;
+}
+
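The write path's parity lookup above rounds map_index up to the end of its row and then steps back over the metadata entries, landing on that row's P-parity slot in raid_map->disk_data[] (Q sits at index + 1 for RAID 6). A dry run with a hypothetical 4+1 RAID 5 map, with DIV_ROUND_UP redefined locally to match the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical RAID 5 geometry: 4 data + 1 parity disk per map row. */
int main(void)
{
	uint32_t total_disks_per_row = 5;
	uint32_t metadata_disks_per_row = 1;
	uint32_t map_index = 7;	/* data entry in the second row (entries 5..9) */

	/* Round up to the end of the current row, then step back over the
	 * metadata (parity) entries to land on the row's P-parity slot. */
	uint32_t index = DIV_ROUND_UP(map_index + 1, total_disks_per_row);
	index *= total_disks_per_row;		/* 10: one past row 1 */
	index -= metadata_disks_per_row;	/* 9: P parity of row 1 */

	printf("parity index = %u\n", index);
	return 0;
}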
+static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ /* Build the new CDB for the physical disk I/O. */
+ if (rmd->disk_block > 0xffffffff) {
+ rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
+ rmd->cdb[1] = 0;
+ put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
+ put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
+ rmd->cdb[14] = 0;
+ rmd->cdb[15] = 0;
+ rmd->cdb_length = 16;
+ } else {
+ rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
+ rmd->cdb[1] = 0;
+ put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
+ rmd->cdb[6] = 0;
+ put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
+ rmd->cdb[9] = 0;
+ rmd->cdb_length = 10;
+ }
+}
+
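pqi_set_aio_cdb() picks the 10-byte CDB whenever the rebased disk LBA still fits in 32 bits and falls back to the 16-byte form otherwise. The sketch below reproduces the READ(10) encoding with local big-endian helpers standing in for the kernel's put_unaligned_be32()/put_unaligned_be16(); the opcode value 0x28 is READ_10 from the SCSI spec:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's unaligned big-endian store helpers. */
static void be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8; p[1] = v;
}

int main(void)
{
	uint64_t disk_block = 0x12345678;	/* fits in 32 bits -> 10-byte CDB */
	uint32_t disk_block_cnt = 8;
	uint8_t cdb[10] = { 0 };

	cdb[0] = 0x28;				/* READ_10 opcode */
	be32((uint32_t)disk_block, &cdb[2]);	/* bytes 2-5: LBA */
	be16((uint16_t)disk_block_cnt, &cdb[7]);/* bytes 7-8: transfer length */

	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);	/* 28 00 12 34 56 78 00 00 08 00 */
	printf("\n");
	return 0;
}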
+static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ u32 index;
+ u32 group;
+
+ group = rmd->map_index / rmd->data_disks_per_row;
+
+ index = rmd->map_index - (group * rmd->data_disks_per_row);
+ rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
+ index += rmd->data_disks_per_row;
+ rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
+ if (rmd->layout_map_count > 2) {
+ index += rmd->data_disks_per_row;
+ rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
+ }
+
+ rmd->num_it_nexus_entries = rmd->layout_map_count;
+}
+
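pqi_calc_aio_r1_nexus() exploits the raid_map layout in which the mirror copies of one data column sit data_disks_per_row entries apart: it rebases map_index into the first mirror group, then strides forward once per mirror. Dry run with a hypothetical 3-drive triple-mirror map:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical RAID "triple" layout: 3 data disks, 3 mirror groups, so
 * the disk_data[] entries for one column sit data_disks_per_row apart. */
int main(void)
{
	uint32_t data_disks_per_row = 3, layout_map_count = 3;
	uint32_t map_index = 4;

	uint32_t group = map_index / data_disks_per_row;		/* 1 */
	uint32_t index = map_index - group * data_disks_per_row;	/* 1 */

	for (uint32_t m = 0; m < layout_map_count; m++)
		printf("mirror %u -> disk_data[%u]\n", m,
		       index + m * data_disks_per_row);	/* 1, 4, 7 */
	return 0;
}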
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ int rc;
+ struct raid_map *raid_map;
+ u32 group;
+ u32 next_bypass_group;
+ struct pqi_encryption_info *encryption_info_ptr;
+ struct pqi_encryption_info encryption_info;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+
+ rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ rmd.raid_level = device->raid_level;
+
+ if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (unlikely(rmd.block_cnt == 0))
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Request is eligible */
- map_row =
- ((u32)(first_row >> raid_map->parity_rotation_shift)) %
- get_unaligned_le16(&raid_map->row_cnt);
+ raid_map = device->raid_map;
- map_index = (first_group *
- (get_unaligned_le16(&raid_map->row_cnt) *
- total_disks_per_row)) +
- (map_row * total_disks_per_row) + first_column;
+ rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (device->raid_level == SA_RAID_1 ||
+ device->raid_level == SA_RAID_TRIPLE) {
+ if (rmd.is_write) {
+ pqi_calc_aio_r1_nexus(raid_map, &rmd);
+ } else {
+ group = device->next_bypass_group[rmd.map_index];
+ next_bypass_group = group + 1;
+ if (next_bypass_group >= rmd.layout_map_count)
+ next_bypass_group = 0;
+ device->next_bypass_group[rmd.map_index] = next_bypass_group;
+ rmd.map_index += group * rmd.data_disks_per_row;
+ }
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) &&
+ (rmd.layout_map_count > 1 || rmd.is_write)) {
+ rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
}
- aio_handle = raid_map->disk_data[map_index].aio_handle;
- disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
- first_row * strip_size +
- (first_row_offset - first_column * strip_size);
- disk_block_cnt = block_cnt;
+ if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
+ rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+ rmd.first_row * rmd.strip_size +
+ (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
+ rmd.disk_block_cnt = rmd.block_cnt;
/* Handle differing logical/physical block sizes. */
if (raid_map->phys_blk_shift) {
- disk_block <<= raid_map->phys_blk_shift;
- disk_block_cnt <<= raid_map->phys_blk_shift;
+ rmd.disk_block <<= raid_map->phys_blk_shift;
+ rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
}
- if (unlikely(disk_block_cnt > 0xffff))
+ if (unlikely(rmd.disk_block_cnt > 0xffff))
return PQI_RAID_BYPASS_INELIGIBLE;
- /* Build the new CDB for the physical disk I/O. */
- if (disk_block > 0xffffffff) {
- cdb[0] = is_write ? WRITE_16 : READ_16;
- cdb[1] = 0;
- put_unaligned_be64(disk_block, &cdb[2]);
- put_unaligned_be32(disk_block_cnt, &cdb[10]);
- cdb[14] = 0;
- cdb[15] = 0;
- cdb_length = 16;
- } else {
- cdb[0] = is_write ? WRITE_10 : READ_10;
- cdb[1] = 0;
- put_unaligned_be32((u32)disk_block, &cdb[2]);
- cdb[6] = 0;
- put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
- cdb[9] = 0;
- cdb_length = 10;
- }
-
- if (get_unaligned_le16(&raid_map->flags) &
- RAID_MAP_ENCRYPTION_ENABLED) {
- pqi_set_encryption_info(&encryption_info, raid_map,
- first_block);
+ pqi_set_aio_cdb(&rmd);
+
+ if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
+ if (rmd.data_length > device->max_transfer_encrypted)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+ pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
encryption_info_ptr = &encryption_info;
} else {
encryption_info_ptr = NULL;
}
- return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
- cdb, cdb_length, queue_group, encryption_info_ptr, true);
+ if (rmd.is_write) {
+ switch (device->raid_level) {
+ case SA_RAID_1:
+ case SA_RAID_TRIPLE:
+ return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
+ encryption_info_ptr, device, &rmd);
+ case SA_RAID_5:
+ case SA_RAID_6:
+ return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
+ encryption_info_ptr, device, &rmd);
+ }
+ }
+
+ return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+ rmd.cdb, rmd.cdb_length, queue_group,
+ encryption_info_ptr, true, false);
}
#define PQI_STATUS_IDLE 0x0
@@ -2622,7 +3050,7 @@ static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
u8 status;
pqi_registers = ctrl_info->pqi_registers;
- timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
signature = readq(&pqi_registers->signature);
@@ -2801,12 +3229,14 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
int residual_count;
int xfer_count;
bool device_offline;
+ struct pqi_scsi_dev *device;
scmd = io_request->scmd;
error_info = io_request->error_info;
host_byte = DID_OK;
sense_data_length = 0;
device_offline = false;
+ device = scmd->device->hostdata;
switch (error_info->service_response) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
@@ -2831,8 +3261,14 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
- scsi_status = SAM_STAT_GOOD;
- io_request->status = -EAGAIN;
+ if (pqi_is_multipath_device(device)) {
+ pqi_device_remove_start(device);
+ host_byte = DID_NO_CONNECT;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ } else {
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
+ }
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
@@ -2874,8 +3310,7 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
}
if (device_offline && sense_data_length == 0)
- scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
- 0x3e, 0x1);
+ scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
@@ -2894,7 +3329,7 @@ static void pqi_process_io_error(unsigned int iu_type,
}
}
-static int pqi_interpret_task_management_response(
+static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
struct pqi_task_management_response *response)
{
int rc;
@@ -2907,18 +3342,30 @@ static int pqi_interpret_task_management_response(
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
+ case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ rc = -ENODEV;
+ break;
default:
rc = -EIO;
break;
}
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
+
return rc;
}
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
- unsigned int num_responses;
+ pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
+}
+
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+{
+ int num_responses;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_io_request *io_request;
@@ -2930,6 +3377,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
while (1) {
oq_pi = readl(queue_group->oq_pi);
+ if (oq_pi >= ctrl_info->num_elements_per_oq) {
+ pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
+ return -1;
+ }
if (oq_pi == oq_ci)
break;
@@ -2938,29 +3392,39 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
request_id = get_unaligned_le16(&response->request_id);
- WARN_ON(request_id >= ctrl_info->max_io_slots);
+ if (request_id >= ctrl_info->max_io_slots) {
+ pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
+ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
+ return -1;
+ }
io_request = &ctrl_info->io_request_pool[request_id];
- WARN_ON(atomic_read(&io_request->refcount) == 0);
+ if (atomic_read(&io_request->refcount) == 0) {
+ pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
+ request_id, oq_pi, oq_ci);
+ return -1;
+ }
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
if (io_request->scmd)
io_request->scmd->result = 0;
- /* fall through */
+ fallthrough;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
break;
case PQI_RESPONSE_IU_VENDOR_GENERAL:
io_request->status =
get_unaligned_le16(
- &((struct pqi_vendor_general_response *)
- response)->status);
+ &((struct pqi_vendor_general_response *)response)->status);
break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
- io_request->status =
- pqi_interpret_task_management_response(
- (void *)response);
+ io_request->status = pqi_interpret_task_management_response(ctrl_info,
+ (void *)response);
break;
case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
@@ -2971,24 +3435,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
io_request->error_info = ctrl_info->error_buffer +
(get_unaligned_le16(&response->error_index) *
PQI_ERROR_BUFFER_ELEMENT_LENGTH);
- pqi_process_io_error(response->header.iu_type,
- io_request);
+ pqi_process_io_error(response->header.iu_type, io_request);
break;
default:
+ pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
dev_err(&ctrl_info->pci_dev->dev,
- "unexpected IU type: 0x%x\n",
- response->header.iu_type);
- break;
+ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
+ response->header.iu_type, oq_pi, oq_ci);
+ return -1;
}
- io_request->io_complete_callback(io_request,
- io_request->context);
+ io_request->io_complete_callback(io_request, io_request->context);
/*
* Note that the I/O request structure CANNOT BE TOUCHED after
* returning from the I/O completion callback!
*/
-
oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
}
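The hunk above converts the old WARN_ON() sanity checks into hard failures: an out-of-range producer index or an unknown request ID now takes the controller offline and aborts the drain with -1 rather than trusting firmware-supplied values. A userspace sketch of the same consumer-loop shape, with a made-up ring size:

#include <stdio.h>

#define NUM_ELEMENTS 16u	/* made-up ring size */

/* Drain elements from oq_ci up to the producer index, validating the
 * producer index before trusting it, as the driver now does. */
static int drain(unsigned int oq_pi, unsigned int *oq_ci)
{
	int handled = 0;

	if (oq_pi >= NUM_ELEMENTS)
		return -1;	/* impossible index from the producer */

	while (oq_pi != *oq_ci) {
		/* ...process the element at *oq_ci... */
		*oq_ci = (*oq_ci + 1) % NUM_ELEMENTS;
		handled++;
	}
	return handled;
}

int main(void)
{
	unsigned int ci = 14;
	printf("%d\n", drain(2, &ci));	/* 4: consumes 14,15,0,1 */
	printf("%d\n", drain(99, &ci));	/* -1: rejected */
	return 0;
}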
@@ -3070,8 +3532,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
&request.header.iu_length);
request.event_type = event->event_type;
- request.event_id = event->event_id;
- request.additional_event_id = event->additional_event_id;
+ put_unaligned_le16(event->event_id, &request.event_id);
+ put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
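The acknowledgement now serializes event_id and additional_event_id explicitly because the cached event fields are kept in host order after being parsed with get_unaligned_le16()/le32() (see the event-interrupt hunk further down), while the ack IU is a little-endian wire structure. A userspace analogue of put_unaligned_le16():

#include <stdint.h>
#include <stdio.h>

/* Analogue of put_unaligned_le16(): store a host-order value as
 * little-endian bytes regardless of alignment or host byte order. */
static void put_le16(uint16_t v, uint8_t *p)
{
	p[0] = v;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t buf[2];

	put_le16(0x1234, buf);
	printf("%02x %02x\n", buf[0], buf[1]);	/* 34 12 */
	return 0;
}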
@@ -3082,10 +3544,10 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
struct pqi_ctrl_info *ctrl_info)
{
- unsigned long timeout;
u8 status;
+ unsigned long timeout;
- timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
status = pqi_read_soft_reset_status(ctrl_info);
@@ -3095,133 +3557,202 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
if (status & PQI_SOFT_RESET_ABORT)
return RESET_ABORT;
+ if (!sis_is_firmware_running(ctrl_info))
+ return RESET_NORESPONSE;
+
if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_warn(&ctrl_info->pci_dev->dev,
"timed out waiting for soft reset status\n");
return RESET_TIMEDOUT;
}
- if (!sis_is_firmware_running(ctrl_info))
- return RESET_NORESPONSE;
-
ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
}
}
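The PQI_HZ to HZ conversions feed the usual jiffies idiom seen in this loop: compute timeout = secs * HZ + jiffies once, then poll with time_after(jiffies, timeout), which stays correct across counter wraparound by comparing via signed subtraction. A minimal userspace analogue (the real macro lives in <linux/jiffies.h>):

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Analogue of the kernel's wraparound-safe time_after(): compare tick
 * counters via signed subtraction rather than a plain '<'. */
static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = (jiffies_t)-5;	/* 5 ticks before wraparound */
	jiffies_t timeout = now + 10;	/* wraps past zero */

	printf("%d\n", time_after(now, timeout));	/* 0: not expired */
	printf("%d\n", time_after(now + 20, timeout));	/* 1: expired */
	return 0;
}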
-static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ unsigned int delay_secs;
+ enum pqi_soft_reset_status reset_status;
+
+ if (ctrl_info->soft_reset_handshake_supported)
+ reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
+ else
+ reset_status = RESET_INITIATE_FIRMWARE;
+
+ delay_secs = PQI_POST_RESET_DELAY_SECS;
switch (reset_status) {
- case RESET_INITIATE_DRIVER:
- /* fall through */
case RESET_TIMEDOUT:
+ delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
+ fallthrough;
+ case RESET_INITIATE_DRIVER:
dev_info(&ctrl_info->pci_dev->dev,
- "resetting controller %u\n", ctrl_info->ctrl_id);
+ "Online Firmware Activation: resetting controller\n");
sis_soft_reset(ctrl_info);
- /* fall through */
+ fallthrough;
case RESET_INITIATE_FIRMWARE:
- rc = pqi_ofa_ctrl_restart(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+ rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation for controller %u: %s\n",
- ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+ "Online Firmware Activation: %s\n",
+ rc == 0 ? "SUCCESS" : "FAILED");
break;
case RESET_ABORT:
- pqi_ofa_ctrl_unquiesce(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation for controller %u: %s\n",
- ctrl_info->ctrl_id, "ABORTED");
+ "Online Firmware Activation ABORTED\n");
+ if (ctrl_info->soft_reset_handshake_supported)
+ pqi_clear_soft_reset_status(ctrl_info);
+ pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
break;
case RESET_NORESPONSE:
+ fallthrough;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "unexpected Online Firmware Activation reset status: 0x%x\n",
+ reset_status);
pqi_ofa_free_host_buffer(ctrl_info);
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
break;
}
}
-static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
- struct pqi_event *event)
+static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
{
- u16 event_id;
- enum pqi_soft_reset_status status;
+ struct pqi_ctrl_info *ctrl_info;
- event_id = get_unaligned_le16(&event->event_id);
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
- mutex_lock(&ctrl_info->ofa_mutex);
+ pqi_ctrl_ofa_start(ctrl_info);
+ pqi_ofa_setup_host_buffer(ctrl_info);
+ pqi_ofa_host_memory_update(ctrl_info);
+}
- if (event_id == PQI_EVENT_OFA_QUIESCE) {
- dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
- pqi_ofa_ctrl_quiesce(ctrl_info);
- pqi_acknowledge_event(ctrl_info, event);
- if (ctrl_info->soft_reset_handshake_supported) {
- status = pqi_poll_for_soft_reset_status(ctrl_info);
- pqi_process_soft_reset(ctrl_info, status);
- } else {
- pqi_process_soft_reset(ctrl_info,
- RESET_INITIATE_FIRMWARE);
- }
+static void pqi_ofa_quiesce_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_event *event;
- } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
- pqi_acknowledge_event(ctrl_info, event);
- pqi_ofa_setup_host_buffer(ctrl_info,
- le32_to_cpu(event->ofa_bytes_requested));
- pqi_ofa_host_memory_update(ctrl_info);
- } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
- pqi_ofa_free_host_buffer(ctrl_info);
- pqi_acknowledge_event(ctrl_info, event);
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
+
+ event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
+
+ pqi_ofa_ctrl_quiesce(ctrl_info);
+ pqi_acknowledge_event(ctrl_info, event);
+ pqi_process_soft_reset(ctrl_info);
+}
+
+static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event)
+{
+ bool ack_event;
+
+ ack_event = true;
+
+ switch (event->event_id) {
+ case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "received Online Firmware Activation memory allocation request\n");
+ schedule_work(&ctrl_info->ofa_memory_alloc_work);
+ break;
+ case PQI_EVENT_OFA_QUIESCE:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "received Online Firmware Activation quiesce request\n");
+ schedule_work(&ctrl_info->ofa_quiesce_work);
+ ack_event = false;
+ break;
+ case PQI_EVENT_OFA_CANCELED:
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "received Online Firmware Activation cancel request: reason: %u\n",
+ ctrl_info->ofa_cancel_reason);
+ pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "received unknown Online Firmware Activation request: event ID: %u\n",
+ event->event_id);
+ break;
}
- mutex_unlock(&ctrl_info->ofa_mutex);
+ return ack_event;
+}
+
+static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->raid_bypass_enabled)
+ device->raid_bypass_enabled = false;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
+ bool rescan_needed;
struct pqi_ctrl_info *ctrl_info;
struct pqi_event *event;
+ bool ack_event;
ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
pqi_ctrl_busy(ctrl_info);
- pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+ pqi_wait_if_ctrl_blocked(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
goto out;
- pqi_schedule_rescan_worker_delayed(ctrl_info);
-
+ rescan_needed = false;
event = ctrl_info->events;
for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (event->pending) {
event->pending = false;
if (event->event_type == PQI_EVENT_TYPE_OFA) {
- pqi_ctrl_unbusy(ctrl_info);
- pqi_ofa_process_event(ctrl_info, event);
- return;
+ ack_event = pqi_ofa_process_event(ctrl_info, event);
+ } else {
+ ack_event = true;
+ rescan_needed = true;
+ if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
+ ctrl_info->logical_volume_rescan_needed = true;
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
}
- pqi_acknowledge_event(ctrl_info, event);
+ if (ack_event)
+ pqi_acknowledge_event(ctrl_info, event);
}
event++;
}
+#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
+
+ if (rescan_needed)
+ pqi_schedule_rescan_worker_with_delay(ctrl_info,
+ PQI_RESCAN_WORK_FOR_EVENT_DELAY);
+
out:
pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
int num_interrupts;
u32 heartbeat_count;
- struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
- heartbeat_timer);
+ struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -3235,7 +3766,7 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
dev_err(&ctrl_info->pci_dev->dev,
"no heartbeat detected - last heartbeat count: %u\n",
heartbeat_count);
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
return;
}
} else {
@@ -3267,43 +3798,24 @@ static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
del_timer_sync(&ctrl_info->heartbeat_timer);
}
-static inline int pqi_event_type_to_event_index(unsigned int event_type)
-{
- int index;
-
- for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
- if (event_type == pqi_supported_event_types[index])
- return index;
-
- return -1;
-}
-
-static inline bool pqi_is_supported_event(unsigned int event_type)
+static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event, struct pqi_event_response *response)
{
- return pqi_event_type_to_event_index(event_type) != -1;
-}
-
-static void pqi_ofa_capture_event_payload(struct pqi_event *event,
- struct pqi_event_response *response)
-{
- u16 event_id;
-
- event_id = get_unaligned_le16(&event->event_id);
-
- if (event->event_type == PQI_EVENT_TYPE_OFA) {
- if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
- event->ofa_bytes_requested =
- response->data.ofa_memory_allocation.bytes_requested;
- } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
- event->ofa_cancel_reason =
- response->data.ofa_cancelled.reason;
- }
+ switch (event->event_id) {
+ case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+ ctrl_info->ofa_bytes_requested =
+ get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
+ break;
+ case PQI_EVENT_OFA_CANCELED:
+ ctrl_info->ofa_cancel_reason =
+ get_unaligned_le16(&response->data.ofa_cancelled.reason);
+ break;
}
}
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int num_events;
+ int num_events;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_event_queue *event_queue;
@@ -3317,26 +3829,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
while (1) {
oq_pi = readl(event_queue->oq_pi);
+ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
+ pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
+ return -1;
+ }
+
if (oq_pi == oq_ci)
break;
num_events++;
- response = event_queue->oq_element_array +
- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
-
- event_index =
- pqi_event_type_to_event_index(response->event_type);
-
- if (event_index >= 0) {
- if (response->request_acknowlege) {
- event = &ctrl_info->events[event_index];
- event->pending = true;
- event->event_type = response->event_type;
- event->event_id = response->event_id;
- event->additional_event_id =
- response->additional_event_id;
- pqi_ofa_capture_event_payload(event, response);
- }
+ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+
+ event_index = pqi_event_type_to_event_index(response->event_type);
+
+ if (event_index >= 0 && response->request_acknowledge) {
+ event = &ctrl_info->events[event_index];
+ event->pending = true;
+ event->event_type = response->event_type;
+ event->event_id = get_unaligned_le16(&response->event_id);
+ event->additional_event_id =
+ get_unaligned_le32(&response->additional_event_id);
+ if (event->event_type == PQI_EVENT_TYPE_OFA)
+ pqi_ofa_capture_event_payload(ctrl_info, event, response);
}
oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3353,8 +3870,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
-static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3431,8 +3947,7 @@ static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
valid_irq = true;
break;
case IRQ_MODE_INTX:
- intx_status =
- readl(&ctrl_info->pqi_registers->legacy_intx_status);
+ intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
if (intx_status & PQI_LEGACY_INTX_PENDING)
valid_irq = true;
else
@@ -3451,7 +3966,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_queue_group *queue_group;
- unsigned int num_responses_handled;
+ int num_io_responses_handled;
+ int num_events_handled;
queue_group = data;
ctrl_info = queue_group->ctrl_info;
@@ -3459,17 +3975,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
if (!pqi_is_valid_irq(ctrl_info))
return IRQ_NONE;
- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ if (num_io_responses_handled < 0)
+ goto out;
- if (irq == ctrl_info->event_irq)
- num_responses_handled += pqi_process_event_intr(ctrl_info);
+ if (irq == ctrl_info->event_irq) {
+ num_events_handled = pqi_process_event_intr(ctrl_info);
+ if (num_events_handled < 0)
+ goto out;
+ } else {
+ num_events_handled = 0;
+ }
- if (num_responses_handled)
+ if (num_io_responses_handled + num_events_handled > 0)
atomic_inc(&ctrl_info->num_interrupts);
pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+out:
return IRQ_HANDLED;
}
@@ -3510,10 +4034,14 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
int num_vectors_enabled;
+ unsigned int flags = PCI_IRQ_MSIX;
+
+ if (!pqi_disable_managed_interrupts)
+ flags |= PCI_IRQ_AFFINITY;
num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ flags);
if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
"MSI-X init failed with error %d\n",
@@ -3744,7 +4272,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
&admin_queues_aligned->iq_element_array;
admin_queues->oq_element_array =
&admin_queues_aligned->oq_element_array;
- admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+ admin_queues->iq_ci =
+ (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
admin_queues->oq_pi =
(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
@@ -3758,8 +4287,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->admin_queue_memory_base);
admin_queues->iq_ci_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
- ((void *)admin_queues->iq_ci -
- ctrl_info->admin_queue_memory_base);
+ ((void __iomem *)admin_queues->iq_ci -
+ (void __iomem *)ctrl_info->admin_queue_memory_base);
admin_queues->oq_pi_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
((void __iomem *)admin_queues->oq_pi -
@@ -3768,7 +4297,7 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3795,17 +4324,18 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
+
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
&pqi_registers->function_and_status_code);
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
while (1) {
+ msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
status = readb(&pqi_registers->function_and_status_code);
if (status == PQI_STATUS_IDLE)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
}
/*
@@ -3861,7 +4391,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
oq_pi = readl(admin_queues->oq_pi);
@@ -3976,7 +4506,7 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
rc = 0;
break;
}
@@ -4022,59 +4552,40 @@ static int pqi_process_raid_io_error_synchronous(
return rc;
}
+static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
+{
+ return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
+}
+
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+ struct pqi_raid_error_info *error_info)
{
int rc = 0;
struct pqi_io_request *io_request;
- unsigned long start_jiffies;
- unsigned long msecs_blocked;
size_t iu_length;
DECLARE_COMPLETION_ONSTACK(wait);
- /*
- * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
- * are mutually exclusive.
- */
-
if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
if (down_interruptible(&ctrl_info->sync_request_sem))
return -ERESTARTSYS;
} else {
- if (timeout_msecs == NO_TIMEOUT) {
- down(&ctrl_info->sync_request_sem);
- } else {
- start_jiffies = jiffies;
- if (down_timeout(&ctrl_info->sync_request_sem,
- msecs_to_jiffies(timeout_msecs)))
- return -ETIMEDOUT;
- msecs_blocked =
- jiffies_to_msecs(jiffies - start_jiffies);
- if (msecs_blocked >= timeout_msecs) {
- rc = -ETIMEDOUT;
- goto out;
- }
- timeout_msecs -= msecs_blocked;
- }
+ down(&ctrl_info->sync_request_sem);
}
pqi_ctrl_busy(ctrl_info);
- timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
- if (timeout_msecs == 0) {
- pqi_ctrl_unbusy(ctrl_info);
- rc = -ETIMEDOUT;
- goto out;
- }
+ /*
+ * Wait for other admin queue updates such as:
+ * config table changes, OFA memory updates, ...
+ */
+ if (pqi_is_blockable_request(request))
+ pqi_wait_if_ctrl_blocked(ctrl_info);
if (pqi_ctrl_offline(ctrl_info)) {
- pqi_ctrl_unbusy(ctrl_info);
rc = -ENXIO;
goto out;
}
- atomic_inc(&ctrl_info->sync_cmds_outstanding);
-
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4091,38 +4602,24 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
io_request->io_complete_callback = pqi_raid_synchronous_complete;
io_request->context = &wait;
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- pqi_ctrl_unbusy(ctrl_info);
-
- if (timeout_msecs == NO_TIMEOUT) {
- pqi_wait_for_completion_io(ctrl_info, &wait);
- } else {
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(timeout_msecs))) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "command timed out\n");
- rc = -ETIMEDOUT;
- }
- }
+ pqi_wait_for_completion_io(ctrl_info, &wait);
if (error_info) {
if (io_request->error_info)
- memcpy(error_info, io_request->error_info,
- sizeof(*error_info));
+ memcpy(error_info, io_request->error_info, sizeof(*error_info));
else
memset(error_info, 0, sizeof(*error_info));
} else if (rc == 0 && io_request->error_info) {
- rc = pqi_process_raid_io_error_synchronous(
- io_request->error_info);
+ rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
}
pqi_free_io_request(io_request);
- atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
+ pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -4159,8 +4656,7 @@ static int pqi_submit_admin_request_synchronous(
rc = pqi_poll_for_admin_response(ctrl_info, response);
if (rc == 0)
- rc = pqi_validate_admin_response(response,
- request->function_code);
+ rc = pqi_validate_admin_response(response, request->function_code);
return rc;
}
@@ -4194,8 +4690,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
- &response);
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
pqi_pci_unmap(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor, 1,
@@ -4497,8 +4992,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
}
#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
- (offsetof(struct pqi_event_config, descriptors) + \
- (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+ struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
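The rewritten macro leans on struct_size() to size a structure ending in a flexible array member; semantically it is offsetof() plus count times the element size, with the kernel helper additionally saturating on overflow. An illustrative analogue (overflow checking omitted; the struct is a stand-in, not the real pqi_event_config):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a struct ending in a flexible array member. */
struct event_config {
	uint8_t  header[4];
	uint16_t descriptors[];
};

/* Analogue of struct_size(): header bytes plus n trailing elements. */
#define STRUCT_SIZE(type, member, n) \
	(offsetof(type, member) + sizeof(((type *)0)->member[0]) * (n))

int main(void)
{
	printf("%zu\n", STRUCT_SIZE(struct event_config, descriptors, 24));
	/* prints 52: 4 header bytes + 24 * 2-byte descriptors */
	return 0;
}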
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
bool enable_events)
@@ -4530,8 +5024,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
@@ -4544,7 +5037,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
event_descriptor = &event_config->descriptors[i];
if (enable_events &&
pqi_is_supported_event(event_descriptor->event_type))
- put_unaligned_le16(ctrl_info->event_queue.oq_id,
+ put_unaligned_le16(ctrl_info->event_queue.oq_id,
&event_descriptor->oq_id);
else
put_unaligned_le16(0, &event_descriptor->oq_id);
@@ -4566,8 +5059,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
@@ -4584,11 +5076,6 @@ static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
return pqi_configure_events(ctrl_info, true);
}
-static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
-{
- return pqi_configure_events(ctrl_info, false);
-}
-
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4619,7 +5106,6 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
-
ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
ctrl_info->error_buffer_length,
&ctrl_info->error_buffer_dma_handle,
@@ -4639,9 +5125,8 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
struct device *dev;
struct pqi_io_request *io_request;
- ctrl_info->io_request_pool =
- kcalloc(ctrl_info->max_io_slots,
- sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+ ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
+ sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
if (!ctrl_info->io_request_pool) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -4654,8 +5139,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
io_request = ctrl_info->io_request_pool;
for (i = 0; i < ctrl_info->max_io_slots; i++) {
- io_request->iu =
- kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+ io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
if (!io_request->iu) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -4675,8 +5159,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
io_request->index = i;
io_request->sg_chain_buffer = sg_chain_buffer;
- io_request->sg_chain_buffer_dma_handle =
- sg_chain_buffer_dma_handle;
+ io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
io_request++;
}
@@ -4783,10 +5266,16 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
sizeof(struct pqi_sg_descriptor)) +
PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+
+ ctrl_info->max_sg_per_r56_iu =
+ ((ctrl_info->max_inbound_iu_length -
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+ sizeof(struct pqi_sg_descriptor)) +
+ PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
}
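
The new max_sg_per_r56_iu mirrors the existing max_sg_per_iu computation: whatever space remains in an inbound IU after the first queue element (PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) is carved into SG descriptors, and the descriptors embedded in the request structure itself are added on top. Worked through with assumed sizes (the real inbound IU length is negotiated with the controller; treat every number below as a placeholder):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed values for illustration; the driver gets these from the
         * controller and from sizeof(struct pqi_sg_descriptor). */
        unsigned int max_inbound_iu_length = 1024;  /* negotiated with controller */
        unsigned int iq_element_length = 128;       /* PQI_OPERATIONAL_IQ_ELEMENT_LENGTH */
        unsigned int sg_descriptor_size = 16;       /* 8-byte address + length + flags */
        unsigned int embedded_r56_sgs = 3;          /* PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS */

        /* (1024 - 128) / 16 + 3 = 59 descriptors per RAID 5/6 write IU. */
        printf("max_sg_per_r56_iu = %u\n",
            (max_inbound_iu_length - iq_element_length) / sg_descriptor_size +
            embedded_r56_sgs);
        return 0;
    }
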
-static inline void pqi_set_sg_descriptor(
- struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
+ struct scatterlist *sg)
{
u64 address = (u64)sg_dma_address(sg);
unsigned int length = sg_dma_len(sg);
@@ -4796,16 +5285,52 @@ static inline void pqi_set_sg_descriptor(
put_unaligned_le32(0, &sg_descriptor->flags);
}
+static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
+ struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
+ int max_sg_per_iu, bool *chained)
+{
+ int i;
+ unsigned int num_sg_in_iu;
+
+ *chained = false;
+ i = 0;
+ num_sg_in_iu = 0;
+ max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
+
+ while (1) {
+ pqi_set_sg_descriptor(sg_descriptor, sg);
+ if (!*chained)
+ num_sg_in_iu++;
+ i++;
+ if (i == sg_count)
+ break;
+ sg_descriptor++;
+ if (i == max_sg_per_iu) {
+ put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
+ &sg_descriptor->address);
+ put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
+ &sg_descriptor->length);
+ put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
+ *chained = true;
+ num_sg_in_iu++;
+ sg_descriptor = io_request->sg_chain_buffer;
+ }
+ sg = sg_next(sg);
+ }
+
+ put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+ return num_sg_in_iu;
+}
+
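
pqi_build_sg_list() above is the common loop that the hunks below strip out of the RAID and AIO builders. The layout detail worth noting: one in-IU slot is held back for a CISS_SG_CHAIN descriptor pointing at the request's preallocated DMA chain buffer, and only descriptors that occupy IU space (inline data entries plus the chain marker) count toward num_sg_in_iu, since that value sizes the IU. A standalone model of the split, assuming a hypothetical max_sg_per_iu of 5:

    #include <stdio.h>

    /*
     * Model of the IU/chain split in pqi_build_sg_list(): with room for
     * max_sg_per_iu descriptors, one slot is reserved for the chain marker,
     * so more than max_sg_per_iu - 1 data entries force a chain.
     */
    static void split(unsigned int sg_count, unsigned int max_sg_per_iu)
    {
        unsigned int inline_slots = max_sg_per_iu - 1;  /* data slots in the IU */

        if (sg_count <= inline_slots)
            printf("%2u SGs: all inline, num_sg_in_iu = %u\n",
                sg_count, sg_count);
        else
            printf("%2u SGs: %u inline + chain marker (num_sg_in_iu = %u), %u chained\n",
                sg_count, inline_slots, inline_slots + 1,
                sg_count - inline_slots);
    }

    int main(void)
    {
        split(3, 5);  /* fits inline */
        split(4, 5);  /* exactly fills the data slots, still inline */
        split(9, 5);  /* chains: 4 inline, 1 marker, 5 in the chain buffer */
        return 0;
    }
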
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
- int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
- unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@@ -4821,41 +5346,89 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
- max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
- chained = false;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
+
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+
+ return 0;
+}
+
+static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
num_sg_in_iu = 0;
- i = 0;
- while (1) {
- pqi_set_sg_descriptor(sg_descriptor, sg);
- if (!chained)
- num_sg_in_iu++;
- i++;
- if (i == sg_count)
- break;
- sg_descriptor++;
- if (i == max_sg_per_iu) {
- put_unaligned_le64(
- (u64)io_request->sg_chain_buffer_dma_handle,
- &sg_descriptor->address);
- put_unaligned_le32((sg_count - num_sg_in_iu)
- * sizeof(*sg_descriptor),
- &sg_descriptor->length);
- put_unaligned_le32(CISS_SG_CHAIN,
- &sg_descriptor->flags);
- chained = true;
- num_sg_in_iu++;
- sg_descriptor = io_request->sg_chain_buffer;
- }
- sg = sg_next(sg);
- }
+ if (sg_count == 0)
+ goto out;
+
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
- put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
out:
put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
+
+ return 0;
+}
+
+static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
+ num_sg_in_iu = 0;
+
+ if (sg_count != 0) {
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_r56_iu, &chained);
+
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+ }
+
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
return 0;
}
@@ -4864,12 +5437,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
- int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
- unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@@ -4886,35 +5457,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
- max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
- chained = false;
- i = 0;
- while (1) {
- pqi_set_sg_descriptor(sg_descriptor, sg);
- if (!chained)
- num_sg_in_iu++;
- i++;
- if (i == sg_count)
- break;
- sg_descriptor++;
- if (i == max_sg_per_iu) {
- put_unaligned_le64(
- (u64)io_request->sg_chain_buffer_dma_handle,
- &sg_descriptor->address);
- put_unaligned_le32((sg_count - num_sg_in_iu)
- * sizeof(*sg_descriptor),
- &sg_descriptor->length);
- put_unaligned_le32(CISS_SG_CHAIN,
- &sg_descriptor->flags);
- chained = true;
- num_sg_in_iu++;
- sg_descriptor = io_request->sg_chain_buffer;
- }
- sg = sg_next(sg);
- }
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
- put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
@@ -4949,16 +5495,15 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
io_request->scmd = scmd;
request = io_request->iu;
- memset(request, 0,
- offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
- memcpy(request->lun_number, device->scsi3addr,
- sizeof(request->lun_number));
+ memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
+ request->ml_device_lun_number = (u8)scmd->device->lun;
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -4968,38 +5513,28 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
case 10:
case 12:
case 16:
- /* No bytes in the Additional CDB bytes field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_0;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
break;
case 20:
- /* 4 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_4;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
break;
case 24:
- /* 8 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_8;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
break;
case 28:
- /* 12 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_12;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
break;
case 32:
default:
- /* 16 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_16;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
break;
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
@@ -5038,12 +5573,6 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
device, scmd, queue_group);
}
-static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
-{
- if (!pqi_ctrl_blocked(ctrl_info))
- schedule_work(&ctrl_info->raid_bypass_retry_work);
-}
-
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
struct scsi_cmnd *scmd;
@@ -5060,7 +5589,7 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
return false;
device = scmd->device->hostdata;
- if (pqi_device_offline(device))
+ if (pqi_device_offline(device) || pqi_device_in_remove(device))
return false;
ctrl_info = shost_to_hba(scmd->device->host);
@@ -5070,181 +5599,95 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
return true;
}
-static inline void pqi_add_to_raid_bypass_retry_list(
- struct pqi_ctrl_info *ctrl_info,
- struct pqi_io_request *io_request, bool at_head)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- if (at_head)
- list_add(&io_request->request_list_entry,
- &ctrl_info->raid_bypass_retry_list);
- else
- list_add_tail(&io_request->request_list_entry,
- &ctrl_info->raid_bypass_retry_list);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
-static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
+static void pqi_aio_io_complete(struct pqi_io_request *io_request,
void *context)
{
struct scsi_cmnd *scmd;
scmd = io_request->scmd;
- pqi_free_io_request(io_request);
- pqi_scsi_done(scmd);
-}
-
-static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
-{
- struct scsi_cmnd *scmd;
- struct pqi_ctrl_info *ctrl_info;
-
- io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
- scmd = io_request->scmd;
- scmd->result = 0;
- ctrl_info = shost_to_hba(scmd->device->host);
-
- pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
- pqi_schedule_bypass_retry(ctrl_info);
-}
-
-static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
-{
- struct scsi_cmnd *scmd;
- struct pqi_scsi_dev *device;
- struct pqi_ctrl_info *ctrl_info;
- struct pqi_queue_group *queue_group;
-
- scmd = io_request->scmd;
- device = scmd->device->hostdata;
- if (pqi_device_in_reset(device)) {
- pqi_free_io_request(io_request);
- set_host_byte(scmd, DID_RESET);
- pqi_scsi_done(scmd);
- return 0;
+ scsi_dma_unmap(scmd);
+ if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
+ set_host_byte(scmd, DID_IMM_RETRY);
+ pqi_cmd_priv(scmd)->this_residual++;
}
- ctrl_info = shost_to_hba(scmd->device->host);
- queue_group = io_request->queue_group;
-
- pqi_reinit_io_request(io_request);
-
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
-}
-
-static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
- struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_io_request *io_request;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- io_request = list_first_entry_or_null(
- &ctrl_info->raid_bypass_retry_list,
- struct pqi_io_request, request_list_entry);
- if (io_request)
- list_del(&io_request->request_list_entry);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-
- return io_request;
+ pqi_free_io_request(io_request);
+ pqi_scsi_done(scmd);
}
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
-{
- int rc;
- struct pqi_io_request *io_request;
-
- pqi_ctrl_busy(ctrl_info);
-
- while (1) {
- if (pqi_ctrl_blocked(ctrl_info))
- break;
- io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
- if (!io_request)
- break;
- rc = pqi_retry_raid_bypass(io_request);
- if (rc) {
- pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
- true);
- pqi_schedule_bypass_retry(ctrl_info);
- break;
+static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
}
}
- pqi_ctrl_unbusy(ctrl_info);
-}
-
-static void pqi_raid_bypass_retry_worker(struct work_struct *work)
-{
- struct pqi_ctrl_info *ctrl_info;
-
- ctrl_info = container_of(work, struct pqi_ctrl_info,
- raid_bypass_retry_work);
- pqi_retry_raid_bypass_requests(ctrl_info);
-}
-
-static void pqi_clear_all_queued_raid_bypass_retries(
- struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
-static void pqi_aio_io_complete(struct pqi_io_request *io_request,
- void *context)
-{
- struct scsi_cmnd *scmd;
-
- scmd = io_request->scmd;
- scsi_dma_unmap(scmd);
- if (io_request->status == -EAGAIN)
- set_host_byte(scmd, DID_IMM_RETRY);
- else if (pqi_raid_bypass_retry_needed(io_request)) {
- pqi_queue_raid_bypass_retry(io_request);
- return;
- }
- pqi_free_io_request(io_request);
- pqi_scsi_done(scmd);
+ return io_high_prio;
}
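
pqi_is_io_high_priority() is new plumbing from the block layer's per-request I/O priority to the controller's command_priority field: only realtime-class requests on devices with ncq_prio_enable set (the sysfs knob added near the end of this diff), and only plain read/write CDBs, are flagged. For reference, a userspace sketch that puts a process into the realtime I/O class via the raw ioprio_set(2) syscall (no glibc wrapper exists; the RT class normally requires CAP_SYS_ADMIN):

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* From include/uapi/linux/ioprio.h, repeated here for self-containment. */
    #define IOPRIO_CLASS_SHIFT  13
    #define IOPRIO_PRIO_VALUE(class, data)  (((class) << IOPRIO_CLASS_SHIFT) | (data))
    #define IOPRIO_CLASS_RT     1
    #define IOPRIO_WHO_PROCESS  1

    int main(void)
    {
        /* Subsequent reads/writes from this process carry IOPRIO_CLASS_RT,
         * which pqi_is_io_high_priority() maps to a high-priority command. */
        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) < 0) {
            perror("ioprio_set");
            return 1;
        }
        puts("I/O priority set to realtime class");
        return 0;
    }
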
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
+ bool io_high_prio;
+
+ io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
- scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL,
+ false, io_high_prio);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass)
+ struct pqi_encryption_info *encryption_info, bool raid_bypass,
+ bool io_high_prio)
{
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
request = io_request->iu;
- memset(request, 0,
- offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
put_unaligned_le32(aio_handle, &request->nexus_id);
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5291,18 +5734,149 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_r1_path_request *r1_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+ io_request->raid_bypass = true;
+
+ r1_request = io_request->iu;
+ memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
+
+ r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
+ put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
+ r1_request->num_drives = rmd->num_it_nexus_entries;
+ put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
+ put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
+ if (rmd->num_it_nexus_entries == 3)
+ put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
+
+ put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
+ r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le16(io_request->index, &r1_request->request_id);
+ r1_request->error_index = r1_request->request_id;
+ if (rmd->cdb_length > sizeof(r1_request->cdb))
+ rmd->cdb_length = sizeof(r1_request->cdb);
+ r1_request->cdb_length = rmd->cdb_length;
+ memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
+
+ /* The direction is always write. */
+ r1_request->data_direction = SOP_READ_FLAG;
+
+ if (encryption_info) {
+ r1_request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &r1_request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &r1_request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &r1_request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_r56_path_request *r56_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+ io_request->raid_bypass = true;
+
+ r56_request = io_request->iu;
+ memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
+
+ if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
+ r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
+ else
+ r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
+
+ put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
+ put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
+ put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
+ if (rmd->raid_level == SA_RAID_6) {
+ put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
+ r56_request->xor_multiplier = rmd->xor_mult;
+ }
+ put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
+ r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le64(rmd->row, &r56_request->row);
+
+ put_unaligned_le16(io_request->index, &r56_request->request_id);
+ r56_request->error_index = r56_request->request_id;
+
+ if (rmd->cdb_length > sizeof(r56_request->cdb))
+ rmd->cdb_length = sizeof(r56_request->cdb);
+ r56_request->cdb_length = rmd->cdb_length;
+ memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
+
+ /* The direction is always write. */
+ r56_request->data_direction = SOP_READ_FLAG;
+
+ if (encryption_info) {
+ r56_request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &r56_request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &r56_request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &r56_request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
u16 hw_queue;
- hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+ hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
if (hw_queue > ctrl_info->max_hw_queue_index)
hw_queue = 0;
return hw_queue;
}
+static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
+{
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
+ return false;
+
+ return pqi_cmd_priv(scmd)->this_residual == 0;
+}
+
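
pqi_is_bypass_eligible_request() leans on per-command private data instead of the deleted retry list: the host template at the bottom of this diff sets .cmd_size, so the midlayer allocates driver-private bytes behind every scsi_cmnd, and this_residual counts how often the command has already bounced off the bypass path. A sketch of the usual wiring, assuming the driver defines something along these lines:

    #include <scsi/scsi_cmnd.h>

    /* One instance is allocated behind each scsi_cmnd because the host
     * template sets .cmd_size = sizeof(struct pqi_cmd_priv). */
    struct pqi_cmd_priv {
        int this_residual;  /* RAID bypass retry count for this command */
    };

    static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *scmd)
    {
        /* scsi_cmd_priv() returns the driver bytes that follow the scsi_cmnd. */
        return scsi_cmd_priv(scmd);
    }
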
/*
* This function gets called just before we hand the completed SCSI request
* back to the SML.
@@ -5323,12 +5897,86 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
return;
}
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
}
-static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
+ u32 oldest_jiffies;
+ u8 lru_index;
+ int i;
+ int rc;
+ struct pqi_scsi_dev *device;
+ struct pqi_stream_data *pqi_stream_data;
+ struct pqi_scsi_dev_raid_map_data rmd;
+
+ if (!ctrl_info->enable_stream_detection)
+ return false;
+
+ rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+ if (rc)
+ return false;
+
+ /* Check writes only. */
+ if (!rmd.is_write)
+ return false;
+
+ device = scmd->device->hostdata;
+
+ /* Check for RAID 5/6 streams. */
+ if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
+ return false;
+
+ /*
+ * If the controller does not support AIO RAID{5,6} writes, these
+ * requests must be sent down the non-AIO path.
+ */
+ if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
+ (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
+ return true;
+
+ lru_index = 0;
+ oldest_jiffies = INT_MAX;
+ for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+ pqi_stream_data = &device->stream_data[i];
+ /*
+ * Check whether this request is adjacent to, or falls within,
+ * the previous request.
+ */
+ if ((pqi_stream_data->next_lba &&
+ rmd.first_block >= pqi_stream_data->next_lba) &&
+ rmd.first_block <= pqi_stream_data->next_lba +
+ rmd.block_cnt) {
+ pqi_stream_data->next_lba = rmd.first_block +
+ rmd.block_cnt;
+ pqi_stream_data->last_accessed = jiffies;
+ return true;
+ }
+
+ /* Unused entry. */
+ if (pqi_stream_data->last_accessed == 0) {
+ lru_index = i;
+ break;
+ }
+
+ /* Find entry with oldest last accessed time. */
+ if (pqi_stream_data->last_accessed <= oldest_jiffies) {
+ oldest_jiffies = pqi_stream_data->last_accessed;
+ lru_index = i;
+ }
+ }
+
+ /* Set LRU entry. */
+ pqi_stream_data = &device->stream_data[lru_index];
+ pqi_stream_data->last_accessed = jiffies;
+ pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
+
+ return false;
+}
+
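
The stream detector keeps NUM_STREAMS_PER_LUN {next_lba, last_accessed} slots per device; a write continues a stream when its first block lands between the recorded next_lba and next_lba plus the current request length, otherwise it evicts the least-recently-used slot. Detected sequential parity writes return true so they stay on the RAID path, where the controller can assemble full-stripe writes. A standalone model of the slot logic (table size and LBAs invented; the real code also breaks early on never-used slots):

    #include <stdio.h>
    #include <stdbool.h>

    #define NUM_STREAMS 8   /* stands in for NUM_STREAMS_PER_LUN */

    struct stream { unsigned long long next_lba; unsigned long last_accessed; };

    /* True when the write continues a tracked stream; otherwise claim the
     * LRU slot, mirroring the loop in pqi_is_parity_write_stream(). */
    static bool is_stream(struct stream *tab, unsigned long now,
            unsigned long long lba, unsigned int blocks)
    {
        unsigned int i, lru = 0;
        unsigned long oldest = ~0UL;

        for (i = 0; i < NUM_STREAMS; i++) {
            if (tab[i].next_lba && lba >= tab[i].next_lba &&
                    lba <= tab[i].next_lba + blocks) {
                tab[i].next_lba = lba + blocks;
                tab[i].last_accessed = now;
                return true;
            }
            if (tab[i].last_accessed <= oldest) {   /* 0 == never used */
                oldest = tab[i].last_accessed;
                lru = i;
            }
        }
        tab[lru].next_lba = lba + blocks;
        tab[lru].last_accessed = now;
        return false;
    }

    int main(void)
    {
        struct stream tab[NUM_STREAMS] = { 0 };

        printf("%d\n", is_stream(tab, 1, 0, 256));    /* 0: first write claims a slot */
        printf("%d\n", is_stream(tab, 2, 256, 256));  /* 1: adjacent, stream detected */
        printf("%d\n", is_stream(tab, 3, 4096, 64));  /* 0: random, claims another slot */
        return 0;
    }
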
+static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
int rc;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
@@ -5337,7 +5985,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
bool raid_bypassed;
device = scmd->device->hostdata;
- ctrl_info = shost_to_hba(shost);
if (!device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5345,18 +5992,17 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding);
+ atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
+
+ ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
- device)) {
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
}
- pqi_ctrl_busy(ctrl_info);
- if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
+ if (pqi_ctrl_blocked(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5373,90 +6019,115 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
- rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
- scmd, queue_group);
- if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
+ pqi_is_bypass_eligible_request(scmd) &&
+ !pqi_is_parity_write_stream(ctrl_info, scmd)) {
+ rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
+ atomic_inc(&device->raid_bypass_cnt);
+ }
}
if (!raid_bypassed)
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
} else {
if (device->aio_enabled)
- rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
else
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
}
out:
- pqi_ctrl_unbusy(ctrl_info);
if (rc)
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
return rc;
}
-static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
+static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
{
+ unsigned int i;
unsigned int path;
unsigned long flags;
- bool list_is_empty;
+ unsigned int queued_io_count;
+ struct pqi_queue_group *queue_group;
+ struct pqi_io_request *io_request;
- for (path = 0; path < 2; path++) {
- while (1) {
- spin_lock_irqsave(
- &queue_group->submit_lock[path], flags);
- list_is_empty =
- list_empty(&queue_group->request_list[path]);
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
- if (list_is_empty)
- break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
+ queued_io_count = 0;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(&queue_group->submit_lock[path], flags);
+ list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
+ queued_io_count++;
+ spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
}
- return 0;
+ return queued_io_count;
}
-static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
{
- int rc;
unsigned int i;
unsigned int path;
+ unsigned int nonempty_inbound_queue_count;
struct pqi_queue_group *queue_group;
pqi_index_t iq_pi;
pqi_index_t iq_ci;
+ nonempty_inbound_queue_count = 0;
+
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
queue_group = &ctrl_info->queue_groups[i];
-
- rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
- if (rc)
- return rc;
-
for (path = 0; path < 2; path++) {
iq_pi = queue_group->iq_pi_copy[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
+ if (iq_ci != iq_pi)
+ nonempty_inbound_queue_count++;
+ }
+ }
- while (1) {
- iq_ci = readl(queue_group->iq_ci[path]);
- if (iq_ci == iq_pi)
- break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
- }
+ return nonempty_inbound_queue_count;
+}
+
+#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
+
+static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ unsigned int queued_io_count;
+ unsigned int nonempty_inbound_queue_count;
+ bool displayed_warning;
+
+ displayed_warning = false;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
+
+ while (1) {
+ queued_io_count = pqi_queued_io_count(ctrl_info);
+ nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
+ if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
+ displayed_warning = true;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
+ usleep_range(1000, 2000);
}
+ if (displayed_warning)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "queued I/O drained after waiting for %u seconds\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+
return 0;
}
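
This drain loop, pqi_device_wait_for_pending_io() below, and the LUN-reset wait further down all adopt the same shape: poll, check controller health, and rather than give up at a hard deadline, emit a warning every ten seconds and re-arm the warning timer so a slow but progressing drain stays visible in the log. The skeleton, reduced to its moving parts (condition and message stubbed out):

    #include <linux/delay.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    #define WARNING_TIMEOUT_SECS 10

    /* Skeleton of the re-armed warning timeout used by the drain loops. */
    static int wait_with_warnings(bool (*done)(void *), void *arg)
    {
        unsigned long start = jiffies;
        unsigned long warn_at = start + WARNING_TIMEOUT_SECS * HZ;

        while (!done(arg)) {
            if (time_after(jiffies, warn_at)) {
                pr_warn("still waiting after %u seconds\n",
                    jiffies_to_msecs(jiffies - start) / 1000);
                warn_at = jiffies + WARNING_TIMEOUT_SECS * HZ;  /* re-arm */
            }
            usleep_range(1000, 2000);
        }

        return 0;
    }
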
@@ -5482,6 +6153,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
list_for_each_entry_safe(io_request, next,
&queue_group->request_list[path],
request_list_entry) {
+
scmd = io_request->scmd;
if (!scmd)
continue;
@@ -5492,6 +6164,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
list_del(&io_request->request_list_entry);
set_host_byte(scmd, DID_RESET);
+ pqi_free_io_request(io_request);
+ scsi_dma_unmap(scmd);
pqi_scsi_done(scmd);
}
@@ -5501,102 +6175,39 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
}
}
-static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned int i;
- unsigned int path;
- struct pqi_queue_group *queue_group;
- unsigned long flags;
- struct pqi_io_request *io_request;
- struct pqi_io_request *next;
- struct scsi_cmnd *scmd;
-
- for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- queue_group = &ctrl_info->queue_groups[i];
-
- for (path = 0; path < 2; path++) {
- spin_lock_irqsave(&queue_group->submit_lock[path],
- flags);
-
- list_for_each_entry_safe(io_request, next,
- &queue_group->request_list[path],
- request_list_entry) {
-
- scmd = io_request->scmd;
- if (!scmd)
- continue;
-
- list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
- pqi_scsi_done(scmd);
- }
-
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
- }
- }
-}
+#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_secs)
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{
- unsigned long timeout;
+ int cmds_outstanding;
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ unsigned long msecs_waiting;
- timeout = (timeout_secs * PQI_HZ) + jiffies;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
- while (atomic_read(&device->scsi_cmds_outstanding)) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- if (timeout_secs != NO_TIMEOUT) {
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "timed out waiting for pending IO\n");
- return -ETIMEDOUT;
- }
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
+ if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
}
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- unsigned long timeout_secs)
-{
- bool io_pending;
- unsigned long flags;
- unsigned long timeout;
- struct pqi_scsi_dev *device;
-
- timeout = (timeout_secs * PQI_HZ) + jiffies;
- while (1) {
- io_pending = false;
-
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (atomic_read(&device->scsi_cmds_outstanding)) {
- io_pending = true;
- break;
- }
+ msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
+ if (msecs_waiting >= timeout_msecs) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target,
+ lun, msecs_waiting / 1000, cmds_outstanding);
+ return -ETIMEDOUT;
}
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!io_pending)
- break;
-
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
-
- if (timeout_secs != NO_TIMEOUT) {
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "timed out waiting for pending IO\n");
- return -ETIMEDOUT;
- }
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target,
+ lun, msecs_waiting / 1000, cmds_outstanding);
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
@@ -5604,18 +6215,6 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
-static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
-{
- while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5624,17 +6223,20 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 30
#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct completion *wait)
+ struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{
int rc;
+ unsigned int wait_secs;
+ int cmds_outstanding;
+
+ wait_secs = 0;
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
rc = 0;
break;
}
@@ -5644,19 +6246,28 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
rc = -ENXIO;
break;
}
+
+ wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
}
return rc;
}
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
+
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -5670,16 +6281,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(io_request->index, &request->request_id);
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ request->ml_device_lun_number = (u8)scmd->device->lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
- put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
- &request->timeout);
+ put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -5688,55 +6299,52 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
return rc;
}
-/* Performs a reset at the LUN level. */
+#define PQI_LUN_RESET_RETRIES 3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
+#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-#define PQI_LUN_RESET_RETRIES 3
-#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
-#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
-
-static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
- int rc;
+ int reset_rc;
+ int wait_rc;
unsigned int retries;
- unsigned long timeout_secs;
+ unsigned long timeout_msecs;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
for (retries = 0;;) {
- rc = pqi_lun_reset(ctrl_info, device);
- if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, scmd);
+ if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
- timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
+ timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
+ PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
+ if (wait_rc && reset_rc == 0)
+ reset_rc = wait_rc;
- return rc == 0 ? SUCCESS : FAILED;
+ return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
+ struct pqi_scsi_dev *device;
- mutex_lock(&ctrl_info->lun_reset_mutex);
-
+ device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_device_reset_start(device);
- pqi_ctrl_unblock_requests(ctrl_info);
-
if (rc)
rc = FAILED;
else
- rc = _pqi_device_reset(ctrl_info, device);
-
- pqi_device_reset_done(device);
-
- mutex_unlock(&ctrl_info->lun_reset_mutex);
+ rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
+ pqi_ctrl_unblock_requests(ctrl_info);
return rc;
}
@@ -5752,29 +6360,27 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d\n",
- shost->host_no, device->bus, device->target, device->lun);
+ "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
+ shost->host_no,
+ device->bus, device->target, (u32)scmd->device->lun,
+ scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info) ||
- pqi_device_reset_blocked(ctrl_info)) {
+ if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
- goto out;
- }
-
- pqi_wait_until_ofa_finished(ctrl_info);
-
- atomic_inc(&ctrl_info->sync_cmds_outstanding);
- rc = pqi_device_reset(ctrl_info, device);
- atomic_dec(&ctrl_info->sync_cmds_outstanding);
+ else
+ rc = pqi_device_reset(ctrl_info, scmd);
-out:
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+
return rc;
}
@@ -5795,9 +6401,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
rphy = target_to_rphy(starget);
device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
if (device) {
- device->target = sdev_id(sdev);
- device->lun = sdev->lun;
- device->target_lun_valid = true;
+ if (device->target_lun_valid) {
+ device->ignore_device = true;
+ } else {
+ device->target = sdev_id(sdev);
+ device->lun = sdev->lun;
+ device->target_lun_valid = true;
+ }
}
} else {
device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
@@ -5812,10 +6422,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev,
device->advertised_queue_depth);
}
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device)) {
pqi_disable_write_same(sdev);
- else
+ } else {
sdev->allow_restart = 1;
+ if (device->device_type == SA_DEVICE_TYPE_NVME)
+ pqi_disable_write_same(sdev);
+ }
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5823,16 +6436,65 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0);
+}
+
+static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
+{
+ return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
}
-static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
- void __user *arg)
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+ int rc = 0;
+ struct pqi_scsi_dev *device;
+
+ device = sdev->hostdata;
+ device->devtype = sdev->type;
+
+ if (pqi_is_tape_changer_device(device) && device->ignore_device) {
+ rc = -ENXIO;
+ device->ignore_device = false;
+ }
+
+ return rc;
+}
+
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ int mutex_acquired;
+ unsigned long flags;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+ if (!mutex_acquired)
+ return;
+
+ device = sdev->hostdata;
+ if (!device) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
u32 subsystem_vendor;
@@ -5849,8 +6511,7 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
pciinfo.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
- subsystem_vendor;
+ pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
return -EFAULT;
@@ -5956,6 +6617,8 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
+ if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
+ return -EBUSY;
if (!arg)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
@@ -6040,7 +6703,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+ PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
if (iocommand.buf_size > 0)
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
@@ -6092,9 +6755,6 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
- return -EBUSY;
-
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
@@ -6127,14 +6787,13 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
}
static ssize_t pqi_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%s\n",
- DRIVER_VERSION BUILD_TIMESTAMP);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6146,7 +6805,7 @@ static ssize_t pqi_serial_number_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
}
static ssize_t pqi_model_show(struct device *dev,
@@ -6158,7 +6817,7 @@ static ssize_t pqi_model_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
}
static ssize_t pqi_vendor_show(struct device *dev,
@@ -6170,7 +6829,7 @@ static ssize_t pqi_vendor_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -6191,14 +6850,14 @@ static ssize_t pqi_lockup_action_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
if (pqi_lockup_actions[i].action == pqi_lockup_action)
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"[%s] ", pqi_lockup_actions[i].name);
else
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s ", pqi_lockup_actions[i].name);
}
- count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+ count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
return count;
}
@@ -6223,26 +6882,120 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
return -EINVAL;
}
+static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n",
+ ctrl_info->enable_stream_detection);
+}
+
+static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_stream_detection = 0;
+
+ if (kstrtou8(buffer, 0, &set_stream_detection))
+ return -EINVAL;
+
+ if (set_stream_detection > 0)
+ set_stream_detection = 1;
+
+ ctrl_info->enable_stream_detection = set_stream_detection;
+
+ return count;
+}
+
+static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
+}
+
+static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_r5_writes = 0;
+
+ if (kstrtou8(buffer, 0, &set_r5_writes))
+ return -EINVAL;
+
+ if (set_r5_writes > 0)
+ set_r5_writes = 1;
+
+ ctrl_info->enable_r5_writes = set_r5_writes;
+
+ return count;
+}
+
+static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
+}
+
+static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_r6_writes = 0;
+
+ if (kstrtou8(buffer, 0, &set_r6_writes))
+ return -EINVAL;
+
+ if (set_r6_writes > 0)
+ set_r6_writes = 1;
+
+ ctrl_info->enable_r6_writes = set_r6_writes;
+
+ return count;
+}
+
static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
-static DEVICE_ATTR(lockup_action, 0644,
- pqi_lockup_action_show, pqi_lockup_action_store);
-
-static struct device_attribute *pqi_shost_attrs[] = {
- &dev_attr_driver_version,
- &dev_attr_firmware_version,
- &dev_attr_model,
- &dev_attr_serial_number,
- &dev_attr_vendor,
- &dev_attr_rescan,
- &dev_attr_lockup_action,
+static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
+ pqi_lockup_action_store);
+static DEVICE_ATTR(enable_stream_detection, 0644,
+ pqi_host_enable_stream_detection_show,
+ pqi_host_enable_stream_detection_store);
+static DEVICE_ATTR(enable_r5_writes, 0644,
+ pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
+static DEVICE_ATTR(enable_r6_writes, 0644,
+ pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
+
+static struct attribute *pqi_shost_attrs[] = {
+ &dev_attr_driver_version.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_model.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_rescan.attr,
+ &dev_attr_lockup_action.attr,
+ &dev_attr_enable_stream_detection.attr,
+ &dev_attr_enable_r5_writes.attr,
+ &dev_attr_enable_r6_writes.attr,
NULL
};
+ATTRIBUTE_GROUPS(pqi_shost);
+
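
The device_attribute-pointer arrays are gone because the midlayer switched scsi_host_template over to attribute groups (.shost_groups/.sdev_groups, used at the end of this diff). ATTRIBUTE_GROUPS(pqi_shost) expands to approximately the following; the real macro in <linux/sysfs.h> also supports optional is_visible callbacks:

    /* Approximate expansion of ATTRIBUTE_GROUPS(pqi_shost). */
    static const struct attribute_group pqi_shost_group = {
        .attrs = pqi_shost_attrs,
    };

    static const struct attribute_group *pqi_shost_groups[] = {
        &pqi_shost_group,
        NULL
    };
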
static ssize_t pqi_unique_id_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6255,26 +7008,27 @@ static ssize_t pqi_unique_id_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
- if (device->is_physical_device) {
- memset(unique_id, 0, 8);
- memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
- } else {
+ if (device->is_physical_device)
+ memcpy(unique_id, device->wwid, sizeof(device->wwid));
+ else
memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
- }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE,
- "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ return scnprintf(buffer, PAGE_SIZE,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
unique_id[0], unique_id[1], unique_id[2], unique_id[3],
unique_id[4], unique_id[5], unique_id[6], unique_id[7],
unique_id[8], unique_id[9], unique_id[10], unique_id[11],
@@ -6293,12 +7047,14 @@ static ssize_t pqi_lunid_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6306,7 +7062,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+ return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
#define MAX_PATHS 8
@@ -6329,12 +7085,14 @@ static ssize_t pqi_path_info_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6407,12 +7165,14 @@ static ssize_t pqi_sas_address_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (pqi_is_logical_device(device)) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6420,7 +7180,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+ return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
@@ -6434,9 +7194,17 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
buffer[0] = device->raid_bypass_enabled ? '1' : '0';
buffer[1] = '\n';
buffer[2] = '\0';
@@ -6458,9 +7226,16 @@ static ssize_t pqi_raid_level_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
if (pqi_is_logical_device(device))
raid_level = pqi_raid_level_to_string(device->raid_level);
@@ -6469,27 +7244,131 @@ static ssize_t pqi_raid_level_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+}
+
+static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int raid_bypass_cnt;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+}
+
+static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int output_len = 0;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ output_len = snprintf(buf, PAGE_SIZE, "%d\n",
+ device->ncq_prio_enable);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return output_len;
+}
+
+static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u8 ncq_prio_enable = 0;
+
+ if (kstrtou8(buf, 0, &ncq_prio_enable))
+ return -EINVAL;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ if (!device->ncq_prio_support ||
+ !device->is_physical_device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -EINVAL;
+ }
+
+ device->ncq_prio_enable = ncq_prio_enable;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return strlen(buf);
}
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
- pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
-
-static struct device_attribute *pqi_sdev_attrs[] = {
- &dev_attr_lunid,
- &dev_attr_unique_id,
- &dev_attr_path_info,
- &dev_attr_sas_address,
- &dev_attr_ssd_smart_path_enabled,
- &dev_attr_raid_level,
+static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
+static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
+ pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
+
+static struct attribute *pqi_sdev_attrs[] = {
+ &dev_attr_lunid.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_path_info.attr,
+ &dev_attr_sas_address.attr,
+ &dev_attr_ssd_smart_path_enabled.attr,
+ &dev_attr_raid_level.attr,
+ &dev_attr_raid_bypass_cnt.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL
};
+ATTRIBUTE_GROUPS(pqi_sdev);
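/*
 * Aside, a sketch: ATTRIBUTE_GROUPS(pqi_sdev) expands to roughly the
 * following (see <linux/sysfs.h>), producing the pqi_sdev_groups array
 * that the host template below references in place of the removed
 * pqi_sdev_attrs pointer list:
 *
 *	static const struct attribute_group pqi_sdev_group = {
 *		.attrs = pqi_sdev_attrs,
 *	};
 *	static const struct attribute_group *pqi_sdev_groups[] = {
 *		&pqi_sdev_group,
 *		NULL,
 *	};
 */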
+
static struct scsi_host_template pqi_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME_SHORT,
@@ -6501,9 +7380,12 @@ static struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
+ .slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
- .sdev_attrs = pqi_sdev_attrs,
- .shost_attrs = pqi_shost_attrs,
+ .sdev_groups = pqi_sdev_groups,
+ .shost_groups = pqi_shost_groups,
+ .cmd_size = sizeof(struct pqi_cmd_priv),
};
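/*
 * Aside, not part of the patch: setting .cmd_size makes the SCSI midlayer
 * reserve sizeof(struct pqi_cmd_priv) bytes of per-command driver data
 * directly behind each struct scsi_cmnd; scsi_cmd_priv() from
 * <scsi/scsi_cmnd.h> hands that area back. A sketch of the accessor
 * pattern this enables (demo_cmd_priv is hypothetical):
 */
static inline struct pqi_cmd_priv *demo_cmd_priv(struct scsi_cmnd *scmd)
{
	return scsi_cmd_priv(scmd);	/* effectively (void *)(scmd + 1) */
}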
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
@@ -6513,9 +7395,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
if (!shost) {
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi_host_alloc failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
}
@@ -6524,6 +7404,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->this_id = -1;
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
- shost->max_lun = ~0;
+ shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
@@ -6534,21 +7415,18 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
+ shost->host_tagset = 1;
shost->hostdata[0] = (unsigned long)ctrl_info;
rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi_add_host failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
goto free_host;
}
rc = pqi_add_sas_host(shost, ctrl_info);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "add SAS host failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
goto remove_host;
}
@@ -6595,8 +7473,7 @@ static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
reset_reg.all_bits = readl(&pqi_registers->device_reset);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (!sis_is_firmware_running(ctrl_info)) {
rc = -ENXIO;
break;
}
@@ -6618,8 +7495,7 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
rc = sis_pqi_reset_quiesce(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "PQI reset failed during quiesce with error %d\n",
- rc);
+ "PQI reset failed during quiesce with error %d\n", rc);
return rc;
}
}
@@ -6674,13 +7550,24 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- memcpy(ctrl_info->firmware_version, identify->firmware_version,
- sizeof(identify->firmware_version));
- ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
- snprintf(ctrl_info->firmware_version +
- strlen(ctrl_info->firmware_version),
- sizeof(ctrl_info->firmware_version),
- "-%u", get_unaligned_le16(&identify->firmware_build_number));
+ if (get_unaligned_le32(&identify->extra_controller_flags) &
+ BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
+ memcpy(ctrl_info->firmware_version,
+ identify->firmware_version_long,
+ sizeof(identify->firmware_version_long));
+ } else {
+ memcpy(ctrl_info->firmware_version,
+ identify->firmware_version_short,
+ sizeof(identify->firmware_version_short));
+ ctrl_info->firmware_version
+ [sizeof(identify->firmware_version_short)] = '\0';
+ snprintf(ctrl_info->firmware_version +
+ strlen(ctrl_info->firmware_version),
+ sizeof(ctrl_info->firmware_version) -
+ sizeof(identify->firmware_version_short),
+ "-%u",
+ get_unaligned_le16(&identify->firmware_build_number));
+ }
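/*
 * Aside with invented values: a controller lacking the long-firmware-version
 * flag that reports firmware_version_short "2.11" and firmware_build_number
 * 305 ends up exposing "2.11-305"; one with the flag set already carries the
 * complete string in firmware_version_long and needs no "-build" suffix.
 */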
memcpy(ctrl_info->model, identify->product_id,
sizeof(identify->product_id));
@@ -6690,6 +7577,9 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
sizeof(identify->vendor_id));
ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Firmware version: %s\n", ctrl_info->firmware_version);
+
out:
kfree(identify);
@@ -6767,8 +7657,7 @@ static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(last_section,
&request.data.config_table_update.last_section);
- return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
@@ -6777,6 +7666,7 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
{
void *features_requested;
void __iomem *features_requested_iomem_addr;
+ void __iomem *host_max_known_feature_iomem_addr;
features_requested = firmware_features->features_supported +
le16_to_cpu(firmware_features->num_elements);
@@ -6787,6 +7677,16 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
memcpy_toio(features_requested_iomem_addr, features_requested,
le16_to_cpu(firmware_features->num_elements));
+ if (pqi_is_firmware_feature_supported(firmware_features,
+ PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
+ host_max_known_feature_iomem_addr =
+ features_requested_iomem_addr +
+ (le16_to_cpu(firmware_features->num_elements) * 2) +
+ sizeof(__le16);
+ writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
+ host_max_known_feature_iomem_addr);
+ }
+
return pqi_config_table_update(ctrl_info,
PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
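/*
 * Aside, a sketch of the firmware-features section layout this code assumes
 * (field order taken from struct pqi_config_table_firmware_features;
 * n == num_elements):
 *
 *	header, __le16 num_elements
 *	u8 features_supported[n]
 *	u8 features_requested_by_host[n]   <- features_requested_iomem_addr
 *	u8 features_enabled[n]
 *	__le16 firmware_max_known_feature
 *	__le16 host_max_known_feature      <- written with writew() above
 *
 * hence the offset of n * 2 bytes plus one __le16 from the requested bitmap
 * to the host_max_known_feature slot.
 */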
@@ -6824,17 +7724,35 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+ ctrl_info->enable_r1_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+ ctrl_info->enable_r5_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+ ctrl_info->enable_r6_writes = firmware_feature->enabled;
+ break;
case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
ctrl_info->soft_reset_handshake_supported =
- firmware_feature->enabled;
+ firmware_feature->enabled &&
+ pqi_read_soft_reset_status(ctrl_info);
break;
case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
- ctrl_info->raid_iu_timeout_supported =
- firmware_feature->enabled;
+ ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
break;
case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
- ctrl_info->tmf_iu_timeout_supported =
- firmware_feature->enabled;
+ ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
+ ctrl_info->firmware_triage_supported = firmware_feature->enabled;
+ pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
+ break;
+ case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
+ ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
+ ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
break;
}
@@ -6862,6 +7780,51 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_status = pqi_firmware_feature_status,
},
{
+ .feature_name = "Maximum Known Feature",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 5 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 6 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 5 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 6 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
.feature_status = pqi_ctrl_update_feature_flags,
@@ -6876,6 +7839,26 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "Firmware Triage",
+ .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RPL Extended Formats 4 and 5",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "Multi-LUN Target",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -6934,7 +7917,7 @@ static void pqi_process_firmware_features(
if (pqi_is_firmware_feature_enabled(firmware_features,
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
- pqi_firmware_features[i].enabled = true;
+ pqi_firmware_features[i].enabled = true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -6960,14 +7943,36 @@ static void pqi_process_firmware_features_section(
mutex_unlock(&pqi_firmware_features_mutex);
}
+/*
+ * Reset all controller settings that can be initialized during the processing
+ * of the PQI Configuration Table.
+ */
+
+static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->heartbeat_counter = NULL;
+ ctrl_info->soft_reset_status = NULL;
+ ctrl_info->soft_reset_handshake_supported = false;
+ ctrl_info->enable_r1_writes = false;
+ ctrl_info->enable_r5_writes = false;
+ ctrl_info->enable_r6_writes = false;
+ ctrl_info->raid_iu_timeout_supported = false;
+ ctrl_info->tmf_iu_timeout_supported = false;
+ ctrl_info->firmware_triage_supported = false;
+ ctrl_info->rpl_extended_format_4_5_supported = false;
+ ctrl_info->multi_lun_device_supported = false;
+}
+
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
u32 table_length;
u32 section_offset;
+ bool firmware_feature_section_present;
void __iomem *table_iomem_addr;
struct pqi_config_table *config_table;
struct pqi_config_table_section_header *section;
struct pqi_config_table_section_info section_info;
+ struct pqi_config_table_section_info feature_section_info;
table_length = ctrl_info->config_table_length;
if (table_length == 0)
@@ -6984,25 +7989,24 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
* Copy the config table contents from I/O memory space into the
* temporary buffer.
*/
- table_iomem_addr = ctrl_info->iomem_base +
- ctrl_info->config_table_offset;
+ table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
memcpy_fromio(config_table, table_iomem_addr, table_length);
+ firmware_feature_section_present = false;
section_info.ctrl_info = ctrl_info;
- section_offset =
- get_unaligned_le32(&config_table->first_section_offset);
+ section_offset = get_unaligned_le32(&config_table->first_section_offset);
while (section_offset) {
section = (void *)config_table + section_offset;
section_info.section = section;
section_info.section_offset = section_offset;
- section_info.section_iomem_addr =
- table_iomem_addr + section_offset;
+ section_info.section_iomem_addr = table_iomem_addr + section_offset;
switch (get_unaligned_le16(&section->section_id)) {
case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
- pqi_process_firmware_features_section(&section_info);
+ firmware_feature_section_present = true;
+ feature_section_info = section_info;
break;
case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
if (pqi_disable_heartbeat)
@@ -7012,8 +8016,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
ctrl_info->heartbeat_counter =
table_iomem_addr +
section_offset +
- offsetof(
- struct pqi_config_table_heartbeat,
+ offsetof(struct pqi_config_table_heartbeat,
heartbeat_counter);
break;
case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
@@ -7021,14 +8024,21 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
table_iomem_addr +
section_offset +
offsetof(struct pqi_config_table_soft_reset,
- soft_reset_status);
+ soft_reset_status);
break;
}
- section_offset =
- get_unaligned_le16(&section->next_section_offset);
+ section_offset = get_unaligned_le16(&section->next_section_offset);
}
+ /*
+ * We process the firmware feature section after all other sections
+ * have been processed so that the feature bit callbacks can take
+ * into account the settings configured by other sections.
+ */
+ if (firmware_feature_section_present)
+ pqi_process_firmware_features_section(&feature_section_info);
+
kfree(config_table);
return 0;
@@ -7076,15 +8086,34 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+static void pqi_perform_lockup_action(void)
+{
+ switch (pqi_lockup_action) {
+ case PANIC:
+ panic("FATAL: Smart Family Controller lockup detected");
+ break;
+ case REBOOT:
+ emergency_restart();
+ break;
+ case NONE:
+ default:
+ break;
+ }
+}
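/*
 * Aside: pqi_lockup_action comes from the "lockup_action" module parameter,
 * which the driver parses elsewhere in this file against a table along these
 * lines (a sketch assuming the upstream names; demo_lockup_actions is
 * hypothetical):
 */
static const struct {
	enum pqi_lockup_action	action;
	char			*name;
} demo_lockup_actions[] = {
	{ NONE, "none" },
	{ REBOOT, "reboot" },
	{ PANIC, "panic" },
};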
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ u32 product_id;
if (reset_devices) {
+ if (pqi_is_fw_triage_supported(ctrl_info)) {
+ rc = sis_wait_for_fw_triage_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
sis_soft_reset(ctrl_info);
- msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
} else {
rc = pqi_force_sis_mode(ctrl_info);
if (rc)
@@ -7096,8 +8125,15 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
* commands.
*/
rc = sis_wait_for_ctrl_ready(ctrl_info);
- if (rc)
+ if (rc) {
+ if (reset_devices) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "kdump init failed with error %d\n", rc);
+ pqi_lockup_action = REBOOT;
+ pqi_perform_lockup_action();
+ }
return rc;
+ }
/*
* Get the controller properties. This allows us to determine
@@ -7117,15 +8153,19 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
+ product_id = sis_get_product_id(ctrl_info);
+ ctrl_info->product_id = (u8)product_id;
+ ctrl_info->product_revision = (u8)(product_id >> 8);
+
if (reset_devices) {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
- ctrl_info->max_outstanding_requests =
+ ctrl_info->max_outstanding_requests =
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
} else {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS)
- ctrl_info->max_outstanding_requests =
+ ctrl_info->max_outstanding_requests =
PQI_MAX_OUTSTANDING_REQUESTS;
}
@@ -7212,11 +8252,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_init_operational_queues(ctrl_info);
- rc = pqi_request_irqs(ctrl_info);
+ rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
- rc = pqi_create_queues(ctrl_info);
+ rc = pqi_request_irqs(ctrl_info);
if (rc)
return rc;
@@ -7230,6 +8270,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_start_heartbeat_timer(ctrl_info);
+ if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+ rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+ if (rc) { /* Supported features not returned correctly. */
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining advanced RAID bypass configuration\n");
+ return rc;
+ }
+ ctrl_info->ciss_report_log_flags |=
+ CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+ }
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -7379,12 +8430,25 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
ctrl_info->controller_online = true;
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_ctrl_reset_config(ctrl_info);
+
rc = pqi_process_config_table(ctrl_info);
if (rc)
return rc;
pqi_start_heartbeat_timer(ctrl_info);
+ if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+ rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining advanced RAID bypass configuration\n");
+ return rc;
+ }
+ ctrl_info->ciss_report_log_flags |=
+ CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+ }
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -7413,18 +8477,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- pqi_schedule_update_time_worker(ctrl_info);
+ if (pqi_ofa_in_progress(ctrl_info))
+ pqi_ctrl_unblock_scan(ctrl_info);
pqi_scan_scsi_devices(ctrl_info);
return 0;
}
-static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
- u16 timeout)
+static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{
- return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
+ int rc;
+
+ rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
+
+ return pcibios_err_to_errno(rc);
}
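/*
 * Aside: pcie_capability_clear_and_set_word() reports failures as positive
 * PCIBIOS_* codes rather than negative errnos, so returning its result
 * unconverted (as the old code did) could leak a positive value to callers
 * that test for rc < 0. pcibios_err_to_errno() performs the translation,
 * roughly (abridged sketch of the helper in <linux/pci.h>):
 *
 *	if (err <= PCIBIOS_SUCCESSFUL)
 *		return err;
 *	switch (err) {
 *	case PCIBIOS_DEVICE_NOT_FOUND:
 *		return -ENODEV;
 *	...
 *	}
 */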
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
@@ -7523,7 +8591,6 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
- atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7531,19 +8598,27 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
+ INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
+ INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
+
sema_init(&ctrl_info->sync_request_sem,
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
init_waitqueue_head(&ctrl_info->block_requests_wait);
- INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
- spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
- INIT_WORK(&ctrl_info->raid_bypass_retry_work,
- pqi_raid_bypass_retry_worker);
-
ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
ctrl_info->irq_mode = IRQ_MODE_NONE;
ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
+ ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+ ctrl_info->max_transfer_encrypted_sas_sata =
+ PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
+ ctrl_info->max_transfer_encrypted_nvme =
+ PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
+ ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
+ ctrl_info->max_write_raid_1_10_2drive = ~0;
+ ctrl_info->max_write_raid_1_10_3drive = ~0;
+ ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
+
return ctrl_info;
}
@@ -7560,7 +8635,6 @@ static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
- pqi_stop_heartbeat_timer(ctrl_info);
pqi_free_interrupts(ctrl_info);
if (ctrl_info->queue_memory_base)
dma_free_coherent(&ctrl_info->pci_dev->dev,
@@ -7585,9 +8659,15 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
+ ctrl_info->controller_online = false;
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ }
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -7596,81 +8676,57 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
- pqi_wait_until_scan_finished(ctrl_info);
- pqi_ctrl_ofa_start(ctrl_info);
+ pqi_ctrl_block_scan(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
- pqi_fail_io_queued_for_all_devices(ctrl_info);
- pqi_wait_until_inbound_queues_empty(ctrl_info);
pqi_stop_heartbeat_timer(ctrl_info);
- ctrl_info->pqi_mode_enabled = false;
- pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
}
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
- pqi_ofa_free_host_buffer(ctrl_info);
- ctrl_info->pqi_mode_enabled = true;
- pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
- ctrl_info->controller_online = true;
- pqi_ctrl_unblock_requests(ctrl_info);
pqi_start_heartbeat_timer(ctrl_info);
- pqi_schedule_update_time_worker(ctrl_info);
- pqi_clear_soft_reset_status(ctrl_info,
- PQI_SOFT_RESET_ABORT);
- pqi_scan_scsi_devices(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
}
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
- u32 total_size, u32 chunk_size)
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
- u32 sg_count;
- u32 size;
int i;
- struct pqi_sg_descriptor *mem_descriptor = NULL;
+ u32 sg_count;
struct device *dev;
struct pqi_ofa_memory *ofap;
-
- dev = &ctrl_info->pci_dev->dev;
-
- sg_count = (total_size + chunk_size - 1);
- sg_count /= chunk_size;
+ struct pqi_sg_descriptor *mem_descriptor;
+ dma_addr_t dma_handle;
ofap = ctrl_info->pqi_ofa_mem_virt_addr;
- if (sg_count*chunk_size < total_size)
+ sg_count = DIV_ROUND_UP(total_size, chunk_size);
+ if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
goto out;
- ctrl_info->pqi_ofa_chunk_virt_addr =
- kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+ ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr)
goto out;
- for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
- dma_addr_t dma_handle;
+ dev = &ctrl_info->pci_dev->dev;
+ for (i = 0; i < sg_count; i++) {
ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_alloc_coherent(dev, chunk_size, &dma_handle,
- GFP_KERNEL);
-
+ dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
- break;
-
+ goto out_free_chunks;
mem_descriptor = &ofap->sg_descriptor[i];
- put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
- put_unaligned_le32 (chunk_size, &mem_descriptor->length);
+ put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+ put_unaligned_le32(chunk_size, &mem_descriptor->length);
}
- if (!size || size < total_size)
- goto out_free_chunks;
-
put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
- put_unaligned_le32(size, &ofap->bytes_allocated);
+ put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
return 0;
@@ -7678,82 +8734,87 @@ out_free_chunks:
while (--i >= 0) {
mem_descriptor = &ofap->sg_descriptor[i];
dma_free_coherent(dev, chunk_size,
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
- get_unaligned_le64(&mem_descriptor->address));
+ ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ get_unaligned_le64(&mem_descriptor->address));
}
kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
out:
- put_unaligned_le32 (0, &ofap->bytes_allocated);
return -ENOMEM;
}
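/*
 * Aside: DIV_ROUND_UP() is exactly the rounding the removed open-coded pair
 * computed, and is checkable at compile time; example values invented:
 */
static_assert(DIV_ROUND_UP(5, 2) == 3);	/* 5 units in 2-unit chunks */
static_assert(DIV_ROUND_UP(4, 2) == 2);	/* exact multiple adds no chunk */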
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
u32 total_size;
+ u32 chunk_size;
u32 min_chunk_size;
- u32 chunk_sz;
- total_size = le32_to_cpu(
- ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
- min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+ if (ctrl_info->ofa_bytes_requested == 0)
+ return 0;
+
+ total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
+ min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+ min_chunk_size = PAGE_ALIGN(min_chunk_size);
- for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
- if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
+ for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
+ if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
return 0;
+ chunk_size /= 2;
+ chunk_size = PAGE_ALIGN(chunk_size);
+ }
return -ENOMEM;
}
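/*
 * Aside, a walk-through with invented numbers: for ofa_bytes_requested of
 * 1 MiB and PQI_OFA_MAX_SG_DESCRIPTORS == 64 (assumed), min_chunk_size is
 * PAGE_ALIGN(1 MiB / 64) = 16 KiB with 4 KiB pages; the loop then offers the
 * allocator chunk sizes of 1 MiB, 512 KiB, 256 KiB, ... down to 16 KiB and
 * settles on the first size for which every chunk allocation succeeds,
 * trading descriptor count against contiguity.
 */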
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
- u32 bytes_requested)
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
- struct pqi_ofa_memory *pqi_ofa_memory;
struct device *dev;
+ struct pqi_ofa_memory *ofap;
dev = &ctrl_info->pci_dev->dev;
- pqi_ofa_memory = dma_alloc_coherent(dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
- &ctrl_info->pqi_ofa_mem_dma_handle,
- GFP_KERNEL);
- if (!pqi_ofa_memory)
+ ofap = dma_alloc_coherent(dev, sizeof(*ofap),
+ &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
+ if (!ofap)
return;
- put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
- memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
- sizeof(pqi_ofa_memory->signature));
- pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
-
- ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+ ctrl_info->pqi_ofa_mem_virt_addr = ofap;
if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
- dev_err(dev, "Failed to allocate host buffer of size = %u",
- bytes_requested);
+ dev_err(dev,
+ "failed to allocate host buffer for Online Firmware Activation\n");
+ dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
+ ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ return;
}
- return;
+ put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
+ memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
- int i;
- struct pqi_sg_descriptor *mem_descriptor;
+ unsigned int i;
+ struct device *dev;
struct pqi_ofa_memory *ofap;
+ struct pqi_sg_descriptor *mem_descriptor;
+ unsigned int num_memory_descriptors;
ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
if (!ofap)
return;
- if (!ofap->bytes_allocated)
+ dev = &ctrl_info->pci_dev->dev;
+
+ if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
goto out;
mem_descriptor = ofap->sg_descriptor;
+ num_memory_descriptors =
+ get_unaligned_le16(&ofap->num_memory_descriptors);
- for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
- i++) {
- dma_free_coherent(&ctrl_info->pci_dev->dev,
+ for (i = 0; i < num_memory_descriptors; i++) {
+ dma_free_coherent(dev,
get_unaligned_le32(&mem_descriptor[i].length),
ctrl_info->pqi_ofa_chunk_virt_addr[i],
get_unaligned_le64(&mem_descriptor[i].address));
@@ -7761,63 +8822,46 @@ static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
out:
- dma_free_coherent(&ctrl_info->pci_dev->dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
- ctrl_info->pqi_ofa_mem_dma_handle);
+ dma_free_coherent(dev, sizeof(*ofap), ofap,
+ ctrl_info->pqi_ofa_mem_dma_handle);
ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
+ u32 buffer_length;
struct pqi_vendor_general_request request;
- size_t size;
struct pqi_ofa_memory *ofap;
memset(&request, 0, sizeof(request));
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
&request.header.iu_length);
put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
&request.function_code);
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
if (ofap) {
- size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+ buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
get_unaligned_le16(&ofap->num_memory_descriptors) *
sizeof(struct pqi_sg_descriptor);
put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
&request.data.ofa_memory_allocation.buffer_address);
- put_unaligned_le32(size,
+ put_unaligned_le32(buffer_length,
&request.data.ofa_memory_allocation.buffer_length);
-
}
- return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
- msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
- return pqi_ctrl_init_resume(ctrl_info);
-}
+ ssleep(delay_secs);
-static void pqi_perform_lockup_action(void)
-{
- switch (pqi_lockup_action) {
- case PANIC:
- panic("FATAL: Smart Family Controller lockup detected");
- break;
- case REBOOT:
- emergency_restart();
- break;
- case NONE:
- default:
- break;
- }
+ return pqi_ctrl_init_resume(ctrl_info);
}
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
@@ -7830,6 +8874,7 @@ static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
unsigned int i;
struct pqi_io_request *io_request;
struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
for (i = 0; i < ctrl_info->max_io_slots; i++) {
io_request = &ctrl_info->io_request_pool[i];
@@ -7838,7 +8883,13 @@ static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
scmd = io_request->scmd;
if (scmd) {
- set_host_byte(scmd, DID_NO_CONNECT);
+ sdev = scmd->device;
+ if (!sdev || !scsi_device_online(sdev)) {
+ pqi_free_io_request(io_request);
+ continue;
+ } else {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ }
} else {
io_request->status = -ENXIO;
io_request->error_info =
@@ -7859,7 +8910,6 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
pqi_cancel_update_time_worker(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_all_outstanding_requests(ctrl_info);
- pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
}
@@ -7871,7 +8921,8 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
if (!ctrl_info->controller_online)
return;
@@ -7880,7 +8931,7 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = false;
pqi_ctrl_block_requests(ctrl_info);
if (!pqi_disable_ctrl_shutdown)
- sis_shutdown_ctrl(ctrl_info);
+ sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
schedule_work(&ctrl_info->ctrl_offline_work);
@@ -7894,7 +8945,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
if (id->driver_data)
ctrl_description = (char *)id->driver_data;
else
- ctrl_description = "Microsemi Smart Family Controller";
+ ctrl_description = "Microchip Smart Family Controller";
dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
@@ -7903,7 +8954,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
int rc;
- int node, cp_node;
+ int node;
struct pqi_ctrl_info *ctrl_info;
pqi_print_ctrl_info(pci_dev, id);
@@ -7922,10 +8973,10 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
node = dev_to_node(&pci_dev->dev);
if (node == NUMA_NO_NODE) {
- cp_node = cpu_to_node(0);
- if (cp_node == NUMA_NO_NODE)
- cp_node = 0;
- set_dev_node(&pci_dev->dev, cp_node);
+ node = cpu_to_node(0);
+ if (node == NUMA_NO_NODE)
+ node = 0;
+ set_dev_node(&pci_dev->dev, node);
}
ctrl_info = pqi_alloc_ctrl_info(node);
@@ -7956,12 +9007,17 @@ error:
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
+ u16 vendor_id;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
- ctrl_info->in_shutdown = true;
+ pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
+ if (vendor_id == 0xffff)
+ ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
+ else
+ ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
pqi_remove_ctrl(ctrl_info);
}
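/*
 * Aside: config-space reads from a PCI device that has been physically
 * removed complete with all bits set, so a subsystem vendor ID reading back
 * as 0xffff is the conventional "device is gone" test used above. The same
 * probe as a hypothetical helper:
 */
static bool demo_pci_device_gone(struct pci_dev *pci_dev)
{
	u16 vendor_id;

	pci_read_config_word(pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
	return vendor_id == 0xffff;	/* all-ones: nothing answered the read */
}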
@@ -7986,6 +9042,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info) {
@@ -7994,43 +9051,27 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
return;
}
- pqi_disable_events(ctrl_info);
pqi_wait_until_ofa_finished(ctrl_info);
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_cancel_event_worker(ctrl_info);
- pqi_ctrl_shutdown_start(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
- if (rc) {
- dev_err(&pci_dev->dev,
- "wait for pending I/O failed\n");
- return;
- }
-
- pqi_ctrl_block_device_reset(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
+ if (system_state == SYSTEM_RESTART)
+ shutdown_event = RESTART;
+ else
+ shutdown_event = SHUTDOWN;
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
- rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+ rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
"unable to flush controller cache\n");
- pqi_ctrl_block_requests(ctrl_info);
-
- rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
- if (rc) {
- dev_err(&pci_dev->dev,
- "wait for pending sync cmds failed\n");
- return;
- }
-
pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info);
}
@@ -8054,35 +9095,69 @@ static void pqi_process_lockup_action_param(void)
DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
+
+static void pqi_process_ctrl_ready_timeout_param(void)
+{
+ if (pqi_ctrl_ready_timeout_secs == 0)
+ return;
+
+ if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
+ } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
+ }
+
+ sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
+}
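/*
 * Aside, a sketch: after the early return that keeps 0 meaning "use the
 * default", the two-sided range check above could equally be written with
 * the clamp() helper from <linux/minmax.h> (warning messages aside):
 *
 *	pqi_ctrl_ready_timeout_secs =
 *		clamp(pqi_ctrl_ready_timeout_secs,
 *			(u32)PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS,
 *			(u32)PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
 */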
+
static void pqi_process_module_params(void)
{
pqi_process_lockup_action_param();
+ pqi_process_ctrl_ready_timeout_param();
}
-static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+#if defined(CONFIG_PM)
+
+static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
+{
+ if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
+ return RESTART;
+
+ return SUSPEND;
+}
+
+static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
- pqi_disable_events(ctrl_info);
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_wait_until_scan_finished(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
pqi_wait_until_ofa_finished(ctrl_info);
- pqi_flush_cache(ctrl_info, SUSPEND);
+
+ pqi_ctrl_block_scan(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
- pqi_stop_heartbeat_timer(ctrl_info);
- if (state.event == PM_EVENT_FREEZE)
- return 0;
+ if (suspend) {
+ enum bmic_flush_cache_shutdown_event shutdown_event;
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+ }
+
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_free_irqs(ctrl_info);
ctrl_info->controller_online = false;
ctrl_info->pqi_mode_enabled = false;
@@ -8090,37 +9165,89 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
return 0;
}
-static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
+static __maybe_unused int pqi_suspend(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, true);
+}
+
+static int pqi_resume_or_restore(struct device *dev)
{
int rc;
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
- if (pci_dev->current_state != PCI_D0) {
- ctrl_info->max_hw_queue_index = 0;
- pqi_free_interrupts(ctrl_info);
- pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
- rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
- IRQF_SHARED, DRIVER_NAME_SHORT,
- &ctrl_info->queue_groups[0]);
- if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "irq %u init failed with error %d\n",
- pci_dev->irq, rc);
- return rc;
- }
- pqi_start_heartbeat_timer(ctrl_info);
- pqi_ctrl_unblock_requests(ctrl_info);
- return 0;
- }
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
return pqi_ctrl_init_resume(ctrl_info);
}
+static int pqi_freeze(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, false);
+}
+
+static int pqi_thaw(struct device *dev)
+{
+ int rc;
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
+
+ ctrl_info->controller_online = true;
+ ctrl_info->pqi_mode_enabled = true;
+
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+
+ return 0;
+}
+
+static int pqi_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pqi_pm_ops = {
+ .suspend = pqi_suspend,
+ .resume = pqi_resume_or_restore,
+ .freeze = pqi_freeze,
+ .thaw = pqi_thaw,
+ .poweroff = pqi_poweroff,
+ .restore = pqi_resume_or_restore,
+};
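/*
 * Aside, standard dev_pm_ops semantics rather than anything driver-specific:
 * suspend-to-RAM runs .suspend/.resume; hibernation runs .freeze and .thaw
 * around the in-memory snapshot, then .poweroff before power is cut and
 * .restore on the way back up. That split is why pqi_freeze() skips the
 * cache flush (power is not being removed yet) while pqi_suspend() and
 * pqi_poweroff() flush the controller cache.
 */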
+
+#endif /* CONFIG_PM */
+
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
{
@@ -8169,6 +9296,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1108)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1109)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -8225,6 +9360,50 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -8257,6 +9436,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0659)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
@@ -8297,6 +9480,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x080a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
@@ -8377,10 +9564,162 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1304)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1400)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1402)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1410)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1411)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1412)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1420)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1430)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1440)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1441)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1450)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1452)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1460)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1461)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1463)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1470)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1471)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1472)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1473)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1474)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1490)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1491)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
@@ -8445,6 +9784,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_HP, 0x1100)
},
{
@@ -8453,6 +9796,38 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0294)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x02db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x02dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x032e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x036f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0381)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0382)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0383)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1d8d, 0x0800)
},
{
@@ -8473,6 +9848,102 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1dfc, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f0c, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5445)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5446)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5447)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5449)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b27)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b29)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b45)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0201)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0220)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0221)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0520)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0522)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0620)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0621)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0622)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
@@ -8487,8 +9958,9 @@ static struct pci_driver pqi_pci_driver = {
.remove = pqi_pci_remove,
.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
- .suspend = pqi_suspend,
- .resume = pqi_resume,
+ .driver = {
+ .pm = &pqi_pm_ops
+ },
#endif
};
@@ -8497,9 +9969,10 @@ static int __init pqi_init(void)
int rc;
pr_info(DRIVER_NAME "\n");
+ pqi_verify_structures();
+ sis_verify_structures();
- pqi_sas_transport_template =
- sas_attach_transport(&pqi_sas_transport_functions);
+ pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
if (!pqi_sas_transport_template)
return -ENODEV;
@@ -8521,7 +9994,7 @@ static void __exit pqi_cleanup(void)
module_init(pqi_init);
module_exit(pqi_cleanup);
-static void __attribute__((unused)) verify_structures(void)
+static void pqi_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_host_to_ctrl_doorbell) != 0x20);
@@ -8534,8 +10007,12 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_driver_scratch) != 0xb0);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_product_identifier) != 0xb4);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_firmware_status) != 0xbc);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_ctrl_shutdown_reason_code) != 0xcc);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_mailbox) != 0x1000);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
pqi_registers) != 0x4000);
@@ -8547,7 +10024,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_iu_header,
response_queue_id) != 0x4);
BUILD_BUG_ON(offsetof(struct pqi_iu_header,
- work_area) != 0x6);
+ driver_flags) != 0x6);
BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
@@ -8645,7 +10122,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
header.iu_length) != 2);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
@@ -8701,7 +10178,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
header.iu_length) != 2);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
@@ -8725,7 +10202,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
header.response_queue_id) != 4);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
@@ -8754,7 +10231,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
header.response_queue_id) != 4);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
@@ -8929,13 +10406,23 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
configuration_signature) != 1);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
- firmware_version) != 5);
+ firmware_version_short) != 5);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
extended_logical_unit_count) != 154);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
firmware_build_number) != 190);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ vendor_id) != 200);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ product_id) != 208);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ extra_controller_flags) != 286);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
controller_mode) != 292);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ spare_part_number) != 293);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ firmware_version_long) != 325);
BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
phys_bay_in_box) != 115);
@@ -8953,6 +10440,45 @@ static void __attribute__((unused)) verify_structures(void)
current_queue_depth_limit) != 1796);
BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ page_code) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ subpage_code) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ buffer_length) != 2);
+
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ page_code) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ subpage_code) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ page_length) != 2);
+
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
+ != 18);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ header) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ firmware_read_support) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ driver_read_support) != 5);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ firmware_write_support) != 6);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ driver_write_support) != 7);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_transfer_encrypted_sas_sata) != 8);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_transfer_encrypted_nvme) != 10);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_5_6) != 12);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_2drive) != 14);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_3drive) != 16);
+
BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
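The verify_structures() hunks above pin each renamed field (work_area becoming driver_flags, firmware_version becoming firmware_version_short) and each new bmic_identify_controller member to a fixed byte offset, so a host-side struct edit that would shift the firmware wire format fails the build instead of corrupting DMA'd structures. A minimal sketch of the same technique outside the kernel, using a hypothetical struct and C11 _Static_assert in place of BUILD_BUG_ON:

	#include <stddef.h>

	struct wire_header {
		unsigned char  iu_type;        /* byte 0 */
		unsigned char  reserved;       /* byte 1 */
		unsigned short iu_length;      /* bytes 2-3 */
		unsigned short queue_id;       /* bytes 4-5 */
		unsigned short driver_flags;   /* bytes 6-7 */
	};

	/* offsetof() is an integer constant expression, so layout
	 * drift is caught at compile time. */
	_Static_assert(offsetof(struct wire_header, driver_flags) == 6,
		       "driver_flags must sit at byte 6");
	_Static_assert(sizeof(struct wire_header) == 8,
		       "wire header must stay 8 bytes");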
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b7289112455c..13e8c539010e 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -65,8 +65,8 @@ static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy)
memset(identify, 0, sizeof(*identify));
identify->sas_address = pqi_sas_port->sas_address;
identify->device_type = SAS_END_DEVICE;
- identify->initiator_port_protocols = SAS_PROTOCOL_STP;
- identify->target_port_protocols = SAS_PROTOCOL_STP;
+ identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
+ identify->target_port_protocols = SAS_PROTOCOL_ALL;
phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -93,13 +93,24 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address;
- if (pqi_sas_port->device &&
- pqi_sas_port->device->is_expander_smp_device) {
- identify->initiator_port_protocols = SAS_PROTOCOL_SMP;
- identify->target_port_protocols = SAS_PROTOCOL_SMP;
- } else {
- identify->initiator_port_protocols = SAS_PROTOCOL_STP;
- identify->target_port_protocols = SAS_PROTOCOL_STP;
+ identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
+ identify->target_port_protocols = SAS_PROTOCOL_STP;
+
+ if (pqi_sas_port->device) {
+ identify->phy_identifier = pqi_sas_port->device->phy_id;
+ switch (pqi_sas_port->device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_SES:
+ case SA_DEVICE_TYPE_NVME:
+ identify->target_port_protocols = SAS_PROTOCOL_SSP;
+ break;
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ identify->target_port_protocols = SAS_PROTOCOL_SMP;
+ break;
+ case SA_DEVICE_TYPE_SATA:
+ default:
+ break;
+ }
}
return sas_rphy_add(rphy);
@@ -107,8 +118,7 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port)
{
- if (pqi_sas_port->device &&
- pqi_sas_port->device->is_expander_smp_device)
+ if (pqi_sas_port->device && pqi_sas_port->device->is_expander_smp_device)
return sas_expander_alloc(pqi_sas_port->port,
SAS_FANOUT_EXPANDER_DEVICE);
@@ -161,7 +171,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
list_for_each_entry_safe(pqi_sas_phy, next,
&pqi_sas_port->phy_list_head, phy_list_entry)
-			pqi_free_sas_phy(pqi_sas_phy);
+		pqi_free_sas_phy(pqi_sas_phy);
sas_port_delete(pqi_sas_port->port);
list_del(&pqi_sas_port->port_list_entry);
@@ -191,7 +201,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
list_for_each_entry_safe(pqi_sas_port, next,
&pqi_sas_node->port_list_head, port_list_entry)
-			pqi_free_sas_port(pqi_sas_port);
+		pqi_free_sas_port(pqi_sas_port);
kfree(pqi_sas_node);
}
@@ -333,7 +343,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
}
if (found_device->devtype == TYPE_ENCLOSURE) {
- *identifier = get_unaligned_be64(&found_device->wwid);
+ *identifier = get_unaligned_be64(&found_device->wwid[8]);
rc = 0;
goto out;
}
@@ -354,7 +364,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
memcmp(device->phys_connector,
found_device->phys_connector, 2) == 0) {
*identifier =
- get_unaligned_be64(&device->wwid);
+ get_unaligned_be64(&device->wwid[8]);
rc = 0;
goto out;
}
@@ -370,7 +380,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
if (device->devtype == TYPE_ENCLOSURE &&
CISS_GET_DRIVE_NUMBER(device->scsi3addr) ==
PQI_VSEP_CISS_BTL) {
- *identifier = get_unaligned_be64(&device->wwid);
+ *identifier = get_unaligned_be64(&device->wwid[8]);
rc = 0;
goto out;
}
@@ -498,7 +508,7 @@ static unsigned int pqi_build_sas_smp_handler_reply(
job->reply_len = le16_to_cpu(error_info->sense_data_length);
memcpy(job->reply, error_info->data,
- le16_to_cpu(error_info->sense_data_length));
+ le16_to_cpu(error_info->sense_data_length));
return job->reply_payload.payload_len -
get_unaligned_le32(&error_info->data_in_transferred);
@@ -547,9 +557,9 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
+
out:
bsg_job_done(job, rc, reslen);
- pqi_ctrl_unbusy(ctrl_info);
}
struct sas_function_template pqi_sas_transport_functions = {
.get_linkerrors = pqi_sas_get_linkerrors,
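Two behavioural changes in the smartpqi_sas_transport.c diff above deserve a note: the rphy identify now selects the target protocol from the device type instead of hard-coding STP, and the enclosure identifier is read from the second half of the WWID. A sketch of the latter, assuming (as the &...wwid[8] indexing implies) that the device's wwid field is a 16-byte buffer:

	#include <linux/types.h>
	#include <asm/unaligned.h>	/* get_unaligned_be64() */

	/* Hypothetical helper: the enclosure identifier occupies
	 * bytes 8..15 of the 16-byte WWID, hence &wwid[8]. */
	static u64 enclosure_id_from_wwid(const u8 wwid[16])
	{
		return get_unaligned_be64(&wwid[8]);
	}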
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index f0d6e88ba2c1..5811fb3c22a9 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -51,12 +51,20 @@
#define SIS_BASE_STRUCT_REVISION 9
#define SIS_BASE_STRUCT_ALIGNMENT 16
+#define SIS_CTRL_KERNEL_FW_TRIAGE 0x3
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
#define SIS_CTRL_READY_TIMEOUT_SECS 180
#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
+enum sis_fw_triage_status {
+ FW_TRIAGE_NOT_STARTED = 0,
+ FW_TRIAGE_STARTED,
+ FW_TRIAGE_COND_INVALID,
+ FW_TRIAGE_COMPLETED
+};
+
#pragma pack(1)
/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
@@ -71,20 +79,22 @@ struct sis_base_struct {
/* error response data */
__le32 error_buffer_element_length; /* length of each PQI error */
/* response buffer element */
- /* in bytes */
+ /* in bytes */
__le32 error_buffer_num_elements; /* total number of PQI error */
/* response buffers available */
};
#pragma pack()
+unsigned int sis_ctrl_ready_timeout_secs = SIS_CTRL_READY_TIMEOUT_SECS;
+
static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
unsigned int timeout_secs)
{
unsigned long timeout;
u32 status;
- timeout = (timeout_secs * PQI_HZ) + jiffies;
+ timeout = (timeout_secs * HZ) + jiffies;
while (1) {
status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -114,7 +124,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
{
return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
- SIS_CTRL_READY_TIMEOUT_SECS);
+ sis_ctrl_ready_timeout_secs);
}
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
@@ -130,7 +140,7 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
status = readl(&ctrl_info->registers->sis_firmware_status);
- if (status & SIS_CTRL_KERNEL_PANIC)
+ if (status != ~0 && (status & SIS_CTRL_KERNEL_PANIC))
running = false;
else
running = true;
@@ -146,7 +156,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info)
{
return readl(&ctrl_info->registers->sis_firmware_status) &
- SIS_CTRL_KERNEL_UP;
+ SIS_CTRL_KERNEL_UP;
+}
+
+u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_product_identifier);
}
/* used for passing command parameters/results when issuing SIS commands */
@@ -181,6 +196,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
/* Disable doorbell interrupts by masking all interrupts. */
writel(~0, &registers->sis_interrupt_mask);
+ usleep_range(1000, 2000);
/*
* Force the completion of the interrupt mask register write before
@@ -196,7 +212,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
* the top of the loop in order to give the controller time to start
* processing the command before we start polling.
*/
- timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
@@ -342,7 +358,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
u32 doorbell_register;
unsigned long timeout;
- timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
doorbell_register =
@@ -370,6 +386,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
{
writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ usleep_range(1000, 2000);
return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
}
@@ -384,14 +401,17 @@ void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX);
}
-void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
if (readl(&ctrl_info->registers->sis_firmware_status) &
SIS_CTRL_KERNEL_PANIC)
return;
- writel(SIS_TRIGGER_SHUTDOWN,
- &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ if (ctrl_info->firmware_triage_supported)
+ writel(ctrl_shutdown_reason, &ctrl_info->registers->sis_ctrl_shutdown_reason_code);
+
+ writel(SIS_TRIGGER_SHUTDOWN, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info)
@@ -407,6 +427,7 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
{
writel(value, &ctrl_info->registers->sis_driver_scratch);
+ usleep_range(1000, 2000);
}
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
@@ -414,13 +435,56 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
return readl(&ctrl_info->registers->sis_driver_scratch);
}
+static inline enum sis_fw_triage_status
+ sis_read_firmware_triage_status(struct pqi_ctrl_info *ctrl_info)
+{
+ return ((enum sis_fw_triage_status)(readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_FW_TRIAGE));
+}
+
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
writel(SIS_SOFT_RESET,
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
-static void __attribute__((unused)) verify_structures(void)
+#define SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS 300
+#define SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS 1
+
+int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ enum sis_fw_triage_status status;
+ unsigned long timeout;
+
+ timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * HZ) + jiffies;
+ while (1) {
+ status = sis_read_firmware_triage_status(ctrl_info);
+ if (status == FW_TRIAGE_COND_INVALID) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "firmware triage condition invalid\n");
+ rc = -EINVAL;
+ break;
+ } else if (status == FW_TRIAGE_NOT_STARTED ||
+ status == FW_TRIAGE_COMPLETED) {
+ rc = 0;
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for firmware triage status\n");
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ ssleep(SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS);
+ }
+
+ return rc;
+}
+
+void sis_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
revision) != 0x0);
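The new sis_wait_for_fw_triage_completion() above is the standard jiffies-based poll loop used throughout this file: compute the deadline once, poll, and give up only when time_after() says the deadline has passed. Reduced to its skeleton (status_fn, DONE, and the timeout macros are placeholders, not driver symbols):

	/* Poll until done or until the deadline; HZ converts seconds
	 * to jiffies, replacing the old driver-private PQI_HZ. */
	unsigned long deadline = (TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		if (status_fn() == DONE)
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		ssleep(POLL_INTERVAL_SECS);
	}

Note also the status != ~0 guard added to sis_is_firmware_running(): an MMIO read returning all ones usually means the device has dropped off the bus, so it must not be misread as the SIS_CTRL_KERNEL_PANIC bit.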
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 86b0e484d921..9dcbae96a5c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -12,6 +12,7 @@
#if !defined(_SMARTPQI_SIS_H)
#define _SMARTPQI_SIS_H
+void sis_verify_structures(void);
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info);
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
@@ -21,11 +22,16 @@ int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
-void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info);
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+
+extern unsigned int sis_ctrl_ready_timeout_secs;
#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index f8397978f8ab..678651b9b4dd 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* SNI RM driver
*
@@ -28,7 +27,6 @@
#include <linux/platform_device.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/delay.h>
@@ -59,6 +57,7 @@ static int snirm710_probe(struct platform_device *dev)
struct NCR_700_Host_Parameters *hostdata;
struct Scsi_Host *host;
struct resource *res;
+ int rc;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
@@ -84,7 +83,9 @@ static int snirm710_probe(struct platform_device *dev)
goto out_kfree;
host->this_id = 7;
host->base = base;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = rc = platform_get_irq(dev, 0);
+ if (rc < 0)
+ goto out_put_host;
if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
printk(KERN_ERR "snirm710: request_irq failed!\n");
goto out_put_host;
@@ -125,16 +126,4 @@ static struct platform_driver snirm710_driver = {
.name = "snirm_53c710",
},
};
-
-static int __init snirm710_init(void)
-{
- return platform_driver_register(&snirm710_driver);
-}
-
-static void __exit snirm710_exit(void)
-{
- platform_driver_unregister(&snirm710_driver);
-}
-
-module_init(snirm710_init);
-module_exit(snirm710_exit);
+module_platform_driver(snirm710_driver);
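Two independent fixes land in sni_53c710.c: platform_get_irq() can return a negative errno, which the old probe stored into host->irq unchecked, and the hand-written init/exit pair collapses into module_platform_driver(). The macro generates essentially the boilerplate the patch deletes (sketch for a hypothetical driver foo):

	static int __init foo_driver_init(void)
	{
		return platform_driver_register(&foo_driver);
	}
	module_init(foo_driver_init);

	static void __exit foo_driver_exit(void)
	{
		platform_driver_unregister(&foo_driver);
	}
	module_exit(foo_driver_exit);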
diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h
index a5290562c1fa..52a916fd0824 100644
--- a/drivers/scsi/snic/cq_desc.h
+++ b/drivers/scsi/snic/cq_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
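This and the remaining snic hunks replace the full GPL notice block with a one-line SPDX identifier. The comment style is deliberate, following the kernel's license-rules convention, which is why the replacements alternate between two forms:

	// SPDX-License-Identifier: GPL-2.0-only	(first line of a .c file)
	/* SPDX-License-Identifier: GPL-2.0-only */	(first line of a .h file)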
diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h
index 0a1be2ed0288..bd7381e52521 100644
--- a/drivers/scsi/snic/cq_enet_desc.h
+++ b/drivers/scsi/snic/cq_enet_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index de0ab5fc8474..32f5a34b6987 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _SNIC_H_
#define _SNIC_H_
@@ -374,7 +360,7 @@ int snic_glob_init(void);
void snic_glob_cleanup(void);
extern struct workqueue_struct *snic_event_queue;
-extern struct device_attribute *snic_attrs[];
+extern const struct attribute_group *snic_host_groups[];
int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int snic_abort_cmd(struct scsi_cmnd *);
@@ -399,7 +385,7 @@ void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);
int snic_queue_exch_ver_req(struct snic *);
-int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
+void snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
index 32d5d556b6f8..3ddbdbc3ded1 100644
--- a/drivers/scsi/snic/snic_attrs.c
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/string.h>
#include <linux/device.h>
@@ -68,10 +54,19 @@ static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);
-struct device_attribute *snic_attrs[] = {
- &dev_attr_snic_sym_name,
- &dev_attr_snic_state,
- &dev_attr_drv_version,
- &dev_attr_link_state,
+static struct attribute *snic_host_attrs[] = {
+ &dev_attr_snic_sym_name.attr,
+ &dev_attr_snic_state.attr,
+ &dev_attr_drv_version.attr,
+ &dev_attr_link_state.attr,
NULL,
};
+
+static const struct attribute_group snic_host_attr_group = {
+ .attrs = snic_host_attrs
+};
+
+const struct attribute_group *snic_host_groups[] = {
+ &snic_host_attr_group,
+ NULL
+};
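The conversion above is mechanical: bare device_attribute pointers become struct attribute pointers gathered into an attribute_group, and the NULL-terminated group array is what the SCSI host template consumes (see the matching snic_main.c hunk below). The sysfs helper ATTRIBUTE_GROUPS(snic_host) would generate both pieces from snic_host_attrs[], but it is not usable here because the array it emits is static, while snic_host_groups[] must stay visible to snic_main.c. What that macro would expand to, roughly:

	static const struct attribute_group snic_host_group = {
		.attrs = snic_host_attrs,
	};

	static const struct attribute_group *snic_host_groups[] = {
		&snic_host_group,
		NULL,
	};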
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 449b03f3bbd3..5f4fca96b192 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/pci.h>
@@ -114,10 +100,7 @@ snic_queue_exch_ver_req(struct snic *snic)
rqi = snic_req_init(snic, 0);
if (!rqi) {
- SNIC_HOST_ERR(snic->shost,
- "Queuing Exch Ver Req failed, err = %d\n",
- ret);
-
+ SNIC_HOST_ERR(snic->shost, "Init Exch Ver Req failed\n");
ret = -ENOMEM;
goto error;
}
@@ -151,7 +134,7 @@ error:
/*
* snic_io_exch_ver_cmpl_handler
*/
-int
+void
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
struct snic_req_info *rqi = NULL;
@@ -160,7 +143,6 @@ snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
u32 cmnd_id, hid, max_sgs;
ulong ctx = 0;
unsigned long flags;
- int ret = 0;
SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
@@ -224,8 +206,6 @@ exch_cmpl_end:
snic_release_untagged_req(snic, rqi);
SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
-
- return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 2b349365592f..57bdc3ba49d9 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
@@ -334,25 +320,7 @@ snic_stats_show(struct seq_file *sfp, void *data)
return 0;
}
-/*
- * snic_stats_open - Open the stats file for specific host
- *
- * Description:
- * This routine opens a debugfs file stats of specific host
- */
-static int
-snic_stats_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, snic_stats_show, inode->i_private);
-}
-
-static const struct file_operations snic_stats_fops = {
- .owner = THIS_MODULE,
- .open = snic_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(snic_stats);
static const struct file_operations snic_reset_stats_fops = {
.owner = THIS_MODULE,
@@ -439,42 +407,28 @@ snic_trc_seq_show(struct seq_file *sfp, void *data)
return 0;
}
-static const struct seq_operations snic_trc_seq_ops = {
+static const struct seq_operations snic_trc_sops = {
.start = snic_trc_seq_start,
.next = snic_trc_seq_next,
.stop = snic_trc_seq_stop,
.show = snic_trc_seq_show,
};
-static int
-snic_trc_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &snic_trc_seq_ops);
-}
-
-static const struct file_operations snic_trc_fops = {
- .owner = THIS_MODULE,
- .open = snic_trc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(snic_trc);
+#define TRC_ENABLE_FILE "tracing_enable"
+#define TRC_FILE "trace"
/*
* snic_trc_debugfs_init : creates trace/tracing_enable files for trace
* under debugfs
*/
void snic_trc_debugfs_init(void)
{
- snic_glob->trc.trc_enable = debugfs_create_bool("tracing_enable",
- S_IFREG | S_IRUGO | S_IWUSR,
- snic_glob->trc_root,
- &snic_glob->trc.enable);
-
- snic_glob->trc.trc_file = debugfs_create_file("trace",
- S_IFREG | S_IRUGO | S_IWUSR,
- snic_glob->trc_root, NULL,
- &snic_trc_fops);
+ debugfs_create_bool(TRC_ENABLE_FILE, S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root, &snic_glob->trc.enable);
+
+ debugfs_create_file(TRC_FILE, S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root, NULL, &snic_trc_fops);
}
/*
@@ -483,9 +437,6 @@ void snic_trc_debugfs_init(void)
void
snic_trc_debugfs_term(void)
{
- debugfs_remove(snic_glob->trc.trc_file);
- snic_glob->trc.trc_file = NULL;
-
- debugfs_remove(snic_glob->trc.trc_enable);
- snic_glob->trc.trc_enable = NULL;
+ debugfs_remove(debugfs_lookup(TRC_FILE, snic_glob->trc_root));
+ debugfs_remove(debugfs_lookup(TRC_ENABLE_FILE, snic_glob->trc_root));
}
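Three debugfs modernizations in one file: snic_stats gets its file_operations from DEFINE_SHOW_ATTRIBUTE(), the trace file from DEFINE_SEQ_ATTRIBUTE() (which requires the seq_operations to be named snic_trc_sops, hence the rename), and teardown stops caching dentry pointers, looking the files up by name instead. DEFINE_SHOW_ATTRIBUTE(snic_stats) expands to roughly the open/fops pair the patch deletes:

	static int snic_stats_open(struct inode *inode, struct file *file)
	{
		return single_open(file, snic_stats_show, inode->i_private);
	}

	static const struct file_operations snic_stats_fops = {
		.owner   = THIS_MODULE,
		.open    = snic_stats_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

Later kernels wrap the debugfs_remove(debugfs_lookup(...)) pattern as debugfs_lookup_and_remove(), since the bare lookup leaks a dentry reference.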
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index e9ccfb97773f..9b2b5f8c23b9 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/mempool.h>
@@ -100,7 +86,7 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON(ntgts == 0);
buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
- buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
+ buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) {
snic_req_free(snic, rqi);
SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
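Dropping GFP_DMA from the report-targets buffer is correct: that flag forces the allocation into the legacy 16MB ISA DMA zone, which a PCI adapter does not need; the buffer is mapped through the DMA API anyway, so plain GFP_KERNEL suffices. The usual pairing looks like this (sketch; pdev and the error labels are hypothetical):

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto err_free_req;

	pa = dma_map_single(&pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, pa))
		goto err_free_buf;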
diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h
index 97fa3f5c5bb4..9ad7f84a3484 100644
--- a/drivers/scsi/snic/snic_disc.h
+++ b/drivers/scsi/snic/snic_disc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index 2a045a57e365..2550ba964b03 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_FWINT_H
#define __SNIC_FWINT_H
@@ -159,7 +145,7 @@ struct snic_exch_ver_req {
* HBA Capabilities
* Bit 1: Reserved.
* Bit 2: Dynamic Discovery of LUNs.
- * Bit 3: Async event notifications on on tgt online/offline events.
+ * Bit 3: Async event notifications on tgt online/offline events.
* Bit 4: IO timeout support in FW.
* Bit 5-31: Reserved.
*/
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 159ee94d2a55..32a77bee41d5 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h
index 093d6524cd42..de6694a24c5f 100644
--- a/drivers/scsi/snic/snic_io.h
+++ b/drivers/scsi/snic/snic_io.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _SNIC_IO_H
#define _SNIC_IO_H
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index c4da3673f2ae..471a37422da9 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/string.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 14f4ce665e58..174f7811fe50 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
@@ -129,7 +115,7 @@ static struct scsi_host_template snic_host_template = {
.can_queue = SNIC_MAX_IO_REQ,
.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
.max_sectors = 0x800,
- .shost_attrs = snic_attrs,
+ .shost_groups = snic_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct snic_internal_io_state),
.proc_name = "snic_scsi",
diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c
index b54912c8ca0c..43f1a2823514 100644
--- a/drivers/scsi/snic/snic_res.c
+++ b/drivers/scsi/snic/snic_res.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h
index 273f72f2a023..53cf6b19ab28 100644
--- a/drivers/scsi/snic/snic_res.h
+++ b/drivers/scsi/snic/snic_res.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index b3650c989ed4..961af6fc21bc 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/mempool.h>
#include <linux/errno.h>
@@ -33,7 +19,7 @@
#include "snic_io.h"
#include "snic.h"
-#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag)
+#define snic_cmd_tag(sc) (scsi_cmd_to_rq(sc)->tag)
const char *snic_state_str[] = {
[SNIC_INIT] = "SNIC_INIT",
@@ -342,7 +328,7 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
sc->result = ret;
- sc->scsi_done(sc);
+ scsi_done(sc);
return 0;
}
@@ -676,8 +662,7 @@ snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
- if (sc->scsi_done)
- sc->scsi_done(sc);
+ scsi_done(sc);
snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */
@@ -855,14 +840,12 @@ snic_process_itmf_cmpl(struct snic *snic,
snic_release_req_buf(snic, rqi, sc);
- if (sc->scsi_done) {
- SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
- jiffies_to_msecs(jiffies - start_time),
- (ulong) fwreq, SNIC_TRC_CMD(sc),
- SNIC_TRC_CMD_STATE_FLAGS(sc));
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ (ulong) fwreq, SNIC_TRC_CMD(sc),
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
- sc->scsi_done(sc);
- }
+ scsi_done(sc);
break;
@@ -1387,19 +1370,15 @@ snic_issue_tm_req(struct snic *snic,
}
ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
- if (ret)
- goto tmreq_err;
-
- ret = 0;
tmreq_err:
if (ret) {
SNIC_HOST_ERR(snic->shost,
- "issu_tmreq: Queing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
+ "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
tmf, sc, rqi, req_id, tag, ret);
} else {
SNIC_SCSI_DBG(snic->shost,
- "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
+ "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
tmf, sc, rqi, req_id, tag);
}
@@ -1479,7 +1458,7 @@ snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
* Call scsi_done to complete the IO.
*/
sc->result = (DID_ERROR << 16);
- sc->scsi_done(sc);
+ scsi_done(sc);
break;
default:
@@ -1640,7 +1619,7 @@ snic_abort_cmd(struct scsi_cmnd *sc)
u32 start_time = jiffies;
SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
- sc, sc->cmnd[0], sc->request, tag);
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);
if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
SNIC_HOST_ERR(snic->shost,
@@ -1859,7 +1838,7 @@ snic_dr_clean_single_req(struct snic *snic,
snic_release_req_buf(snic, rqi, sc);
sc->result = (DID_ERROR << 16);
- sc->scsi_done(sc);
+ scsi_done(sc);
ret = 0;
@@ -2156,7 +2135,7 @@ snic_device_reset(struct scsi_cmnd *sc)
int dr_supp = 0;
SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
- sc, sc->cmnd[0], sc->request,
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc));
dr_supp = snic_dev_reset_supported(sc->device);
if (!dr_supp) {
@@ -2339,7 +2318,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
spin_lock_irqsave(&snic->snic_lock, flags);
if (snic_get_state(snic) == SNIC_FWRESET) {
spin_unlock_irqrestore(&snic->snic_lock, flags);
- SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n");
+ SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");
msleep(SNIC_HOST_RESET_TIMEOUT);
ret = SUCCESS;
@@ -2387,11 +2366,11 @@ snic_host_reset(struct scsi_cmnd *sc)
{
struct Scsi_Host *shost = sc->device->host;
u32 start_time = jiffies;
- int ret = FAILED;
+ int ret;
SNIC_SCSI_DBG(shost,
"host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
- sc, sc->cmnd[0], sc->request,
+ sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc), CMD_FLAGS(sc));
ret = snic_reset(shost, sc);
@@ -2498,20 +2477,18 @@ cleanup:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
SNIC_HOST_INFO(snic->shost,
"sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
- sc, sc->request->tag, CMD_FLAGS(sc), rqi,
+ sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
jiffies_to_msecs(jiffies - st_time));
/* Update IO stats */
snic_stats_update_io_cmpl(&snic->s_stats);
- if (sc->scsi_done) {
- SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
- jiffies_to_msecs(jiffies - st_time), 0,
- SNIC_TRC_CMD(sc),
- SNIC_TRC_CMD_STATE_FLAGS(sc));
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+ jiffies_to_msecs(jiffies - st_time), 0,
+ SNIC_TRC_CMD(sc),
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
- sc->scsi_done(sc);
- }
+ scsi_done(sc);
}
} /* end of snic_scsi_cleanup */
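Every snic_scsi.c hunk above follows from one mid-layer change: the completion callback is no longer stored on the command, so scsi_done(sc) replaces the sc->scsi_done(sc) indirection (and the if (sc->scsi_done) guards, since scsi_done() is always safe to call), and the request is reached through an accessor rather than a struct member. The whole conversion in three lines (sketch):

	struct request *rq = scsi_cmd_to_rq(sc);  /* was: sc->request */
	int tag = rq->tag;                        /* was: sc->request->tag */
	scsi_done(sc);                            /* was: sc->scsi_done(sc) */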
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index faf0cb601954..f0285c5a35f8 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_STATS_H
#define __SNIC_STATS_H
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index f23fe2f88438..c2e5ab7e976c 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
index 87dcc7457d15..c38e0dadc958 100644
--- a/drivers/scsi/snic/snic_trc.h
+++ b/drivers/scsi/snic/snic_trc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_TRC_H
#define __SNIC_TRC_H
@@ -46,9 +32,6 @@ struct snic_trc {
u32 rd_idx;
u32 wr_idx;
bool enable; /* Control Variable for Tracing */
-
- struct dentry *trc_enable; /* debugfs file object */
- struct dentry *trc_file;
};
int snic_trc_init(void);
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
index 4c8e64e4fba6..0d5d3bd4be1c 100644
--- a/drivers/scsi/snic/vnic_cq.c
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
@@ -31,8 +17,6 @@ void svnic_cq_free(struct vnic_cq *cq)
int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
- int err;
-
cq->index = index;
cq->vdev = vdev;
@@ -43,11 +27,7 @@ int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
return -EINVAL;
}
- err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
}
void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h
index 6e651c3e16f7..6cee911eec5f 100644
--- a/drivers/scsi/snic/vnic_cq.h
+++ b/drivers/scsi/snic/vnic_cq.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h
index c2d1bbd44bd1..d74954bc70e3 100644
--- a/drivers/scsi/snic/vnic_cq_fw.h
+++ b/drivers/scsi/snic/vnic_cq_fw.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_CQ_FW_H_
#define _VNIC_CQ_FW_H_
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index 05e374f80946..760f3f22095c 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h
index e65726da6504..d2f9b6f7b313 100644
--- a/drivers/scsi/snic/vnic_dev.h
+++ b/drivers/scsi/snic/vnic_dev.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
index d81b4f0ceaaa..9d82fcb7414b 100644
--- a/drivers/scsi/snic/vnic_devcmd.h
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
@@ -208,7 +194,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/*
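u8 data[0] is the old GCC zero-length-array idiom; the C99 flexible array member u8 data[] has the same layout but lets the compiler and the kernel's bounds-checking infrastructure see that the trailing array has no declared size. Allocation typically pairs it with struct_size() from <linux/overflow.h> (sketch; data_len is hypothetical):

	struct vnic_devcmd_provinfo *info;

	info = kzalloc(struct_size(info, data, data_len), GFP_KERNEL);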
diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c
index a7d54806787d..23627f9591f2 100644
--- a/drivers/scsi/snic/vnic_intr.c
+++ b/drivers/scsi/snic/vnic_intr.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h
index 4547f603fe5e..7bff60fafb07 100644
--- a/drivers/scsi/snic/vnic_intr.h
+++ b/drivers/scsi/snic/vnic_intr.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h
index 9713d6835db3..372596b0915f 100644
--- a/drivers/scsi/snic/vnic_resource.h
+++ b/drivers/scsi/snic/vnic_resource.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h
index 514d39f5cf00..ffc8a0fee577 100644
--- a/drivers/scsi/snic/vnic_snic.h
+++ b/drivers/scsi/snic/vnic_snic.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_SNIC_H_
#define _VNIC_SNIC_H_
diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h
index 370a37c97748..38155aae7a52 100644
--- a/drivers/scsi/snic/vnic_stats.h
+++ b/drivers/scsi/snic/vnic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c
index 1e91d432089e..48be9a3f4c3d 100644
--- a/drivers/scsi/snic/vnic_wq.c
+++ b/drivers/scsi/snic/vnic_wq.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h
index 7cc031c7ceba..1415da4b68dc 100644
--- a/drivers/scsi/snic/vnic_wq.h
+++ b/drivers/scsi/snic/vnic_wq.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h
index 68f62b6d105b..e8025331b503 100644
--- a/drivers/scsi/snic/wq_enet_desc.h
+++ b/drivers/scsi/snic/wq_enet_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
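
That completes the snic relicensing sweep, and every file follows the same convention the hunks above demonstrate: C sources take the SPDX tag as a C++-style comment on line one, while headers keep a C-style comment, i.e.

// SPDX-License-Identifier: GPL-2.0-only        /* first line of each .c file */

/* SPDX-License-Identifier: GPL-2.0-only */     /* first line of each .h file */
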
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e4240e4ae8bb..a278b739d0c5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -44,6 +44,7 @@
#include <linux/cdrom.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/mutex.h>
@@ -51,6 +52,8 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
@@ -79,7 +82,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
CDC_MRW|CDC_MRW_W|CDC_RAM)
-static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt);
@@ -105,20 +107,19 @@ static struct scsi_driver sr_template = {
static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG];
static DEFINE_SPINLOCK(sr_index_lock);
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sr_ref_mutex);
+static struct lock_class_key sr_bio_compl_lkclass;
static int sr_open(struct cdrom_device_info *, int);
static void sr_release(struct cdrom_device_info *);
static void get_sectorsize(struct scsi_cd *);
-static void get_capabilities(struct scsi_cd *);
+static int get_capabilities(struct scsi_cd *);
static unsigned int sr_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
static int sr_packet(struct cdrom_device_info *, struct packet_command *);
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nr, u8 *last_sense);
static const struct cdrom_device_ops sr_dops = {
.open = sr_open,
@@ -132,15 +133,14 @@ static const struct cdrom_device_ops sr_dops = {
.get_mcn = sr_get_mcn,
.reset = sr_reset,
.audio_ioctl = sr_audio_ioctl,
- .capability = SR_CAPABILITIES,
.generic_packet = sr_packet,
+ .read_cdda_bpc = sr_read_cdda_bpc,
+ .capability = SR_CAPABILITIES,
};
-static void sr_kref_release(struct kref *kref);
-
static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_cd, driver);
+ return disk->private_data;
}
static int sr_runtime_suspend(struct device *dev)
@@ -156,38 +156,6 @@ static int sr_runtime_suspend(struct device *dev)
return 0;
}
-/*
- * The get and put routines for the struct scsi_cd. Note this entity
- * has a scsi_device pointer and owns a reference to this.
- */
-static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
-{
- struct scsi_cd *cd = NULL;
-
- mutex_lock(&sr_ref_mutex);
- if (disk->private_data == NULL)
- goto out;
- cd = scsi_cd(disk);
- kref_get(&cd->kref);
- if (scsi_device_get(cd->device)) {
- kref_put(&cd->kref, sr_kref_release);
- cd = NULL;
- }
- out:
- mutex_unlock(&sr_ref_mutex);
- return cd;
-}
-
-static void scsi_cd_put(struct scsi_cd *cd)
-{
- struct scsi_device *sdev = cd->device;
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- scsi_device_put(sdev);
- mutex_unlock(&sr_ref_mutex);
-}
-
static unsigned int sr_get_events(struct scsi_device *sdev)
{
u8 buf[8];
@@ -219,6 +187,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
return DISK_EVENT_EJECT_REQUEST;
else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE;
+ else if (med->media_event_code == 3)
+ return DISK_EVENT_MEDIA_CHANGE;
return 0;
}
@@ -325,7 +295,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
int good_bytes = (result == 0 ? this_count : 0);
int block_sectors = 0;
long error_sector;
- struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
+ struct scsi_cd *cd = scsi_cd(rq->q->disk);
#ifdef DEBUG
scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -337,7 +308,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
* care is taken to avoid unnecessary additional work such as
* memcpy's that could be avoided.
*/
- if (driver_byte(result) != 0 && /* An error occurred */
+ if (scsi_status_is_check_condition(result) &&
(SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */
switch (SCpnt->sense_buffer[2]) {
case MEDIUM_ERROR:
@@ -345,20 +316,16 @@ static int sr_done(struct scsi_cmnd *SCpnt)
case ILLEGAL_REQUEST:
if (!(SCpnt->sense_buffer[0] & 0x90))
break;
- error_sector = (SCpnt->sense_buffer[3] << 24) |
- (SCpnt->sense_buffer[4] << 16) |
- (SCpnt->sense_buffer[5] << 8) |
- SCpnt->sense_buffer[6];
- if (SCpnt->request->bio != NULL)
- block_sectors =
- bio_sectors(SCpnt->request->bio);
+ error_sector =
+ get_unaligned_be32(&SCpnt->sense_buffer[3]);
+ if (rq->bio != NULL)
+ block_sectors = bio_sectors(rq->bio);
if (block_sectors < 4)
block_sectors = 4;
if (cd->device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
- good_bytes = (error_sector -
- blk_rq_pos(SCpnt->request)) << 9;
+ good_bytes = (error_sector - blk_rq_pos(rq)) << 9;
if (good_bytes < 0 || good_bytes >= this_count)
good_bytes = 0;
/*
@@ -390,17 +357,13 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
{
int block = 0, this_count, s_size;
struct scsi_cd *cd;
- struct request *rq = SCpnt->request;
+ struct request *rq = scsi_cmd_to_rq(SCpnt);
blk_status_t ret;
- ret = scsi_init_io(SCpnt);
+ ret = scsi_alloc_sgtables(SCpnt);
if (ret != BLK_STS_OK)
- goto out;
- cd = scsi_cd(rq->rq_disk);
-
- /* from here on until we're complete, any goto out
- * is used for a killable error condition */
- ret = BLK_STS_IOERR;
+ return ret;
+ cd = scsi_cd(rq->q->disk);
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block));
@@ -421,19 +384,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
goto out;
}
- /*
- * we do lazy blocksize switching (when reading XA sectors,
- * see CDROMREADMODE2 ioctl)
- */
s_size = cd->device->sector_size;
- if (s_size > 2048) {
- if (!in_interrupt())
- sr_set_blocklength(cd, 2048);
- else
- scmd_printk(KERN_INFO, SCpnt,
- "can't switch blocksize: in interrupt\n");
- }
-
if (s_size != 512 && s_size != 1024 && s_size != 2048) {
scmd_printk(KERN_ERR, SCpnt, "bad sector size %d\n", s_size);
goto out;
@@ -496,13 +447,9 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->sdb.length = this_count * s_size;
}
- SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ put_unaligned_be32(block, &SCpnt->cmnd[2]);
SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
- SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ put_unaligned_be16(this_count, &SCpnt->cmnd[7]);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -512,49 +459,60 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
+ SCpnt->cmd_len = 10;
/*
- * This indicates that the command is ready from our end to be
- * queued.
+ * This indicates that the command is ready from our end to be queued.
*/
- ret = BLK_STS_OK;
+ return BLK_STS_OK;
out:
- return ret;
+ scsi_free_sgtables(SCpnt);
+ return BLK_STS_IOERR;
+}
+
+static void sr_revalidate_disk(struct scsi_cd *cd)
+{
+ struct scsi_sense_hdr sshdr;
+
+ /* if the unit is not ready, nothing more to do */
+ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+ return;
+ sr_cd_check(&cd->cdi);
+ get_sectorsize(cd);
}
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_cd *cd;
- struct scsi_device *sdev;
- int ret = -ENXIO;
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ int ret;
- cd = scsi_cd_get(bdev->bd_disk);
- if (!cd)
- goto out;
+ if (scsi_device_get(cd->device))
+ return -ENXIO;
- sdev = cd->device;
scsi_autopm_get_device(sdev);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ sr_revalidate_disk(cd);
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = cdrom_open(&cd->cdi, bdev, mode);
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
scsi_autopm_put_device(sdev);
if (ret)
- scsi_cd_put(cd);
-
-out:
+ scsi_device_put(cd->device);
return ret;
}
static void sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
- mutex_lock(&sr_mutex);
+
+ mutex_lock(&cd->lock);
cdrom_release(&cd->cdi, mode);
- scsi_cd_put(cd);
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
+
+ scsi_device_put(cd->device);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -565,50 +523,10 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
- mutex_lock(&sr_mutex);
-
- ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
- (mode & FMODE_NDELAY) != 0);
- if (ret)
- goto out;
-
- scsi_autopm_get_device(sdev);
-
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to cdrom/block level.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- ret = scsi_ioctl(sdev, cmd, argp);
- goto put;
- }
-
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
- if (ret != -ENOSYS)
- goto put;
-
- ret = scsi_ioctl(sdev, cmd, argp);
-
-put:
- scsi_autopm_put_device(sdev);
-
-out:
- mutex_unlock(&sr_mutex);
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
- unsigned long arg)
-{
- struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
- struct scsi_device *sdev = cd->device;
- void __user *argp = compat_ptr(arg);
- int ret;
+ if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+ return -ENOIOCTLCMD;
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
@@ -617,68 +535,41 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign
scsi_autopm_get_device(sdev);
- /*
- * Send SCSI addressing ioctls directly to mid level, send other
- * ioctls to cdrom/block level.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- ret = scsi_compat_ioctl(sdev, cmd, argp);
- goto put;
+ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ if (ret != -ENOSYS)
+ goto put;
}
-
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
- if (ret != -ENOSYS)
- goto put;
-
- ret = scsi_compat_ioctl(sdev, cmd, argp);
+ ret = scsi_ioctl(sdev, mode, cmd, argp);
put:
scsi_autopm_put_device(sdev);
-
out:
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
return ret;
-
}
-#endif
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
- unsigned int ret = 0;
- struct scsi_cd *cd;
+ struct scsi_cd *cd = disk->private_data;
- cd = scsi_cd_get(disk);
- if (!cd)
+ if (atomic_read(&cd->device->disk_events_disable_depth))
return 0;
-
- if (!atomic_read(&cd->device->disk_events_disable_depth))
- ret = cdrom_check_events(&cd->cdi, clearing);
-
- scsi_cd_put(cd);
- return ret;
+ return cdrom_check_events(&cd->cdi, clearing);
}
-static int sr_block_revalidate_disk(struct gendisk *disk)
+static void sr_free_disk(struct gendisk *disk)
{
- struct scsi_sense_hdr sshdr;
- struct scsi_cd *cd;
+ struct scsi_cd *cd = disk->private_data;
- cd = scsi_cd_get(disk);
- if (!cd)
- return -ENXIO;
-
- /* if the unit is not ready, nothing more to do */
- if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
- goto out;
+ spin_lock(&sr_index_lock);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
+ spin_unlock(&sr_index_lock);
- sr_cd_check(&cd->cdi);
- get_sectorsize(cd);
-out:
- scsi_cd_put(cd);
- return 0;
+ unregister_cdrom(&cd->cdi);
+ mutex_destroy(&cd->lock);
+ kfree(cd);
}
static const struct block_device_operations sr_bdops =
@@ -687,11 +578,9 @@ static const struct block_device_operations sr_bdops =
.open = sr_block_open,
.release = sr_block_release,
.ioctl = sr_block_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sr_block_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sr_block_check_events,
- .revalidate_disk = sr_block_revalidate_disk,
+ .free_disk = sr_free_disk,
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
@@ -716,11 +605,6 @@ error_out:
static void sr_release(struct cdrom_device_info *cdi)
{
- struct scsi_cd *cd = cdi->handle;
-
- if (cd->device->sector_size > 2048)
- sr_set_blocklength(cd, 2048);
-
}
static int sr_probe(struct device *dev)
@@ -740,11 +624,11 @@ static int sr_probe(struct device *dev)
if (!cd)
goto fail;
- kref_init(&cd->kref);
-
- disk = alloc_disk(1);
+ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
+ &sr_bio_compl_lkclass);
if (!disk)
goto fail_free;
+ mutex_init(&cd->lock);
spin_lock(&sr_index_lock);
minor = find_first_zero_bit(sr_index_bits, SR_DISKS);
@@ -758,18 +642,18 @@ static int sr_probe(struct device *dev)
disk->major = SCSI_CDROM_MAJOR;
disk->first_minor = minor;
+ disk->minors = 1;
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
- disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
- disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT |
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
cd->device = sdev;
cd->disk = disk;
- cd->driver = &sr_template;
- cd->disk = disk;
cd->capacity = 0x1fffff;
cd->device->changed = 1; /* force recheck CD type */
cd->media_present = 1;
@@ -785,17 +669,16 @@ static int sr_probe(struct device *dev)
sdev->sector_size = 2048; /* A guess, just in case */
- /* FIXME: need to handle a get_capabilities failure properly ?? */
- get_capabilities(cd);
+ error = -ENOMEM;
+ if (get_capabilities(cd))
+ goto fail_minor;
sr_vendor_init(cd);
set_capacity(disk, cd->capacity);
- disk->private_data = &cd->driver;
- disk->queue = sdev->request_queue;
- cd->cdi.disk = disk;
+ disk->private_data = cd;
- if (register_cdrom(&cd->cdi))
- goto fail_put;
+ if (register_cdrom(disk, &cd->cdi))
+ goto fail_minor;
/*
* Initialize block layer runtime PM stuffs before the
@@ -804,8 +687,11 @@ static int sr_probe(struct device *dev)
blk_pm_runtime_init(sdev->request_queue, dev);
dev_set_drvdata(dev, cd);
- disk->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&sdev->sdev_gendev, disk, NULL);
+ sr_revalidate_disk(cd);
+
+ error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
+ if (error)
+ goto unregister_cdrom;
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -813,8 +699,15 @@ static int sr_probe(struct device *dev)
return 0;
+unregister_cdrom:
+ unregister_cdrom(&cd->cdi);
+fail_minor:
+ spin_lock(&sr_index_lock);
+ clear_bit(minor, sr_index_bits);
+ spin_unlock(&sr_index_lock);
fail_put:
put_disk(disk);
+ mutex_destroy(&cd->lock);
fail_free:
kfree(cd);
fail:
@@ -852,8 +745,7 @@ static void get_sectorsize(struct scsi_cd *cd)
} else {
long last_written;
- cd->capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) |
- (buffer[2] << 8) | buffer[3]);
+ cd->capacity = 1 + get_unaligned_be32(&buffer[0]);
/*
* READ_CAPACITY doesn't return the correct size on
* certain UDF media. If last_written is larger, use
@@ -864,8 +756,7 @@ static void get_sectorsize(struct scsi_cd *cd)
if (!cdrom_get_last_written(&cd->cdi, &last_written))
cd->capacity = max_t(long, cd->capacity, last_written);
- sector_size = (buffer[4] << 24) |
- (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ sector_size = get_unaligned_be32(&buffer[4]);
switch (sector_size) {
/*
* HP 4020i CD-Recorder reports 2340 byte sectors
@@ -877,10 +768,10 @@ static void get_sectorsize(struct scsi_cd *cd)
case 2340:
case 2352:
sector_size = 2048;
- /* fall through */
+ fallthrough;
case 2048:
cd->capacity *= 4;
- /* fall through */
+ fallthrough;
case 512:
break;
default:
@@ -904,7 +795,7 @@ static void get_sectorsize(struct scsi_cd *cd)
return;
}
-static void get_capabilities(struct scsi_cd *cd)
+static int get_capabilities(struct scsi_cd *cd)
{
unsigned char *buffer;
struct scsi_mode_data data;
@@ -926,10 +817,10 @@ static void get_capabilities(struct scsi_cd *cd)
/* allocate transfer buffer */
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer) {
sr_printk(KERN_ERR, cd, "out of memory.\n");
- return;
+ return -ENOMEM;
}
/* eat unit attentions */
@@ -939,7 +830,7 @@ static void get_capabilities(struct scsi_cd *cd)
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ if (rc < 0 || data.length > ms_len ||
data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
@@ -949,17 +840,17 @@ static void get_capabilities(struct scsi_cd *cd)
CDC_MRW | CDC_MRW_W | CDC_RAM);
kfree(buffer);
sr_printk(KERN_INFO, cd, "scsi-1 drive");
- return;
+ return 0;
}
n = data.header_length + data.block_descriptor_length;
- cd->cdi.speed = ((buffer[n + 8] << 8) + buffer[n + 9]) / 176;
+ cd->cdi.speed = get_unaligned_be16(&buffer[n + 8]) / 176;
cd->readcd_known = 1;
cd->readcd_cdda = buffer[n + 5] & 0x01;
/* print some capability bits */
sr_printk(KERN_INFO, cd,
"scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n",
- ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
+ get_unaligned_be16(&buffer[n + 14]) / 176,
cd->cdi.speed,
buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
buffer[n + 3] & 0x20 ? "dvd-ram " : "",
@@ -1008,6 +899,7 @@ static void get_capabilities(struct scsi_cd *cd)
}
kfree(buffer);
+ return 0;
}
/*
@@ -1031,31 +923,54 @@ static int sr_packet(struct cdrom_device_info *cdi,
return cgc->stat;
}
-/**
- * sr_kref_release - Called to free the scsi_cd structure
- * @kref: pointer to embedded kref
- *
- * sr_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_cd_get()
- * scsi_cd_put() helpers which manipulate the semaphore directly
- * and never do a direct kref_put().
- **/
-static void sr_kref_release(struct kref *kref)
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nr, u8 *last_sense)
{
- struct scsi_cd *cd = container_of(kref, struct scsi_cd, kref);
- struct gendisk *disk = cd->disk;
-
- spin_lock(&sr_index_lock);
- clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
- spin_unlock(&sr_index_lock);
-
- unregister_cdrom(&cd->cdi);
+ struct gendisk *disk = cdi->disk;
+ u32 len = nr * CD_FRAMESIZE_RAW;
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+ struct bio *bio;
+ int ret;
- disk->private_data = NULL;
+ rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
- put_disk(disk);
+ ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
+ if (ret)
+ goto out_put_request;
+
+ scmd->cmnd[0] = GPCMD_READ_CD;
+ scmd->cmnd[1] = 1 << 2;
+ scmd->cmnd[2] = (lba >> 24) & 0xff;
+ scmd->cmnd[3] = (lba >> 16) & 0xff;
+ scmd->cmnd[4] = (lba >> 8) & 0xff;
+ scmd->cmnd[5] = lba & 0xff;
+ scmd->cmnd[6] = (nr >> 16) & 0xff;
+ scmd->cmnd[7] = (nr >> 8) & 0xff;
+ scmd->cmnd[8] = nr & 0xff;
+ scmd->cmnd[9] = 0xf8;
+ scmd->cmd_len = 12;
+ rq->timeout = 60 * HZ;
+ bio = rq->bio;
+
+ blk_execute_rq(rq, false);
+ if (scmd->result) {
+ struct scsi_sense_hdr sshdr;
+
+ scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
+ &sshdr);
+ *last_sense = sshdr.sense_key;
+ ret = -EIO;
+ }
- kfree(cd);
+ if (blk_rq_unmap_user(bio))
+ ret = -EFAULT;
+out_put_request:
+ blk_mq_free_request(rq);
+ return ret;
}
static int sr_remove(struct device *dev)
@@ -1065,11 +980,7 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
- dev_set_drvdata(dev, NULL);
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- mutex_unlock(&sr_ref_mutex);
+ put_disk(cd->disk);
return 0;
}
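
A recurring cleanup through the sr.c diff above is the replacement of open-coded big-endian shift/mask sequences with the get_unaligned_be32()/put_unaligned_be32()/put_unaligned_be16() helpers from the newly included <asm/unaligned.h>. A rough userspace sketch of what the 32-bit pair computes — simplified stand-ins, since the kernel versions also handle architectures that trap on unaligned access:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t get_unaligned_be32(const void *p)
{
        const uint8_t *b = p;

        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8) | b[3];
}

static inline void put_unaligned_be32(uint32_t v, void *p)
{
        uint8_t *b = p;

        b[0] = v >> 24;
        b[1] = v >> 16;
        b[2] = v >> 8;
        b[3] = v;
}

int main(void)
{
        uint8_t cdb[10] = { 0x28 };     /* READ(10), as in sr_init_command() */
        uint32_t lba = 0x12345678;

        /* replaces four hand-written "(block >> N) & 0xff" stores */
        put_unaligned_be32(lba, &cdb[2]);
        printf("round trip: %#x\n", (unsigned)get_unaligned_be32(&cdb[2]));
        return 0;
}
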
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index a2bb7b8bace5..1175f2e213b5 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -18,8 +18,7 @@
#ifndef _SR_H
#define _SR_H
-#include <linux/genhd.h>
-#include <linux/kref.h>
+#include <linux/mutex.h>
#define MAX_RETRIES 3
#define SR_TIMEOUT (30 * HZ)
@@ -32,7 +31,6 @@ struct scsi_device;
typedef struct scsi_cd {
- struct scsi_driver *driver;
unsigned capacity; /* size in blocks */
struct scsi_device *device;
unsigned int vendor; /* vendor code, see sr_vendor.c */
@@ -51,9 +49,7 @@ typedef struct scsi_cd {
bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
struct cdrom_device_info cdi;
- /* We hold gendisk and scsi_device references on probe and use
- * the refs on this kref to decide when to release them */
- struct kref kref;
+ struct mutex lock;
struct gendisk *disk;
} Scsi_CD;
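
The sr.h change pairs with the sr.c rework above: the driver-global sr_mutex/sr_ref_mutex plus hand-rolled kref are gone, replaced by a mutex embedded in each scsi_cd (created in sr_probe(), torn down in sr_free_disk()) with object lifetime handed off to the gendisk refcount. One practical effect is that operations on one drive no longer serialize against every other drive. A small pthread sketch of that locking shape, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct cd {
        pthread_mutex_t lock;   /* per-device; was one global mutex */
        int users;
};

static struct cd *cd_alloc(void)                /* cf. sr_probe() */
{
        struct cd *cd = calloc(1, sizeof(*cd));

        if (cd)
                pthread_mutex_init(&cd->lock, NULL);
        return cd;
}

static void cd_free(struct cd *cd)              /* cf. sr_free_disk() */
{
        pthread_mutex_destroy(&cd->lock);
        free(cd);
}

static void cd_op(struct cd *cd)
{
        pthread_mutex_lock(&cd->lock);  /* contends only with this drive */
        cd->users++;
        pthread_mutex_unlock(&cd->lock);
}

int main(void)
{
        struct cd *cd = cd_alloc();

        if (!cd)
                return 1;
        cd_op(cd);
        cd_free(cd);
        return 0;
}
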
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ffcf902da390..fbdb5124d7f7 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -33,10 +33,6 @@ static int xa_test = 0;
module_param(xa_test, int, S_IRUGO | S_IWUSR);
-/* primitive to determine whether we need to have GFP_DMA set based on
- * the status of the unchecked_isa_dma flag in the host structure */
-#define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0)
-
static int sr_read_tochdr(struct cdrom_device_info *cdi,
struct cdrom_tochdr *tochdr)
{
@@ -45,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -59,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tochdr->cdth_trk0 = buffer[2];
tochdr->cdth_trk1 = buffer[3];
+err:
kfree(buffer);
return result;
}
@@ -75,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -90,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tocentry->cdte_ctrl = buffer[5] & 0xf;
tocentry->cdte_adr = buffer[5] >> 4;
@@ -102,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ buffer[10]) << 8) + buffer[11];
+err:
kfree(buffer);
return result;
}
@@ -205,7 +207,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
- if (driver_byte(result) != 0) {
+ if (result < 0) {
+ err = result;
+ goto out;
+ }
+ if (scsi_status_is_check_condition(result)) {
switch (sshdr->sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
- char *buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+ char *buffer = kzalloc(32, GFP_KERNEL);
int result;
if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
cgc.data_direction = DMA_FROM_DEVICE;
cgc.timeout = IOCTL_TIMEOUT;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
memcpy(mcn->medium_catalog_number, buffer + 9, 13);
mcn->medium_catalog_number[13] = 0;
+err:
kfree(buffer);
return result;
}
@@ -523,7 +532,7 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
return rc;
cd->readcd_known = 0;
sr_printk(KERN_INFO, cd,
- "CDROM does'nt support READ CD (0xbe) command\n");
+ "CDROM doesn't support READ CD (0xbe) command\n");
/* fall & retry the other way */
}
/* ... if this fails, we switch the blocksize using MODE SELECT */
@@ -549,6 +558,8 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
cgc.timeout = IOCTL_TIMEOUT;
rc = sr_do_ioctl(cd, &cgc);
+ if (blksize != CD_FRAMESIZE)
+ rc |= sr_set_blocklength(cd, CD_FRAMESIZE);
return rc;
}
@@ -565,7 +576,7 @@ int sr_is_xa(Scsi_CD *cd)
if (!xa_test)
return 0;
- raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
+ raw_sector = kmalloc(2048, GFP_KERNEL);
if (!raw_sector)
return -ENOMEM;
if (0 == sr_read_sector(cd, cd->ms_offset + 16,
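
Two patterns repeat through the sr_ioctl.c hunks: result buffers move from kmalloc(..., GFP_KERNEL | SR_GFP_DMA(cd)) to plain kzalloc()/kmalloc(GFP_KERNEL) now that ISA-DMA-restricted hosts are gone, and each caller gains an error check between issuing the command and parsing the reply, so a failed command can no longer hand zero-filled buffer contents back as valid data. A compact userspace sketch of the second pattern; do_cmd() is an assumed stand-in for the SCSI round trip:

#include <errno.h>
#include <stdlib.h>

static int do_cmd(unsigned char *buf, size_t len);      /* assumed helper */

static int read_tochdr(unsigned char *trk0, unsigned char *trk1)
{
        unsigned char *buffer = calloc(1, 32);          /* cf. kzalloc() */
        int result;

        if (!buffer)
                return -ENOMEM;

        result = do_cmd(buffer, 32);
        if (result)
                goto err;       /* don't parse a reply that never arrived */

        *trk0 = buffer[2];
        *trk1 = buffer[3];
err:
        free(buffer);
        return result;
}

static int do_cmd(unsigned char *buf, size_t len)
{
        buf[2] = 1;             /* pretend the device answered */
        buf[3] = 9;
        (void)len;
        return 0;
}

int main(void)
{
        unsigned char t0, t1;

        return read_tochdr(&t0, &t1) ? 1 : 0;
}
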
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 17a56c87d383..a61635326ae0 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -67,9 +67,6 @@
void sr_vendor_init(Scsi_CD *cd)
{
-#ifndef CONFIG_BLK_DEV_SR_VENDOR
- cd->vendor = VENDOR_SCSI3;
-#else
const char *vendor = cd->device->vendor;
const char *model = cd->device->model;
@@ -118,7 +115,6 @@ void sr_vendor_init(Scsi_CD *cd)
CDC_PLAY_AUDIO
);
}
-#endif
}
@@ -132,12 +128,10 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
struct ccs_modesel_head *modesel;
int rc, density = 0;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
-#endif
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -185,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (cd->cdi.mask & CDC_MULTI_SESSION)
return 0;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -223,7 +217,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
}
break;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
case VENDOR_NEC:{
unsigned long min, sec, frame;
cgc.cmd[0] = 0xde;
@@ -316,7 +309,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
sector = buffer[11] + (buffer[10] << 8) +
(buffer[9] << 16) + (buffer[8] << 24);
break;
-#endif /* CONFIG_BLK_DEV_SR_VENDOR */
default:
/* should not happen */
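
sr_vendor.c loses its CONFIG_BLK_DEV_SR_VENDOR conditionals altogether: the Toshiba/NEC multisession and blocklength quirks are now always built and selected purely at runtime from the detected vendor id, and the GFP_DMA allocations become ordinary GFP_KERNEL for the same reason as above. A sketch of the compile-time-to-runtime shape, with illustrative names and values:

#include <stdio.h>

enum vendor { VENDOR_SCSI3, VENDOR_TOSHIBA, VENDOR_NEC };  /* illustrative */

static int blocklength_density(enum vendor v, int blocklength)
{
        /* previously compiled out unless CONFIG_BLK_DEV_SR_VENDOR was set */
        if (v == VENDOR_TOSHIBA)
                return blocklength > 2048 ? 0x81 : 0x83;
        return 0;
}

int main(void)
{
        printf("density=%#x\n", blocklength_density(VENDOR_TOSHIBA, 2340));
        return 0;
}
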
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 393f3019ccac..b90a440e135d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
- file Documentation/scsi/st.txt for more information.
+ file Documentation/scsi/st.rst for more information.
History:
Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
@@ -32,6 +32,7 @@ static const char *verstr = "20160209";
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mtio.h>
+#include <linux/major.h>
#include <linux/cdrom.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
@@ -45,6 +46,7 @@ static const char *verstr = "20160209";
#include <linux/uaccess.h>
#include <asm/dma.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
@@ -187,7 +189,7 @@ static int st_max_sg_segs = ST_MAX_SG;
static int modes_defined;
-static int enlarge_buffer(struct st_buffer *, int, int);
+static int enlarge_buffer(struct st_buffer *, int);
static void clear_buffer(struct st_buffer *);
static void normalize_buffer(struct st_buffer *);
static int append_to_buffer(const char __user *, struct st_buffer *, int);
@@ -308,13 +310,8 @@ static char * st_incompatible(struct scsi_device* SDp)
}
-static inline char *tape_name(struct scsi_tape *tape)
-{
- return tape->disk->disk_name;
-}
-
#define st_printk(prefix, t, fmt, a...) \
- sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
+ sdev_prefix_printk(prefix, (t)->device, (t)->name, fmt, ##a)
#ifdef DEBUG
#define DEBC_printk(t, fmt, a...) \
if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
@@ -338,14 +335,14 @@ static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
switch (sense[0] & 0x7f) {
case 0x71:
s->deferred = 1;
- /* fall through */
+ fallthrough;
case 0x70:
s->fixed_format = 1;
s->flags = sense[2] & 0xe0;
break;
case 0x73:
s->deferred = 1;
- /* fall through */
+ fallthrough;
case 0x72:
s->fixed_format = 0;
ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
@@ -362,7 +359,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
int result = SRpnt->result;
u8 scode;
DEB(const char *stp;)
- char *name = tape_name(STp);
+ char *name = STp->name;
struct st_cmdstatus *cmdstatp;
if (!result)
@@ -389,8 +386,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (!debugging) { /* Abnormal conditions for tape */
if (!cmdstatp->have_sense)
st_printk(KERN_WARNING, STp,
- "Error %x (driver bt 0x%x, host bt 0x%x).\n",
- result, driver_byte(result), host_byte(result));
+ "Error %x (driver bt 0, host bt 0x%x).\n",
+ result, host_byte(result));
else if (cmdstatp->have_sense &&
scode != NO_SENSE &&
scode != RECOVERED_ERROR &&
@@ -475,15 +472,16 @@ static void st_release_request(struct st_request *streq)
static void st_do_stats(struct scsi_tape *STp, struct request *req)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
ktime_t now;
now = ktime_get();
- if (scsi_req(req)->cmd[0] == WRITE_6) {
+ if (scmd->cmnd[0] == WRITE_6) {
now = ktime_sub(now, STp->stats->write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->write_cnt);
- if (scsi_req(req)->result) {
+ if (scmd->result) {
atomic64_add(atomic_read(&STp->stats->last_write_size)
- STp->buffer->cmdstat.residual,
&STp->stats->write_byte_cnt);
@@ -492,12 +490,12 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
} else
atomic64_add(atomic_read(&STp->stats->last_write_size),
&STp->stats->write_byte_cnt);
- } else if (scsi_req(req)->cmd[0] == READ_6) {
+ } else if (scmd->cmnd[0] == READ_6) {
now = ktime_sub(now, STp->stats->read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->read_cnt);
- if (scsi_req(req)->result) {
+ if (scmd->result) {
atomic64_add(atomic_read(&STp->stats->last_read_size)
- STp->buffer->cmdstat.residual,
&STp->stats->read_byte_cnt);
@@ -514,26 +512,28 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_dec(&STp->stats->in_flight);
}
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+ blk_status_t status)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
- struct scsi_request *rq = scsi_req(req);
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
- STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
- STp->buffer->cmdstat.residual = rq->resid_len;
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = scmd->result;
+ STp->buffer->cmdstat.residual = scmd->resid_len;
st_do_stats(STp, req);
tmp = SRpnt->bio;
- if (rq->sense_len)
- memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ if (scmd->sense_len)
+ memcpy(SRpnt->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
blk_rq_unmap_user(tmp);
- blk_put_request(req);
+ blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
@@ -541,17 +541,17 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int timeout, int retries)
{
struct request *req;
- struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
struct scsi_tape *STp = SRpnt->stp;
+ struct scsi_cmnd *scmd;
- req = blk_get_request(SRpnt->stp->device->request_queue,
+ req = scsi_alloc_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
- return DRIVER_ERROR << 24;
- rq = scsi_req(req);
+ return PTR_ERR(req);
+ scmd = blk_mq_rq_to_pdu(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
@@ -560,8 +560,8 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
GFP_KERNEL);
if (err) {
- blk_put_request(req);
- return DRIVER_ERROR << 24;
+ blk_mq_free_request(req);
+ return err;
}
}
@@ -577,14 +577,14 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
}
SRpnt->bio = req->bio;
- rq->cmd_len = COMMAND_SIZE(cmd[0]);
- memset(rq->cmd, 0, BLK_MAX_CDB);
- memcpy(rq->cmd, cmd, rq->cmd_len);
+ scmd->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(scmd->cmnd, cmd, scmd->cmd_len);
req->timeout = timeout;
- rq->retries = retries;
+ scmd->allowed = retries;
+ req->end_io = st_scsi_execute_end;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
+ blk_execute_rq_nowait(req, true);
return 0;
}
@@ -1268,8 +1268,8 @@ static int st_open(struct inode *inode, struct file *filp)
spin_lock(&st_use_lock);
if (STp->in_use) {
spin_unlock(&st_use_lock);
- scsi_tape_put(STp);
DEBC_printk(STp, "Device already in use.\n");
+ scsi_tape_put(STp);
return (-EBUSY);
}
@@ -1288,7 +1288,7 @@ static int st_open(struct inode *inode, struct file *filp)
}
/* See that we have at least a one page buffer available */
- if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) {
+ if (!enlarge_buffer(STp->buffer, PAGE_SIZE)) {
st_printk(KERN_WARNING, STp,
"Can't allocate one page tape buffer.\n");
retval = (-EOVERFLOW);
@@ -1456,7 +1456,6 @@ static int st_flush(struct file *filp, fl_owner_t id)
accessing this tape. */
static int st_release(struct inode *inode, struct file *filp)
{
- int result = 0;
struct scsi_tape *STp = filp->private_data;
if (STp->door_locked == ST_LOCKED_AUTO)
@@ -1469,9 +1468,9 @@ static int st_release(struct inode *inode, struct file *filp)
scsi_autopm_put_device(STp->device);
scsi_tape_put(STp);
- return result;
+ return 0;
}
-
+
/* The checks common to both reading and writing */
static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
{
@@ -1586,7 +1585,7 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
}
if (bufsize > STbp->buffer_size &&
- !enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
+ !enlarge_buffer(STbp, bufsize)) {
st_printk(KERN_WARNING, STp,
"Can't allocate %d byte tape buffer.\n",
bufsize);
@@ -2680,8 +2679,7 @@ static void deb_space_print(struct scsi_tape *STp, int direction, char *units, u
if (!debugging)
return;
- sc = cmd[2] & 0x80 ? 0xff000000 : 0;
- sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
+ sc = sign_extend32(get_unaligned_be24(&cmd[2]), 23);
if (direction)
sc = -sc;
st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n",
@@ -2724,7 +2722,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
switch (cmd_in) {
case MTFSFM:
chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
+ fallthrough;
case MTFSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
@@ -2739,7 +2737,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
break;
case MTBSFM:
chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
+ fallthrough;
case MTBSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
@@ -2847,7 +2845,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
case MTNOP:
DEBC_printk(STp, "No op on tape.\n");
return 0; /* Should do something ? */
- break;
case MTRETEN:
cmd[0] = START_STOP;
if (STp->immediate) {
@@ -3501,8 +3498,9 @@ out:
/* The ioctl command */
-static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
+ void __user *p = (void __user *)arg;
int i, cmd_nr, cmd_type, bt;
int retval = 0;
unsigned int blk;
@@ -3822,73 +3820,45 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
goto out;
}
mutex_unlock(&STp->lock);
- switch (cmd_in) {
- case SCSI_IOCTL_STOP_UNIT:
- /* unload */
- retval = scsi_ioctl(STp->device, cmd_in, p);
- if (!retval) {
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- break;
+ switch (cmd_in) {
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+ case CDROM_SEND_PACKET:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ break;
+ default:
+ break;
+ }
- default:
- if ((cmd_in == SG_IO ||
- cmd_in == SCSI_IOCTL_SEND_COMMAND ||
- cmd_in == CDROM_SEND_PACKET) &&
- !capable(CAP_SYS_RAWIO))
- i = -EPERM;
- else
- i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
- file->f_mode, cmd_in, p);
- if (i != -ENOTTY)
- return i;
- break;
+ retval = scsi_ioctl(STp->device, file->f_mode, cmd_in, p);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+ /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
}
- return -ENOTTY;
+ return retval;
out:
mutex_unlock(&STp->lock);
return retval;
}
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
-{
- void __user *p = (void __user *)arg;
- struct scsi_tape *STp = file->private_data;
- int ret;
-
- ret = st_ioctl_common(file, cmd_in, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_ioctl(STp->device, cmd_in, p);
-}
-
#ifdef CONFIG_COMPAT
static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
- void __user *p = compat_ptr(arg);
- struct scsi_tape *STp = file->private_data;
- int ret;
-
/* argument conversion is handled using put_user_mtpos/put_user_mtget */
switch (cmd_in) {
case MTIOCPOS32:
- return st_ioctl_common(file, MTIOCPOS, p);
+ cmd_in = MTIOCPOS;
+ break;
case MTIOCGET32:
- return st_ioctl_common(file, MTIOCGET, p);
+ cmd_in = MTIOCGET;
+ break;
}
- ret = st_ioctl_common(file, cmd_in, p);
- if (ret != -ENOTTY)
- return ret;
-
- return scsi_compat_ioctl(STp->device, cmd_in, p);
+ return st_ioctl(file, cmd_in, arg);
}
#endif
@@ -3896,7 +3866,7 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned lon
/* Try to allocate a new tape buffer. Calling function must not hold
dev_arr_lock. */
-static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
+static struct st_buffer *new_tape_buffer(int max_sg)
{
struct st_buffer *tb;
@@ -3907,7 +3877,6 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
}
tb->frp_segs = 0;
tb->use_sg = max_sg;
- tb->dma = need_dma;
tb->buffer_size = 0;
tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
@@ -3924,7 +3893,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
/* Try to allocate enough space in the tape buffer */
#define ST_MAX_ORDER 6
-static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
+static int enlarge_buffer(struct st_buffer * STbuffer, int new_size)
{
int segs, max_segs, b_size, order, got;
gfp_t priority;
@@ -3938,8 +3907,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
max_segs = STbuffer->use_sg;
priority = GFP_KERNEL | __GFP_NOWARN;
- if (need_dma)
- priority |= GFP_DMA;
if (STbuffer->cleared)
priority |= __GFP_ZERO;
@@ -3959,7 +3926,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
if (order == ST_MAX_ORDER)
return 0;
normalize_buffer(STbuffer);
- return enlarge_buffer(STbuffer, new_size, need_dma);
+ return enlarge_buffer(STbuffer, new_size);
}
for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
@@ -4221,7 +4188,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
i = mode << (4 - ST_NBR_MODE_BITS);
snprintf(name, 10, "%s%s%s", rew ? "n" : "",
- tape->disk->disk_name, st_formats[i]);
+ tape->name, st_formats[i]);
dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
cdev_devno, &tape->modes[mode], "%s", name);
@@ -4276,17 +4243,15 @@ static void remove_cdevs(struct scsi_tape *tape)
static int st_probe(struct device *dev)
{
struct scsi_device *SDp = to_scsi_device(dev);
- struct gendisk *disk = NULL;
struct scsi_tape *tpnt = NULL;
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *buffer;
int i, error;
- char *stp;
if (SDp->type != TYPE_TAPE)
return -ENODEV;
- if ((stp = st_incompatible(SDp))) {
+ if (st_incompatible(SDp)) {
sdev_printk(KERN_INFO, SDp,
"OnStream tapes are no longer supported;\n");
sdev_printk(KERN_INFO, SDp,
@@ -4298,7 +4263,7 @@ static int st_probe(struct device *dev)
i = queue_max_segments(SDp->request_queue);
if (st_max_sg_segs < i)
i = st_max_sg_segs;
- buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
+ buffer = new_tape_buffer(i);
if (buffer == NULL) {
sdev_printk(KERN_ERR, SDp,
"st: Can't allocate new tape buffer. "
@@ -4306,28 +4271,13 @@ static int st_probe(struct device *dev)
goto out;
}
- disk = alloc_disk(1);
- if (!disk) {
- sdev_printk(KERN_ERR, SDp,
- "st: out of memory. Device not attached.\n");
- goto out_buffer_free;
- }
-
tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
if (tpnt == NULL) {
sdev_printk(KERN_ERR, SDp,
"st: Can't allocate device descriptor.\n");
- goto out_put_disk;
+ goto out_buffer_free;
}
kref_init(&tpnt->kref);
- tpnt->disk = disk;
- disk->private_data = &tpnt->driver;
- /* SCSI tape doesn't register this gendisk via add_disk(). Manually
- * take queue reference that release_disk() expects. */
- if (!blk_get_queue(SDp->request_queue))
- goto out_put_disk;
- disk->queue = SDp->request_queue;
- tpnt->driver = &st_template;
tpnt->device = SDp;
if (SDp->scsi_level <= 2)
@@ -4342,7 +4292,6 @@ static int st_probe(struct device *dev)
tpnt->dirty = 0;
tpnt->in_use = 0;
tpnt->drv_buffer = 1; /* Try buffering if no mode sense */
- tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
tpnt->use_pf = (SDp->scsi_level >= SCSI_2);
tpnt->density = 0;
tpnt->do_auto_lock = ST_AUTO_LOCK;
@@ -4360,7 +4309,7 @@ static int st_probe(struct device *dev)
tpnt->nbr_partitions = 0;
blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
tpnt->long_timeout = ST_LONG_TIMEOUT;
- tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
+ tpnt->try_dio = try_direct_io;
for (i = 0; i < ST_NBR_MODES; i++) {
STm = &(tpnt->modes[i]);
@@ -4400,10 +4349,10 @@ static int st_probe(struct device *dev)
idr_preload_end();
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
- goto out_put_queue;
+ goto out_free_tape;
}
tpnt->index = error;
- sprintf(disk->disk_name, "st%d", tpnt->index);
+ sprintf(tpnt->name, "st%d", tpnt->index);
tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL);
if (tpnt->stats == NULL) {
sdev_printk(KERN_ERR, SDp,
@@ -4420,9 +4369,9 @@ static int st_probe(struct device *dev)
scsi_autopm_put_device(SDp);
sdev_printk(KERN_NOTICE, SDp,
- "Attached scsi tape %s\n", tape_name(tpnt));
+ "Attached scsi tape %s\n", tpnt->name);
sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
- tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
+ tpnt->name, tpnt->try_dio ? "yes" : "no",
queue_dma_alignment(SDp->request_queue) + 1);
return 0;
@@ -4434,10 +4383,7 @@ out_idr_remove:
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
-out_put_queue:
- blk_put_queue(disk->queue);
-out_put_disk:
- put_disk(disk);
+out_free_tape:
kfree(tpnt);
out_buffer_free:
kfree(buffer);
@@ -4476,7 +4422,6 @@ static int st_remove(struct device *dev)
static void scsi_tape_release(struct kref *kref)
{
struct scsi_tape *tpnt = to_scsi_tape(kref);
- struct gendisk *disk = tpnt->disk;
tpnt->device = NULL;
@@ -4486,8 +4431,6 @@ static void scsi_tape_release(struct kref *kref)
kfree(tpnt->buffer);
}
- disk->private_data = NULL;
- put_disk(disk);
kfree(tpnt->stats);
kfree(tpnt);
return;
@@ -4922,7 +4865,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
- int res, i, j;
+ int res, i;
struct page **pages;
struct rq_map_data *mdata = &STbp->map_data;
@@ -4944,7 +4887,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */
- res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+ res = pin_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
pages);
/* Errors and no page mapped should return here */
@@ -4964,8 +4907,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
return nr_pages;
out_unmap:
if (res > 0) {
- for (j=0; j < res; j++)
- put_page(pages[j]);
+ unpin_user_pages(pages, res);
res = 0;
}
kfree(pages);
@@ -4977,18 +4919,9 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
static int sgl_unmap_user_pages(struct st_buffer *STbp,
const unsigned int nr_pages, int dirtied)
{
- int i;
-
- for (i=0; i < nr_pages; i++) {
- struct page *page = STbp->mapped_pages[i];
+ /* FIXME: cache flush missing for rw==READ */
+ unpin_user_pages_dirty_lock(STbp->mapped_pages, nr_pages, dirtied);
- if (dirtied)
- SetPageDirty(page);
- /* FIXME: cache flush missing for rw==READ
- * FIXME: call the correct reference counting function
- */
- put_page(page);
- }
kfree(STbp->mapped_pages);
STbp->mapped_pages = NULL;
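
Most of the st.c diff is the same mechanical modernization seen in sr.c (the fallthrough pseudo-keyword, scsi_alloc_request()/blk_mq_free_request(), pin_user_pages_fast() with unpin_user_pages_dirty_lock() replacing bare get_user_pages_fast()/put_page() loops), but the deb_space_print() hunk is worth a second look: four lines of manual sign handling collapse into sign_extend32(get_unaligned_be24(&cmd[2]), 23). A userspace sketch of what those two helpers compute — simplified stand-ins that assume arithmetic right shift, as the kernel does:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t get_unaligned_be24(const uint8_t *p)
{
        return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

static inline int32_t sign_extend32(uint32_t value, int index)
{
        int shift = 31 - index; /* index = position of the sign bit */

        return (int32_t)(value << shift) >> shift;
}

int main(void)
{
        /* SPACE CDB with a 24-bit two's-complement count of -2 */
        uint8_t cmd[6] = { 0x11, 0x01, 0xff, 0xff, 0xfe, 0x00 };
        int32_t sc = sign_extend32(get_unaligned_be24(&cmd[2]), 23);

        printf("signed 24-bit count: %d\n", sc);        /* prints -2 */
        return 0;
}
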
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 95d2e7a7988d..7a68eaba7e81 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -35,7 +35,6 @@ struct st_request {
/* The tape buffer descriptor. */
struct st_buffer {
- unsigned char dma; /* DMA-able buffer */
unsigned char cleared; /* internal buffer cleared after open? */
unsigned short do_dio; /* direct i/o set up? */
int buffer_size;
@@ -118,7 +117,6 @@ struct scsi_tape_stats {
/* The tape drive descriptor */
struct scsi_tape {
- struct scsi_driver *driver;
struct scsi_device *device;
struct mutex lock; /* For serialization */
struct completion wait; /* For SCSI commands */
@@ -133,7 +131,6 @@ struct scsi_tape {
unsigned char two_fm;
unsigned char fast_mteom;
unsigned char immediate;
- unsigned char restr_dma;
unsigned char scsi2_logical;
unsigned char default_drvbuffer; /* 0xff = don't touch, value 3 bits */
unsigned char cln_mode; /* 0 = none, otherwise sense byte nbr */
@@ -189,7 +186,7 @@ struct scsi_tape {
unsigned char last_cmnd[6];
unsigned char last_sense[16];
#endif
- struct gendisk *disk;
+ char name[DISK_NAME_LEN];
struct kref kref;
struct scsi_tape_stats *stats;
};
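
The st.h side shows why so much of st.c simplified: struct scsi_tape had been carrying a whole gendisk only to borrow its disk_name[] for messages and sysfs naming — the tape driver never registers a block device — so a plain embedded name buffer replaces it, and the driver and restr_dma fields go with the scsi_driver-backpointer and ISA-DMA removals. A trivial sketch of the resulting naming, assuming the kernel's DISK_NAME_LEN of 32:

#include <stdio.h>

#define DISK_NAME_LEN 32        /* assumption: matches the kernel's value */

struct scsi_tape {              /* reduced, illustrative layout */
        char name[DISK_NAME_LEN];       /* was: struct gendisk *disk */
        int index;
};

int main(void)
{
        struct scsi_tape t = { .index = 0 };

        snprintf(t.name, sizeof(t.name), "st%d", t.index);
        printf("Attached scsi tape %s\n", t.name);
        return 0;
}
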
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 33287b6bdf0e..8def242675ef 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -236,7 +236,7 @@ struct req_msg {
u8 data_dir;
u8 payload_sz; /* payload size in 4-byte, not used */
u8 cdb[STEX_CDB_LENGTH];
- u32 variable[0];
+ u32 variable[];
};
struct status_msg {
@@ -398,11 +398,8 @@ static struct status_msg *stex_get_status(struct st_hba *hba)
static void stex_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
/* "Invalid field in cdb" */
- scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
- 0x0);
+ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
done(cmd);
}
@@ -543,7 +540,7 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
msg_h = (struct st_msg_header *)req - 1;
if (likely(cmd)) {
msg_h->channel = (u8)cmd->device->channel;
- msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
}
addr = hba->dma_handle + hba->req_head * hba->rq_size;
addr += (hba->ccb[tag].sg_count+4)/11;
@@ -577,7 +574,7 @@ static void return_abnormal_state(struct st_hba *hba, int status)
if (ccb->cmd) {
scsi_dma_unmap(ccb->cmd);
ccb->cmd->result = status << 16;
- ccb->cmd->scsi_done(ccb->cmd);
+ scsi_done(ccb->cmd);
ccb->cmd = NULL;
}
}
@@ -593,9 +590,9 @@ stex_slave_config(struct scsi_device *sdev)
return 0;
}
-static int
-stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ void (*done)(struct scsi_cmnd *) = scsi_done;
struct st_hba *hba;
struct Scsi_Host *host;
unsigned int id, lun;
@@ -625,7 +622,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (page == 0x8 || page == 0x3f) {
scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
sizeof(ms10_caching_page));
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ cmd->result = DID_OK << 16;
done(cmd);
} else
stex_invalid_field(cmd, done);
@@ -644,7 +641,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
break;
case TEST_UNIT_READY:
if (id == host->max_id - 1) {
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ cmd->result = DID_OK << 16;
done(cmd);
return 0;
}
@@ -661,37 +658,38 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
sizeof(console_inq_page));
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ cmd->result = DID_OK << 16;
done(cmd);
} else
stex_invalid_field(cmd, done);
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
- struct st_drvver ver;
+ const struct st_drvver ver = {
+ .major = ST_VER_MAJOR,
+ .minor = ST_VER_MINOR,
+ .oem = ST_OEM,
+ .build = ST_BUILD_VER,
+ .signature[0] = PASSTHRU_SIGNATURE,
+ .console_id = host->max_id - 1,
+ .host_no = hba->host->host_no,
+ };
size_t cp_len = sizeof(ver);
- ver.major = ST_VER_MAJOR;
- ver.minor = ST_VER_MINOR;
- ver.oem = ST_OEM;
- ver.build = ST_BUILD_VER;
- ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = host->max_id - 1;
- ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
- cmd->result = sizeof(ver) == cp_len ?
- DID_OK << 16 | COMMAND_COMPLETE << 8 :
- DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ if (sizeof(ver) == cp_len)
+ cmd->result = DID_OK << 16;
+ else
+ cmd->result = DID_ERROR << 16;
done(cmd);
return 0;
}
+ break;
default:
break;
}
- cmd->scsi_done = done;
-
- tag = cmd->request->tag;
+ tag = scsi_cmd_to_rq(cmd)->tag;
if (unlikely(tag >= host->can_queue))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -735,37 +733,37 @@ static void stex_scsi_done(struct st_ccb *ccb)
result = ccb->scsi_status;
switch (ccb->scsi_status) {
case SAM_STAT_GOOD:
- result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ result |= DID_OK << 16;
break;
case SAM_STAT_CHECK_CONDITION:
- result |= DRIVER_SENSE << 24;
+ result |= DID_OK << 16;
break;
case SAM_STAT_BUSY:
- result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ result |= DID_BUS_BUSY << 16;
break;
default:
- result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ result |= DID_ERROR << 16;
break;
}
}
else if (ccb->srb_status & SRB_SEE_SENSE)
- result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
+ result = SAM_STAT_CHECK_CONDITION;
else switch (ccb->srb_status) {
case SRB_STATUS_SELECTION_TIMEOUT:
- result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ result = DID_NO_CONNECT << 16;
break;
case SRB_STATUS_BUSY:
- result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ result = DID_BUS_BUSY << 16;
break;
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_ERROR:
default:
- result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ result = DID_ERROR << 16;
break;
}
cmd->result = result;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
static void stex_copy_data(struct st_ccb *ccb,
@@ -1247,7 +1245,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct st_hba *hba = (struct st_hba *)host->hostdata;
- u16 tag = cmd->request->tag;
+ u16 tag = scsi_cmd_to_rq(cmd)->tag;
void __iomem *base;
u32 data;
int result = SUCCESS;
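
A pattern runs through all the stex.c hunks: DRIVER_SENSE and COMMAND_COMPLETE are dropped from cmd->result, and scmd->scsi_done(scmd) becomes scsi_done(scmd). The midlayer now derives sense validity from the sense buffer itself and ignores the message byte, so only the host byte and SCSI status byte remain meaningful. A small sketch of composing a result in this style (assumes a live scsi_cmnd; the helper name is illustrative):

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    static void demo_complete(struct scsi_cmnd *scmd, bool ok)
    {
            /* Host byte in bits 16-23, SCSI status byte in bits 0-7;
             * no driver byte (24-31) or message byte (8-15) anymore. */
            scmd->result = ok ? DID_OK << 16 : DID_ERROR << 16;
            scsi_done(scmd);    /* replaces scmd->scsi_done(scmd) */
    }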
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fb41636519ee..bc46721aa01c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -21,6 +21,8 @@
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -52,13 +54,15 @@
#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
(((MINOR_) & 0xff)))
-
#define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
#define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
#define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
+/* channel callback timeout in ms */
+#define CALLBACK_TIMEOUT 2
+
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
VSTOR_OPERATION_COMPLETE_IO = 1,
@@ -134,21 +138,11 @@ struct hv_fc_wwn_packet {
*/
#define STORVSC_MAX_CMD_LEN 0x10
-#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
-#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
-
+/* Sense buffer size is the same for all versions since Windows 8 */
#define STORVSC_SENSE_BUFFER_SIZE 0x14
#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
/*
- * Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use. This value will
- * likely change during protocol negotiation but it is valid
- * to start by assuming pre-Win8.
- */
-static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
-
-/*
* The storage protocol version is determined during the
* initial exchange with the host. It will indicate which
* storage functionality is available in the host.
@@ -175,18 +169,6 @@ do { \
dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \
} while (0)
-struct vmscsi_win8_extension {
- /*
- * The following were added in Windows 8
- */
- u16 reserve;
- u8 queue_tag;
- u8 queue_action;
- u32 srb_flags;
- u32 time_out_value;
- u32 queue_sort_ey;
-} __packed;
-
struct vmscsi_request {
u16 length;
u8 srb_status;
@@ -212,58 +194,23 @@ struct vmscsi_request {
/*
* The following was added in win8.
*/
- struct vmscsi_win8_extension win8_extension;
+ u16 reserve;
+ u8 queue_tag;
+ u8 queue_action;
+ u32 srb_flags;
+ u32 time_out_value;
+ u32 queue_sort_ey;
} __attribute((packed));
-
/*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply. This value
- * will likely change during protocol negotiation but it is
- * valid to start by assuming pre-Win8.
+ * The list of Windows versions, in order of preference.
*/
-static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
-/*
- * The list of storage protocols in order of preference.
- */
-struct vmstor_protocol {
- int protocol_version;
- int sense_buffer_size;
- int vmscsi_size_delta;
-};
-
-
-static const struct vmstor_protocol vmstor_protocols[] = {
- {
+static const int protocol_version[] = {
VMSTOR_PROTO_VERSION_WIN10,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
VMSTOR_PROTO_VERSION_WIN8_1,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
VMSTOR_PROTO_VERSION_WIN8,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
- VMSTOR_PROTO_VERSION_WIN7,
- PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
- sizeof(struct vmscsi_win8_extension),
- },
- {
- VMSTOR_PROTO_VERSION_WIN6,
- PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
- sizeof(struct vmscsi_win8_extension),
- }
};
@@ -378,10 +325,14 @@ static u32 max_outstanding_req_per_channel;
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
static int storvsc_vcpus_per_sub_channel = 4;
+static unsigned int storvsc_max_hw_queues;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
+module_param(storvsc_max_hw_queues, uint, 0644);
+MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");
+
module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
@@ -399,6 +350,7 @@ static int storvsc_timeout = 180;
static struct scsi_transport_template *fc_transport_template;
#endif
+static struct scsi_host_template scsi_driver;
static void storvsc_on_channel_callback(void *context);
#define STORVSC_MAX_LUNS_PER_TARGET 255
@@ -413,6 +365,12 @@ static void storvsc_on_channel_callback(void *context);
#define STORVSC_IDE_MAX_TARGETS 1
#define STORVSC_IDE_MAX_CHANNELS 1
+/*
+ * Upper bound on the size of a storvsc packet.
+ */
+#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\
+ sizeof(struct vstor_packet))
+
struct storvsc_cmd_request {
struct scsi_cmnd *cmd;
@@ -462,6 +420,11 @@ struct storvsc_device {
* Mask of CPUs bound to subchannels.
*/
struct cpumask alloced_cpus;
+ /*
+ * Serializes modifications of stor_chns[] from storvsc_do_io()
+ * and storvsc_change_target_cpu().
+ */
+ spinlock_t lock;
/* Used for vsc/vsp channel reset process */
struct storvsc_cmd_request init_request;
struct storvsc_cmd_request reset_request;
@@ -519,7 +482,7 @@ static void storvsc_host_scan(struct work_struct *work)
host = host_device->host;
/*
* Before scanning the host, first check to see if any of the
- * currrently known devices have been hot removed. We issue a
+ * currently known devices have been hot removed. We issue a
* "unit ready" command against all currently known devices.
* This I/O will result in an error for devices that have been
* removed. As part of handling the I/O error, we remove the device.
@@ -621,6 +584,81 @@ get_in_err:
}
+static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old,
+ u32 new)
+{
+ struct storvsc_device *stor_device;
+ struct vmbus_channel *cur_chn;
+ bool old_is_alloced = false;
+ struct hv_device *device;
+ unsigned long flags;
+ int cpu;
+
+ device = channel->primary_channel ?
+ channel->primary_channel->device_obj
+ : channel->device_obj;
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return;
+
+ /* See storvsc_do_io() -> get_og_chn(). */
+ spin_lock_irqsave(&stor_device->lock, flags);
+
+ /*
+ * Determine whether the storvsc device has other channels assigned
+ * to the "old" CPU, so that the alloced_cpus mask and the stor_chns
+ * array can be updated accordingly.
+ */
+ if (device->channel != channel && device->channel->target_cpu == old) {
+ cur_chn = device->channel;
+ old_is_alloced = true;
+ goto old_is_alloced;
+ }
+ list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) {
+ if (cur_chn == channel)
+ continue;
+ if (cur_chn->target_cpu == old) {
+ old_is_alloced = true;
+ goto old_is_alloced;
+ }
+ }
+
+old_is_alloced:
+ if (old_is_alloced)
+ WRITE_ONCE(stor_device->stor_chns[old], cur_chn);
+ else
+ cpumask_clear_cpu(old, &stor_device->alloced_cpus);
+
+ /* "Flush" the stor_chns array. */
+ for_each_possible_cpu(cpu) {
+ if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
+ cpu, &stor_device->alloced_cpus))
+ WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
+ }
+
+ WRITE_ONCE(stor_device->stor_chns[new], channel);
+ cpumask_set_cpu(new, &stor_device->alloced_cpus);
+
+ spin_unlock_irqrestore(&stor_device->lock, flags);
+}
+
+static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
+{
+ struct storvsc_cmd_request *request =
+ (struct storvsc_cmd_request *)(unsigned long)rqst_addr;
+
+ if (rqst_addr == VMBUS_RQST_INIT)
+ return VMBUS_RQST_INIT;
+ if (rqst_addr == VMBUS_RQST_RESET)
+ return VMBUS_RQST_RESET;
+
+ /*
+ * Cannot return an ID of 0, which is reserved for an unsolicited
+ * message from Hyper-V.
+ */
+ return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
+}
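
storvsc_next_request_id() above encodes the block-layer tag plus one as the VMBus transaction ID, keeping 0 reserved for unsolicited host messages; the completion path later inverts this with scsi_host_find_tag(shost, rqst_id - 1). A sketch of that round trip (hedged; assumes valid shost and scmd):

    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    /* blk_mq_unique_tag() packs (hw queue index << 16) | per-queue tag,
     * so the value is unique across all hardware queues of the host. */
    static u64 demo_encode_id(struct scsi_cmnd *scmd)
    {
            return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(scmd)) + 1;
    }

    static struct scsi_cmnd *demo_decode_id(struct Scsi_Host *shost, u64 id)
    {
            if (id == 0)
                    return NULL;    /* reserved for unsolicited messages */
            return scsi_host_find_tag(shost, id - 1);
    }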
+
static void handle_sc_creation(struct vmbus_channel *new_sc)
{
struct hv_device *device = new_sc->primary_channel->device_obj;
@@ -634,6 +672,9 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
return;
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+ new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE;
+
+ new_sc->next_request_id_callback = storvsc_next_request_id;
ret = vmbus_open(new_sc,
storvsc_ringbuffer_size,
@@ -648,6 +689,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
return;
}
+ new_sc->change_target_cpu_callback = storvsc_change_target_cpu;
+
/* Add the sub-channel to the array of available channels. */
stor_device->stor_chns[new_sc->target_cpu] = new_sc;
cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
@@ -696,9 +739,8 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
vstor_packet->sub_channel_count = num_sc;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta),
- (unsigned long)request,
+ sizeof(struct vstor_packet),
+ VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -751,18 +793,22 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
struct storvsc_cmd_request *request,
bool status_check)
{
+ struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
int ret, t;
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return -ENODEV;
+
vstor_packet = &request->vstor_packet;
init_completion(&request->wait_event);
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta),
- (unsigned long)request,
+ sizeof(struct vstor_packet),
+ VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
@@ -811,14 +857,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
* Query host supported protocol version.
*/
- for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+ for (i = 0; i < ARRAY_SIZE(protocol_version); i++) {
/* reuse the packet for version range supported */
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation =
VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->version.major_minor =
- vmstor_protocols[i].protocol_version;
+ vstor_packet->version.major_minor = protocol_version[i];
/*
* The revision number is only used in Windows; set it to 0.
@@ -832,21 +877,16 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
return -EINVAL;
if (vstor_packet->status == 0) {
- vmstor_proto_version =
- vmstor_protocols[i].protocol_version;
-
- sense_buffer_size =
- vmstor_protocols[i].sense_buffer_size;
-
- vmscsi_size_delta =
- vmstor_protocols[i].vmscsi_size_delta;
+ vmstor_proto_version = protocol_version[i];
break;
}
}
- if (vstor_packet->status != 0)
+ if (vstor_packet->status != 0) {
+ dev_err(&device->device, "Obsolete Hyper-V version\n");
return -EINVAL;
+ }
memset(vstor_packet, 0, sizeof(struct vstor_packet));
@@ -876,15 +916,16 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
if (stor_device->stor_chns == NULL)
return -ENOMEM;
+ device->channel->change_target_cpu_callback = storvsc_change_target_cpu;
+
stor_device->stor_chns[device->channel->target_cpu] = device->channel;
cpumask_set_cpu(device->channel->target_cpu,
&stor_device->alloced_cpus);
- if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
- if (vstor_packet->storage_channel_properties.flags &
- STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
- process_sub_channels = true;
- }
+ if (vstor_packet->storage_channel_properties.flags &
+ STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
+ process_sub_channels = true;
+
stor_device->max_transfer_bytes =
vstor_packet->storage_channel_properties.max_transfer_bytes;
@@ -927,17 +968,40 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
struct storvsc_scan_work *wrk;
void (*process_err_fn)(struct work_struct *work);
struct hv_host_device *host_dev = shost_priv(host);
- bool do_work = false;
- switch (SRB_STATUS(vm_srb->srb_status)) {
- case SRB_STATUS_ERROR:
+ /*
+ * In some situations, Hyper-V sets multiple bits in the
+ * srb_status, such as ABORTED and ERROR. So process them
+ * individually, with the most specific bits first.
+ */
+
+ if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
+ set_host_byte(scmnd, DID_NO_CONNECT);
+ process_err_fn = storvsc_remove_lun;
+ goto do_work;
+ }
+
+ if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ /* Capacity data has changed */
+ (asc == 0x2a) && (ascq == 0x9)) {
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
+ }
+
+ if (vm_srb->srb_status & SRB_STATUS_ERROR) {
/*
* Let upper layer deal with error when
* sense message is present.
*/
-
if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
- break;
+ return;
+
/*
* If there is an error; offline the device since all
* error recovery strategies would have already been
@@ -950,43 +1014,25 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
set_host_byte(scmnd, DID_PASSTHROUGH);
break;
/*
- * On Some Windows hosts TEST_UNIT_READY command can return
- * SRB_STATUS_ERROR, let the upper level code deal with it
- * based on the sense information.
+ * On some Hyper-V hosts the TEST_UNIT_READY command can
+ * return SRB_STATUS_ERROR. Let the upper level code
+ * deal with it based on the sense information.
*/
case TEST_UNIT_READY:
break;
default:
set_host_byte(scmnd, DID_ERROR);
}
- break;
- case SRB_STATUS_INVALID_LUN:
- set_host_byte(scmnd, DID_NO_CONNECT);
- do_work = true;
- process_err_fn = storvsc_remove_lun;
- break;
- case SRB_STATUS_ABORTED:
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
- (asc == 0x2a) && (ascq == 0x9)) {
- do_work = true;
- process_err_fn = storvsc_device_scan;
- /*
- * Retry the I/O that trigerred this.
- */
- set_host_byte(scmnd, DID_REQUEUE);
- }
- break;
}
+ return;
- if (!do_work)
- return;
-
+do_work:
/*
* We need to schedule work to process this error; schedule it.
*/
wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
if (!wrk) {
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_BAD_TARGET);
return;
}
@@ -1008,6 +1054,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
struct Scsi_Host *host;
u32 payload_sz = cmd_request->payload_sz;
void *payload = cmd_request->payload;
+ bool sense_ok;
host = stor_dev->host;
@@ -1017,11 +1064,10 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
scmnd->result = vm_srb->scsi_status;
if (scmnd->result) {
- if (scsi_normalize_sense(scmnd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr) &&
- !(sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x03A) &&
- do_logging(STORVSC_LOGGING_ERROR))
+ sense_ok = scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+
+ if (sense_ok && do_logging(STORVSC_LOGGING_WARN))
scsi_print_sense_hdr(scmnd->device, "storvsc",
&sense_hdr);
}
@@ -1038,10 +1084,14 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
data_transfer_length = 0;
}
+ /* Validate data_transfer_length (from Hyper-V) */
+ if (data_transfer_length > cmd_request->payload->range.len)
+ data_transfer_length = cmd_request->payload->range.len;
+
scsi_set_resid(scmnd,
cmd_request->payload->range.len - data_transfer_length);
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
if (payload_sz >
sizeof(struct vmbus_channel_packet_multipage_buffer))
@@ -1074,48 +1124,51 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
}
-
/* Copy over the status...etc */
stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
- stor_pkt->vm_srb.sense_info_length =
- vstor_packet->vm_srb.sense_info_length;
-
- if (vstor_packet->vm_srb.scsi_status != 0 ||
- vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
- storvsc_log(device, STORVSC_LOGGING_WARN,
- "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
- stor_pkt->vm_srb.cdb[0],
- vstor_packet->vm_srb.scsi_status,
- vstor_packet->vm_srb.srb_status);
- if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
- /* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status &
- SRB_STATUS_AUTOSENSE_VALID) {
- /* autosense data available */
+ /*
+ * Copy over the sense_info_length, but limit to the known max
+ * size if Hyper-V returns a bad value.
+ */
+ stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE,
+ vstor_packet->vm_srb.sense_info_length);
- storvsc_log(device, STORVSC_LOGGING_WARN,
- "stor pkt %p autosense data valid - len %d\n",
- request, vstor_packet->vm_srb.sense_info_length);
+ if (vstor_packet->vm_srb.scsi_status != 0 ||
+ vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
- memcpy(request->cmd->sense_buffer,
- vstor_packet->vm_srb.sense_data,
- vstor_packet->vm_srb.sense_info_length);
+ /*
+ * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
+ * return errors when detecting devices using TEST_UNIT_READY,
+ * and logging these as errors produces unhelpful noise.
+ */
+ int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+ STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
- }
+ storvsc_log(device, loglevel,
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ scsi_cmd_to_rq(request->cmd)->tag,
+ stor_pkt->vm_srb.cdb[0],
+ vstor_packet->vm_srb.scsi_status,
+ vstor_packet->vm_srb.srb_status,
+ vstor_packet->status);
}
+ if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
+ (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
+ memcpy(request->cmd->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ stor_pkt->vm_srb.sense_info_length);
+
stor_pkt->vm_srb.data_transfer_length =
- vstor_packet->vm_srb.data_transfer_length;
+ vstor_packet->vm_srb.data_transfer_length;
storvsc_command_completion(request, stor_device);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
wake_up(&stor_device->waiting_to_drain);
-
-
}
static void storvsc_on_receive(struct storvsc_device *stor_device,
@@ -1153,6 +1206,8 @@ static void storvsc_on_channel_callback(void *context)
const struct vmpacket_descriptor *desc;
struct hv_device *device;
struct storvsc_device *stor_device;
+ struct Scsi_Host *shost;
+ unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1163,21 +1218,77 @@ static void storvsc_on_channel_callback(void *context)
if (!stor_device)
return;
+ shost = stor_device->host;
+
foreach_vmbus_pkt(desc, channel) {
- void *packet = hv_pkt_data(desc);
- struct storvsc_cmd_request *request;
+ struct vstor_packet *packet = hv_pkt_data(desc);
+ struct storvsc_cmd_request *request = NULL;
+ u32 pktlen = hv_pkt_datalen(desc);
+ u64 rqst_id = desc->trans_id;
+ u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
+ sizeof(enum vstor_packet_operation);
+
+ if (unlikely(time_after(jiffies, time_limit))) {
+ hv_pkt_iter_close(channel);
+ return;
+ }
- request = (struct storvsc_cmd_request *)
- ((unsigned long)desc->trans_id);
+ if (pktlen < minlen) {
+ dev_err(&device->device,
+ "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
+ rqst_id, pktlen, minlen);
+ continue;
+ }
- if (request == &stor_device->init_request ||
- request == &stor_device->reset_request) {
- memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) - vmscsi_size_delta));
- complete(&request->wait_event);
+ if (rqst_id == VMBUS_RQST_INIT) {
+ request = &stor_device->init_request;
+ } else if (rqst_id == VMBUS_RQST_RESET) {
+ request = &stor_device->reset_request;
} else {
+ /* Hyper-V can send an unsolicited message with ID of 0 */
+ if (rqst_id == 0) {
+ /*
+ * storvsc_on_receive() looks at the vstor_packet in the message
+ * from the ring buffer.
+ *
+ * - If the operation in the vstor_packet is COMPLETE_IO, then
+ * we call storvsc_on_io_completion(), and dereference the
+ * guest memory address. Make sure we don't call
+ * storvsc_on_io_completion() with a guest memory address
+ * that is zero if Hyper-V were to construct and send such
+ * a bogus packet.
+ *
+ * - If the operation in the vstor_packet is FCHBA_DATA, then
+ * we call cache_wwn(), and access the data payload area of
+ * the packet (wwn_packet); however, there is no guarantee
+ * that the packet is big enough to contain such area.
+ * Future-proof the code by rejecting such a bogus packet.
+ */
+ if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
+ packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
+ dev_err(&device->device, "Invalid packet with ID of 0\n");
+ continue;
+ }
+ } else {
+ struct scsi_cmnd *scmnd;
+
+ /* Transaction 'rqst_id' corresponds to tag 'rqst_id - 1' */
+ scmnd = scsi_host_find_tag(shost, rqst_id - 1);
+ if (scmnd == NULL) {
+ dev_err(&device->device, "Incorrect transaction ID\n");
+ continue;
+ }
+ request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
+ scsi_dma_unmap(scmnd);
+ }
+
storvsc_on_receive(stor_device, packet, request);
+ continue;
}
+
+ memcpy(&request->vstor_packet, packet,
+ sizeof(struct vstor_packet));
+ complete(&request->wait_event);
}
}
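
The reworked channel callback above bounds its own runtime: it records a jiffies deadline on entry and, once the CALLBACK_TIMEOUT budget is spent, closes the packet iterator and returns so the channel can be rescheduled. The same deadline idiom in isolation (the work functions are placeholders, not storvsc code):

    #include <linux/jiffies.h>

    extern bool demo_have_work(void);           /* placeholder */
    extern void demo_handle_one_item(void);     /* placeholder */

    static void demo_bounded_loop(void)
    {
            unsigned long time_limit = jiffies + msecs_to_jiffies(2);

            while (demo_have_work()) {
                    if (time_after(jiffies, time_limit))
                            break;              /* yield, resume later */
                    demo_handle_one_item();
            }
    }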
@@ -1189,6 +1300,9 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+ device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE;
+ device->channel->next_request_id_callback = storvsc_next_request_id;
+
ret = vmbus_open(device->channel,
ring_size,
ring_size,
@@ -1248,8 +1362,10 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
const struct cpumask *node_mask;
int num_channels, tgt_cpu;
- if (stor_device->num_sc == 0)
+ if (stor_device->num_sc == 0) {
+ stor_device->stor_chns[q_num] = stor_device->device->channel;
return stor_device->device->channel;
+ }
/*
* Our channel array is sparsely populated and we
@@ -1258,7 +1374,6 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
* The strategy is simple:
* I. Ensure NUMA locality
* II. Distribute evenly (best effort)
- * III. Mapping is persistent.
*/
node_mask = cpumask_of_node(cpu_to_node(q_num));
@@ -1268,8 +1383,10 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
if (cpumask_test_cpu(tgt_cpu, node_mask))
num_channels++;
}
- if (num_channels == 0)
+ if (num_channels == 0) {
+ stor_device->stor_chns[q_num] = stor_device->device->channel;
return stor_device->device->channel;
+ }
hash_qnum = q_num;
while (hash_qnum >= num_channels)
@@ -1295,6 +1412,7 @@ static int storvsc_do_io(struct hv_device *device,
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
struct vmbus_channel *outgoing_channel, *channel;
+ unsigned long flags;
int ret = 0;
const struct cpumask *node_mask;
int tgt_cpu;
@@ -1308,10 +1426,11 @@ static int storvsc_do_io(struct hv_device *device,
request->device = device;
/*
- * Select an an appropriate channel to send the request out.
+ * Select an appropriate channel to send the request out.
*/
- if (stor_device->stor_chns[q_num] != NULL) {
- outgoing_channel = stor_device->stor_chns[q_num];
+ /* See storvsc_change_target_cpu(). */
+ outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
+ if (outgoing_channel != NULL) {
if (outgoing_channel->target_cpu == q_num) {
/*
* Ideally, we want to pick a different channel if
@@ -1324,7 +1443,10 @@ static int storvsc_do_io(struct hv_device *device,
continue;
if (tgt_cpu == q_num)
continue;
- channel = stor_device->stor_chns[tgt_cpu];
+ channel = READ_ONCE(
+ stor_device->stor_chns[tgt_cpu]);
+ if (channel == NULL)
+ continue;
if (hv_get_avail_to_write_percent(
&channel->outbound)
> ring_avail_percent_lowater) {
@@ -1350,7 +1472,10 @@ static int storvsc_do_io(struct hv_device *device,
for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
if (cpumask_test_cpu(tgt_cpu, node_mask))
continue;
- channel = stor_device->stor_chns[tgt_cpu];
+ channel = READ_ONCE(
+ stor_device->stor_chns[tgt_cpu]);
+ if (channel == NULL)
+ continue;
if (hv_get_avail_to_write_percent(
&channel->outbound)
> ring_avail_percent_lowater) {
@@ -1360,17 +1485,23 @@ static int storvsc_do_io(struct hv_device *device,
}
}
} else {
+ spin_lock_irqsave(&stor_device->lock, flags);
+ outgoing_channel = stor_device->stor_chns[q_num];
+ if (outgoing_channel != NULL) {
+ spin_unlock_irqrestore(&stor_device->lock, flags);
+ goto found_channel;
+ }
outgoing_channel = get_og_chn(stor_device, q_num);
+ spin_unlock_irqrestore(&stor_device->lock, flags);
}
found_channel:
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
- vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
- vmscsi_size_delta);
+ vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
- vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
+ vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
@@ -1383,13 +1514,11 @@ found_channel:
ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
request->payload, request->payload_sz,
vstor_packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ sizeof(struct vstor_packet),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1478,13 +1607,13 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
struct vstor_packet *vstor_packet;
int ret, t;
-
stor_device = get_out_stor_device(device);
if (!stor_device)
return FAILED;
request = &stor_device->reset_request;
vstor_packet = &request->vstor_packet;
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
init_completion(&request->wait_event);
@@ -1493,9 +1622,8 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
vstor_packet->vm_srb.path_id = stor_device->path_id;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta),
- (unsigned long)&stor_device->reset_request,
+ sizeof(struct vstor_packet),
+ VMBUS_RQST_RESET,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
@@ -1546,7 +1674,7 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
* this. So, don't send it.
*/
case SET_WINDOW:
- scmnd->result = ILLEGAL_REQUEST << 16;
+ set_host_byte(scmnd, DID_ERROR);
allowed = false;
break;
default:
@@ -1561,11 +1689,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
- int i;
struct scatterlist *sgl;
- unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
- struct scatterlist *cur_sgl;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
u32 length;
@@ -1580,7 +1705,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
* future versions of the host.
*/
if (!storvsc_scsi_cmd_ok(scmnd)) {
- scmnd->scsi_done(scmnd);
+ scsi_done(scmnd);
return 0;
}
}
@@ -1588,32 +1713,33 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
/* Setup the cmd request */
cmd_request->cmd = scmnd;
+ memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet));
vm_srb = &cmd_request->vstor_packet.vm_srb;
- vm_srb->win8_extension.time_out_value = 60;
+ vm_srb->time_out_value = 60;
- vm_srb->win8_extension.srb_flags |=
+ vm_srb->srb_flags |=
SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
if (scmnd->device->tagged_supported) {
- vm_srb->win8_extension.srb_flags |=
+ vm_srb->srb_flags |=
(SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
- vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
- vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
+ vm_srb->queue_tag = SP_UNTAGGED;
+ vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST;
}
/* Build the SRB */
switch (scmnd->sc_data_direction) {
case DMA_TO_DEVICE:
vm_srb->data_in = WRITE_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+ vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT;
break;
case DMA_FROM_DEVICE:
vm_srb->data_in = READ_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+ vm_srb->srb_flags |= SRB_FLAGS_DATA_IN;
break;
case DMA_NONE:
vm_srb->data_in = UNKNOWN_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+ vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
break;
default:
/*
@@ -1636,16 +1762,21 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
payload_sz = sizeof(cmd_request->mpb);
- if (sg_count) {
- if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+ if (scsi_sg_count(scmnd)) {
+ unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
+ unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
+ struct scatterlist *sg;
+ unsigned long hvpfn, hvpfns_to_add;
+ int j, i = 0, sg_count;
+
+ if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
- payload_sz = (sg_count * sizeof(u64) +
+ payload_sz = (hvpg_count * sizeof(u64) +
sizeof(struct vmbus_packet_mpb_array));
payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
@@ -1653,13 +1784,36 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
}
payload->range.len = length;
- payload->range.offset = sgl[0].offset;
+ payload->range.offset = offset_in_hvpg;
- cur_sgl = sgl;
- for (i = 0; i < sg_count; i++) {
- payload->range.pfn_array[i] =
- page_to_pfn(sg_page((cur_sgl)));
- cur_sgl = sg_next(cur_sgl);
+ sg_count = scsi_dma_map(scmnd);
+ if (sg_count < 0) {
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
+ }
+
+ for_each_sg(sgl, sg, sg_count, j) {
+ /*
+ * Init values for the current sgl entry. hvpfns_to_add
+ * is in units of Hyper-V size pages. Handling the
+ * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles
+ * values of sgl->offset that are larger than PAGE_SIZE.
+ * Such offsets are handled even on sgl entries other than
+ * the first, provided they are a multiple of PAGE_SIZE.
+ */
+ hvpfn = HVPFN_DOWN(sg_dma_address(sg));
+ hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
+ sg_dma_len(sg)) - hvpfn;
+
+ /*
+ * Fill the next portion of the PFN array with
+ * sequential Hyper-V PFNs for the contiguous physical
+ * memory described by the sgl entry. The end of the
+ * last sgl should be reached at the same time that
+ * the PFN array is filled.
+ */
+ while (hvpfns_to_add--)
+ payload->range.pfn_array[i++] = hvpfn++;
}
}
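
The new scatterlist walk converts each DMA-mapped segment into a run of Hyper-V page frame numbers: HVPFN_DOWN() of the start address yields the first frame, HVPFN_UP() of the end address bounds the run, and the inner loop emits sequential PFNs. The same arithmetic in standalone form, using illustrative 4 KiB-page macros in place of the HVPFN_* helpers:

    #include <linux/types.h>

    #define DEMO_PAGE_SHIFT  12
    #define DEMO_PAGE_SIZE   (1UL << DEMO_PAGE_SHIFT)
    #define DEMO_PFN_DOWN(x) ((x) >> DEMO_PAGE_SHIFT)
    #define DEMO_PFN_UP(x)   (((x) + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT)

    /* Fill pfns[] with the frames covering [addr, addr + len);
     * returns how many frames were written. */
    static unsigned int demo_fill_pfns(u64 addr, u32 len, u64 *pfns)
    {
            u64 pfn = DEMO_PFN_DOWN(addr);
            u64 end = DEMO_PFN_UP(addr + len);
            unsigned int i = 0;

            while (pfn < end)
                    pfns[i++] = pfn++;
            return i;
    }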
@@ -1671,13 +1825,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
put_cpu();
if (ret == -EAGAIN) {
- if (payload_sz > sizeof(cmd_request->mpb))
- kfree(payload);
/* no more space */
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
}
return 0;
+
+err_free_payload:
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
+
+ return ret;
}
static struct scsi_host_template scsi_driver = {
@@ -1693,10 +1852,8 @@ static struct scsi_host_template scsi_driver = {
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 2048,
.this_id = -1,
- /* Make sure we dont get a sg segment crosses a page boundary */
- .dma_boundary = PAGE_SIZE-1,
/* Ensure there are no gaps in presented sgls */
- .virt_boundary_mask = PAGE_SIZE-1,
+ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
.no_write_same = 1,
.track_queue_depth = 1,
.change_queue_depth = storvsc_change_queue_depth,
@@ -1739,40 +1896,24 @@ static int storvsc_probe(struct hv_device *device,
{
int ret;
int num_cpus = num_online_cpus();
+ int num_present_cpus = num_present_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
int target = 0;
struct storvsc_device *stor_device;
- int max_luns_per_target;
- int max_targets;
- int max_channels;
int max_sub_channels = 0;
+ u32 max_xfer_bytes;
/*
- * Based on the windows host we are running on,
- * set state to properly communicate with the host.
+ * We support sub-channels for storage on SCSI and FC controllers.
+ * The number of sub-channels offered is based on the number of
+ * VCPUs in the guest.
*/
-
- if (vmbus_proto_version < VERSION_WIN8) {
- max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
- max_targets = STORVSC_IDE_MAX_TARGETS;
- max_channels = STORVSC_IDE_MAX_CHANNELS;
- } else {
- max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
- max_targets = STORVSC_MAX_TARGETS;
- max_channels = STORVSC_MAX_CHANNELS;
- /*
- * On Windows8 and above, we support sub-channels for storage
- * on SCSI and FC controllers.
- * The number of sub-channels offerred is based on the number of
- * VCPUs in the guest.
- */
- if (!dev_is_ide)
- max_sub_channels =
- (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
- }
+ if (!dev_is_ide)
+ max_sub_channels =
+ (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
scsi_driver.can_queue = max_outstanding_req_per_channel *
(max_sub_channels + 1) *
@@ -1801,7 +1942,9 @@ static int storvsc_probe(struct hv_device *device,
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
stor_device->host = host;
+ spin_lock_init(&stor_device->lock);
hv_set_drvdata(device, stor_device);
+ dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
@@ -1822,9 +1965,9 @@ static int storvsc_probe(struct hv_device *device,
break;
case SCSI_GUID:
- host->max_lun = max_luns_per_target;
- host->max_id = max_targets;
- host->max_channel = max_channels - 1;
+ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_MAX_TARGETS;
+ host->max_channel = STORVSC_MAX_CHANNELS - 1;
break;
default:
@@ -1835,28 +1978,55 @@ static int storvsc_probe(struct hv_device *device,
}
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
-
/*
- * set the table size based on the info we got
- * from the host.
+ * Any reasonable Hyper-V configuration should provide a
+ * max_transfer_bytes value aligned to HV_HYP_PAGE_SIZE;
+ * round it down to protect against any weird value.
*/
- host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
+ /* max_hw_sectors_kb */
+ host->max_sectors = max_xfer_bytes >> 9;
+ /*
+ * There are 2 requirements for Hyper-V storvsc sgl segments,
+ * based on which the below calculation for max segments is
+ * done:
+ *
+ * 1. Except for the first and last sgl segment, all sgl segments
+ * should be aligned to HV_HYP_PAGE_SIZE, which also means the
+ * maximum number of segments in a sgl can be calculated by
+ * dividing the total max transfer length by HV_HYP_PAGE_SIZE.
+ *
+ * 2. Except for the first and last, each entry in the SGL must
+ * have an offset that is a multiple of HV_HYP_PAGE_SIZE.
+ */
+ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
/*
* For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
*/
- if (!dev_is_ide)
- host->nr_hw_queues = num_present_cpus();
+ if (!dev_is_ide) {
+ if (storvsc_max_hw_queues > num_present_cpus) {
+ storvsc_max_hw_queues = 0;
+ storvsc_log(device, STORVSC_LOGGING_WARN,
+ "Resetting invalid storvsc_max_hw_queues value to default.\n");
+ }
+ if (storvsc_max_hw_queues)
+ host->nr_hw_queues = storvsc_max_hw_queues;
+ else
+ host->nr_hw_queues = num_present_cpus;
+ }
/*
* Set the error handler work queue.
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
- if (!host_dev->handle_error_wq)
+ if (!host_dev->handle_error_wq) {
+ ret = -ENOMEM;
goto err_out2;
+ }
INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
@@ -1898,7 +2068,7 @@ err_out3:
err_out2:
/*
* Once we have connected with the host, we would need to
- * to invoke storvsc_dev_remove() to rollback this state and
+ * invoke storvsc_dev_remove() to rollback this state and
* this call also frees up the stor_device; hence the jump around
* err_out1 label.
*/
@@ -1955,9 +2125,6 @@ static int storvsc_suspend(struct hv_device *hv_dev)
vmbus_close(hv_dev->channel);
- memset(stor_device->stor_chns, 0,
- num_possible_cpus() * sizeof(void *));
-
kfree(stor_device->stor_chns);
stor_device->stor_chns = NULL;
@@ -2007,8 +2174,7 @@ static int __init storvsc_drv_init(void)
max_outstanding_req_per_channel =
((storvsc_ringbuffer_size - PAGE_SIZE) /
ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
- sizeof(struct vstor_packet) + sizeof(u64) -
- vmscsi_size_delta,
+ sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
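
With vmscsi_size_delta gone, the ring-capacity estimate in the hunk above simply divides the usable ring-buffer bytes by the aligned per-request footprint. Roughly, as a hedged sketch with the quantities named explicitly (parameter names are illustrative):

    #include <linux/kernel.h>

    /* ring_bytes: total ring buffer size; hdr_bytes: ring header
     * (one page); pkt_bytes: multipage-buffer packet + vstor packet. */
    static u32 demo_max_outstanding(u32 ring_bytes, u32 hdr_bytes,
                                    u32 pkt_bytes)
    {
            /* Each request also carries a trailing u64 index and is
             * rounded up to u64 alignment, mirroring the math above. */
            return (ring_bytes - hdr_bytes) /
                   ALIGN(pkt_bytes + sizeof(u64), sizeof(u64));
    }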
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 701b842296f0..abf229b847a1 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -334,9 +334,9 @@ static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int wanted_len = cmd->SCp.this_residual;
+ int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
- if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
+ if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
return 0;
return wanted_len;
@@ -366,8 +366,9 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
}
/* clean up after our dma is done */
-static int sun3scsi_dma_finish(int write_flag)
+static int sun3scsi_dma_finish(enum dma_data_direction data_dir)
{
+ const bool write_flag = data_dir == DMA_TO_DEVICE;
unsigned short __maybe_unused count;
unsigned short fifo;
int ret = 0;
@@ -397,12 +398,12 @@ static int sun3scsi_dma_finish(int write_flag)
case CSR_LEFT_3:
*vaddr = (dregs->bpack_lo & 0xff00) >> 8;
vaddr--;
- /* Fall through */
+ fallthrough;
case CSR_LEFT_2:
*vaddr = (dregs->bpack_hi & 0x00ff);
vaddr--;
- /* Fall through */
+ fallthrough;
case CSR_LEFT_1:
*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
@@ -504,7 +505,7 @@ static struct scsi_host_template sun3_scsi_template = {
.sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int __init sun3_scsi_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index f37df79e37e1..d3489ac7ab28 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!esp->command_block)
goto fail_unmap_regs_dma;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = err = platform_get_irq(dev, 0);
+ if (err < 0)
+ goto fail_unmap_command_block;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
"SUN3X ESP", esp);
if (err < 0)
@@ -270,22 +272,10 @@ static struct platform_driver esp_sun3x_driver = {
.name = "sun3x_esp",
},
};
-
-static int __init sun3x_esp_init(void)
-{
- return platform_driver_register(&esp_sun3x_driver);
-}
-
-static void __exit sun3x_esp_exit(void)
-{
- platform_driver_unregister(&esp_sun3x_driver);
-}
+module_platform_driver(esp_sun3x_driver);
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(sun3x_esp_init);
-module_exit(sun3x_esp_exit);
MODULE_ALIAS("platform:sun3x_esp");
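
module_platform_driver(), used above (and again in sun_esp.c below), generates exactly the boilerplate these hunks delete. For sun3x_esp the macro expands to roughly the following (simplified; the real expansion comes from the generic module_driver() helper):

    static int __init esp_sun3x_driver_init(void)
    {
            return platform_driver_register(&esp_sun3x_driver);
    }
    module_init(esp_sun3x_driver_init);

    static void __exit esp_sun3x_driver_exit(void)
    {
            platform_driver_unregister(&esp_sun3x_driver);
    }
    module_exit(esp_sun3x_driver_exit);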
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 964130d2c8a6..5dc38d35745b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -606,21 +606,9 @@ static struct platform_driver esp_sbus_driver = {
.probe = esp_sbus_probe,
.remove = esp_sbus_remove,
};
-
-static int __init sunesp_init(void)
-{
- return platform_driver_register(&esp_sbus_driver);
-}
-
-static void __exit sunesp_exit(void)
-{
- platform_driver_unregister(&esp_sbus_driver);
-}
+module_platform_driver(esp_sbus_driver);
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(sunesp_init);
-module_exit(sunesp_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index 6d7651a7847e..c536d2a9a657 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -369,7 +369,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
sym_name(np), (int) (cur-start));
++cur;
continue;
- };
+ }
/*
* We use the bogus value 0xf00ff00f ;-)
@@ -477,7 +477,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
default:
relocs = 0;
break;
- };
+ }
/*
* Scriptify:) the opcode.
@@ -523,7 +523,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
new = old;
break;
}
- /* fall through */
+ fallthrough;
default:
new = 0;
panic("sym_fw_bind_script: "
@@ -533,5 +533,5 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
*cur++ = cpu_to_scr(new);
}
- };
+ }
}
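
Two mechanical cleanups run through sym_fw.c: stray semicolons after closing braces are dropped, and /* fall through */ comments become the fallthrough pseudo-keyword, which the compiler can verify under -Wimplicit-fallthrough. A minimal, generic illustration:

    #include <linux/compiler_attributes.h>

    static int demo_classify(int op)
    {
            switch (op) {
            case 0:
                    /* intentionally shares the handling of case 1 */
                    fallthrough;
            case 1:
                    return 1;
            default:
                    return 0;
            }                       /* no semicolon after the brace */
    }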
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 2ca018ce796f..2e2852bd5860 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -118,7 +118,7 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
struct completion *eh_done; /* SCSI error handling */
};
-#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
+#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)scsi_cmd_priv(cmd))
#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
/*
@@ -127,13 +127,12 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
- BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
if (ucmd->eh_done)
complete(ucmd->eh_done);
scsi_dma_unmap(cmd);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/*
@@ -156,12 +155,8 @@ void sym_xpt_async_bus_reset(struct sym_hcb *np)
static int sym_xerr_cam_status(int cam_status, int x_status)
{
if (x_status) {
- if (x_status & XE_PARITY_ERR)
+ if (x_status & XE_PARITY_ERR)
cam_status = DID_PARITY;
- else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
- cam_status = DID_ERROR;
- else if (x_status & XE_BAD_PHASE)
- cam_status = DID_ERROR;
else
cam_status = DID_ERROR;
}
@@ -174,9 +169,8 @@ static int sym_xerr_cam_status(int cam_status, int x_status)
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
{
struct scsi_cmnd *cmd = cp->cmd;
- u_int cam_status, scsi_status, drv_status;
+ u_int cam_status, scsi_status;
- drv_status = 0;
cam_status = DID_OK;
scsi_status = cp->ssss_status;
@@ -190,7 +184,6 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cp->xerr_status == 0) {
cam_status = sym_xerr_cam_status(DID_OK,
cp->sv_xerr_status);
- drv_status = DRIVER_SENSE;
/*
* Bounce back the sense data to user.
*/
@@ -239,7 +232,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
scsi_set_resid(cmd, resid);
- cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
+ cmd->result = (cam_status << 16) | scsi_status;
}
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
@@ -492,22 +485,20 @@ void sym_log_bus_error(struct Scsi_Host *shost)
* queuecommand method. Entered with the host adapter lock held and
* interrupts disabled.
*/
-static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd)
{
struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
int sts = 0;
- cmd->scsi_done = done;
memset(ucp, 0, sizeof(*ucp));
/*
* Shorten our settle_time if needed for
* this command not to time out.
*/
- if (np->s.settle_time_valid && cmd->request->timeout) {
- unsigned long tlimit = jiffies + cmd->request->timeout;
+ if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) {
+ unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;
@@ -1638,6 +1629,7 @@ static struct scsi_host_template sym2_template = {
.module = THIS_MODULE,
.name = "sym53c8xx",
.info = sym53c8xx_info,
+ .cmd_size = sizeof(struct sym_ucmd),
.queuecommand = sym53c8xx_queue_command,
.slave_alloc = sym53c8xx_slave_alloc,
.slave_configure = sym53c8xx_slave_configure,
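
The .cmd_size = sizeof(struct sym_ucmd) line is what lets SYM_UCMD_PTR() stop overlaying the legacy SCp scratch area: the midlayer allocates cmd_size extra bytes with every scsi_cmnd, and scsi_cmd_priv() returns a pointer to them. The generic pattern, as a hedged sketch (driver and struct names are illustrative):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct demo_priv {
            int retries;            /* per-command driver state */
    };

    static int demo_queuecommand(struct Scsi_Host *host,
                                 struct scsi_cmnd *scmd)
    {
            struct demo_priv *priv = scsi_cmd_priv(scmd);

            priv->retries = 0;      /* storage provided by the midlayer */
            /* ... build and issue the command ... */
            return 0;
    }

    static struct scsi_host_template demo_template = {
            .name         = "demo",
            .queuecommand = demo_queuecommand,
            .cmd_size     = sizeof(struct demo_priv),
    };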
@@ -1743,7 +1735,7 @@ static void sym2_remove(struct pci_dev *pdev)
* @state: current state of the PCI slot
*/
static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
+ pci_channel_state_t state)
{
/* If slot is permanently frozen, turn everything off */
if (state == pci_channel_io_perm_failure) {
@@ -1774,6 +1766,7 @@ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
/**
* sym2_reset_workarounds - hardware-specific work-arounds
+ * @pdev: pointer to PCI device
*
* This routine is similar to sym_set_workarounds(), except
* that, at this point, we already know that the device was
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a428cae4535b..f0db17e34ea0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3059,7 +3059,7 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb
sym_print_addr(cp->cmd, "%s\n",
s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n");
}
- /* fall through */
+ fallthrough;
default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
sym_complete_error (np, cp);
break;
@@ -3598,7 +3598,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num)
}
/*
- * Gerard's alchemy:) that deals with with the data
+ * Gerard's alchemy:) that deals with the data
* pointer for both MDP and the residual calculation.
*
* I didn't want to bloat the code by more than 200
@@ -4596,7 +4596,6 @@ static void sym_int_sir(struct sym_hcb *np)
scr_to_cpu(np->lastmsg), np->msgout[0]);
}
goto out_clrack;
- break;
default:
goto out_reject;
}
@@ -4620,7 +4619,7 @@ static void sym_int_sir(struct sym_hcb *np)
* Negotiation failed.
* Target does not want answer message.
*/
- /* fall through */
+ fallthrough;
case SIR_NEGO_PROTO:
sym_nego_default(np, tp, cp);
goto out;
@@ -5352,8 +5351,10 @@ void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
{
struct scsi_device *sdev;
struct scsi_cmnd *cmd;
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
struct sym_tcb *tp;
struct sym_lcb *lp;
+#endif
int resid;
int i;
@@ -5370,11 +5371,13 @@ void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
cp->host_status, cp->ssss_status, cp->host_flags);
}
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
* Get target and lun pointers.
*/
tp = &np->target[cp->target];
lp = sym_lp(tp, sdev->lun);
+#endif
/*
* Check for extended errors.
@@ -5481,8 +5484,10 @@ finish:
*/
void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
{
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
struct sym_tcb *tp;
struct sym_lcb *lp;
+#endif
struct scsi_cmnd *cmd;
int resid;
@@ -5498,11 +5503,13 @@ void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
*/
cmd = cp->cmd;
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
* Get target and lun pointers.
*/
tp = &np->target[cp->target];
lp = sym_lp(tp, cp->lun);
+#endif
/*
* If all data have been transferred, given that no
@@ -5648,7 +5655,7 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram
/*
* Allocate the array of lists of CCBs hashed by DSA.
*/
- np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
+ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(*np->ccbh), GFP_KERNEL);
if (!np->ccbh)
goto attach_failed;
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index d37e2a69136a..e13d5351f155 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -695,7 +695,7 @@ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram)
data, len);
if (!x)
break;
- /* fall through */
+ fallthrough;
default:
x = sym_read_T93C46_nvram(np, nvram);
break;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
deleted file mode 100644
index d14c2243e02a..000000000000
--- a/drivers/scsi/ufs/Kconfig
+++ /dev/null
@@ -1,162 +0,0 @@
-#
-# Kernel configuration file for the UFS Host Controller
-#
-# This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011-2013 Samsung India Software Operations
-#
-# Authors:
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-# See the COPYING file in the top-level directory or visit
-# <http://www.gnu.org/licenses/gpl-2.0.html>
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# This program is provided "AS IS" and "WITH ALL FAULTS" and
-# without warranty of any kind. You are solely responsible for
-# determining the appropriateness of using and distributing
-# the program and assume all risks associated with your exercise
-# of rights with respect to the program, including but not limited
-# to infringement of third party rights, the risks and costs of
-# program errors, damage to or loss of data, programs or equipment,
-# and unavailability or interruption of operations. Under no
-# circumstances will the contributor of this Program be liable for
-# any damages of any kind arising from your use or distribution of
-# this program.
-
-config SCSI_UFSHCD
- tristate "Universal Flash Storage Controller Driver Core"
- depends on SCSI && SCSI_DMA
- select PM_DEVFREQ
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select NLS
- ---help---
- This selects the support for UFS devices in Linux, say Y and make
- sure that you know the name of your UFS host adapter (the card
- inside your computer that "speaks" the UFS protocol, also
- called UFS Host Controller), because you will be asked for it.
- The module will be called ufshcd.
-
- To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/ufs.txt>.
- However, do not compile this as a module if your root file system
- (the one containing the directory /) is located on a UFS device.
-
-config SCSI_UFSHCD_PCI
- tristate "PCI bus based UFS Controller support"
- depends on SCSI_UFSHCD && PCI
- ---help---
- This selects the PCI UFS Host Controller Interface. Select this if
- you have UFS Host Controller with PCI Interface.
-
- If you have a controller with this interface, say Y or M here.
-
- If unsure, say N.
-
-config SCSI_UFS_DWC_TC_PCI
- tristate "DesignWare pci support using a G210 Test Chip"
- depends on SCSI_UFSHCD_PCI
- ---help---
- Synopsys Test Chip is a PHY for prototyping purposes.
-
- If unsure, say N.
-
-config SCSI_UFSHCD_PLATFORM
- tristate "Platform bus based UFS Controller support"
- depends on SCSI_UFSHCD
- ---help---
- This selects the UFS host controller support. Select this if
- you have an UFS controller on Platform bus.
-
- If you have a controller with this interface, say Y or M here.
-
- If unsure, say N.
-
-config SCSI_UFS_CDNS_PLATFORM
- tristate "Cadence UFS Controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM
- help
- This selects the Cadence-specific additions to UFSHCD platform driver.
-
- If unsure, say N.
-
-config SCSI_UFS_DWC_TC_PLATFORM
- tristate "DesignWare platform support using a G210 Test Chip"
- depends on SCSI_UFSHCD_PLATFORM
- ---help---
- Synopsys Test Chip is a PHY for prototyping purposes.
-
- If unsure, say N.
-
-config SCSI_UFS_QCOM
- tristate "QCOM specific hooks to UFS controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
- select RESET_CONTROLLER
- help
- This selects the QCOM specific additions to UFSHCD platform driver.
- UFS host on QCOM needs some vendor specific configuration before
- accessing the hardware which includes PHY configuration and vendor
- specific registers.
-
- Select this if you have UFS controller on QCOM chipset.
- If unsure, say N.
-
-config SCSI_UFS_MEDIATEK
- tristate "Mediatek specific hooks to UFS controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
- select PHY_MTK_UFS
- help
- This selects the MediaTek-specific additions to the UFSHCD platform
- driver. The UFS host on MediaTek chipsets needs some vendor-specific
- configuration before accessing the hardware, which includes PHY
- configuration and vendor-specific registers.
-
- Select this if you have a UFS controller on a MediaTek chipset.
-
- If unsure, say N.
-
-config SCSI_UFS_HISI
- tristate "Hisilicon specific hooks to UFS controller platform driver"
- depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
- ---help---
- This selects the HiSilicon-specific additions to the UFSHCD platform driver.
-
- Select this if you have a UFS controller on a HiSilicon chipset.
- If unsure, say N.
-
-config SCSI_UFS_TI_J721E
- tristate "TI glue layer for Cadence UFS Controller"
- depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
- help
- This selects the driver for the TI glue layer for the Cadence UFS
- host controller IP.
-
- Select this if you have a TI platform with a UFS controller.
- If unsure, say N.
-
-config SCSI_UFS_BSG
- bool "Universal Flash Storage BSG device node"
- depends on SCSI_UFSHCD
- select BLK_DEV_BSGLIB
- help
- Universal Flash Storage (UFS) is a SCSI transport specification for
- accessing flash storage on digital cameras, mobile phones and
- consumer electronic devices.
- A UFS controller communicates with a UFS device by exchanging
- UFS Protocol Information Units (UPIUs).
- UPIUs can not only be used as a transport layer for the SCSI protocol
- but are also used by the UFS native command set.
- This transport driver supports exchanging UFS protocol information units
- with a UFS device. See also the ufshcd driver, which is a SCSI driver
- that supports UFS devices.
-
- Select this if you need a bsg device node for your UFS controller.
- If unsure, say N.
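
The BSG help text above compresses a lot: user space talks to the bsg node by filling a struct ufs_bsg_request and handing it to the kernel through an SG_IO ioctl with a v4 sg header. A minimal user-space sketch, assuming the uapi headers <linux/bsg.h> and <scsi/scsi_bsg_ufs.h>; the node path is hypothetical and the UPIU payload is left empty:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
#include <scsi/sg.h>            /* SG_IO */
#include <scsi/scsi_bsg_ufs.h>  /* struct ufs_bsg_request/reply */

int ufs_bsg_send(const char *node)
{
	struct ufs_bsg_request req = { 0 };
	struct ufs_bsg_reply rsp = { 0 };
	struct sg_io_v4 io = { 0 };
	int fd, ret;

	fd = open(node, O_RDWR);   /* e.g. "/dev/bsg/ufs-bsg" (hypothetical) */
	if (fd < 0)
		return -1;

	req.msgcode = 0x16;   /* Query Request UPIU transaction code */
	/* a real caller fills req.upiu_req with the query contents here */

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uint64_t)(uintptr_t)&req;
	io.request_len = sizeof(req);
	io.response = (uint64_t)(uintptr_t)&rsp;
	io.max_response_len = sizeof(rsp);

	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	return ret;
}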
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
deleted file mode 100644
index 94c6c5d7334b..000000000000
--- a/drivers/scsi/ufs/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# UFSHCD makefile
-obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
-obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
-obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
-obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
-obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
-obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
-obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
-obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
-obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
deleted file mode 100644
index 56a6a1ed5ec2..000000000000
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ /dev/null
@@ -1,341 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Platform UFS Host driver for Cadence controller
- *
- * Copyright (C) 2018 Cadence Design Systems, Inc.
- *
- * Authors:
- * Jan Kotas <jank@cadence.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/time.h>
-
-#include "ufshcd-pltfrm.h"
-
-#define CDNS_UFS_REG_HCLKDIV 0xFC
-#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
-#define CDNS_UFS_MAX_L4_ATTRS 12
-
-struct cdns_ufs_host {
- /**
- * cdns_ufs_dme_attr_val - for storing L4 attributes
- */
- u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
-};
-
-/**
- * cdns_ufs_get_l4_attr - get L4 attributes on local side
- * @hba: per adapter instance
- *
- */
-static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
-{
- struct cdns_ufs_host *host = ufshcd_get_variant(hba);
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
- &host->cdns_ufs_dme_attr_val[0]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
- &host->cdns_ufs_dme_attr_val[1]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
- &host->cdns_ufs_dme_attr_val[2]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
- &host->cdns_ufs_dme_attr_val[3]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
- &host->cdns_ufs_dme_attr_val[4]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
- &host->cdns_ufs_dme_attr_val[5]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
- &host->cdns_ufs_dme_attr_val[6]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
- &host->cdns_ufs_dme_attr_val[7]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
- &host->cdns_ufs_dme_attr_val[8]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
- &host->cdns_ufs_dme_attr_val[9]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
- &host->cdns_ufs_dme_attr_val[10]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
- &host->cdns_ufs_dme_attr_val[11]);
-}
-
-/**
- * cdns_ufs_set_l4_attr - set L4 attributes on local side
- * @hba: per adapter instance
- *
- */
-static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
-{
- struct cdns_ufs_host *host = ufshcd_get_variant(hba);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
- host->cdns_ufs_dme_attr_val[0]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
- host->cdns_ufs_dme_attr_val[1]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
- host->cdns_ufs_dme_attr_val[2]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
- host->cdns_ufs_dme_attr_val[3]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
- host->cdns_ufs_dme_attr_val[4]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
- host->cdns_ufs_dme_attr_val[5]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
- host->cdns_ufs_dme_attr_val[6]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
- host->cdns_ufs_dme_attr_val[7]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
- host->cdns_ufs_dme_attr_val[8]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
- host->cdns_ufs_dme_attr_val[9]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
- host->cdns_ufs_dme_attr_val[10]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
- host->cdns_ufs_dme_attr_val[11]);
-}
-
-/**
- * cdns_ufs_set_hclkdiv - set the HCLKDIV register based on the core_clk rate
- * @hba: host controller instance
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
-{
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
- unsigned long core_clk_rate = 0;
- u32 core_clk_div = 0;
-
- if (list_empty(head))
- return 0;
-
- list_for_each_entry(clki, head, list) {
- if (IS_ERR_OR_NULL(clki->clk))
- continue;
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
- }
-
- if (!core_clk_rate) {
- dev_err(hba->dev, "%s: unable to find core_clk rate\n",
- __func__);
- return -EINVAL;
- }
-
- core_clk_div = core_clk_rate / USEC_PER_SEC;
-
- ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
- /*
- * Make sure the register was updated; the UniPro layer
- * will not work with an incorrect value.
- */
- mb();
-
- return 0;
-}
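
For reference, the divider computed above is just the core clock rate expressed in cycles per microsecond. A sketch of the arithmetic with an assumed 200 MHz core_clk:

unsigned long core_clk_rate = 200000000;           /* assumed 200 MHz core_clk */
u32 core_clk_div = core_clk_rate / USEC_PER_SEC;   /* 200 cycles per microsecond */
/* 200 (0xC8) is what would land in CDNS_UFS_REG_HCLKDIV */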
-
-/**
- * cdns_ufs_hce_enable_notify - called before and after the HCE enable bit is set
- * @hba: host controller instance
- * @status: notify stage (pre, post change)
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- if (status != PRE_CHANGE)
- return 0;
-
- return cdns_ufs_set_hclkdiv(hba);
-}
-
-/**
- * cdns_ufs_hibern8_notify - called around hibern8 enter/exit
- * @hba: host controller instance
- * @cmd: UIC Command
- * @status: notify stage (pre, post change)
- *
- */
-static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
- enum ufs_notify_change_status status)
-{
- if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
- cdns_ufs_get_l4_attr(hba);
- if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
- cdns_ufs_set_l4_attr(hba);
-}
-
-/**
- * cdns_ufs_link_startup_notify - called before and after link startup
- * @hba: host controller instance
- * @status: notify stage (pre, post change)
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- if (status != PRE_CHANGE)
- return 0;
-
- /*
- * Some UFS devices have issues if LCC is enabled, so set
- * PA_Local_TX_LCC_Enable to 0 before link startup; this ensures
- * that both host and device TX LCC are disabled once link
- * startup completes.
- */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
-
- /*
- * Disable the Auto-Hibern8 feature on the Cadence UFS controller
- * to avoid unexpected interrupt triggers.
- */
- hba->ahit = 0;
-
- return 0;
-}
-
-/**
- * cdns_ufs_init - performs additional ufs initialization
- * @hba: host controller instance
- *
- * Returns status of initialization
- */
-static int cdns_ufs_init(struct ufs_hba *hba)
-{
- int status = 0;
- struct cdns_ufs_host *host;
- struct device *dev = hba->dev;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-
- if (!host)
- return -ENOMEM;
- ufshcd_set_variant(hba, host);
-
- if (hba->vops && hba->vops->phy_initialization)
- status = hba->vops->phy_initialization(hba);
-
- return status;
-}
-
-/**
- * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
- * @hba: host controller instance
- *
- * Always returns 0
- */
-static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
-{
- u32 data;
-
- /* Increase RX_Advanced_Min_ActivateTime_Capability */
- data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
- data |= BIT(24);
- ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
-
- return 0;
-}
-
-static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
- .name = "cdns-ufs-pltfm",
- .init = cdns_ufs_init,
- .hce_enable_notify = cdns_ufs_hce_enable_notify,
- .link_startup_notify = cdns_ufs_link_startup_notify,
- .hibern8_notify = cdns_ufs_hibern8_notify,
-};
-
-static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
- .name = "cdns-ufs-pltfm",
- .init = cdns_ufs_init,
- .hce_enable_notify = cdns_ufs_hce_enable_notify,
- .link_startup_notify = cdns_ufs_link_startup_notify,
- .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
- .hibern8_notify = cdns_ufs_hibern8_notify,
-};
-
-static const struct of_device_id cdns_ufs_of_match[] = {
- {
- .compatible = "cdns,ufshc",
- .data = &cdns_ufs_pltfm_hba_vops,
- },
- {
- .compatible = "cdns,ufshc-m31-16nm",
- .data = &cdns_ufs_m31_16nm_pltfm_hba_vops,
- },
- { },
-};
-
-MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
-
-/**
- * cdns_ufs_pltfrm_probe - probe routine of the driver
- * @pdev: pointer to platform device handle
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
-{
- int err;
- const struct of_device_id *of_id;
- struct ufs_hba_variant_ops *vops;
- struct device *dev = &pdev->dev;
-
- of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
- vops = (struct ufs_hba_variant_ops *)of_id->data;
-
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-/**
- * cdns_ufs_pltfrm_remove - removes the ufs driver
- * @pdev: pointer to platform device handle
- *
- * Always returns 0
- */
-static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
-};
-
-static struct platform_driver cdns_ufs_pltfrm_driver = {
- .probe = cdns_ufs_pltfrm_probe,
- .remove = cdns_ufs_pltfrm_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "cdns-ufshcd",
- .pm = &cdns_ufs_dev_pm_ops,
- .of_match_table = cdns_ufs_of_match,
- },
-};
-
-module_platform_driver(cdns_ufs_pltfrm_driver);
-
-MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
-MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
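
The probe above dereferences of_match_node() by hand and casts away the const on the match data. A sketch of the same dispatch using device_get_match_data() from <linux/property.h>, assuming the driver core already matched against cdns_ufs_of_match and a const-correct ufshcd_pltfrm_init() prototype:

static int cdns_ufs_pltfrm_probe_alt(struct platform_device *pdev)
{
	const struct ufs_hba_variant_ops *vops;

	/* .data from the matched of_device_id entry, or NULL */
	vops = device_get_match_data(&pdev->dev);
	if (!vops)
		return -ENODEV;

	return ufshcd_pltfrm_init(pdev, vops);
}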
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
deleted file mode 100644
index 67a6a61154b7..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include "ufshcd.h"
-#include "ufshcd-dwc.h"
-#include "tc-dwc-g210.h"
-
-#include <linux/pci.h>
-#include <linux/pm_runtime.h>
-
-/* Test Chip type expected values */
-#define TC_G210_20BIT 20
-#define TC_G210_40BIT 40
-#define TC_G210_INV 0
-
-static int tc_type = TC_G210_INV;
-module_param(tc_type, int, 0);
-MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
-
-static int tc_dwc_g210_pci_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-
-/*
- * struct ufs_hba_dwc_vops - UFS DWC specific variant operations
- */
-static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
- .name = "tc-dwc-g210-pci",
- .link_startup_notify = ufshcd_dwc_link_startup_notify,
-};
-
-/**
- * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev)
-{
- ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-/**
- * tc_dwc_g210_pci_remove - deallocate the PCI/SCSI host and its data
- * structure memory
- * @pdev: pointer to PCI handle
- */
-static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
-{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
- ufshcd_remove(hba);
-}
-
-/**
- * tc_dwc_g210_pci_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int
-tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ufs_hba *hba;
- void __iomem *mmio_base;
- int err;
-
- /* Check Test Chip type and set the specific setup routine */
- if (tc_type == TC_G210_20BIT) {
- tc_dwc_g210_pci_hba_vops.phy_initialization =
- tc_dwc_g210_config_20_bit;
- } else if (tc_type == TC_G210_40BIT) {
- tc_dwc_g210_pci_hba_vops.phy_initialization =
- tc_dwc_g210_config_40_bit;
- } else {
- dev_err(&pdev->dev, "test chip version not specified\n");
- return -EPERM;
- }
-
- err = pcim_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pcim_enable_device failed\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request and iomap failed\n");
- return err;
- }
-
- mmio_base = pcim_iomap_table(pdev)[0];
-
- err = ufshcd_alloc_host(&pdev->dev, &hba);
- if (err) {
- dev_err(&pdev->dev, "Allocation failed\n");
- return err;
- }
-
- hba->vops = &tc_dwc_g210_pci_hba_vops;
-
- err = ufshcd_init(hba, mmio_base, pdev->irq);
- if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- return err;
- }
-
- pci_set_drvdata(pdev, hba);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
- .suspend = tc_dwc_g210_pci_suspend,
- .resume = tc_dwc_g210_pci_resume,
- .runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
- .runtime_resume = tc_dwc_g210_pci_runtime_resume,
- .runtime_idle = tc_dwc_g210_pci_runtime_idle,
-};
-
-static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
- { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl);
-
-static struct pci_driver tc_dwc_g210_pci_driver = {
- .name = "tc-dwc-g210-pci",
- .id_table = tc_dwc_g210_pci_tbl,
- .probe = tc_dwc_g210_pci_probe,
- .remove = tc_dwc_g210_pci_remove,
- .shutdown = tc_dwc_g210_pci_shutdown,
- .driver = {
- .pm = &tc_dwc_g210_pci_pm_ops
- },
-};
-
-module_pci_driver(tc_dwc_g210_pci_driver);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver");
-MODULE_LICENSE("Dual BSD/GPL");
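
The tc_type module parameter above is a simple dispatch-at-probe pattern: an integer parameter picks which PHY setup routine gets installed before the generic init runs. Stripped to its skeleton (every name below is illustrative, not the driver's):

#include <linux/module.h>

static int tc_type;
module_param(tc_type, int, 0);
MODULE_PARM_DESC(tc_type, "Test chip type (20 or 40)");

static int demo_config_20_bit(void) { return 0; }  /* placeholder */
static int demo_config_40_bit(void) { return 0; }  /* placeholder */

static int (*demo_phy_init)(void);

static int __init demo_init(void)
{
	switch (tc_type) {
	case 20:
		demo_phy_init = demo_config_20_bit;
		break;
	case 40:
		demo_phy_init = demo_config_40_bit;
		break;
	default:
		return -EINVAL;   /* the driver above returns -EPERM here */
	}
	return demo_phy_init();
}
module_init(demo_init);

MODULE_LICENSE("GPL");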
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
deleted file mode 100644
index a1268e4f44d6..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/delay.h>
-
-#include "ufshcd-pltfrm.h"
-#include "ufshcd-dwc.h"
-#include "tc-dwc-g210.h"
-
-/*
- * UFS DWC specific variant operations
- */
-static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
- .name = "tc-dwc-g210-pltfm",
- .link_startup_notify = ufshcd_dwc_link_startup_notify,
- .phy_initialization = tc_dwc_g210_config_20_bit,
-};
-
-static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
- .name = "tc-dwc-g210-pltfm",
- .link_startup_notify = ufshcd_dwc_link_startup_notify,
- .phy_initialization = tc_dwc_g210_config_40_bit,
-};
-
-static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
- {
- .compatible = "snps,g210-tc-6.00-20bit",
- .data = &tc_dwc_g210_20bit_pltfm_hba_vops,
- },
- {
- .compatible = "snps,g210-tc-6.00-40bit",
- .data = &tc_dwc_g210_40bit_pltfm_hba_vops,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
-
-/**
- * tc_dwc_g210_pltfm_probe - probe routine of the driver
- * @pdev: pointer to platform device structure
- *
- */
-static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
-{
- int err;
- const struct of_device_id *of_id;
- struct ufs_hba_variant_ops *vops;
- struct device *dev = &pdev->dev;
-
- of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
- vops = (struct ufs_hba_variant_ops *)of_id->data;
-
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-/**
- * tc_dwc_g210_pltfm_remove - removes the UFS driver
- * @pdev: pointer to platform device structure
- *
- */
-static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
-
- return 0;
-}
-
-static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
-};
-
-static struct platform_driver tc_dwc_g210_pltfm_driver = {
- .probe = tc_dwc_g210_pltfm_probe,
- .remove = tc_dwc_g210_pltfm_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "tc-dwc-g210-pltfm",
- .pm = &tc_dwc_g210_pltfm_pm_ops,
- .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
- },
-};
-
-module_platform_driver(tc_dwc_g210_pltfm_driver);
-
-MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
-MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
deleted file mode 100644
index f954a68f6b4c..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include "ufshcd.h"
-#include "unipro.h"
-
-#include "ufshcd-dwc.h"
-#include "ufshci-dwc.h"
-#include "tc-dwc-g210.h"
-
-/**
- * tc_dwc_g210_setup_40bit_rmmi - configure Synopsys TC specific
- * attributes (40-bit RMMI)
- * @hba: Pointer to the driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
-{
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
- { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
- { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
- { UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
- { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
- { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
- { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
- DME_LOCAL },
- { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
- };
-
- return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
- ARRAY_SIZE(setup_attrs));
-}
-
-/**
- * tc_dwc_g210_setup_20bit_rmmi_lane0 - configure Synopsys TC 20-bit RMMI
- * Lane 0
- * @hba: Pointer to the driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
-{
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
- DME_LOCAL },
- { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
- };
-
- return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
- ARRAY_SIZE(setup_attrs));
-}
-
-/**
- * tc_dwc_g210_setup_20bit_rmmi_lane1 - configure Synopsys TC 20-bit RMMI
- * Lane 1
- * @hba: Pointer to the driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
-{
- int connected_rx_lanes = 0;
- int connected_tx_lanes = 0;
- int ret = 0;
-
- static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
- { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
- DME_LOCAL },
- };
-
- static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
- { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
- DME_LOCAL },
- };
-
- /* Get the available lane count */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
- &connected_rx_lanes);
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
- &connected_tx_lanes);
-
- if (connected_tx_lanes == 2) {
-
- ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
- ARRAY_SIZE(setup_tx_attrs));
-
- if (ret)
- goto out;
- }
-
- if (connected_rx_lanes == 2) {
- ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
- ARRAY_SIZE(setup_rx_attrs));
- }
-
-out:
- return ret;
-}
-
-/**
- * tc_dwc_g210_setup_20bit_rmmi - configure Synopsys TC specific
- * attributes (20-bit RMMI)
- * @hba: Pointer to the driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
-{
- int ret = 0;
-
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
- { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
- { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
- { UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
- { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
- { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
- { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
- };
-
- ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
- ARRAY_SIZE(setup_attrs));
- if (ret)
- goto out;
-
- /* Lane 0 configuration */
- ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
- if (ret)
- goto out;
-
- /* Lane 1 configuration */
- ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
- if (ret)
- goto out;
-
-out:
- return ret;
-}
-
-/**
- * tc_dwc_g210_config_40_bit - configure local (host) Synopsys 40-bit TC
- * specific attributes
- *
- * @hba: Pointer to the driver's structure
- *
- * Returns 0 on success, non-zero value on failure
- */
-int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
-{
- int ret = 0;
-
- dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
- ret = tc_dwc_g210_setup_40bit_rmmi(hba);
- if (ret) {
- dev_err(hba->dev, "Configuration failed\n");
- goto out;
- }
-
- /* To write Shadow register bank to effective configuration block */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
- if (ret)
- goto out;
-
- /* To configure Debug OMC */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
-
-/**
- * tc_dwc_g210_config_20_bit - configure local (host) Synopsys 20-bit TC
- * specific attributes
- *
- * @hba: Pointer to the driver's structure
- *
- * Returns 0 on success, non-zero value on failure
- */
-int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
-{
- int ret = 0;
-
- dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
- ret = tc_dwc_g210_setup_20bit_rmmi(hba);
- if (ret) {
- dev_err(hba->dev, "Configuration failed\n");
- goto out;
- }
-
- /* To write Shadow register bank to effective configuration block */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
- if (ret)
- goto out;
-
- /* To configure Debug OMC */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
-MODULE_LICENSE("Dual BSD/GPL");
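
All three setup routines in this file reduce to one idiom: a const table of {attr_sel, mib_val, peer} triples walked by ufshcd_dwc_dme_set_attrs(). A sketch of what that consumer loop looks like, assuming the struct field names from ufshcd-dwc.h:

static int demo_dme_set_attrs(struct ufs_hba *hba,
			      const struct ufshcd_dme_attr_val *v,
			      int n)
{
	int ret = 0;
	int i;

	/* stop on the first failing DME write */
	for (i = 0; i < n && !ret; i++)
		ret = ufshcd_dme_set_attr(hba, v[i].attr_sel, ATTR_SET_NOR,
					  v[i].mib_val, v[i].peer);

	return ret;
}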
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/scsi/ufs/tc-dwc-g210.h
deleted file mode 100644
index 5a506da03f4a..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#ifndef _TC_DWC_G210_H
-#define _TC_DWC_G210_H
-
-int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
-int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
deleted file mode 100644
index 5216d228cdd9..000000000000
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
-//
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#define TI_UFS_SS_CTRL 0x4
-#define TI_UFS_SS_RST_N_PCS BIT(0)
-#define TI_UFS_SS_CLK_26MHZ BIT(4)
-
-static int ti_j721e_ufs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- unsigned long clk_rate;
- void __iomem *regbase;
- struct clk *clk;
- u32 reg = 0;
- int ret;
-
- regbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regbase))
- return PTR_ERR(regbase);
-
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
- return ret;
- }
-
- /* Select MPHY refclk frequency */
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(dev, "Cannot claim MPHY clock.\n");
- return PTR_ERR(clk);
- }
- clk_rate = clk_get_rate(clk);
- if (clk_rate == 26000000)
- reg |= TI_UFS_SS_CLK_26MHZ;
- devm_clk_put(dev, clk);
-
- /* Take UFS slave device out of reset */
- reg |= TI_UFS_SS_RST_N_PCS;
- writel(reg, regbase + TI_UFS_SS_CTRL);
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
- dev);
- if (ret) {
- dev_err(dev, "failed to populate child nodes %d\n", ret);
- pm_runtime_put_sync(dev);
- }
-
- return ret;
-}
-
-static int ti_j721e_ufs_remove(struct platform_device *pdev)
-{
- of_platform_depopulate(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
-
- return 0;
-}
-
-static const struct of_device_id ti_j721e_ufs_of_match[] = {
- {
- .compatible = "ti,j721e-ufs",
- },
- { },
-};
-
-static struct platform_driver ti_j721e_ufs_driver = {
- .probe = ti_j721e_ufs_probe,
- .remove = ti_j721e_ufs_remove,
- .driver = {
- .name = "ti-j721e-ufs",
- .of_match_table = ti_j721e_ufs_of_match,
- },
-};
-module_platform_driver(ti_j721e_ufs_driver);
-
-MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
-MODULE_DESCRIPTION("TI UFS host controller glue driver");
-MODULE_LICENSE("GPL v2");
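
The probe above shows a small but common glue-layer pattern: read the reference clock rate and fold it into a one-off control register write. The same idea in isolation, assuming <linux/clk.h> and the register bits defined at the top of this file:

static u32 demo_ti_ufs_ctrl_bits(struct clk *clk)
{
	u32 reg = 0;

	if (clk_get_rate(clk) == 26000000)   /* 26 MHz MPHY refclk */
		reg |= TI_UFS_SS_CLK_26MHZ;

	/* always take the UFS slave device out of reset */
	return reg | TI_UFS_SS_RST_N_PCS;
}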
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
deleted file mode 100644
index 5d6487350a6c..000000000000
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ /dev/null
@@ -1,604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HiSilicon Hixxxx UFS Driver
- *
- * Copyright (c) 2016-2017 Linaro Ltd.
- * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
- */
-
-#include <linux/time.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "unipro.h"
-#include "ufs-hisi.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
-
-static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
-{
- int err = 0;
- u32 tx_fsm_val_0 = 0;
- u32 tx_fsm_val_1 = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
-
- do {
- err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
- &tx_fsm_val_0);
- err |= ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
- if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
- tx_fsm_val_1 == TX_FSM_HIBERN8))
- break;
-
- /* sleep for max. 200us */
- usleep_range(100, 200);
- } while (time_before(jiffies, timeout));
-
- /*
- * We might have been scheduled out for a long time while
- * polling, so check the state again.
- */
- if (time_after(jiffies, timeout)) {
- err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
- &tx_fsm_val_0);
- err |= ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
- }
-
- if (err) {
- dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
- __func__, err);
- } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
- tx_fsm_val_1 != TX_FSM_HIBERN8) {
- err = -1;
- dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
- __func__, tx_fsm_val_0, tx_fsm_val_1);
- }
-
- return err;
-}
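
The function above is a textbook jiffies-deadline poll: sleep-poll until the condition holds and, because the task may have been scheduled out past the deadline, re-check once more after the loop before declaring a timeout. The bare idiom, with cond() standing in for the pair of ufshcd_dme_get() reads:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_poll(bool (*cond)(void), unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (cond())
			return 0;
		usleep_range(100, 200);   /* sleep for max. 200 us */
	} while (time_before(jiffies, timeout));

	/* we may have slept past the deadline: one final check */
	return cond() ? 0 : -ETIMEDOUT;
}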
-
-static void ufs_hisi_clk_init(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
- if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
- mdelay(1);
- /* use abb clk */
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
- /* open mphy ref clk */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
-}
-
-static void ufs_hisi_soc_init(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- u32 reg;
-
- if (!IS_ERR(host->rst))
- reset_control_assert(host->rst);
-
- /* HC_PSW powerup */
- ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
- udelay(10);
- /* notify PWR ready */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
- ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
- UFS_DEVICE_RESET_CTRL);
-
- reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
- reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
- /* set cfg clk freq */
- ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
- /* set ref clk freq */
- ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
- /* bypass ufs clk gate */
- ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
- CLOCK_GATE_BYPASS);
- ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
-
- /* open psw clk */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
- /* disable ufshc iso */
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
- /* disable phy iso */
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
- /* notice iso disable */
- ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
-
- /* disable lp_reset_n */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
- mdelay(1);
-
- ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
- UFS_DEVICE_RESET_CTRL);
-
- msleep(20);
-
- /*
- * enable the fix of linereset recovery,
- * and enable rx_reset/tx_rest beat
- * enable ref_clk_en override(bit5) &
- * override value = 1(bit4), with mask
- */
- ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
-
- if (!IS_ERR(host->rst))
- reset_control_deassert(host->rst);
-}
-
-static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- int err;
- uint32_t value;
- uint32_t reg;
-
- /* Unipro VS_mphy_disable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
- /* PA_HSSeries */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
- /* MPHY CBRATESEL */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
- /* MPHY CBOVRCTRL2 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
- /* MPHY CBOVRCTRL3 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
-
- if (host->caps & UFS_HISI_CAP_PHY10nm) {
- /* MPHY CBOVRCTRL4 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
- /* MPHY CBOVRCTRL5 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
- }
-
- /* Unipro VS_MphyCfgUpdt */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
- /* MPHY RXOVRCTRL4 rx0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
- /* MPHY RXOVRCTRL4 rx1 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
- /* MPHY RXOVRCTRL5 rx0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
- /* MPHY RXOVRCTRL5 rx1 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
- /* MPHY RXSQCONTROL rx0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
- /* MPHY RXSQCONTROL rx1 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
- /* Unipro VS_MphyCfgUpdt */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
-
- if (host->caps & UFS_HISI_CAP_PHY10nm) {
- /* RX_Hibern8Time_Capability*/
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
- /* RX_Hibern8Time_Capability*/
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
- /* RX_Min_ActivateTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
- /* RX_Min_ActivateTime*/
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
- } else {
- /* Tactive RX */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
- /* Tactive RX */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
- }
-
- /* Gear3 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
- /* Gear3 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
- /* Gear2 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
- /* Gear2 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
- /* Gear1 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
- /* Gear1 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
- /* Thibernate Tx */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
- /* Thibernate Tx */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
- /* Unipro VS_mphy_disable */
- ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
- if (value != 0x1)
- dev_info(hba->dev,
- "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
-
- /* Unipro VS_mphy_disable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
- err = ufs_hisi_check_hibern8(hba);
- if (err)
- dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
-
- if (!(host->caps & UFS_HISI_CAP_PHY10nm))
- ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
-
- /* disable auto H8 */
- reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
- reg = reg & (~UFS_AHIT_AH8ITV_MASK);
- ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
- /* Unipro PA_Local_TX_LCC_Enable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
- /* close Unipro VS_Mk2ExtnSupport */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
- ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
- if (value != 0) {
- /* Ensure close success */
- dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
- }
-
- return err;
-}
-
-static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- /* Unipro DL_AFC0CreditThreshold */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
- /* Unipro DL_TC0OutAckThreshold */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
- /* Unipro DL_TC0TXFCThreshold */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
-
- /* not bypass ufs clk gate */
- ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
- CLOCK_GATE_BYPASS);
- ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
- UFS_SYSCTRL);
-
- /* select received symbol cnt */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
- /* reset counter0 and enable */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
-
- return 0;
-}
-
-static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- err = ufs_hisi_link_startup_pre_change(hba);
- break;
- case POST_CHANGE:
- err = ufs_hisi_link_startup_post_change(hba);
- break;
- default:
- break;
- }
-
- return err;
-}
-
-static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
-{
- hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
- hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
- hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
- hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
- hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
- hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
- hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
- hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
- hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
- hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
- hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
- hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
-}
-
-static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- if (host->caps & UFS_HISI_CAP_PHY10nm) {
- /*
- * The Boston platform needs SaveConfigTime set to 0x13 and the
- * sync length changed to its maximum value
- */
- /* VS_DebugSaveConfigTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
- /* g1 sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
- /* g2 sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
- /* g3 sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
- /* PA_Hibern8Time */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
- /* PA_Tactivate */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
- }
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
- pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
- /* VS_DebugSaveConfigTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
- /* sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
- }
-
- /* update */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
- /* PA_TxSkip */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
- /*PA_PWRModeUserData0 = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
- /*PA_PWRModeUserData1 = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
- /*PA_PWRModeUserData2 = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
- /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
- /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
- /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
- /*PA_PWRModeUserData3 = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
- /*PA_PWRModeUserData4 = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
- /*PA_PWRModeUserData5 = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
- /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
- /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
- /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
-}
-
-static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct ufs_dev_params ufs_hisi_cap;
- int ret = 0;
-
- if (!dev_req_params) {
- dev_err(hba->dev,
- "%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- switch (status) {
- case PRE_CHANGE:
- ufs_hisi_set_dev_cap(&ufs_hisi_cap);
- ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
- dev_max_params, dev_req_params);
- if (ret) {
- dev_err(hba->dev,
- "%s: failed to determine capabilities\n", __func__);
- goto out;
- }
-
- ufs_hisi_pwr_change_pre_change(hba);
- break;
- case POST_CHANGE:
- break;
- default:
- ret = -EINVAL;
- break;
- }
-out:
- return ret;
-}
-
-static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- if (ufshcd_is_runtime_pm(pm_op))
- return 0;
-
- if (host->in_suspend) {
- WARN_ON(1);
- return 0;
- }
-
- ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
- udelay(10);
- /* set ref_dig_clk override of PHY PCS to 0 */
- ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
-
- host->in_suspend = true;
-
- return 0;
-}
-
-static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- if (!host->in_suspend)
- return 0;
-
- /* set ref_dig_clk override of PHY PCS to 1 */
- ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
- udelay(10);
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
-
- host->in_suspend = false;
- return 0;
-}
-
-static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
-{
- struct device *dev = host->hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- /* get resource of ufs sys ctrl */
- host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
- return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
-}
-
-static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
-{
- hba->rpm_lvl = UFS_PM_LVL_1;
- hba->spm_lvl = UFS_PM_LVL_3;
-}
-
-/**
- * ufs_hisi_init_common - initialization common to all HiSi UFS hosts
- * @hba: host controller instance
- */
-static int ufs_hisi_init_common(struct ufs_hba *hba)
-{
- int err = 0;
- struct device *dev = hba->dev;
- struct ufs_hisi_host *host;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host)
- return -ENOMEM;
-
- host->hba = hba;
- ufshcd_set_variant(hba, host);
-
- host->rst = devm_reset_control_get(dev, "rst");
- if (IS_ERR(host->rst)) {
- dev_err(dev, "%s: failed to get reset control\n", __func__);
- return PTR_ERR(host->rst);
- }
-
- ufs_hisi_set_pm_lvl(hba);
-
- err = ufs_hisi_get_resource(host);
- if (err) {
- ufshcd_set_variant(hba, NULL);
- return err;
- }
-
- return 0;
-}
-
-static int ufs_hi3660_init(struct ufs_hba *hba)
-{
- int ret = 0;
- struct device *dev = hba->dev;
-
- ret = ufs_hisi_init_common(hba);
- if (ret) {
- dev_err(dev, "%s: ufs common init fail\n", __func__);
- return ret;
- }
-
- ufs_hisi_clk_init(hba);
-
- ufs_hisi_soc_init(hba);
-
- return 0;
-}
-
-static int ufs_hi3670_init(struct ufs_hba *hba)
-{
- int ret = 0;
- struct device *dev = hba->dev;
- struct ufs_hisi_host *host;
-
- ret = ufs_hisi_init_common(hba);
- if (ret) {
- dev_err(dev, "%s: ufs common init fail\n", __func__);
- return ret;
- }
-
- ufs_hisi_clk_init(hba);
-
- ufs_hisi_soc_init(hba);
-
- /* Add cap for 10nm PHY variant on HI3670 SoC */
- host = ufshcd_get_variant(hba);
- host->caps |= UFS_HISI_CAP_PHY10nm;
-
- return 0;
-}
-
-static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
- .name = "hi3660",
- .init = ufs_hi3660_init,
- .link_startup_notify = ufs_hisi_link_startup_notify,
- .pwr_change_notify = ufs_hisi_pwr_change_notify,
- .suspend = ufs_hisi_suspend,
- .resume = ufs_hisi_resume,
-};
-
-static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
- .name = "hi3670",
- .init = ufs_hi3670_init,
- .link_startup_notify = ufs_hisi_link_startup_notify,
- .pwr_change_notify = ufs_hisi_pwr_change_notify,
- .suspend = ufs_hisi_suspend,
- .resume = ufs_hisi_resume,
-};
-
-static const struct of_device_id ufs_hisi_of_match[] = {
- { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
- { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
-
-static int ufs_hisi_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id;
-
- of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
-
- return ufshcd_pltfrm_init(pdev, of_id->data);
-}
-
-static int ufs_hisi_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct dev_pm_ops ufs_hisi_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
-};
-
-static struct platform_driver ufs_hisi_pltform = {
- .probe = ufs_hisi_probe,
- .remove = ufs_hisi_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-hisi",
- .pm = &ufs_hisi_pm_ops,
- .of_match_table = of_match_ptr(ufs_hisi_of_match),
- },
-};
-module_platform_driver(ufs_hisi_pltform);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ufshcd-hisi");
-MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
deleted file mode 100644
index 3231d3d81c98..000000000000
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017, HiSilicon. All rights reserved.
- */
-
-#ifndef UFS_HISI_H_
-#define UFS_HISI_H_
-
-#define HBRN8_POLL_TOUT_MS 1000
-
-/*
- * ufs sysctrl specific define
- */
-#define PSW_POWER_CTRL (0x04)
-#define PHY_ISO_EN (0x08)
-#define HC_LP_CTRL (0x0C)
-#define PHY_CLK_CTRL (0x10)
-#define PSW_CLK_CTRL (0x14)
-#define CLOCK_GATE_BYPASS (0x18)
-#define RESET_CTRL_EN (0x1C)
-#define UFS_SYSCTRL (0x5C)
-#define UFS_DEVICE_RESET_CTRL (0x60)
-
-#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
-#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
-#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
-#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
-#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
-#define BIT_SYSCTRL_PWR_READY (1 << 8)
-#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
-#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
-#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
-#define UFS_FREQ_CFG_CLK (0x39)
-#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
-#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
-#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
-#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
-#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
-#define MASK_UFS_DEVICE_RESET (0x1 << 16)
-#define BIT_UFS_DEVICE_RESET (0x1)
-
-/*
- * M-TX Configuration Attributes for Hixxxx
- */
-#define MPHY_TX_FSM_STATE 0x41
-#define TX_FSM_HIBERN8 0x1
-
-/*
- * Hixxxx UFS HC specific Registers
- */
-enum {
- UFS_REG_OCPTHRTL = 0xc0,
- UFS_REG_OOCPR = 0xc4,
-
- UFS_REG_CDACFG = 0xd0,
- UFS_REG_CDATX1 = 0xd4,
- UFS_REG_CDATX2 = 0xd8,
- UFS_REG_CDARX1 = 0xdc,
- UFS_REG_CDARX2 = 0xe0,
- UFS_REG_CDASTA = 0xe4,
-
- UFS_REG_LBMCFG = 0xf0,
- UFS_REG_LBMSTA = 0xf4,
- UFS_REG_UFSMODE = 0xf8,
-
- UFS_REG_HCLKDIV = 0xfc,
-};
-
-/* AHIT - Auto-Hibernate Idle Timer */
-#define UFS_AHIT_AH8ITV_MASK 0x3FF
-
- /* REG UFS_REG_HCLKDIV definition */
-#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
-
-#define UFS_HISI_LIMIT_NUM_LANES_RX 2
-#define UFS_HISI_LIMIT_NUM_LANES_TX 2
-#define UFS_HISI_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_HISI_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_HISI_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_HISI_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_HISI_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_HISI_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_HISI_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_HISI_LIMIT_TX_PWR_HS FAST_MODE
-#define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_HISI_LIMIT_DESIRED_MODE FAST
-
-#define UFS_HISI_CAP_RESERVED BIT(0)
-#define UFS_HISI_CAP_PHY10nm BIT(1)
-
-struct ufs_hisi_host {
- struct ufs_hba *hba;
- void __iomem *ufs_sys_ctrl;
-
- struct reset_control *rst;
-
- uint64_t caps;
-
- bool in_suspend;
-};
-
-#define ufs_sys_ctrl_writel(host, val, reg) \
- writel((val), (host)->ufs_sys_ctrl + (reg))
-#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
-#define ufs_sys_ctrl_set_bits(host, mask, reg) \
- ufs_sys_ctrl_writel( \
- (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
-#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
- ufs_sys_ctrl_writel((host), \
- ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
- (reg))
-
-#endif /* UFS_HISI_H_ */
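
The set/clr macros above are plain read-modify-write helpers. A type-checked sketch of the same operation as a single static inline (same semantics, illustrative only):

static inline void ufs_sys_ctrl_rmw(struct ufs_hisi_host *host,
				    u32 clr, u32 set, u32 reg)
{
	u32 val = ufs_sys_ctrl_readl(host, reg);

	val &= ~clr;    /* ufs_sys_ctrl_clr_bits(host, clr, reg) */
	val |= set;     /* ufs_sys_ctrl_set_bits(host, set, reg) */
	ufs_sys_ctrl_writel(host, val, reg);
}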
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
deleted file mode 100644
index 53eae5fe2ade..000000000000
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ /dev/null
@@ -1,569 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2019 MediaTek Inc.
- * Authors:
- * Stanley Chu <stanley.chu@mediatek.com>
- * Peter Wang <peter.wang@mediatek.com>
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/bitfield.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/soc/mediatek/mtk_sip_svc.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
-#include "ufs-mediatek.h"
-
-#define ufs_mtk_smc(cmd, val, res) \
- arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
- cmd, val, 0, 0, 0, 0, 0, &(res))
-
-#define ufs_mtk_ref_clk_notify(on, res) \
- ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
-
-#define ufs_mtk_device_reset_ctrl(high, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
-
-static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
-{
- u32 tmp;
-
- if (enable) {
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
- tmp = tmp |
- (1 << RX_SYMBOL_CLK_GATE_EN) |
- (1 << SYS_CLK_GATE_EN) |
- (1 << TX_CLK_GATE_EN);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
-
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
- tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
- } else {
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
- tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
- (1 << SYS_CLK_GATE_EN) |
- (1 << TX_CLK_GATE_EN));
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
-
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
- tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
- }
-}
-
-static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct device *dev = hba->dev;
- struct device_node *np = dev->of_node;
- int err = 0;
-
- host->mphy = devm_of_phy_get_by_index(dev, np, 0);
-
- if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
- /*
- * The UFS driver might be probed before the phy driver. In that
- * case, return -EPROBE_DEFER so the probe is retried later.
- */
- err = -EPROBE_DEFER;
- dev_info(dev,
- "%s: required phy hasn't probed yet. err = %d\n",
- __func__, err);
- } else if (IS_ERR(host->mphy)) {
- err = PTR_ERR(host->mphy);
- dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
- }
-
- if (err)
- host->mphy = NULL;
-
- return err;
-}
-
-static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct arm_smccc_res res;
- unsigned long timeout;
- u32 value;
-
- if (host->ref_clk_enabled == on)
- return 0;
-
- if (on) {
- ufs_mtk_ref_clk_notify(on, res);
- ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
- } else {
- ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
- }
-
- /* Wait for ack */
- timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
- do {
- value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
-
- /* Wait until the ack bit equals the req bit */
- if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
- goto out;
-
- usleep_range(100, 200);
- } while (time_before(jiffies, timeout));
-
- dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
-
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
-
- return -ETIMEDOUT;
-
-out:
- host->ref_clk_enabled = on;
- if (!on)
- ufs_mtk_ref_clk_notify(on, res);
-
- return 0;
-}
-
-/**
- * ufs_mtk_setup_clocks - enables/disables clocks
- * @hba: host controller instance
- * @on: If true, enable clocks else disable them.
- * @status: PRE_CHANGE or POST_CHANGE notify
- *
- * Returns 0 on success, non-zero on failure.
- */
-static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- int ret = 0;
-
- /*
- * If ufs_mtk_init() is not yet done, simply ignore.
- * ufs_mtk_setup_clocks() will be called again from
- * ufs_mtk_init() once init is done.
- */
- if (!host)
- return 0;
-
- switch (status) {
- case PRE_CHANGE:
- if (!on) {
- ufs_mtk_setup_ref_clk(hba, on);
- ret = phy_power_off(host->mphy);
- }
- break;
- case POST_CHANGE:
- if (on) {
- ret = phy_power_on(host->mphy);
- ufs_mtk_setup_ref_clk(hba, on);
- }
- break;
- }
-
- return ret;
-}
-
-/**
- * ufs_mtk_init - bind the MPHY with the controller
- * @hba: host controller instance
- *
- * Binds PHY with controller and powers up PHY enabling clocks
- * and regulators.
- *
- * Returns -EPROBE_DEFER if binding fails, returns negative error
- * on phy power up failure and returns zero on success.
- */
-static int ufs_mtk_init(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host;
- struct device *dev = hba->dev;
- int err = 0;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- err = -ENOMEM;
- dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
- goto out;
- }
-
- host->hba = hba;
- ufshcd_set_variant(hba, host);
-
- err = ufs_mtk_bind_mphy(hba);
- if (err)
- goto out_variant_clear;
-
- /* Enable runtime autosuspend */
- hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
- /* Enable clock-gating */
- hba->caps |= UFSHCD_CAP_CLK_GATING;
-
- /*
- * ufshcd_vops_init() is invoked after
- * ufshcd_setup_clocks(true) in ufshcd_hba_init(), thus
- * phy clock setup is skipped there.
- *
- * Enable phy clocks specifically here.
- */
- ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
-
- goto out;
-
-out_variant_clear:
- ufshcd_set_variant(hba, NULL);
-out:
- return err;
-}
-
-static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct ufs_dev_params host_cap;
- int ret;
-
- host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
- host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
- host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
- host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
- host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
- host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
- host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
- host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
- host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
- host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
- host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
- host_cap.desired_working_mode =
- UFS_MTK_LIMIT_DESIRED_MODE;
-
- ret = ufshcd_get_pwr_dev_param(&host_cap,
- dev_max_params,
- dev_req_params);
- if (ret) {
- pr_info("%s: failed to determine capabilities\n",
- __func__);
- }
-
- return ret;
-}
-
-static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- int ret = 0;
-
- switch (stage) {
- case PRE_CHANGE:
- ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
- dev_req_params);
- break;
- case POST_CHANGE:
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int ufs_mtk_pre_link(struct ufs_hba *hba)
-{
- int ret;
- u32 tmp;
-
- /* disable deep stall */
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
- if (ret)
- return ret;
-
- tmp &= ~(1 << 6);
-
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
-
- return ret;
-}
-
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
-{
- unsigned long flags;
- u32 ah_ms;
-
- if (ufshcd_is_clkgating_allowed(hba)) {
- if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
- ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
- hba->ahit);
- else
- ah_ms = 10;
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.delay_ms = ah_ms + 5;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-}
-
-static int ufs_mtk_post_link(struct ufs_hba *hba)
-{
- /* disable device LCC */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
-
- /* enable unipro clock gating feature */
- ufs_mtk_cfg_unipro_cg(hba, true);
-
- /* configure auto-hibern8 timer to 10ms */
- if (ufshcd_is_auto_hibern8_supported(hba)) {
- ufshcd_auto_hibern8_update(hba,
- FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
- }
-
- ufs_mtk_setup_clk_gating(hba);
-
- return 0;
-}
-
-static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage)
-{
- int ret = 0;
-
- switch (stage) {
- case PRE_CHANGE:
- ret = ufs_mtk_pre_link(hba);
- break;
- case POST_CHANGE:
- ret = ufs_mtk_post_link(hba);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static void ufs_mtk_device_reset(struct ufs_hba *hba)
-{
- struct arm_smccc_res res;
-
- ufs_mtk_device_reset_ctrl(0, res);
-
- /*
-  * The reset signal is active low. UFS devices shall detect a
-  * positive or negative RST_n pulse with a width of at least 1us.
-  *
-  * To be on the safe side, keep the reset low for at least 10us.
-  */
- usleep_range(10, 15);
-
- ufs_mtk_device_reset_ctrl(1, res);
-
- /* Some devices may need time to respond to rst_n */
- usleep_range(10000, 15000);
-
- dev_info(hba->dev, "device reset done\n");
-}
-
-static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
-{
- int err;
-
- err = ufshcd_hba_enable(hba);
- if (err)
- return err;
-
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 0);
- if (err)
- return err;
-
- err = ufshcd_uic_hibern8_exit(hba);
- if (!err)
- ufshcd_set_link_active(hba);
- else
- return err;
-
- err = ufshcd_make_hba_operational(hba);
- if (err)
- return err;
-
- return 0;
-}
-
-static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
-{
- int err;
-
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 1);
- if (err) {
- /* Resume UniPro state for following error recovery */
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 0);
- return err;
- }
-
- return 0;
-}
-
-static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- int err;
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- if (ufshcd_is_link_hibern8(hba)) {
- err = ufs_mtk_link_set_lpm(hba);
- if (err)
- return -EAGAIN;
- phy_power_off(host->mphy);
- ufs_mtk_setup_ref_clk(hba, false);
- }
-
- return 0;
-}
-
-static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- int err;
-
- if (ufshcd_is_link_hibern8(hba)) {
- ufs_mtk_setup_ref_clk(hba, true);
- phy_power_on(host->mphy);
- err = ufs_mtk_link_set_hpm(hba);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
-
- ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
-
- ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
- REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
- "MPHY Ctrl ");
-
- /* Direct debugging information to REG_MTK_PROBE */
- ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
- ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
-}
-
-static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
-
- return 0;
-}
-
-/**
- * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
- *
- * The variant operations configure the necessary controller and PHY
- * handshake during initialization.
- */
-static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
- .name = "mediatek.ufshci",
- .init = ufs_mtk_init,
- .setup_clocks = ufs_mtk_setup_clocks,
- .link_startup_notify = ufs_mtk_link_startup_notify,
- .pwr_change_notify = ufs_mtk_pwr_change_notify,
- .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
- .suspend = ufs_mtk_suspend,
- .resume = ufs_mtk_resume,
- .dbg_register_dump = ufs_mtk_dbg_register_dump,
- .device_reset = ufs_mtk_device_reset,
-};
-
-/**
- * ufs_mtk_probe - probe routine of the driver
- * @pdev: pointer to Platform device handle
- *
- * Return zero for success and non-zero for failure
- */
-static int ufs_mtk_probe(struct platform_device *pdev)
-{
- int err;
- struct device *dev = &pdev->dev;
-
- /* perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
- if (err)
- dev_info(dev, "probe failed %d\n", err);
-
- return err;
-}
-
-/**
- * ufs_mtk_remove - set driver_data of the device to NULL
- * @pdev: pointer to platform device handle
- *
- * Always returns 0
- */
-static int ufs_mtk_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct of_device_id ufs_mtk_of_match[] = {
- { .compatible = "mediatek,mt8183-ufshci"},
- {},
-};
-
-static const struct dev_pm_ops ufs_mtk_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
-};
-
-static struct platform_driver ufs_mtk_pltform = {
- .probe = ufs_mtk_probe,
- .remove = ufs_mtk_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-mtk",
- .pm = &ufs_mtk_pm_ops,
- .of_match_table = ufs_mtk_of_match,
- },
-};
-
-MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
-MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
-MODULE_DESCRIPTION("MediaTek UFS Host Driver");
-MODULE_LICENSE("GPL v2");
-
-module_platform_driver(ufs_mtk_pltform);
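
The ref-clk handshake in the deleted ufs_mtk_setup_ref_clk() boils down to: raise (or clear) the REQUEST bit in REG_UFS_REFCLK_CTRL, then poll until the ACK bit (bit 1) mirrors the REQUEST bit (bit 0), giving up after the timeout. A self-contained sketch of that handshake, with mmio_read()/mmio_write() and a simulated register standing in for ufshcd_readl()/ufshcd_writel() and the hardware:

    /* Request/ack poll sketch of the MediaTek ref-clk handshake. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define REFCLK_REQUEST (1u << 0)
    #define REFCLK_ACK     (1u << 1)
    #define POLL_LIMIT     30              /* ~3 ms at 100 us per step */

    static uint32_t fake_reg;              /* stands in for REG_UFS_REFCLK_CTRL */

    static uint32_t mmio_read(void) { return fake_reg; }
    static void mmio_write(uint32_t v)
    {
        fake_reg = v;
        /* Pretend the controller acks at once: mirror bit 0 into bit 1. */
        fake_reg |= (fake_reg & REFCLK_REQUEST) << 1;
    }

    static bool refclk_set(bool on)
    {
        mmio_write(on ? REFCLK_REQUEST : 0);

        for (int i = 0; i < POLL_LIMIT; i++) {
            uint32_t v = mmio_read();

            /* Done once the ack bit equals the req bit. */
            if (((v & REFCLK_ACK) >> 1) == (v & REFCLK_REQUEST))
                return true;
            /* the kernel version sleeps here: usleep_range(100, 200) */
        }
        return false;                      /* missing ack: -ETIMEDOUT */
    }

    int main(void)
    {
        printf("ref-clk on:  %s\n", refclk_set(true)  ? "acked" : "timeout");
        printf("ref-clk off: %s\n", refclk_set(false) ? "acked" : "timeout");
        return 0;
    }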
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
deleted file mode 100644
index fccdd979d6fb..000000000000
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 MediaTek Inc.
- */
-
-#ifndef _UFS_MEDIATEK_H
-#define _UFS_MEDIATEK_H
-
-#include <linux/bitops.h>
-#include <linux/soc/mediatek/mtk_sip_svc.h>
-
-/*
- * Vendor specific UFSHCI Registers
- */
-#define REG_UFS_REFCLK_CTRL 0x144
-#define REG_UFS_EXTREG 0x2100
-#define REG_UFS_MPHYCTRL 0x2200
-#define REG_UFS_REJECT_MON 0x22AC
-#define REG_UFS_DEBUG_SEL 0x22C0
-#define REG_UFS_PROBE 0x22C8
-
-/*
- * Ref-clk control
- *
- * Values for register REG_UFS_REFCLK_CTRL
- */
-#define REFCLK_RELEASE 0x0
-#define REFCLK_REQUEST BIT(0)
-#define REFCLK_ACK BIT(1)
-
-#define REFCLK_REQ_TIMEOUT_MS 3
-
-/*
- * Vendor specific pre-defined parameters
- */
-#define UFS_MTK_LIMIT_NUM_LANES_RX 1
-#define UFS_MTK_LIMIT_NUM_LANES_TX 1
-#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_MTK_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_MTK_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_MTK_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_MTK_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_MTK_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_MTK_LIMIT_TX_PWR_HS FAST_MODE
-#define UFS_MTK_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_MTK_LIMIT_DESIRED_MODE UFS_HS_MODE
-
-/*
- * Other attributes
- */
-#define VS_DEBUGCLOCKENABLE 0xD0A1
-#define VS_SAVEPOWERCONTROL 0xD0A6
-#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
-
-/*
- * SiP commands
- */
-#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
-#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
-#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
-
-/*
- * VS_DEBUGCLOCKENABLE
- */
-enum {
- TX_SYMBOL_CLK_REQ_FORCE = 5,
-};
-
-/*
- * VS_SAVEPOWERCONTROL
- */
-enum {
- RX_SYMBOL_CLK_GATE_EN = 0,
- SYS_CLK_GATE_EN = 2,
- TX_CLK_GATE_EN = 3,
-};
-
-struct ufs_mtk_host {
- struct ufs_hba *hba;
- struct phy *mphy;
- bool ref_clk_enabled;
-};
-
-#endif /* !_UFS_MEDIATEK_H */
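
The VS_SAVEPOWERCONTROL gate-enable bits declared above are toggled by ufs_mtk_cfg_unipro_cg() with a get/modify/set sequence over the DME attribute. A compact sketch of that read-modify-write, with dme_get()/dme_set() as hypothetical stand-ins for ufshcd_dme_get()/ufshcd_dme_set():

    /* Get/modify/set sketch for the UniPro clock-gating enable bits. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { RX_SYMBOL_CLK_GATE_EN = 0, SYS_CLK_GATE_EN = 2, TX_CLK_GATE_EN = 3 };

    static uint32_t vs_savepowercontrol;       /* simulated DME attribute */

    static void dme_get(uint32_t *val) { *val = vs_savepowercontrol; }
    static void dme_set(uint32_t val)  { vs_savepowercontrol = val; }

    static void cfg_unipro_cg(bool enable)
    {
        const uint32_t gates = (1u << RX_SYMBOL_CLK_GATE_EN) |
                               (1u << SYS_CLK_GATE_EN) |
                               (1u << TX_CLK_GATE_EN);
        uint32_t tmp;

        dme_get(&tmp);
        /* Set all three gate bits on enable, clear them on disable. */
        tmp = enable ? (tmp | gates) : (tmp & ~gates);
        dme_set(tmp);
    }

    int main(void)
    {
        cfg_unipro_cg(true);
        printf("after enable:  0x%x\n", (unsigned)vs_savepowercontrol); /* 0xd */
        cfg_unipro_cg(false);
        printf("after disable: 0x%x\n", (unsigned)vs_savepowercontrol); /* 0x0 */
        return 0;
    }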
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
deleted file mode 100644
index c69c29a1ceb9..000000000000
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ /dev/null
@@ -1,1725 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
- */
-
-#include <linux/acpi.h>
-#include <linux/time.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-#include <linux/gpio/consumer.h>
-#include <linux/reset-controller.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "unipro.h"
-#include "ufs-qcom.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
-#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
-
-enum {
- TSTBUS_UAWM,
- TSTBUS_UARM,
- TSTBUS_TXUC,
- TSTBUS_RXUC,
- TSTBUS_DFC,
- TSTBUS_TRLUT,
- TSTBUS_TMRLUT,
- TSTBUS_OCSC,
- TSTBUS_UTP_HCI,
- TSTBUS_COMBINED,
- TSTBUS_WRAPPER,
- TSTBUS_UNIPRO,
- TSTBUS_MAX,
-};
-
-static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
-static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles);
-
-static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
-{
- return container_of(rcd, struct ufs_qcom_host, rcdev);
-}
-
-static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- const char *prefix, void *priv)
-{
- ufshcd_dump_regs(hba, offset, len * 4, prefix);
-}
-
-static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
-{
- int err = 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
- __func__, err);
-
- return err;
-}
-
-static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out, bool optional)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (!IS_ERR(clk)) {
- *clk_out = clk;
- return 0;
- }
-
- err = PTR_ERR(clk);
-
- if (optional && err == -ENOENT) {
- *clk_out = NULL;
- return 0;
- }
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get %s err %d\n", name, err);
-
- return err;
-}
-
-static int ufs_qcom_host_clk_enable(struct device *dev,
- const char *name, struct clk *clk)
-{
- int err = 0;
-
- err = clk_prepare_enable(clk);
- if (err)
- dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
-
- return err;
-}
-
-static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
-{
- if (!host->is_lane_clks_enabled)
- return;
-
- clk_disable_unprepare(host->tx_l1_sync_clk);
- clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
- clk_disable_unprepare(host->rx_l0_sync_clk);
-
- host->is_lane_clks_enabled = false;
-}
-
-static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
-{
- int err = 0;
- struct device *dev = host->hba->dev;
-
- if (host->is_lane_clks_enabled)
- return 0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
- host->rx_l0_sync_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
- host->tx_l0_sync_clk);
- if (err)
- goto disable_rx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
-
- host->is_lane_clks_enabled = true;
- goto out;
-
-disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
-disable_tx_l0:
- clk_disable_unprepare(host->tx_l0_sync_clk);
-disable_rx_l0:
- clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
- return err;
-}
-
-static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
-{
- int err = 0;
- struct device *dev = host->hba->dev;
-
- if (has_acpi_companion(dev))
- return 0;
-
- err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
- &host->rx_l0_sync_clk, false);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
- &host->tx_l0_sync_clk, false);
- if (err)
- goto out;
-
- /* In case of single lane per direction, don't read lane1 clocks */
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk, false);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk, true);
- }
-out:
- return err;
-}
-
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
- u32 tx_lanes;
-
- return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-}
-
-static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
-{
- int err;
- u32 tx_fsm_val = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
-
- do {
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &tx_fsm_val);
- if (err || tx_fsm_val == TX_FSM_HIBERN8)
- break;
-
- /* sleep for max. 200us */
- usleep_range(100, 200);
- } while (time_before(jiffies, timeout));
-
- /*
- * We might have been scheduled out for a long time during
- * polling, so check the state again.
- */
- if (time_after(jiffies, timeout))
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &tx_fsm_val);
-
- if (err) {
- dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
- __func__, err);
- } else if (tx_fsm_val != TX_FSM_HIBERN8) {
- err = tx_fsm_val;
- dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
- __func__, err);
- }
-
- return err;
-}
-
-static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
-{
- ufshcd_rmwl(host->hba, QUNIPRO_SEL,
- ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
- REG_UFS_CFG1);
- /* make sure above configuration is applied before we return */
- mb();
-}
-
-/**
- * ufs_qcom_host_reset - reset host controller and PHY
- */
-static int ufs_qcom_host_reset(struct ufs_hba *hba)
-{
- int ret = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!host->core_reset) {
- dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
- }
-
- ret = reset_control_assert(host->core_reset);
- if (ret) {
- dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * The hardware requires a delay of at least 3-4 sleep clock
- * (32.768 kHz) cycles between assert and deassert, which comes
- * to ~122us (4/32768 s). To be on the safe side, add a 200us
- * delay.
- */
- usleep_range(200, 210);
-
- ret = reset_control_deassert(host->core_reset);
- if (ret)
- dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
- __func__, ret);
-
- usleep_range(1000, 1100);
-
-out:
- return ret;
-}
-
-static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);
-
- /* Reset UFS Host Controller and PHY */
- ret = ufs_qcom_host_reset(hba);
- if (ret)
- dev_warn(hba->dev, "%s: host reset returned %d\n",
- __func__, ret);
-
- if (is_rate_B)
- phy_set_mode(phy, PHY_MODE_UFS_HS_B);
-
- /* phy initialization - calibrate the phy */
- ret = phy_init(phy);
- if (ret) {
- dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
- __func__, ret);
- goto out;
- }
-
- /* power on phy - start serdes and phy's power and clocks */
- ret = phy_power_on(phy);
- if (ret) {
- dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
- __func__, ret);
- goto out_disable_phy;
- }
-
- ufs_qcom_select_unipro_mode(host);
-
- return 0;
-
-out_disable_phy:
- phy_exit(phy);
-out:
- return ret;
-}
-
-/*
- * The UTP controller has a number of internal clock gating cells (CGCs).
- * Internal hardware sub-modules within the UTP controller control the CGCs.
- * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
- * in a specific operation. UTP controller CGCs are disabled by default, and
- * this function enables them (after every UFS link startup) to save some
- * power leakage.
- */
-static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
-{
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
- REG_UFS_CFG2);
-
- /* Ensure that HW clock gating is enabled before next operations */
- mb();
-}
-
-static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- ufs_qcom_power_up_sequence(hba);
- /*
- * The PHY PLL output is the source of tx/rx lane symbol
- * clocks, hence, enable the lane clocks only after PHY
- * is initialized.
- */
- err = ufs_qcom_enable_lane_clks(host);
- break;
- case POST_CHANGE:
- /* check if UFS PHY moved from DISABLED to HIBERN8 */
- err = ufs_qcom_check_hibern8(hba);
- ufs_qcom_enable_hw_clk_gating(hba);
-
- break;
- default:
- dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
- err = -EINVAL;
- break;
- }
- return err;
-}
-
-/**
- * ufs_qcom_cfg_timers - configure core and TX symbol clock timers
- *
- * Returns zero for success and non-zero in case of a failure.
- */
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer)
-{
- int ret = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_clk_info *clki;
- u32 core_clk_period_in_ns;
- u32 tx_clk_cycles_per_us = 0;
- unsigned long core_clk_rate = 0;
- u32 core_clk_cycles_per_us = 0;
-
- static u32 pwm_fr_table[][2] = {
- {UFS_PWM_G1, 0x1},
- {UFS_PWM_G2, 0x1},
- {UFS_PWM_G3, 0x1},
- {UFS_PWM_G4, 0x1},
- };
-
- static u32 hs_fr_table_rA[][2] = {
- {UFS_HS_G1, 0x1F},
- {UFS_HS_G2, 0x3e},
- {UFS_HS_G3, 0x7D},
- };
-
- static u32 hs_fr_table_rB[][2] = {
- {UFS_HS_G1, 0x24},
- {UFS_HS_G2, 0x49},
- {UFS_HS_G3, 0x92},
- };
-
- /*
- * The Qunipro controller does not use the following registers:
- * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
- * UFS_REG_PA_LINK_STARTUP_TIMER.
- * But the UTP controller uses the SYS1CLK_1US_REG register for
- * Interrupt Aggregation logic.
- */
- if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
-
- if (gear == 0) {
- dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
- }
-
- list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
- }
-
- /* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
-
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
- if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
- ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
-
- if (ufs_qcom_cap_qunipro(host))
- goto out;
-
- core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
- core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
- core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
- switch (hs) {
- case FASTAUTO_MODE:
- case FAST_MODE:
- if (rate == PA_HS_MODE_A) {
- if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
- }
- tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
- } else if (rate == PA_HS_MODE_B) {
- if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
- }
- tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
- } else {
- dev_err(hba->dev, "%s: invalid rate = %d\n",
- __func__, rate);
- goto out_error;
- }
- break;
- case SLOWAUTO_MODE:
- case SLOW_MODE:
- if (gear > ARRAY_SIZE(pwm_fr_table)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(pwm_fr_table));
- goto out_error;
- }
- tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
- break;
- case UNCHANGED:
- default:
- dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
- }
-
- if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
- (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
- /* these two register fields shall be written at once */
- ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
- REG_UFS_TX_SYMBOL_CLK_NS_US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
-
- if (update_link_startup_timer) {
- ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_PA_LINK_STARTUP_TIMER);
- /*
- * make sure that this configuration is applied before
- * we return
- */
- mb();
- }
- goto out;
-
-out_error:
- ret = -EINVAL;
-out:
- return ret;
-}
-
-static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- switch (status) {
- case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- err = -EINVAL;
- goto out;
- }
-
- if (ufs_qcom_cap_qunipro(host))
- /*
- * set unipro core clock cycles to 150 & clear clock
- * divider
- */
- err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
- 150);
-
- /*
- * Some UFS devices (and maybe the host) have issues if LCC is
- * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
- * before link startup which will make sure that both host
- * and device TX LCC are disabled once link startup is
- * completed.
- */
- if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
- 0);
-
- break;
- case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
- break;
- default:
- break;
- }
-
-out:
- return err;
-}
-
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int ret = 0;
-
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
-
- return ret;
-}
-
-static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int err;
-
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
-
- hba->is_sys_suspended = false;
- return 0;
-}
-
-#ifdef CONFIG_MSM_BUS_SCALING
-static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
- const char *speed_mode)
-{
- struct device *dev = host->hba->dev;
- struct device_node *np = dev->of_node;
- int err;
- const char *key = "qcom,bus-vector-names";
-
- if (!speed_mode) {
- err = -EINVAL;
- goto out;
- }
-
- if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
- err = of_property_match_string(np, key, "MAX");
- else
- err = of_property_match_string(np, key, speed_mode);
-
-out:
- if (err < 0)
- dev_err(dev, "%s: Invalid %s mode %d\n",
- __func__, speed_mode, err);
- return err;
-}
-
-static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
-{
- int gear = max_t(u32, p->gear_rx, p->gear_tx);
- int lanes = max_t(u32, p->lane_rx, p->lane_tx);
- int pwr;
-
- /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
- if (!gear)
- gear = 1;
-
- if (!lanes)
- lanes = 1;
-
- if (!p->pwr_rx && !p->pwr_tx) {
- pwr = SLOWAUTO_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
- } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
- p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
- pwr = FAST_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
- p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
- } else {
- pwr = SLOW_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
- "PWM", gear, lanes);
- }
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
-{
- int err = 0;
-
- if (vote != host->bus_vote.curr_vote) {
- err = msm_bus_scale_client_update_request(
- host->bus_vote.client_handle, vote);
- if (err) {
- dev_err(host->hba->dev,
- "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
- __func__, host->bus_vote.client_handle,
- vote, err);
- goto out;
- }
-
- host->bus_vote.curr_vote = vote;
- }
-out:
- return err;
-}
-
-static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
-{
- int vote;
- int err = 0;
- char mode[BUS_VECTOR_NAME_LEN];
-
- ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
-
- vote = ufs_qcom_get_bus_vote(host, mode);
- if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
- else
- err = vote;
-
- if (err)
- dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
- else
- host->bus_vote.saved_vote = vote;
- return err;
-}
-
-static ssize_t
-show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- host->bus_vote.is_max_bw_needed);
-}
-
-static ssize_t
-store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- uint32_t value;
-
- if (!kstrtou32(buf, 0, &value)) {
- host->bus_vote.is_max_bw_needed = !!value;
- ufs_qcom_update_bus_bw_vote(host);
- }
-
- return count;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
- int err;
- struct msm_bus_scale_pdata *bus_pdata;
- struct device *dev = host->hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *np = dev->of_node;
-
- bus_pdata = msm_bus_cl_get_pdata(pdev);
- if (!bus_pdata) {
- dev_err(dev, "%s: failed to get bus vectors\n", __func__);
- err = -ENODATA;
- goto out;
- }
-
- err = of_property_count_strings(np, "qcom,bus-vector-names");
- if (err < 0 || err != bus_pdata->num_usecases) {
- dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
- __func__, err);
- goto out;
- }
-
- host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
- if (!host->bus_vote.client_handle) {
- dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
- __func__);
- err = -EFAULT;
- goto out;
- }
-
- /* cache the vote index for minimum and maximum bandwidth */
- host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
- host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
-
- host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
- host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
- sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
- host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
- host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
- err = device_create_file(dev, &host->bus_vote.max_bus_bw);
-out:
- return err;
-}
-#else /* CONFIG_MSM_BUS_SCALING */
-static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
-{
- return 0;
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
-{
- return 0;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
- return 0;
-}
-#endif /* CONFIG_MSM_BUS_SCALING */
-
-static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
-{
- if (host->dev_ref_clk_ctrl_mmio &&
- (enable ^ host->is_dev_ref_clk_enabled)) {
- u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
-
- if (enable)
- temp |= host->dev_ref_clk_en_mask;
- else
- temp &= ~host->dev_ref_clk_en_mask;
-
- /*
- * If we are here to disable this clock, it might be immediately
- * after entering hibern8, in which case we need to make sure
- * that the device ref_clk stays active for at least 1us after
- * the hibern8 enter.
- */
- if (!enable)
- udelay(1);
-
- writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
-
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
-
- /*
- * If we call hibern8 exit after this, we need to make sure that
- * device ref_clk is stable for at least 1us before the hibern8
- * exit command.
- */
- if (enable)
- udelay(1);
-
- host->is_dev_ref_clk_enabled = enable;
- }
-}
-
-static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params ufs_qcom_cap;
- int ret = 0;
-
- if (!dev_req_params) {
- pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- switch (status) {
- case PRE_CHANGE:
- ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
- ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
- ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
- ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
- ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
- ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
- ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
- ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
- ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
- ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
- ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- ufs_qcom_cap.desired_working_mode =
- UFS_QCOM_LIMIT_DESIRED_MODE;
-
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
-
- ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
- dev_max_params,
- dev_req_params);
- if (ret) {
- pr_err("%s: failed to determine capabilities\n",
- __func__);
- goto out;
- }
-
- /* enable the device ref clock before changing to HS mode */
- if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
- ufshcd_is_hs_mode(dev_req_params))
- ufs_qcom_dev_ref_clk_ctrl(host, true);
- break;
- case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
-
- /* cache the power mode parameters to use internally */
- memcpy(&host->dev_req_params,
- dev_req_params, sizeof(*dev_req_params));
- ufs_qcom_update_bus_bw_vote(host);
-
- /* disable the device ref clock if entered PWM mode */
- if (ufshcd_is_hs_mode(&hba->pwr_info) &&
- !ufshcd_is_hs_mode(dev_req_params))
- ufs_qcom_dev_ref_clk_ctrl(host, false);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-out:
- return ret;
-}
-
-static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
-{
- int err;
- u32 pa_vs_config_reg1;
-
- err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
- &pa_vs_config_reg1);
- if (err)
- goto out;
-
- /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
- (pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
-}
-
-static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
- err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
-
- return err;
-}
-
-static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (host->hw_ver.major == 0x1)
- return UFSHCI_VERSION_11;
- else
- return UFSHCI_VERSION_20;
-}
-
-/**
- * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
- * @hba: host controller instance
- *
- * The QCOM UFS host controller might have some non-standard behaviours
- * (quirks) beyond what the UFSHCI specification prescribes. Advertise all
- * such quirks to the standard UFS host controller driver so that it takes
- * them into account.
- */
-static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (host->hw_ver.major == 0x01) {
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
-
- if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
- hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
-
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
- }
-
- if (host->hw_ver.major == 0x2) {
- hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
-
- if (!ufs_qcom_cap_qunipro(host))
- /* Legacy UniPro mode still need following quirks */
- hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
- }
-}
-
-static void ufs_qcom_set_caps(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
- hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
-
- if (host->hw_ver.major >= 0x2) {
- host->caps = UFS_QCOM_CAP_QUNIPRO |
- UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
- }
-}
-
-/**
- * ufs_qcom_setup_clocks - enables/disable clocks
- * @hba: host controller instance
- * @on: If true, enable clocks else disable them.
- * @status: PRE_CHANGE or POST_CHANGE notify
- *
- * Returns 0 on success, non-zero on failure.
- */
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- int vote = 0;
-
- /*
- * In case ufs_qcom_init() is not yet done, simply ignore.
- * This ufs_qcom_setup_clocks() shall be called from
- * ufs_qcom_init() after init is done.
- */
- if (!host)
- return 0;
-
- if (on && (status == POST_CHANGE)) {
- /* enable the device ref clock for HS mode */
- if (ufshcd_is_hs_mode(&hba->pwr_info))
- ufs_qcom_dev_ref_clk_ctrl(host, true);
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
-
- } else if (!on && (status == PRE_CHANGE)) {
- if (!ufs_qcom_is_link_active(hba)) {
- /* disable device ref_clk */
- ufs_qcom_dev_ref_clk_ctrl(host, false);
- }
-
- vote = host->bus_vote.min_bw_vote;
- }
-
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
- return err;
-}
-
-static int
-ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
-{
- struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
-
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
- ufs_qcom_assert_reset(host->hba);
- /* provide 1ms delay to let the reset pulse propagate. */
- usleep_range(1000, 1100);
- return 0;
-}
-
-static int
-ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
-{
- struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
-
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
- ufs_qcom_deassert_reset(host->hba);
-
- /*
- * After reset deassertion, the phy needs all ref clocks,
- * voltage and current to settle down before starting serdes.
- */
- usleep_range(1000, 1100);
- return 0;
-}
-
-static const struct reset_control_ops ufs_qcom_reset_ops = {
- .assert = ufs_qcom_reset_assert,
- .deassert = ufs_qcom_reset_deassert,
-};
-
-#define ANDROID_BOOT_DEV_MAX 30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
- strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
- return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
-
-/**
- * ufs_qcom_init - bind phy with controller
- * @hba: host controller instance
- *
- * Binds PHY with controller and powers up PHY enabling clocks
- * and regulators.
- *
- * Returns -EPROBE_DEFER if binding fails, returns negative error
- * on phy power up failure and returns zero on success.
- */
-static int ufs_qcom_init(struct ufs_hba *hba)
-{
- int err;
- struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct ufs_qcom_host *host;
- struct resource *res;
-
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- err = -ENOMEM;
- dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
- }
-
- /* Make a two way bind between the qcom host and the hba */
- host->hba = hba;
- ufshcd_set_variant(hba, host);
-
- /* Setup the reset control of HCI */
- host->core_reset = devm_reset_control_get(hba->dev, "rst");
- if (IS_ERR(host->core_reset)) {
- err = PTR_ERR(host->core_reset);
- dev_warn(dev, "Failed to get reset control %d\n", err);
- host->core_reset = NULL;
- err = 0;
- }
-
- /* Fire up the reset controller. Failure here is non-fatal. */
- host->rcdev.of_node = dev->of_node;
- host->rcdev.ops = &ufs_qcom_reset_ops;
- host->rcdev.owner = dev->driver->owner;
- host->rcdev.nr_resets = 1;
- err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
- dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
-
- /*
- * Voting/devoting the device ref_clk source is time consuming;
- * hence, skip devoting it during aggressive clock gating. This
- * clock will still be gated off during runtime suspend.
- */
- host->generic_phy = devm_phy_get(dev, "ufsphy");
-
- if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
- /*
- * UFS driver might be probed before the phy driver does.
- * In that case we would like to return EPROBE_DEFER code.
- */
- err = -EPROBE_DEFER;
- dev_warn(dev, "%s: required phy hasn't probed yet. err = %d\n",
- __func__, err);
- goto out_variant_clear;
- } else if (IS_ERR(host->generic_phy)) {
- if (has_acpi_companion(dev)) {
- host->generic_phy = NULL;
- } else {
- err = PTR_ERR(host->generic_phy);
- dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out_variant_clear;
- }
- }
-
- host->device_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(host->device_reset)) {
- err = PTR_ERR(host->device_reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to acquire reset gpio: %d\n", err);
- goto out_variant_clear;
- }
-
- err = ufs_qcom_bus_register(host);
- if (err)
- goto out_variant_clear;
-
- ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
- &host->hw_ver.minor, &host->hw_ver.step);
-
- /*
- * For newer controllers, the device reference clock control bit
- * has moved inside the UFS controller register address space.
- */
- if (host->hw_ver.major >= 0x02) {
- host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
- host->dev_ref_clk_en_mask = BIT(26);
- } else {
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- host->dev_ref_clk_ctrl_mmio =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
- dev_warn(dev,
- "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
- __func__,
- PTR_ERR(host->dev_ref_clk_ctrl_mmio));
- host->dev_ref_clk_ctrl_mmio = NULL;
- }
- host->dev_ref_clk_en_mask = BIT(5);
- }
- }
-
- err = ufs_qcom_init_lane_clks(host);
- if (err)
- goto out_variant_clear;
-
- ufs_qcom_set_caps(hba);
- ufs_qcom_advertise_quirks(hba);
-
- ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
-
- if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
- ufs_qcom_hosts[hba->dev->id] = host;
-
- host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
- ufs_qcom_get_default_testbus_cfg(host);
- err = ufs_qcom_testbus_config(host);
- if (err) {
- dev_warn(dev, "%s: failed to configure the testbus %d\n",
- __func__, err);
- err = 0;
- }
-
- goto out;
-
-out_variant_clear:
- ufshcd_set_variant(hba, NULL);
-out:
- return err;
-}
-
-static void ufs_qcom_exit(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(host->generic_phy);
- phy_exit(host->generic_phy);
-}
-
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles)
-{
- int err;
- u32 core_clk_ctrl_reg;
-
- if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
- return -EINVAL;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
- if (err)
- goto out;
-
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
- core_clk_ctrl_reg |= clk_cycles;
-
- /* Clear CORE_CLK_DIV_EN */
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
-
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
-out:
- return err;
-}
-
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
-{
- /* nothing to do as of now */
- return 0;
-}
-
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- /* set unipro core clock cycles to 150 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
-}
-
-static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- u32 core_clk_ctrl_reg;
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
-
- /* make sure CORE_CLK_DIV_EN is cleared */
- if (!err &&
- (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
- }
-
- return err;
-}
-
-static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- /* set unipro core clock cycles to 75 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
-}
-
-static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
- bool scale_up, enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
- int err = 0;
-
- if (status == PRE_CHANGE) {
- if (scale_up)
- err = ufs_qcom_clk_scale_up_pre_change(hba);
- else
- err = ufs_qcom_clk_scale_down_pre_change(hba);
- } else {
- if (scale_up)
- err = ufs_qcom_clk_scale_up_post_change(hba);
- else
- err = ufs_qcom_clk_scale_down_post_change(hba);
-
- if (err || !dev_req_params)
- goto out;
-
- ufs_qcom_cfg_timers(hba,
- dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate,
- false);
- ufs_qcom_update_bus_bw_vote(host);
- }
-
-out:
- return err;
-}
-
-static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
- void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, const char *str, void *priv))
-{
- u32 reg;
- struct ufs_qcom_host *host;
-
- if (unlikely(!hba)) {
- pr_err("%s: hba is NULL\n", __func__);
- return;
- }
- if (unlikely(!print_fn)) {
- dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
- return;
- }
-
- host = ufshcd_get_variant(hba);
- if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
- return;
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
- print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
-
- reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UTP_DBG_RAMS_EN;
- ufshcd_writel(hba, reg, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
- print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
- print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
- print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
-
- /* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
- print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
- print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
- print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
- print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
- print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
-}
-
-static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
-{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
- UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- } else {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
- }
-}
-
-static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
-{
- /* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UNIPRO;
- host->testbus.select_minor = 37;
-}
-
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
-{
- if (host->testbus.select_major >= TSTBUS_MAX) {
- dev_err(host->hba->dev,
- "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
- return false;
- }
-
- return true;
-}
-
-int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
-{
- int reg;
- int offset;
- u32 mask = TEST_BUS_SUB_SEL_MASK;
-
- if (!host)
- return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
- switch (host->testbus.select_major) {
- case TSTBUS_UAWM:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 24;
- break;
- case TSTBUS_UARM:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 16;
- break;
- case TSTBUS_TXUC:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 8;
- break;
- case TSTBUS_RXUC:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 0;
- break;
- case TSTBUS_DFC:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 24;
- break;
- case TSTBUS_TRLUT:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 16;
- break;
- case TSTBUS_TMRLUT:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 8;
- break;
- case TSTBUS_OCSC:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 0;
- break;
- case TSTBUS_WRAPPER:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 16;
- break;
- case TSTBUS_COMBINED:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 8;
- break;
- case TSTBUS_UTP_HCI:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 0;
- break;
- case TSTBUS_UNIPRO:
- reg = UFS_UNIPRO_CFG;
- offset = 20;
- mask = 0xFFF;
- break;
- /*
- * No need for a default case, since
- * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
- * is legal.
- */
- }
- mask <<= offset;
-
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
- REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
- (u32)host->testbus.select_minor << offset,
- reg);
- ufs_qcom_enable_test_bus(host);
- /*
- * Make sure the test bus configuration is
- * committed before returning.
- */
- mb();
- ufshcd_release(host->hba);
- pm_runtime_put_sync(host->hba->dev);
-
- return 0;
-}
-
-static void ufs_qcom_testbus_read(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
-}
-
-static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- u32 *testbus = NULL;
- int i, nminor = 256, testbus_len = nminor * sizeof(u32);
-
- testbus = kmalloc(testbus_len, GFP_KERNEL);
- if (!testbus)
- return;
-
- host->testbus.select_major = TSTBUS_UNIPRO;
- for (i = 0; i < nminor; i++) {
- host->testbus.select_minor = i;
- ufs_qcom_testbus_config(host);
- testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
- }
- print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
- 16, 4, testbus, testbus_len, false);
- kfree(testbus);
-}
-
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
- "HCI Vendor Specific Registers ");
-
- /* sleep a bit intermittently as we are dumping too much data */
- ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
- ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
- ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
-}
-
-/**
- * ufs_qcom_device_reset() - toggle the (optional) device reset line
- * @hba: per-adapter instance
- *
- * Toggles the (optional) reset line to reset the attached device.
- */
-static void ufs_qcom_device_reset(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- /* reset gpio is optional */
- if (!host->device_reset)
- return;
-
- /*
- * The UFS device shall detect reset pulses of 1us; sleep for 10us
- * to be on the safe side.
- */
- gpiod_set_value_cansleep(host->device_reset, 1);
- usleep_range(10, 15);
-
- gpiod_set_value_cansleep(host->device_reset, 0);
- usleep_range(10, 15);
-}
-
-/**
- * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
- *
- * The variant operations configure the necessary controller and PHY
- * handshake during initialization.
- */
-static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
- .name = "qcom",
- .init = ufs_qcom_init,
- .exit = ufs_qcom_exit,
- .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
- .clk_scale_notify = ufs_qcom_clk_scale_notify,
- .setup_clocks = ufs_qcom_setup_clocks,
- .hce_enable_notify = ufs_qcom_hce_enable_notify,
- .link_startup_notify = ufs_qcom_link_startup_notify,
- .pwr_change_notify = ufs_qcom_pwr_change_notify,
- .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
- .suspend = ufs_qcom_suspend,
- .resume = ufs_qcom_resume,
- .dbg_register_dump = ufs_qcom_dump_dbg_regs,
- .device_reset = ufs_qcom_device_reset,
-};
-
-/**
- * ufs_qcom_probe - probe routine of the driver
- * @pdev: pointer to Platform device handle
- *
- * Return zero for success and non-zero for failure
- */
-static int ufs_qcom_probe(struct platform_device *pdev)
-{
- int err;
- struct device *dev = &pdev->dev;
-
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-/**
- * ufs_qcom_remove - set driver_data of the device to NULL
- * @pdev: pointer to platform device handle
- *
- * Always returns 0
- */
-static int ufs_qcom_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct of_device_id ufs_qcom_of_match[] = {
- { .compatible = "qcom,ufshc"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ufs_qcom_acpi_match[] = {
- { "QCOM24A5" },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
-#endif
-
-static const struct dev_pm_ops ufs_qcom_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
-};
-
-static struct platform_driver ufs_qcom_pltform = {
- .probe = ufs_qcom_probe,
- .remove = ufs_qcom_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-qcom",
- .pm = &ufs_qcom_pm_ops,
- .of_match_table = of_match_ptr(ufs_qcom_of_match),
- .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
- },
-};
-module_platform_driver(ufs_qcom_pltform);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
deleted file mode 100644
index 2d95e7cc7187..000000000000
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ /dev/null
@@ -1,267 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_H_
-#define UFS_QCOM_H_
-
-#include <linux/reset-controller.h>
-#include <linux/reset.h>
-
-#define MAX_UFS_QCOM_HOSTS 1
-#define MAX_U32 (~(u32)0)
-#define MPHY_TX_FSM_STATE 0x41
-#define TX_FSM_HIBERN8 0x1
-#define HBRN8_POLL_TOUT_MS 100
-#define DEFAULT_CLK_RATE_HZ 1000000
-#define BUS_VECTOR_NAME_LEN 32
-
-#define UFS_HW_VER_MAJOR_SHFT (28)
-#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
-#define UFS_HW_VER_MINOR_SHFT (16)
-#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
-#define UFS_HW_VER_STEP_SHFT (0)
-#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
-
-#define UFS_QCOM_LIMIT_NUM_LANES_RX 2
-#define UFS_QCOM_LIMIT_NUM_LANES_TX 2
-#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE
-#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_QCOM_LIMIT_DESIRED_MODE FAST
-
-/* QCOM UFS host controller vendor specific registers */
-enum {
- REG_UFS_SYS1CLK_1US = 0xC0,
- REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
- REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
- REG_UFS_PA_ERR_CODE = 0xCC,
- REG_UFS_RETRY_TIMER_REG = 0xD0,
- REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
- REG_UFS_CFG1 = 0xDC,
- REG_UFS_CFG2 = 0xE0,
- REG_UFS_HW_VERSION = 0xE4,
-
- UFS_TEST_BUS = 0xE8,
- UFS_TEST_BUS_CTRL_0 = 0xEC,
- UFS_TEST_BUS_CTRL_1 = 0xF0,
- UFS_TEST_BUS_CTRL_2 = 0xF4,
- UFS_UNIPRO_CFG = 0xF8,
-
- /*
- * QCOM UFS host controller vendor specific registers
- * added in HW Version 3.0.0
- */
- UFS_AH8_CFG = 0xFC,
-};
-
-/* QCOM UFS host controller vendor specific debug registers */
-enum {
- UFS_DBG_RD_REG_UAWM = 0x100,
- UFS_DBG_RD_REG_UARM = 0x200,
- UFS_DBG_RD_REG_TXUC = 0x300,
- UFS_DBG_RD_REG_RXUC = 0x400,
- UFS_DBG_RD_REG_DFC = 0x500,
- UFS_DBG_RD_REG_TRLUT = 0x600,
- UFS_DBG_RD_REG_TMRLUT = 0x700,
- UFS_UFS_DBG_RD_REG_OCSC = 0x800,
-
- UFS_UFS_DBG_RD_DESC_RAM = 0x1500,
- UFS_UFS_DBG_RD_PRDT_RAM = 0x1700,
- UFS_UFS_DBG_RD_RESP_RAM = 0x1800,
- UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
-};
-
-#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
-#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
-
-/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL 0x1
-#define UTP_DBG_RAMS_EN 0x20000
-#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
-#define UFS_REG_TEST_BUS_EN BIT(30)
-
-/* bit definitions for REG_UFS_CFG2 register */
-#define UAWM_HW_CGC_EN (1 << 0)
-#define UARM_HW_CGC_EN (1 << 1)
-#define TXUC_HW_CGC_EN (1 << 2)
-#define RXUC_HW_CGC_EN (1 << 3)
-#define DFC_HW_CGC_EN (1 << 4)
-#define TRLUT_HW_CGC_EN (1 << 5)
-#define TMRLUT_HW_CGC_EN (1 << 6)
-#define OCSC_HW_CGC_EN (1 << 7)
-
-/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
-#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
-
-#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
- TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
- DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
- TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
-
-/* bit offset */
-enum {
- OFFSET_UFS_PHY_SOFT_RESET = 1,
- OFFSET_CLK_NS_REG = 10,
-};
-
-/* bit masks */
-enum {
- MASK_UFS_PHY_SOFT_RESET = 0x2,
- MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
- MASK_CLK_NS_REG = 0xFFFC00,
-};
-
-/* QCOM UFS debug print bit mask */
-#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
-#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
-#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2)
-
-#define UFS_QCOM_DBG_PRINT_ALL \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
- UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
-
-/* QUniPro Vendor specific attributes */
-#define PA_VS_CONFIG_REG1 0x9000
-#define DME_VS_CORE_CLK_CTRL 0xD002
-/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
-#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
-
-static inline void
-ufs_qcom_get_controller_revision(struct ufs_hba *hba,
- u8 *major, u16 *minor, u16 *step)
-{
- u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
-
- *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
- *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
- *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
-}
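
A minimal usage sketch of the helper above:

    /* Sketch: decode and log the controller revision. */
    u8 major;
    u16 minor, step;

    ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
    dev_info(hba->dev, "UFS HC version %u.%u.%u\n", major, minor, step);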
-
-static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
-{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
-
- /*
-  * Make sure the assertion of the UFS PHY reset is written to the
-  * register before returning.
-  */
- mb();
-}
-
-static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
-{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
-
- /*
-  * Make sure the de-assertion of the UFS PHY reset is written to
-  * the register before returning.
-  */
- mb();
-}
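
Together the two helpers implement a reset pulse; a short sketch (the 10us hold time here is an assumption borrowed from the device reset path, not necessarily what the driver uses):

    /* Sketch: a full UFS PHY reset pulse built from the helpers above. */
    ufs_qcom_assert_reset(hba);
    usleep_range(10, 15);   /* assumed hold time */
    ufs_qcom_deassert_reset(hba);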
-
-struct ufs_qcom_bus_vote {
- uint32_t client_handle;
- uint32_t curr_vote;
- int min_bw_vote;
- int max_bw_vote;
- int saved_vote;
- bool is_max_bw_needed;
- struct device_attribute max_bus_bw;
-};
-
-/* Host controller hardware version: major.minor.step */
-struct ufs_hw_version {
- u16 step;
- u16 minor;
- u8 major;
-};
-
-struct ufs_qcom_testbus {
- u8 select_major;
- u8 select_minor;
-};
-
-struct gpio_desc;
-
-struct ufs_qcom_host {
- /*
-  * Set this capability if the host controller supports QUniPro mode
-  * and the driver wants the host controller to operate in it.
-  * Note: by default this capability is kept enabled if the host
-  * controller supports QUniPro mode.
-  */
- #define UFS_QCOM_CAP_QUNIPRO 0x1
-
- /*
- * Set this capability if host controller can retain the secure
- * configuration even after UFS controller core power collapse.
- */
- #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
- u32 caps;
-
- struct phy *generic_phy;
- struct ufs_hba *hba;
- struct ufs_qcom_bus_vote bus_vote;
- struct ufs_pa_layer_attr dev_req_params;
- struct clk *rx_l0_sync_clk;
- struct clk *tx_l0_sync_clk;
- struct clk *rx_l1_sync_clk;
- struct clk *tx_l1_sync_clk;
- bool is_lane_clks_enabled;
-
- void __iomem *dev_ref_clk_ctrl_mmio;
- bool is_dev_ref_clk_enabled;
- struct ufs_hw_version hw_ver;
-
- u32 dev_ref_clk_en_mask;
-
- /* Bitmask for enabling debug prints */
- u32 dbg_print_en;
- struct ufs_qcom_testbus testbus;
-
- /* Reset control of HCI */
- struct reset_control *core_reset;
- struct reset_controller_dev rcdev;
-
- struct gpio_desc *device_reset;
-};
-
-static inline u32
-ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
-{
- if (host->hw_ver.major <= 0x02)
- return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
-
- return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
-}
-
-#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
-#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
-#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
-
-int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
-
-static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
-{
- return host->caps & UFS_QCOM_CAP_QUNIPRO;
-}
-
-#endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
deleted file mode 100644
index dbdf8b01abed..000000000000
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ /dev/null
@@ -1,800 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Western Digital Corporation
-
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/bitfield.h>
-#include <asm/unaligned.h>
-
-#include "ufs.h"
-#include "ufs-sysfs.h"
-
-static const char *ufschd_uic_link_state_to_string(
- enum uic_link_state state)
-{
- switch (state) {
- case UIC_LINK_OFF_STATE: return "OFF";
- case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
- case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
- default: return "UNKNOWN";
- }
-}
-
-static const char *ufschd_ufs_dev_pwr_mode_to_string(
- enum ufs_dev_pwr_mode state)
-{
- switch (state) {
- case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
- case UFS_SLEEP_PWR_MODE: return "SLEEP";
- case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
- default: return "UNKNOWN";
- }
-}
-
-static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count,
- bool rpm)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags, value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- if (value >= UFS_PM_LVL_MAX)
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (rpm)
- hba->rpm_lvl = value;
- else
- hba->spm_lvl = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static ssize_t rpm_lvl_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", hba->rpm_lvl);
-}
-
-static ssize_t rpm_lvl_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
-}
-
-static ssize_t rpm_target_dev_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
-}
-
-static ssize_t rpm_target_link_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", ufschd_uic_link_state_to_string(
- ufs_pm_lvl_states[hba->rpm_lvl].link_state));
-}
-
-static ssize_t spm_lvl_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", hba->spm_lvl);
-}
-
-static ssize_t spm_lvl_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
-}
-
-static ssize_t spm_target_dev_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[hba->spm_lvl].dev_state));
-}
-
-static ssize_t spm_target_link_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", ufschd_uic_link_state_to_string(
- ufs_pm_lvl_states[hba->spm_lvl].link_state));
-}
-
-/* Convert Auto-Hibernate Idle Timer register value to microseconds */
-static int ufshcd_ahit_to_us(u32 ahit)
-{
- int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
- int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);
-
- for (; scale > 0; --scale)
- timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
- return timer;
-}
-
-/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufshcd_us_to_ahit(unsigned int timer)
-{
- unsigned int scale;
-
- for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
- timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
- return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
-}
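
Since the encoder divides the timer value down until it fits the timer field, a round trip through the two helpers can only round down; a small sketch:

    /* Sketch: encode 150 ms (150000 us) and decode it back; the
     * truncating division in ufshcd_us_to_ahit() means the decoded
     * value is <= the requested one.
     */
    u32 ahit = ufshcd_us_to_ahit(150000);

    WARN_ON(ufshcd_ahit_to_us(ahit) > 150000);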
-
-static ssize_t auto_hibern8_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return -EOPNOTSUPP;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(hba->ahit));
-}
-
-static ssize_t auto_hibern8_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned int timer;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &timer))
- return -EINVAL;
-
- if (timer > UFSHCI_AHIBERN8_MAX)
- return -EINVAL;
-
- ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
-
- return count;
-}
-
-static DEVICE_ATTR_RW(rpm_lvl);
-static DEVICE_ATTR_RO(rpm_target_dev_state);
-static DEVICE_ATTR_RO(rpm_target_link_state);
-static DEVICE_ATTR_RW(spm_lvl);
-static DEVICE_ATTR_RO(spm_target_dev_state);
-static DEVICE_ATTR_RO(spm_target_link_state);
-static DEVICE_ATTR_RW(auto_hibern8);
-
-static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
- &dev_attr_rpm_lvl.attr,
- &dev_attr_rpm_target_dev_state.attr,
- &dev_attr_rpm_target_link_state.attr,
- &dev_attr_spm_lvl.attr,
- &dev_attr_spm_target_dev_state.attr,
- &dev_attr_spm_target_link_state.attr,
- &dev_attr_auto_hibern8.attr,
- NULL
-};
-
-static const struct attribute_group ufs_sysfs_default_group = {
- .attrs = ufs_sysfs_ufshcd_attrs,
-};
-
-static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- u8 desc_index,
- u8 param_offset,
- u8 *sysfs_buf,
- u8 param_size)
-{
- u8 desc_buf[8] = {0};
- int ret;
-
- if (param_size > 8)
- return -EINVAL;
-
- ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
- param_offset, desc_buf, param_size);
- if (ret)
- return -EINVAL;
- switch (param_size) {
- case 1:
- ret = sprintf(sysfs_buf, "0x%02X\n", *desc_buf);
- break;
- case 2:
- ret = sprintf(sysfs_buf, "0x%04X\n",
- get_unaligned_be16(desc_buf));
- break;
- case 4:
- ret = sprintf(sysfs_buf, "0x%08X\n",
- get_unaligned_be32(desc_buf));
- break;
- case 8:
- ret = sprintf(sysfs_buf, "0x%016llX\n",
- get_unaligned_be64(desc_buf));
- break;
- }
-
- return ret;
-}
-
-#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
- 0, _duname##_DESC_PARAM##_puname, buf, _size); \
-} \
-static DEVICE_ATTR_RO(_name)
-
-#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, DEVICE, _size)
-
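
To make the macro easier to follow, this is roughly what one invocation below expands to (a hand-expanded sketch, not code to compile alongside the macros):

    /* Sketch: UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1)
     * expands approximately to:
     */
    static ssize_t device_type_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
    {
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
                                         DEVICE_DESC_PARAM_DEVICE_TYPE,
                                         buf, 1);
    }
    static DEVICE_ATTR_RO(device_type);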
-UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
-UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
-UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
-UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
-UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
-UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
-UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
-UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
-UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
-UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
-UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
-UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
-UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
-UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
-UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
-UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
-UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
-UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
-UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
-UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
-UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
-UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
-UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
-UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
-UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
-UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
-
-static struct attribute *ufs_sysfs_device_descriptor[] = {
- &dev_attr_device_type.attr,
- &dev_attr_device_class.attr,
- &dev_attr_device_sub_class.attr,
- &dev_attr_protocol.attr,
- &dev_attr_number_of_luns.attr,
- &dev_attr_number_of_wluns.attr,
- &dev_attr_boot_enable.attr,
- &dev_attr_descriptor_access_enable.attr,
- &dev_attr_initial_power_mode.attr,
- &dev_attr_high_priority_lun.attr,
- &dev_attr_secure_removal_type.attr,
- &dev_attr_support_security_lun.attr,
- &dev_attr_bkops_termination_latency.attr,
- &dev_attr_initial_active_icc_level.attr,
- &dev_attr_specification_version.attr,
- &dev_attr_manufacturing_date.attr,
- &dev_attr_manufacturer_id.attr,
- &dev_attr_rtt_capability.attr,
- &dev_attr_rtc_update.attr,
- &dev_attr_ufs_features.attr,
- &dev_attr_ffu_timeout.attr,
- &dev_attr_queue_depth.attr,
- &dev_attr_device_version.attr,
- &dev_attr_number_of_secure_wpa.attr,
- &dev_attr_psa_max_data_size.attr,
- &dev_attr_psa_state_timeout.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_device_descriptor_group = {
- .name = "device_descriptor",
- .attrs = ufs_sysfs_device_descriptor,
-};
-
-#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)
-
-UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
-UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);
-
-static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
- &dev_attr_unipro_version.attr,
- &dev_attr_mphy_version.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
- .name = "interconnect_descriptor",
- .attrs = ufs_sysfs_interconnect_descriptor,
-};
-
-#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)
-
-UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
-UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
-UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
-UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
-UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
-UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
-UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
-UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
-UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
- _SCM_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
- _SCM_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
- _NPM_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
- _NPM_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
- _ENM1_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
- _ENM1_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
- _ENM2_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
- _ENM2_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
- _ENM3_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
- _ENM3_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
- _ENM4_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
- _ENM4_CAP_ADJ_FCTR, 2);
-
-static struct attribute *ufs_sysfs_geometry_descriptor[] = {
- &dev_attr_raw_device_capacity.attr,
- &dev_attr_max_number_of_luns.attr,
- &dev_attr_segment_size.attr,
- &dev_attr_allocation_unit_size.attr,
- &dev_attr_min_addressable_block_size.attr,
- &dev_attr_optimal_read_block_size.attr,
- &dev_attr_optimal_write_block_size.attr,
- &dev_attr_max_in_buffer_size.attr,
- &dev_attr_max_out_buffer_size.attr,
- &dev_attr_rpmb_rw_size.attr,
- &dev_attr_dyn_capacity_resource_policy.attr,
- &dev_attr_data_ordering.attr,
- &dev_attr_max_number_of_contexts.attr,
- &dev_attr_sys_data_tag_unit_size.attr,
- &dev_attr_sys_data_tag_resource_size.attr,
- &dev_attr_secure_removal_types.attr,
- &dev_attr_memory_types.attr,
- &dev_attr_sys_code_memory_max_alloc_units.attr,
- &dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
- &dev_attr_non_persist_memory_max_alloc_units.attr,
- &dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh1_memory_max_alloc_units.attr,
- &dev_attr_enh1_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh2_memory_max_alloc_units.attr,
- &dev_attr_enh2_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh3_memory_max_alloc_units.attr,
- &dev_attr_enh3_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh4_memory_max_alloc_units.attr,
- &dev_attr_enh4_memory_capacity_adjustment_factor.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
- .name = "geometry_descriptor",
- .attrs = ufs_sysfs_geometry_descriptor,
-};
-
-#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
-
-UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
-UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
-UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
-
-static struct attribute *ufs_sysfs_health_descriptor[] = {
- &dev_attr_eol_info.attr,
- &dev_attr_life_time_estimation_a.attr,
- &dev_attr_life_time_estimation_b.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_health_descriptor_group = {
- .name = "health_descriptor",
- .attrs = ufs_sysfs_health_descriptor,
-};
-
-#define UFS_POWER_DESC_PARAM(_name, _uname, _index) \
-static ssize_t _name##_index##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \
- PWR_DESC##_uname##_0 + _index * 2, buf, 2); \
-} \
-static DEVICE_ATTR_RO(_name##_index)
-
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);
-
-static struct attribute *ufs_sysfs_power_descriptor[] = {
- &dev_attr_active_icc_levels_vcc0.attr,
- &dev_attr_active_icc_levels_vcc1.attr,
- &dev_attr_active_icc_levels_vcc2.attr,
- &dev_attr_active_icc_levels_vcc3.attr,
- &dev_attr_active_icc_levels_vcc4.attr,
- &dev_attr_active_icc_levels_vcc5.attr,
- &dev_attr_active_icc_levels_vcc6.attr,
- &dev_attr_active_icc_levels_vcc7.attr,
- &dev_attr_active_icc_levels_vcc8.attr,
- &dev_attr_active_icc_levels_vcc9.attr,
- &dev_attr_active_icc_levels_vcc10.attr,
- &dev_attr_active_icc_levels_vcc11.attr,
- &dev_attr_active_icc_levels_vcc12.attr,
- &dev_attr_active_icc_levels_vcc13.attr,
- &dev_attr_active_icc_levels_vcc14.attr,
- &dev_attr_active_icc_levels_vcc15.attr,
- &dev_attr_active_icc_levels_vccq0.attr,
- &dev_attr_active_icc_levels_vccq1.attr,
- &dev_attr_active_icc_levels_vccq2.attr,
- &dev_attr_active_icc_levels_vccq3.attr,
- &dev_attr_active_icc_levels_vccq4.attr,
- &dev_attr_active_icc_levels_vccq5.attr,
- &dev_attr_active_icc_levels_vccq6.attr,
- &dev_attr_active_icc_levels_vccq7.attr,
- &dev_attr_active_icc_levels_vccq8.attr,
- &dev_attr_active_icc_levels_vccq9.attr,
- &dev_attr_active_icc_levels_vccq10.attr,
- &dev_attr_active_icc_levels_vccq11.attr,
- &dev_attr_active_icc_levels_vccq12.attr,
- &dev_attr_active_icc_levels_vccq13.attr,
- &dev_attr_active_icc_levels_vccq14.attr,
- &dev_attr_active_icc_levels_vccq15.attr,
- &dev_attr_active_icc_levels_vccq20.attr,
- &dev_attr_active_icc_levels_vccq21.attr,
- &dev_attr_active_icc_levels_vccq22.attr,
- &dev_attr_active_icc_levels_vccq23.attr,
- &dev_attr_active_icc_levels_vccq24.attr,
- &dev_attr_active_icc_levels_vccq25.attr,
- &dev_attr_active_icc_levels_vccq26.attr,
- &dev_attr_active_icc_levels_vccq27.attr,
- &dev_attr_active_icc_levels_vccq28.attr,
- &dev_attr_active_icc_levels_vccq29.attr,
- &dev_attr_active_icc_levels_vccq210.attr,
- &dev_attr_active_icc_levels_vccq211.attr,
- &dev_attr_active_icc_levels_vccq212.attr,
- &dev_attr_active_icc_levels_vccq213.attr,
- &dev_attr_active_icc_levels_vccq214.attr,
- &dev_attr_active_icc_levels_vccq215.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_power_descriptor_group = {
- .name = "power_descriptor",
- .attrs = ufs_sysfs_power_descriptor,
-};
-
-#define UFS_STRING_DESCRIPTOR(_name, _pname) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- u8 index; \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- int ret; \
- int desc_len = QUERY_DESC_MAX_SIZE; \
- u8 *desc_buf; \
- \
- desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
- if (!desc_buf) \
- return -ENOMEM; \
- ret = ufshcd_query_descriptor_retry(hba, \
- UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
- 0, 0, desc_buf, &desc_len); \
- if (ret) { \
- ret = -EINVAL; \
- goto out; \
- } \
- index = desc_buf[DEVICE_DESC_PARAM##_pname]; \
- kfree(desc_buf); \
- desc_buf = NULL; \
- ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
- SD_ASCII_STD); \
- if (ret < 0) \
- goto out; \
- ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
-out: \
- kfree(desc_buf); \
- return ret; \
-} \
-static DEVICE_ATTR_RO(_name)
-
-UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
-UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
-UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
-UFS_STRING_DESCRIPTOR(serial_number, _SN);
-UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);
-
-static struct attribute *ufs_sysfs_string_descriptors[] = {
- &dev_attr_manufacturer_name.attr,
- &dev_attr_product_name.attr,
- &dev_attr_oem_id.attr,
- &dev_attr_serial_number.attr,
- &dev_attr_product_revision.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_string_descriptors_group = {
- .name = "string_descriptors",
- .attrs = ufs_sysfs_string_descriptors,
-};
-
-#define UFS_FLAG(_name, _uname) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- bool flag; \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- if (ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag)) \
- return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
-} \
-static DEVICE_ATTR_RO(_name)
-
-UFS_FLAG(device_init, _FDEVICEINIT);
-UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
-UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
-UFS_FLAG(bkops_enable, _BKOPS_EN);
-UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
-UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
-UFS_FLAG(busy_rtc, _BUSY_RTC);
-UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
-
-static struct attribute *ufs_sysfs_device_flags[] = {
- &dev_attr_device_init.attr,
- &dev_attr_permanent_wpe.attr,
- &dev_attr_power_on_wpe.attr,
- &dev_attr_bkops_enable.attr,
- &dev_attr_life_span_mode_enable.attr,
- &dev_attr_phy_resource_removal.attr,
- &dev_attr_busy_rtc.attr,
- &dev_attr_disable_fw_update.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_flags_group = {
- .name = "flags",
- .attrs = ufs_sysfs_device_flags,
-};
-
-#define UFS_ATTRIBUTE(_name, _uname) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- u32 value; \
- if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value)) \
- return -EINVAL; \
- return sprintf(buf, "0x%08X\n", value); \
-} \
-static DEVICE_ATTR_RO(_name)
-
-UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
-UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
-UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
-UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
-UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
-UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
-UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
-UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
-UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
-UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
-UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
-UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
-UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
-UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
-UFS_ATTRIBUTE(psa_state, _PSA_STATE);
-UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
-
-static struct attribute *ufs_sysfs_attributes[] = {
- &dev_attr_boot_lun_enabled.attr,
- &dev_attr_current_power_mode.attr,
- &dev_attr_active_icc_level.attr,
- &dev_attr_ooo_data_enabled.attr,
- &dev_attr_bkops_status.attr,
- &dev_attr_purge_status.attr,
- &dev_attr_max_data_in_size.attr,
- &dev_attr_max_data_out_size.attr,
- &dev_attr_reference_clock_frequency.attr,
- &dev_attr_configuration_descriptor_lock.attr,
- &dev_attr_max_number_of_rtt.attr,
- &dev_attr_exception_event_control.attr,
- &dev_attr_exception_event_status.attr,
- &dev_attr_ffu_status.attr,
- &dev_attr_psa_state.attr,
- &dev_attr_psa_data_size.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_attributes_group = {
- .name = "attributes",
- .attrs = ufs_sysfs_attributes,
-};
-
-static const struct attribute_group *ufs_sysfs_groups[] = {
- &ufs_sysfs_default_group,
- &ufs_sysfs_device_descriptor_group,
- &ufs_sysfs_interconnect_descriptor_group,
- &ufs_sysfs_geometry_descriptor_group,
- &ufs_sysfs_health_descriptor_group,
- &ufs_sysfs_power_descriptor_group,
- &ufs_sysfs_string_descriptors_group,
- &ufs_sysfs_flags_group,
- &ufs_sysfs_attributes_group,
- NULL,
-};
-
-#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \
-static ssize_t _pname##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufs_hba *hba = shost_priv(sdev->host); \
- u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
- return -EINVAL; \
- return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
- lun, _duname##_DESC_PARAM##_puname, buf, _size); \
-} \
-static DEVICE_ATTR_RO(_pname)
-
-#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
- UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
-
-UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
-UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
-UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
-UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
-UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
-UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
-UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
-UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
-UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
-UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
-UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
-UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
-UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
-
-static struct attribute *ufs_sysfs_unit_descriptor[] = {
- &dev_attr_boot_lun_id.attr,
- &dev_attr_lun_write_protect.attr,
- &dev_attr_lun_queue_depth.attr,
- &dev_attr_psa_sensitive.attr,
- &dev_attr_lun_memory_type.attr,
- &dev_attr_data_reliability.attr,
- &dev_attr_logical_block_size.attr,
- &dev_attr_logical_block_count.attr,
- &dev_attr_erase_block_size.attr,
- &dev_attr_provisioning_type.attr,
- &dev_attr_physical_memory_resourse_count.attr,
- &dev_attr_context_capabilities.attr,
- &dev_attr_large_unit_granularity.attr,
- NULL,
-};
-
-const struct attribute_group ufs_sysfs_unit_descriptor_group = {
- .name = "unit_descriptor",
- .attrs = ufs_sysfs_unit_descriptor,
-};
-
-static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u32 value;
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba = shost_priv(sdev->host);
- u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
-
- if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value))
- return -EINVAL;
- return sprintf(buf, "0x%08X\n", value);
-}
-static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
-
-static struct attribute *ufs_sysfs_lun_attributes[] = {
- &dev_attr_dyn_cap_needed_attribute.attr,
- NULL,
-};
-
-const struct attribute_group ufs_sysfs_lun_attributes_group = {
- .attrs = ufs_sysfs_lun_attributes,
-};
-
-void ufs_sysfs_add_nodes(struct device *dev)
-{
- int ret;
-
- ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
- if (ret)
- dev_err(dev,
- "%s: sysfs groups creation failed (err = %d)\n",
- __func__, ret);
-}
-
-void ufs_sysfs_remove_nodes(struct device *dev)
-{
- sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
-}
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
deleted file mode 100644
index 0f4e750a6748..000000000000
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 Western Digital Corporation
- */
-
-#ifndef __UFS_SYSFS_H__
-#define __UFS_SYSFS_H__
-
-#include <linux/sysfs.h>
-
-#include "ufshcd.h"
-
-void ufs_sysfs_add_nodes(struct device *dev);
-void ufs_sysfs_remove_nodes(struct device *dev);
-
-extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
-extern const struct attribute_group ufs_sysfs_lun_attributes_group;
-#endif
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
deleted file mode 100644
index cfe380348bf0..000000000000
--- a/drivers/scsi/ufs/ufs.h
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- */
-
-#ifndef _UFS_H
-#define _UFS_H
-
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/scsi/scsi_bsg_ufs.h>
-
-#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
-#define QUERY_DESC_MAX_SIZE 255
-#define QUERY_DESC_MIN_SIZE 2
-#define QUERY_DESC_HDR_SIZE 2
-#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
- (sizeof(struct utp_upiu_header)))
-#define UFS_SENSE_SIZE 18
-
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
-/*
- * A UFS device may have standard LUs, whose LUN ids range from 0x00 to
- * 0x7F. Standard LUs use the "Peripheral Device Addressing Format".
- * A UFS device may also have Well Known LUs (also referred to as W-LUs),
- * whose ids again range from 0x00 to 0x7F. For W-LUs the device only
- * uses the "Extended Addressing Format", which places the W-LUNs at
- * 0xc100 (SCSI_W_LUN_BASE) and above.
- * The maximum LUN number reported by a UFS device is therefore 0xC17F.
- */
-#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
-#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
-#define UFS_UPIU_WLUN_ID (1 << 7)
-
-/* Well known logical unit id in LUN field of UPIU */
-enum {
- UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
- UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
- UFS_UPIU_BOOT_WLUN = 0xB0,
- UFS_UPIU_RPMB_WLUN = 0xC4,
-};
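
As a worked example of the addressing scheme described above (the helper below is an illustrative sketch, not necessarily this header's API; SCSI_W_LUN_BASE comes from the SCSI headers):

    /* Sketch: map a UPIU W-LUN id into the SCSI W-LUN range;
     * e.g. UFS_UPIU_UFS_DEVICE_WLUN (0xD0) maps to 0xC150.
     */
    static inline u16 upiu_wlun_to_scsi_wlun_sketch(u8 upiu_wlun_id)
    {
        return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
    }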
-
-/*
- * UFS Protocol Information Unit related definitions
- */
-
-/* Task management functions */
-enum {
- UFS_ABORT_TASK = 0x01,
- UFS_ABORT_TASK_SET = 0x02,
- UFS_CLEAR_TASK_SET = 0x04,
- UFS_LOGICAL_RESET = 0x08,
- UFS_QUERY_TASK = 0x80,
- UFS_QUERY_TASK_SET = 0x81,
-};
-
-/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
- UPIU_TRANSACTION_NOP_OUT = 0x00,
- UPIU_TRANSACTION_COMMAND = 0x01,
- UPIU_TRANSACTION_DATA_OUT = 0x02,
- UPIU_TRANSACTION_TASK_REQ = 0x04,
- UPIU_TRANSACTION_QUERY_REQ = 0x16,
-};
-
-/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
- UPIU_TRANSACTION_NOP_IN = 0x20,
- UPIU_TRANSACTION_RESPONSE = 0x21,
- UPIU_TRANSACTION_DATA_IN = 0x22,
- UPIU_TRANSACTION_TASK_RSP = 0x24,
- UPIU_TRANSACTION_READY_XFER = 0x31,
- UPIU_TRANSACTION_QUERY_RSP = 0x36,
- UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
-};
-
-/* UPIU Read/Write flags */
-enum {
- UPIU_CMD_FLAGS_NONE = 0x00,
- UPIU_CMD_FLAGS_WRITE = 0x20,
- UPIU_CMD_FLAGS_READ = 0x40,
-};
-
-/* UPIU Task Attributes */
-enum {
- UPIU_TASK_ATTR_SIMPLE = 0x00,
- UPIU_TASK_ATTR_ORDERED = 0x01,
- UPIU_TASK_ATTR_HEADQ = 0x02,
- UPIU_TASK_ATTR_ACA = 0x03,
-};
-
-/* UPIU Query request function */
-enum {
- UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
- UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
-};
-
-/* Flag idn for Query Requests */
-enum flag_idn {
- QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
- QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
- QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
- QUERY_FLAG_IDN_BKOPS_EN = 0x04,
- QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
- QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
- QUERY_FLAG_IDN_RESERVED2 = 0x07,
- QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
- QUERY_FLAG_IDN_BUSY_RTC = 0x09,
- QUERY_FLAG_IDN_RESERVED3 = 0x0A,
- QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
-};
-
-/* Attribute idn for Query requests */
-enum attr_idn {
- QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
- QUERY_ATTR_IDN_RESERVED = 0x01,
- QUERY_ATTR_IDN_POWER_MODE = 0x02,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
- QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
- QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
- QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
- QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
- QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
- QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
- QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
- QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
- QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
- QUERY_ATTR_IDN_EE_STATUS = 0x0E,
- QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
- QUERY_ATTR_IDN_CNTX_CONF = 0x10,
- QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
- QUERY_ATTR_IDN_RESERVED2 = 0x12,
- QUERY_ATTR_IDN_RESERVED3 = 0x13,
- QUERY_ATTR_IDN_FFU_STATUS = 0x14,
- QUERY_ATTR_IDN_PSA_STATE = 0x15,
- QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
-};
-
-/* Descriptor idn for Query requests */
-enum desc_idn {
- QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURATION = 0x1,
- QUERY_DESC_IDN_UNIT = 0x2,
- QUERY_DESC_IDN_RFU_0 = 0x3,
- QUERY_DESC_IDN_INTERCONNECT = 0x4,
- QUERY_DESC_IDN_STRING = 0x5,
- QUERY_DESC_IDN_RFU_1 = 0x6,
- QUERY_DESC_IDN_GEOMETRY = 0x7,
- QUERY_DESC_IDN_POWER = 0x8,
- QUERY_DESC_IDN_HEALTH = 0x9,
- QUERY_DESC_IDN_MAX,
-};
-
-enum desc_header_offset {
- QUERY_DESC_LENGTH_OFFSET = 0x00,
- QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
-};
-
-enum ufs_desc_def_size {
- QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
- QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
- QUERY_DESC_UNIT_DEF_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
- QUERY_DESC_POWER_DEF_SIZE = 0x62,
- QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
-};
-
-/* Unit descriptor parameter offsets in bytes */
-enum unit_desc_param {
- UNIT_DESC_PARAM_LEN = 0x0,
- UNIT_DESC_PARAM_TYPE = 0x1,
- UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
- UNIT_DESC_PARAM_LU_ENABLE = 0x3,
- UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
- UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
- UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
- UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
- UNIT_DESC_PARAM_MEM_TYPE = 0x8,
- UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
- UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
- UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
- UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
- UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
- UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
- UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
- UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
-};
-
-/* Device descriptor parameter offsets in bytes */
-enum device_desc_param {
- DEVICE_DESC_PARAM_LEN = 0x0,
- DEVICE_DESC_PARAM_TYPE = 0x1,
- DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
- DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
- DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
- DEVICE_DESC_PARAM_PRTCL = 0x5,
- DEVICE_DESC_PARAM_NUM_LU = 0x6,
- DEVICE_DESC_PARAM_NUM_WLU = 0x7,
- DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
- DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
- DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
- DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
- DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
- DEVICE_DESC_PARAM_SEC_LU = 0xD,
- DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
- DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
- DEVICE_DESC_PARAM_SPEC_VER = 0x10,
- DEVICE_DESC_PARAM_MANF_DATE = 0x12,
- DEVICE_DESC_PARAM_MANF_NAME = 0x14,
- DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
- DEVICE_DESC_PARAM_SN = 0x16,
- DEVICE_DESC_PARAM_OEM_ID = 0x17,
- DEVICE_DESC_PARAM_MANF_ID = 0x18,
- DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
- DEVICE_DESC_PARAM_UD_LEN = 0x1B,
- DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
- DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
- DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
- DEVICE_DESC_PARAM_FFU_TMT = 0x20,
- DEVICE_DESC_PARAM_Q_DPTH = 0x21,
- DEVICE_DESC_PARAM_DEV_VER = 0x22,
- DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
- DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
- DEVICE_DESC_PARAM_PSA_TMT = 0x29,
- DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
-};
-
-/* Interconnect descriptor parameter offsets in bytes */
-enum interconnect_desc_param {
- INTERCONNECT_DESC_PARAM_LEN = 0x0,
- INTERCONNECT_DESC_PARAM_TYPE = 0x1,
- INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
- INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
-};
-
-/* Geometry descriptor parameter offsets in bytes */
-enum geometry_desc_param {
- GEOMETRY_DESC_PARAM_LEN = 0x0,
- GEOMETRY_DESC_PARAM_TYPE = 0x1,
- GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
- GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
- GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
- GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
- GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
- GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
- GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
- GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
- GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
- GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
- GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
- GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
- GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
- GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
- GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
- GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
- GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
- GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
- GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
- GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
- GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
- GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
- GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
- GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
- GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
- GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
- GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
- GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
- GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
- GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
-};
-
-/* Health descriptor parameter offsets in bytes */
-enum health_desc_param {
- HEALTH_DESC_PARAM_LEN = 0x0,
- HEALTH_DESC_PARAM_TYPE = 0x1,
- HEALTH_DESC_PARAM_EOL_INFO = 0x2,
- HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
- HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
-};
-
-/*
- * Logical Unit Write Protect
- * 00h: LU not write protected
- * 01h: LU write protected when fPowerOnWPEn = 1
- * 02h: LU permanently write protected when fPermanentWPEn = 1
- */
-enum ufs_lu_wp_type {
- UFS_LU_NO_WP = 0x00,
- UFS_LU_POWER_ON_WP = 0x01,
- UFS_LU_PERM_WP = 0x02,
-};
-
-/* bActiveICCLevel parameter current units */
-enum {
- UFSHCD_NANO_AMP = 0,
- UFSHCD_MICRO_AMP = 1,
- UFSHCD_MILI_AMP = 2,
- UFSHCD_AMP = 3,
-};
-
-#define POWER_DESC_MAX_SIZE 0x62
-#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
-
-/* Attribute bActiveICCLevel parameter bit masks definitions */
-#define ATTR_ICC_LVL_UNIT_OFFSET 14
-#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
-#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
-
-/* Power descriptor parameters offsets in bytes */
-enum power_desc_param_offset {
- PWR_DESC_LEN = 0x0,
- PWR_DESC_TYPE = 0x1,
- PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
- PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
- PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
-};
-
-/* Exception event mask values */
-enum {
- MASK_EE_STATUS = 0xFFFF,
- MASK_EE_URGENT_BKOPS = (1 << 2),
-};
-
-/* Background operation status */
-enum bkops_status {
- BKOPS_STATUS_NO_OP = 0x0,
- BKOPS_STATUS_NON_CRITICAL = 0x1,
- BKOPS_STATUS_PERF_IMPACT = 0x2,
- BKOPS_STATUS_CRITICAL = 0x3,
- BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
-};
-
-/* UTP QUERY Transaction Specific Fields OpCode */
-enum query_opcode {
- UPIU_QUERY_OPCODE_NOP = 0x0,
- UPIU_QUERY_OPCODE_READ_DESC = 0x1,
- UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
- UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
- UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
- UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
- UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
- UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
- UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
-};
-
-/* bRefClkFreq attribute values */
-enum ufs_ref_clk_freq {
- REF_CLK_FREQ_19_2_MHZ = 0,
- REF_CLK_FREQ_26_MHZ = 1,
- REF_CLK_FREQ_38_4_MHZ = 2,
- REF_CLK_FREQ_52_MHZ = 3,
- REF_CLK_FREQ_INVAL = -1,
-};
-
-struct ufs_ref_clk {
- unsigned long freq_hz;
- enum ufs_ref_clk_freq val;
-};
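
This type backs the bRefClkFreq lookup; a sketch of a typical table (the table name is illustrative, and mirroring how the core driver pairs rates with attribute values is an assumption):

    /* Sketch: pairing reference clock rates with bRefClkFreq values. */
    static const struct ufs_ref_clk ufs_ref_clk_freqs_sketch[] = {
        { 19200000, REF_CLK_FREQ_19_2_MHZ },
        { 26000000, REF_CLK_FREQ_26_MHZ },
        { 38400000, REF_CLK_FREQ_38_4_MHZ },
        { 52000000, REF_CLK_FREQ_52_MHZ },
        { 0,        REF_CLK_FREQ_INVAL },
    };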
-
-/* Query response result code */
-enum {
- QUERY_RESULT_SUCCESS = 0x00,
- QUERY_RESULT_NOT_READABLE = 0xF6,
- QUERY_RESULT_NOT_WRITEABLE = 0xF7,
- QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
- QUERY_RESULT_INVALID_LENGTH = 0xF9,
- QUERY_RESULT_INVALID_VALUE = 0xFA,
- QUERY_RESULT_INVALID_SELECTOR = 0xFB,
- QUERY_RESULT_INVALID_INDEX = 0xFC,
- QUERY_RESULT_INVALID_IDN = 0xFD,
- QUERY_RESULT_INVALID_OPCODE = 0xFE,
- QUERY_RESULT_GENERAL_FAILURE = 0xFF,
-};
-
-/* UTP Transfer Request Command Type (CT) */
-enum {
- UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
- UPIU_COMMAND_SET_TYPE_UFS = 0x1,
- UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
-};
-
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
-/* Offset of the response code in the UPIU header */
-#define UPIU_RSP_CODE_OFFSET 8
-
-enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
- MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
-};
-
-/* Task management service response */
-enum {
- UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
- UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
- UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
- UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
- UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
-};
-
-/* UFS device power modes */
-enum ufs_dev_pwr_mode {
- UFS_ACTIVE_PWR_MODE = 1,
- UFS_SLEEP_PWR_MODE = 2,
- UFS_POWERDOWN_PWR_MODE = 3,
-};
-
-/**
- * struct utp_cmd_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @sense_data: Sense data field DW-8 to DW-12
- */
-struct utp_cmd_rsp {
- __be32 residual_transfer_count;
- __be32 reserved[4];
- __be16 sense_data_len;
- u8 sense_data[UFS_SENSE_SIZE];
-};
-
-/**
- * struct utp_upiu_rsp - general upiu response structure
- * @header: UPIU header structure DW-0 to DW-2
- * @sr: fields structure for scsi command DW-3 to DW-12
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_rsp {
- struct utp_upiu_header header;
- union {
- struct utp_cmd_rsp sr;
- struct utp_upiu_query qr;
- };
-};
-
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_res - UPIU QUERY response
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
-#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */
-#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
-#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
-#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */
-#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
-
-/*
- * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
- * and link is in Hibern8 state.
- */
-#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
-
-struct ufs_vreg {
- struct regulator *reg;
- const char *name;
- bool enabled;
- int min_uV;
- int max_uV;
- int max_uA;
-};
-
-struct ufs_vreg_info {
- struct ufs_vreg *vcc;
- struct ufs_vreg *vccq;
- struct ufs_vreg *vccq2;
- struct ufs_vreg *vdd_hba;
-};
-
-struct ufs_dev_info {
- bool f_power_on_wp_en;
- /* Keeps information if any of the LU is power on write protected */
- bool is_lu_power_on_wp;
- /* Maximum number of general LU supported by the UFS device */
- u8 max_lu_supported;
- u16 wmanufacturerid;
- /* UFS device product name */
- u8 *model;
-};
-
-/**
- * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
- * @dev_info: pointer to instance of struct ufs_dev_info
- * @lun: LU number to check
- *
- * Return: true if the LUN has a matching unit descriptor, false otherwise
- */
-static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
- u8 lun)
-{
- if (!dev_info || !dev_info->max_lu_supported) {
- pr_err("Max General LU supported by UFS isn't initialized\n");
- return false;
- }
-
- return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
-}
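
A minimal usage sketch of the check above, guarding a unit descriptor read (lun, ret and lu_enable are illustrative locals):

    /* Sketch: only query a unit descriptor for LUNs that have one. */
    u8 lu_enable;

    if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
        return -EINVAL;
    ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
                                 UNIT_DESC_PARAM_LU_ENABLE, &lu_enable, 1);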
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
deleted file mode 100644
index 53dd87628cbe..000000000000
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bsg endpoint that supports UPIUs
- *
- * Copyright (C) 2018 Western Digital Corporation
- */
-#include "ufs_bsg.h"
-
-static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
- struct utp_upiu_query *qr)
-{
- int desc_size = be16_to_cpu(qr->length);
- int desc_id = qr->idn;
- int ret;
-
- if (desc_size <= 0)
- return -EINVAL;
-
- ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
- if (ret || !*desc_len)
- return -EINVAL;
-
- *desc_len = min_t(int, *desc_len, desc_size);
-
- return 0;
-}
-
-static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
- unsigned int request_len,
- unsigned int reply_len)
-{
- int min_req_len = sizeof(struct ufs_bsg_request);
- int min_rsp_len = sizeof(struct ufs_bsg_reply);
-
- if (min_req_len > request_len || min_rsp_len > reply_len) {
- dev_err(hba->dev, "not enough space assigned\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
- uint8_t **desc_buff, int *desc_len,
- enum query_opcode desc_op)
-{
- struct ufs_bsg_request *bsg_request = job->request;
- struct utp_upiu_query *qr;
- u8 *descp;
-
- if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
- desc_op != UPIU_QUERY_OPCODE_READ_DESC)
- goto out;
-
- qr = &bsg_request->upiu_req.qr;
- if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
- dev_err(hba->dev, "Illegal desc size\n");
- return -EINVAL;
- }
-
- if (*desc_len > job->request_payload.payload_len) {
- dev_err(hba->dev, "Illegal desc size\n");
- return -EINVAL;
- }
-
- descp = kzalloc(*desc_len, GFP_KERNEL);
- if (!descp)
- return -ENOMEM;
-
- if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt, descp,
- *desc_len);
-
- *desc_buff = descp;
-
-out:
- return 0;
-}
-
-static int ufs_bsg_request(struct bsg_job *job)
-{
- struct ufs_bsg_request *bsg_request = job->request;
- struct ufs_bsg_reply *bsg_reply = job->reply;
- struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
- unsigned int req_len = job->request_len;
- unsigned int reply_len = job->reply_len;
- struct uic_command uc = {};
- int msgcode;
- uint8_t *desc_buff = NULL;
- int desc_len = 0;
- enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
- int ret;
-
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
- if (ret)
- goto out;
-
- bsg_reply->reply_payload_rcv_len = 0;
-
- pm_runtime_get_sync(hba->dev);
-
- msgcode = bsg_request->msgcode;
- switch (msgcode) {
- case UPIU_TRANSACTION_QUERY_REQ:
- desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
- &desc_len, desc_op);
- if (ret)
- goto out;
-
- /* fall through */
- case UPIU_TRANSACTION_NOP_OUT:
- case UPIU_TRANSACTION_TASK_REQ:
- ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
- &bsg_reply->upiu_rsp, msgcode,
- desc_buff, &desc_len, desc_op);
- if (ret)
- dev_err(hba->dev,
- "exe raw upiu: error code %d\n", ret);
-
- break;
- case UPIU_TRANSACTION_UIC_CMD:
- memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
- ret = ufshcd_send_uic_cmd(hba, &uc);
- if (ret)
- dev_err(hba->dev,
- "send uic cmd: error code %d\n", ret);
-
- memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
-
- break;
- default:
- ret = -ENOTSUPP;
- dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
-
- break;
- }
-
- pm_runtime_put_sync(hba->dev);
-
- if (!desc_buff)
- goto out;
-
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
- bsg_reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- desc_buff, desc_len);
-
- kfree(desc_buff);
-
-out:
- bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply);
- /* complete the job here only if no error */
- if (ret == 0)
- bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
-
- return ret;
-}
-
-/**
- * ufs_bsg_remove - detach and remove the added ufs-bsg node
- * @hba: per adapter object
- *
- * Should be called when unloading the driver.
- */
-void ufs_bsg_remove(struct ufs_hba *hba)
-{
- struct device *bsg_dev = &hba->bsg_dev;
-
- if (!hba->bsg_queue)
- return;
-
- bsg_remove_queue(hba->bsg_queue);
-
- device_del(bsg_dev);
- put_device(bsg_dev);
-}
-
-static inline void ufs_bsg_node_release(struct device *dev)
-{
- put_device(dev->parent);
-}
-
-/**
- * ufs_bsg_probe - Add ufs bsg device node
- * @hba: per adapter object
- *
- * Called during initial loading of the driver, and before scsi_scan_host.
- */
-int ufs_bsg_probe(struct ufs_hba *hba)
-{
- struct device *bsg_dev = &hba->bsg_dev;
- struct Scsi_Host *shost = hba->host;
- struct device *parent = &shost->shost_gendev;
- struct request_queue *q;
- int ret;
-
- device_initialize(bsg_dev);
-
- bsg_dev->parent = get_device(parent);
- bsg_dev->release = ufs_bsg_node_release;
-
- dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
-
- ret = device_add(bsg_dev);
- if (ret)
- goto out;
-
- q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
- if (IS_ERR(q)) {
- ret = PTR_ERR(q);
- goto out;
- }
-
- hba->bsg_queue = q;
-
- return 0;
-
-out:
- dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
- put_device(bsg_dev);
- return ret;
-}
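The bsg node registered above ("ufs-bsgN") is driven from userspace through the standard SG v4 ioctl. A hedged userspace sketch, assuming the uapi headers of the same era (struct sg_io_v4 from <linux/bsg.h>, struct ufs_bsg_request/ufs_bsg_reply from <scsi/scsi_bsg_ufs.h>) and a node exposed by udev as /dev/bsg/ufs-bsg0:

/* Userspace sketch: submit a raw UPIU to a ufs-bsg node via SG_IO. */
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include <scsi/scsi_bsg_ufs.h>	/* struct ufs_bsg_request/reply */

static int ufs_bsg_send(int fd, struct ufs_bsg_request *req,
			struct ufs_bsg_reply *rsp)
{
	struct sg_io_v4 io;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uint64_t)(uintptr_t)req;
	io.request_len = sizeof(*req);
	io.response = (uint64_t)(uintptr_t)rsp;
	io.max_response_len = sizeof(*rsp);

	return ioctl(fd, SG_IO, &io);	/* 0 on success */
}

ufs_bsg_request() above then routes the UPIU according to msgcode, so the userspace side only fills in bsg_request->msgcode and the request UPIU.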
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
deleted file mode 100644
index d09918758631..000000000000
--- a/drivers/scsi/ufs/ufs_bsg.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2018 Western Digital Corporation
- */
-#ifndef UFS_BSG_H
-#define UFS_BSG_H
-
-#include <linux/bsg-lib.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-
-#include "ufshcd.h"
-#include "ufs.h"
-
-#ifdef CONFIG_SCSI_UFS_BSG
-void ufs_bsg_remove(struct ufs_hba *hba);
-int ufs_bsg_probe(struct ufs_hba *hba);
-#else
-static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
-static inline int ufs_bsg_probe(struct ufs_hba *hba) {return 0; }
-#endif
-
-#endif /* UFS_BSG_H */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
deleted file mode 100644
index d0ab147f98d3..000000000000
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _UFS_QUIRKS_H_
-#define _UFS_QUIRKS_H_
-
-/* return true if s1 is a prefix of s2 */
-#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
-
-#define UFS_ANY_VENDOR 0xFFFF
-#define UFS_ANY_MODEL "ANY_MODEL"
-
-#define UFS_VENDOR_TOSHIBA 0x198
-#define UFS_VENDOR_SAMSUNG 0x1CE
-#define UFS_VENDOR_SKHYNIX 0x1AD
-
-/**
- * struct ufs_dev_fix - ufs device quirk info
- * @wmanufacturerid: UFS device manufacturer id
- * @model: UFS device product name
- * @quirk: device quirk
- */
-struct ufs_dev_fix {
- u16 wmanufacturerid;
- u8 *model;
- unsigned int quirk;
-};
-
-#define END_FIX { }
-
-/* add specific device quirk */
-#define UFS_FIX(_vendor, _model, _quirk) { \
- .wmanufacturerid = (_vendor),\
- .model = (_model), \
- .quirk = (_quirk), \
-}
-
-/*
- * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
- * causing the host controller to raise the DFES error status. Sometimes
- * such UFS devices send back-to-back NACs without waiting for a new
- * retransmitted DL frame from the host, and in such cases the host UniPro
- * may go into a bad state without raising the DFES error interrupt. If
- * this happens, all the pending commands would time out only after the
- * respective SW command timeout (which is generally too large).
- *
- * We can work around such device behaviour like this:
- * - As soon as SW sees the DL NAC error, it should schedule the error handler
- * - The error handler sleeps for 50ms to see if there are any fatal errors
- *   raised by the UFS controller.
- * - If there are fatal errors, SW does normal error recovery.
- * - If there are no fatal errors, SW sends the NOP command to the device
- *   to check if the link is alive.
- * - If the NOP command times out, SW does normal error recovery.
- * - If the NOP command succeeds, skip the error handling.
- *
- * If the DL NAC error is seen multiple times with some vendor's UFS devices,
- * enable this quirk to initiate quick error recovery and also silence the
- * related error logs to reduce spamming of the kernel logs.
- */
-#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
-
-/*
- * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
- * 600us, which may not be enough for a reliable hibern8 exit hardware
- * sequence from the UFS device.
- * To work around this issue, the host should set its PA_TACTIVATE time to 1ms
- * even if the device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
- */
-#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
-
-/*
- * It seems some UFS devices may keep drawing more than the sleep current
- * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
- * To avoid this situation, add a 2ms delay before putting these UFS
- * rails into LPM mode.
- */
-#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
-
-/*
- * Some UFS devices require the host PA_TACTIVATE to be lower than the device
- * PA_TACTIVATE; enabling this quirk ensures this.
- */
-#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
-
-/*
- * The maximum value of PA_SaveConfigTime is 250 (10us), but this is not
- * enough for some vendors.
- * A gear switch from PWM to HS may fail even with this maximum
- * PA_SaveConfigTime, and a gear switch can be issued by the host controller
- * as an error recovery, so no software delay will help in this case; we need
- * to increase PA_SaveConfigTime to >32us as per the vendor recommendation.
- */
-#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
-
-/*
- * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
- * enabling this quirk ensures this.
- */
-#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
-
-#endif /* UFS_QUIRKS_H_ */
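The header defines the table entries but not the matcher. For context, this is roughly how a fixup table built from UFS_FIX() entries gets scanned against the probed device identity; this is a sketch of the pattern, not a copy of the core's matcher.

/* Sketch: apply every quirk whose vendor/model matches the device. */
static void ufs_apply_fixups(struct ufs_hba *hba,
			     const struct ufs_dev_info *dev_info,
			     const struct ufs_dev_fix *table)
{
	const struct ufs_dev_fix *f;

	for (f = table; f->quirk; f++) {	/* END_FIX has quirk == 0 */
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		    (STR_PRFX_EQUAL(f->model, dev_info->model) ||
		     !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}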
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
deleted file mode 100644
index 6a901da2d15a..000000000000
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UFS Host driver for Synopsys Designware Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include "ufshcd.h"
-#include "unipro.h"
-
-#include "ufshcd-dwc.h"
-#include "ufshci-dwc.h"
-
-int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
- const struct ufshcd_dme_attr_val *v, int n)
-{
- int ret = 0;
- int attr_node = 0;
-
- for (attr_node = 0; attr_node < n; attr_node++) {
- ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
- ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
-
-/**
- * ufshcd_dwc_program_clk_div() - program the clock divider value
- * @hba: private structure pointer
- * @divider_val: clock divider value to be programmed
- *
- * This value is needed to provide a 1 microsecond tick to the UniPro layer.
- */
-static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
-{
- ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
-}
-
-/**
- * ufshcd_dwc_link_is_up() - check if the link is up
- * @hba: private structure pointer
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
-{
- int dme_result = 0;
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
-
- if (dme_result == UFSHCD_LINK_IS_UP) {
- ufshcd_set_link_active(hba);
- return 0;
- }
-
- return 1;
-}
-
-/**
- * ufshcd_dwc_connection_setup() - configure the host/device connection
- * @hba: pointer to the driver's private data
- *
- * This function configures both the local side (host) and the peer side
- * (device) UniPro attributes to establish the connection to the
- * application/CPort.
- * It is not required if the hardware is properly configured to have this
- * connection set up on reset, but invoking it does no harm and should be
- * fine with any UFS device.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
-{
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
- { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
- { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
- { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
- { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
- { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
- { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
- { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
- { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
- { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
- { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
- { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
- { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
- { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
- { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
- };
-
- return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
-}
-
-/**
- * ufshcd_dwc_link_startup_notify() - UFS host DWC-specific link startup sequence
- * @hba: private structure pointer
- * @status: callback notify status
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
-
- if (status == PRE_CHANGE) {
- ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
-
- if (hba->vops->phy_initialization) {
- err = hba->vops->phy_initialization(hba);
- if (err) {
- dev_err(hba->dev, "Phy setup failed (%d)\n",
- err);
- goto out;
- }
- }
- } else { /* POST_CHANGE */
- err = ufshcd_dwc_link_is_up(hba);
- if (err) {
- dev_err(hba->dev, "Link is not up\n");
- goto out;
- }
-
- err = ufshcd_dwc_connection_setup(hba);
- if (err)
- dev_err(hba->dev, "Connection setup failed (%d)\n",
- err);
- }
-
-out:
- return err;
-}
-EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/scsi/ufs/ufshcd-dwc.h
deleted file mode 100644
index 4268ca2eb64c..000000000000
--- a/drivers/scsi/ufs/ufshcd-dwc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UFS Host driver for Synopsys Designware Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#ifndef _UFSHCD_DWC_H
-#define _UFSHCD_DWC_H
-
-struct ufshcd_dme_attr_val {
- u32 attr_sel;
- u32 mib_val;
- u8 peer;
-};
-
-int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status);
-int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
- const struct ufshcd_dme_attr_val *v, int n);
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
deleted file mode 100644
index 3b19de3ae9a3..000000000000
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Universal Flash Storage Host controller PCI glue driver
- *
- * This code is based on drivers/scsi/ufs/ufshcd-pci.c
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- */
-
-#include "ufshcd.h"
-#include <linux/pci.h>
-#include <linux/pm_runtime.h>
-
-static int ufs_intel_disable_lcc(struct ufs_hba *hba)
-{
- u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
- u32 lcc_enable = 0;
-
- ufshcd_dme_get(hba, attr, &lcc_enable);
- if (lcc_enable)
- ufshcd_dme_set(hba, attr, 0);
-
- return 0;
-}
-
-static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- err = ufs_intel_disable_lcc(hba);
- break;
- case POST_CHANGE:
- break;
- default:
- break;
- }
-
- return err;
-}
-
-static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
- .name = "intel-pci",
- .link_startup_notify = ufs_intel_link_startup_notify,
-};
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * ufshcd_pci_suspend - suspend power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-/**
- * ufshcd_pci_resume - resume power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM
-static int ufshcd_pci_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-#endif /* CONFIG_PM */
-
-/**
- * ufshcd_pci_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_pci_shutdown(struct pci_dev *pdev)
-{
- ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-/**
- * ufshcd_pci_remove - de-allocate the PCI/SCSI host and its data
- * structure memory
- * @pdev: pointer to PCI handle
- */
-static void ufshcd_pci_remove(struct pci_dev *pdev)
-{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
- ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
-}
-
-/**
- * ufshcd_pci_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int
-ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ufs_hba *hba;
- void __iomem *mmio_base;
- int err;
-
- err = pcim_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pcim_enable_device failed\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request and iomap failed\n");
- return err;
- }
-
- mmio_base = pcim_iomap_table(pdev)[0];
-
- err = ufshcd_alloc_host(&pdev->dev, &hba);
- if (err) {
- dev_err(&pdev->dev, "Allocation failed\n");
- return err;
- }
-
- hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
-
- err = ufshcd_init(hba, mmio_base, pdev->irq);
- if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- ufshcd_dealloc_host(hba);
- return err;
- }
-
- pci_set_drvdata(pdev, hba);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
- ufshcd_pci_resume)
- SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
- ufshcd_pci_runtime_resume,
- ufshcd_pci_runtime_idle)
-};
-
-static const struct pci_device_id ufshcd_pci_tbl[] = {
- { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
- .name = UFSHCD,
- .id_table = ufshcd_pci_tbl,
- .probe = ufshcd_pci_probe,
- .remove = ufshcd_pci_remove,
- .shutdown = ufshcd_pci_shutdown,
- .driver = {
- .pm = &ufshcd_pci_pm_ops
- },
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
-MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("UFS host controller PCI glue driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
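Note how the probe routine casts id->driver_data straight to the variant ops: the Intel entries carry &ufs_intel_cnl_hba_vops while the Samsung entry carries 0, leaving hba->vops NULL. Extending the table therefore only takes a vops instance and an ID entry; a purely hypothetical example:

/* Hypothetical: wiring a new PCI ID to its own variant ops. */
static struct ufs_hba_variant_ops ufs_acme_hba_vops = {
	.name = "acme-pci",
	/* .link_startup_notify etc. as the part requires */
};

/* Added to ufshcd_pci_tbl[] before the terminating entry:          */
/*   { PCI_VDEVICE(ACME, 0x1234), (kernel_ulong_t)&ufs_acme_hba_vops }, */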
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
deleted file mode 100644
index 76f9be71c31b..000000000000
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Universal Flash Storage Host controller Platform bus based glue driver
- *
- * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- */
-
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/of.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "unipro.h"
-
-#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
-
-static int ufshcd_parse_clock_info(struct ufs_hba *hba)
-{
- int ret = 0;
- int cnt;
- int i;
- struct device *dev = hba->dev;
- struct device_node *np = dev->of_node;
- char *name;
- u32 *clkfreq = NULL;
- struct ufs_clk_info *clki;
- int len = 0;
- size_t sz = 0;
-
- if (!np)
- goto out;
-
- cnt = of_property_count_strings(np, "clock-names");
- if (!cnt || (cnt == -EINVAL)) {
- dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
- __func__);
- } else if (cnt < 0) {
- dev_err(dev, "%s: count clock strings failed, err %d\n",
- __func__, cnt);
- ret = cnt;
- }
-
- if (cnt <= 0)
- goto out;
-
- if (!of_get_property(np, "freq-table-hz", &len)) {
- dev_info(dev, "freq-table-hz property not specified\n");
- goto out;
- }
-
- if (len <= 0)
- goto out;
-
- sz = len / sizeof(*clkfreq);
- if (sz != 2 * cnt) {
- dev_err(dev, "%s len mismatch\n", "freq-table-hz");
- ret = -EINVAL;
- goto out;
- }
-
- clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
- GFP_KERNEL);
- if (!clkfreq) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = of_property_read_u32_array(np, "freq-table-hz",
- clkfreq, sz);
- if (ret && (ret != -EINVAL)) {
- dev_err(dev, "%s: error reading array %d\n",
- "freq-table-hz", ret);
- return ret;
- }
-
- for (i = 0; i < sz; i += 2) {
- ret = of_property_read_string_index(np,
- "clock-names", i/2, (const char **)&name);
- if (ret)
- goto out;
-
- clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
- if (!clki) {
- ret = -ENOMEM;
- goto out;
- }
-
- clki->min_freq = clkfreq[i];
- clki->max_freq = clkfreq[i+1];
- clki->name = kstrdup(name, GFP_KERNEL);
- dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
- clki->min_freq, clki->max_freq, clki->name);
- list_add_tail(&clki->list, &hba->clk_list_head);
- }
-out:
- return ret;
-}
-
-#define MAX_PROP_SIZE 32
-static int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
-{
- int ret = 0;
- char prop_name[MAX_PROP_SIZE];
- struct ufs_vreg *vreg = NULL;
- struct device_node *np = dev->of_node;
-
- if (!np) {
- dev_err(dev, "%s: non DT initialization\n", __func__);
- goto out;
- }
-
- snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
- if (!of_parse_phandle(np, prop_name, 0)) {
- dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
- __func__, prop_name);
- goto out;
- }
-
- vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
- return -ENOMEM;
-
- vreg->name = kstrdup(name, GFP_KERNEL);
-
- snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
- if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
- dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
- vreg->max_uA = 0;
- }
-
- if (!strcmp(name, "vcc")) {
- if (of_property_read_bool(np, "vcc-supply-1p8")) {
- vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
- vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
- } else {
- vreg->min_uV = UFS_VREG_VCC_MIN_UV;
- vreg->max_uV = UFS_VREG_VCC_MAX_UV;
- }
- } else if (!strcmp(name, "vccq")) {
- vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
- vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
- } else if (!strcmp(name, "vccq2")) {
- vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
- vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
- }
-
-out:
- if (!ret)
- *out_vreg = vreg;
- return ret;
-}
-
-/**
- * ufshcd_parse_regulator_info - get regulator info from device tree
- * @hba: per adapter instance
- *
- * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
- * If any of the supplies is not defined, it is assumed to be always-on and
- * zero is returned. If a property is defined but parsing fails, the
- * corresponding error is returned.
- */
-static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
-{
- int err;
- struct device *dev = hba->dev;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
- if (err)
- goto out;
-
- err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
- if (err)
- goto out;
-
- err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
- if (err)
- goto out;
-
- err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
-out:
- return err;
-}
-
-#ifdef CONFIG_PM
-/**
- * ufshcd_pltfrm_suspend - suspend power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_suspend(struct device *dev)
-{
- return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);
-
-/**
- * ufshcd_pltfrm_resume - resume power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_resume(struct device *dev)
-{
- return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);
-
-int ufshcd_pltfrm_runtime_suspend(struct device *dev)
-{
- return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);
-
-int ufshcd_pltfrm_runtime_resume(struct device *dev)
-{
- return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);
-
-int ufshcd_pltfrm_runtime_idle(struct device *dev)
-{
- return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);
-
-#endif /* CONFIG_PM */
-
-void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
-{
- ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
-
-static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
-{
- struct device *dev = hba->dev;
- int ret;
-
- ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
- &hba->lanes_per_direction);
- if (ret) {
- dev_dbg(hba->dev,
- "%s: failed to read lanes-per-direction, ret=%d\n",
- __func__, ret);
- hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
- }
-}
-
-/**
- * ufshcd_get_pwr_dev_param - get the final agreed attributes for
- * a power mode change
- * @pltfrm_param: pointer to platform parameters
- * @dev_max: pointer to device attributes
- * @agreed_pwr: returned agreed attributes
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
-{
- int min_pltfrm_gear;
- int min_dev_gear;
- bool is_dev_sup_hs = false;
- bool is_pltfrm_max_hs = false;
-
- if (dev_max->pwr_rx == FAST_MODE)
- is_dev_sup_hs = true;
-
- if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
- is_pltfrm_max_hs = true;
- min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
- pltfrm_param->hs_tx_gear);
- } else {
- min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
- pltfrm_param->pwm_tx_gear);
- }
-
- /*
- * device doesn't support HS but
- * pltfrm_param->desired_working_mode is HS,
- * thus device and pltfrm_param don't agree
- */
- if (!is_dev_sup_hs && is_pltfrm_max_hs) {
- pr_info("%s: device doesn't support HS\n",
- __func__);
- return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
-		/*
-		 * Since the device supports HS, it supports FAST_MODE.
-		 * Since pltfrm_param->desired_working_mode is also HS, the
-		 * final decision (FAST/FASTAUTO) is made according to
-		 * pltfrm_params, as it is the restricting factor.
-		 */
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
- agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
- } else {
-		/*
-		 * Here pltfrm_param->desired_working_mode is PWM.
-		 * It doesn't matter whether the device supports HS or PWM;
-		 * in both cases pltfrm_param->desired_working_mode will
-		 * determine the mode.
-		 */
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
- agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
- }
-
-	/*
-	 * We would like tx to work in the minimum number of lanes
-	 * between the device capability and the vendor preferences;
-	 * the same decision is made for rx.
-	 */
- agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
- pltfrm_param->tx_lanes);
- agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
- pltfrm_param->rx_lanes);
-
- /* device maximum gear is the minimum between device rx and tx gears */
- min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
-	/*
-	 * If the device capabilities and the vendor pre-defined preferences
-	 * are both HS or both PWM, set the minimum gear as the chosen
-	 * working gear.
-	 * If one is PWM and one is HS, the PWM one gets to decide the gear,
-	 * as it is the one that also decided previously what pwr the device
-	 * will be configured to.
-	 */
- if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
- (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
- agreed_pwr->gear_rx =
- min_t(u32, min_dev_gear, min_pltfrm_gear);
- } else if (!is_dev_sup_hs) {
- agreed_pwr->gear_rx = min_dev_gear;
- } else {
- agreed_pwr->gear_rx = min_pltfrm_gear;
- }
- agreed_pwr->gear_tx = agreed_pwr->gear_rx;
-
- agreed_pwr->hs_rate = pltfrm_param->hs_rate;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
-
-/**
- * ufshcd_pltfrm_init - probe routine of the driver
- * @pdev: pointer to Platform device handle
- * @vops: pointer to variant ops
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_pltfrm_init(struct platform_device *pdev,
- const struct ufs_hba_variant_ops *vops)
-{
- struct ufs_hba *hba;
- void __iomem *mmio_base;
- int irq, err;
- struct device *dev = &pdev->dev;
-
- mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
- goto out;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = -ENODEV;
- goto out;
- }
-
- err = ufshcd_alloc_host(dev, &hba);
- if (err) {
- dev_err(&pdev->dev, "Allocation failed\n");
- goto out;
- }
-
- hba->vops = vops;
-
- err = ufshcd_parse_clock_info(hba);
- if (err) {
- dev_err(&pdev->dev, "%s: clock parse failed %d\n",
- __func__, err);
- goto dealloc_host;
- }
- err = ufshcd_parse_regulator_info(hba);
- if (err) {
- dev_err(&pdev->dev, "%s: regulator init failed %d\n",
- __func__, err);
- goto dealloc_host;
- }
-
- ufshcd_init_lanes_per_dir(hba);
-
- err = ufshcd_init(hba, mmio_base, irq);
- if (err) {
- dev_err(dev, "Initialization failed\n");
- goto dealloc_host;
- }
-
- platform_set_drvdata(pdev, hba);
-
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-
-dealloc_host:
- ufshcd_dealloc_host(hba);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
-MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
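Vendor platform drivers (ufs-hisi, cdns-pltfrm and friends elsewhere in this directory) are built directly on the exports above. A condensed sketch of such a glue driver, with "acme" standing in for a real SoC integration:

/* Sketch of a platform glue driver built on ufshcd_pltfrm_init(). */
static const struct ufs_hba_variant_ops acme_hba_vops = {
	.name = "acme-ufs",	/* hypothetical SoC integration */
};

static int acme_ufs_probe(struct platform_device *pdev)
{
	return ufshcd_pltfrm_init(pdev, &acme_hba_vops);
}

static const struct dev_pm_ops acme_ufs_pm_ops = {
	.suspend	 = ufshcd_pltfrm_suspend,
	.resume		 = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	 = ufshcd_pltfrm_runtime_resume,
	.runtime_idle	 = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver acme_ufs_driver = {
	.probe	  = acme_ufs_probe,
	/* a real driver also wires .remove up to ufshcd_remove() */
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	  = {
		.name = "acme-ufs",
		.pm   = &acme_ufs_pm_ops,
	},
};
module_platform_driver(acme_ufs_driver);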
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
deleted file mode 100644
index b79cdf9129a0..000000000000
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef UFSHCD_PLTFRM_H_
-#define UFSHCD_PLTFRM_H_
-
-#include "ufshcd.h"
-
-#define UFS_PWM_MODE 1
-#define UFS_HS_MODE 2
-
-struct ufs_dev_params {
- u32 pwm_rx_gear; /* pwm rx gear to work in */
- u32 pwm_tx_gear; /* pwm tx gear to work in */
- u32 hs_rx_gear; /* hs rx gear to work in */
- u32 hs_tx_gear; /* hs tx gear to work in */
- u32 rx_lanes; /* number of rx lanes */
- u32 tx_lanes; /* number of tx lanes */
- u32 rx_pwr_pwm; /* rx pwm working pwr */
- u32 tx_pwr_pwm; /* tx pwm working pwr */
- u32 rx_pwr_hs; /* rx hs working pwr */
- u32 tx_pwr_hs; /* tx hs working pwr */
- u32 hs_rate; /* rate A/B to work in HS */
- u32 desired_working_mode;
-};
-
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr);
-int ufshcd_pltfrm_init(struct platform_device *pdev,
- const struct ufs_hba_variant_ops *vops);
-void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
-
-#ifdef CONFIG_PM
-
-int ufshcd_pltfrm_suspend(struct device *dev);
-int ufshcd_pltfrm_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_suspend(struct device *dev);
-int ufshcd_pltfrm_runtime_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_idle(struct device *dev);
-
-#else /* !CONFIG_PM */
-
-#define ufshcd_pltfrm_suspend NULL
-#define ufshcd_pltfrm_resume NULL
-#define ufshcd_pltfrm_runtime_suspend NULL
-#define ufshcd_pltfrm_runtime_resume NULL
-#define ufshcd_pltfrm_runtime_idle NULL
-
-#endif /* CONFIG_PM */
-
-#endif /* UFSHCD_PLTFRM_H_ */
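struct ufs_dev_params is the platform half of the power negotiation performed by ufshcd_get_pwr_dev_param(). A sketch of the typical call site, a vendor pwr_change_notify() hook, with capability values chosen purely for illustration (the gear/mode constants come from unipro.h):

/* Sketch: feed platform limits into the power-mode negotiation. */
static int acme_pwr_change_notify(struct ufs_hba *hba,
				  enum ufs_notify_change_status status,
				  struct ufs_pa_layer_attr *dev_max,
				  struct ufs_pa_layer_attr *dev_req)
{
	struct ufs_dev_params host_cap = {
		.tx_lanes		= 2,
		.rx_lanes		= 2,
		.hs_rx_gear		= UFS_HS_G3,
		.hs_tx_gear		= UFS_HS_G3,
		.pwm_rx_gear		= UFS_PWM_G4,
		.pwm_tx_gear		= UFS_PWM_G4,
		.rx_pwr_pwm		= SLOW_MODE,
		.tx_pwr_pwm		= SLOW_MODE,
		.rx_pwr_hs		= FAST_MODE,
		.tx_pwr_hs		= FAST_MODE,
		.hs_rate		= PA_HS_MODE_B,
		.desired_working_mode	= UFS_HS_MODE,
	};

	if (status != PRE_CHANGE)
		return 0;

	/* negotiate the platform limits against what the device offers */
	return ufshcd_get_pwr_dev_param(&host_cap, dev_max, dev_req);
}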
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
deleted file mode 100644
index 2d705694636c..000000000000
--- a/drivers/scsi/ufs/ufshcd.c
+++ /dev/null
@@ -1,8587 +0,0 @@
-/*
- * Universal Flash Storage Host controller driver Core
- *
- * This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- *
- * The Linux Foundation chooses to take subject only to the GPLv2
- * license terms, and distributes only under these terms.
- */
-
-#include <linux/async.h>
-#include <linux/devfreq.h>
-#include <linux/nls.h>
-#include <linux/of.h>
-#include <linux/bitfield.h>
-#include "ufshcd.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
-#include "ufs-sysfs.h"
-#include "ufs_bsg.h"
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
-
-#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
- UTP_TASK_REQ_COMPL |\
- UFSHCD_ERROR_MASK)
-/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
-/* NOP OUT retries waiting for NOP IN response */
-#define NOP_OUT_RETRIES 10
-/* Timeout after 30 msecs if NOP OUT hangs without response */
-#define NOP_OUT_TIMEOUT 30 /* msecs */
-
-/* Query request retries */
-#define QUERY_REQ_RETRIES 3
-/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
-
-/* Task management command timeout */
-#define TM_CMD_TIMEOUT 100 /* msecs */
-
-/* maximum number of retries for a general UIC command */
-#define UFS_UIC_COMMAND_RETRIES 3
-
-/* maximum number of link-startup retries */
-#define DME_LINKSTARTUP_RETRIES 3
-
-/* Maximum retries for Hibern8 enter */
-#define UIC_HIBERN8_ENTER_RETRIES 3
-
-/* maximum number of reset retries before giving up */
-#define MAX_HOST_RESET_RETRIES 5
-
-/* Expose the flag value from utp_upiu_query.value */
-#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
-
-/* Interrupt aggregation default timeout, unit: 40us */
-#define INT_AGGR_DEF_TO 0x02
-
-/* default delay of autosuspend: 2000 ms */
-#define RPM_AUTOSUSPEND_DELAY_MS 2000
-
-#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
- ({ \
- int _ret; \
- if (_on) \
- _ret = ufshcd_enable_vreg(_dev, _vreg); \
- else \
- _ret = ufshcd_disable_vreg(_dev, _vreg); \
- _ret; \
- })
-
-#define ufshcd_hex_dump(prefix_str, buf, len) do { \
- size_t __len = (len); \
- print_hex_dump(KERN_ERR, prefix_str, \
- __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
- 16, 4, buf, __len, false); \
-} while (0)
-
-int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix)
-{
- u32 *regs;
- size_t pos;
-
- if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
- return -EINVAL;
-
- regs = kzalloc(len, GFP_ATOMIC);
- if (!regs)
- return -ENOMEM;
-
- for (pos = 0; pos < len; pos += 4)
- regs[pos / 4] = ufshcd_readl(hba, offset + pos);
-
- ufshcd_hex_dump(prefix, regs, len);
- kfree(regs);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
-
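ufshcd_dump_regs() is the generic dump helper exported to vendor drivers; ufshcd_print_host_regs() further down uses it to dump the whole standard register space, and a vendor debug hook would call it the same way:

/* Dump the standard UFSHCI register space (offset/len must be 4-aligned). */
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

/* A vendor hook can dump its own window too; the offset here is illustrative. */
ufshcd_dump_regs(hba, 0x100, 16, "vendor_regs: ");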
-enum {
- UFSHCD_MAX_CHANNEL = 0,
- UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32,
- UFSHCD_CAN_QUEUE = 32,
-};
-
-/* UFSHCD states */
-enum {
- UFSHCD_STATE_RESET,
- UFSHCD_STATE_ERROR,
- UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED,
-};
-
-/* UFSHCD error handling flags */
-enum {
- UFSHCD_EH_IN_PROGRESS = (1 << 0),
-};
-
-/* UFSHCD UIC layer error flags */
-enum {
- UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
- UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
- UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
- UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
-};
-
-#define ufshcd_set_eh_in_progress(h) \
- ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
-#define ufshcd_eh_in_progress(h) \
- ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
-#define ufshcd_clear_eh_in_progress(h) \
- ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-
-#define ufshcd_set_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
-#define ufshcd_set_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
-#define ufshcd_set_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_is_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
-#define ufshcd_is_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
-
-struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
-};
-
-static inline enum ufs_dev_pwr_mode
-ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
-{
- return ufs_pm_lvl_states[lvl].dev_state;
-}
-
-static inline enum uic_link_state
-ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
-{
- return ufs_pm_lvl_states[lvl].link_state;
-}
-
-static inline enum ufs_pm_level
-ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
- enum uic_link_state link_state)
-{
- enum ufs_pm_level lvl;
-
- for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
- if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
- (ufs_pm_lvl_states[lvl].link_state == link_state))
- return lvl;
- }
-
-	/* if no match is found, return level 0 */
- return UFS_PM_LVL_0;
-}
-
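The three helpers above just index into ufs_pm_lvl_states[]; for example, asking for an active device with the link parked in Hibern8 resolves to the UFS_PM_LVL_1 row of the table:

/* Example: device stays active, link parks in Hibern8 -> UFS_PM_LVL_1. */
enum ufs_pm_level lvl =
	ufs_get_desired_pm_lvl_for_dev_link_state(UFS_ACTIVE_PWR_MODE,
						  UIC_LINK_HIBERN8_STATE);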
-static struct ufs_dev_fix ufs_fixups[] = {
- /* UFS cards deviations table */
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
- UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
- UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
-
- END_FIX
-};
-
-static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
-static void ufshcd_async_scan(void *data, async_cookie_t cookie);
-static int ufshcd_reset_and_restore(struct ufs_hba *hba);
-static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
-static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk);
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
-static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
-static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
-static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
-static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode);
-static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
-{
- return tag >= 0 && tag < hba->nutrs;
-}
-
-static inline void ufshcd_enable_irq(struct ufs_hba *hba)
-{
- if (!hba->is_irq_enabled) {
- enable_irq(hba->irq);
- hba->is_irq_enabled = true;
- }
-}
-
-static inline void ufshcd_disable_irq(struct ufs_hba *hba)
-{
- if (hba->is_irq_enabled) {
- disable_irq(hba->irq);
- hba->is_irq_enabled = false;
- }
-}
-
-static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
-{
- if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
- scsi_unblock_requests(hba->host);
-}
-
-static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
-{
- if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
- scsi_block_requests(hba->host);
-}
-
-static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
-{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
-
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
-}
-
-static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
-{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
-
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
-}
-
-static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
-{
- int off = (int)tag - hba->nutrs;
- struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
-
- trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
- &descp->input_param1);
-}
-
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
-{
- sector_t lba = -1;
- u8 opcode = 0;
- u32 intr, doorbell;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
- int transfer_len = -1;
-
- if (!trace_ufshcd_command_enabled()) {
- /* trace UPIU W/O tracing command */
- if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
- return;
- }
-
- if (cmd) { /* data phase exists */
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
- opcode = cmd->cmnd[0];
- if ((opcode == READ_10) || (opcode == WRITE_10)) {
- /*
- * Currently we only fully trace read(10) and write(10)
- * commands
- */
- if (cmd->request && cmd->request->bio)
- lba = cmd->request->bio->bi_iter.bi_sector;
- transfer_len = be32_to_cpu(
- lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
- }
- }
-
- intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str, tag,
- doorbell, transfer_len, intr, lba, opcode);
-}
-
-static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
-{
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- return;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
- clki->max_freq)
- dev_err(hba->dev, "clk: %s, rate: %u\n",
- clki->name, clki->curr_freq);
- }
-}
-
-static void ufshcd_print_err_hist(struct ufs_hba *hba,
- struct ufs_err_reg_hist *err_hist,
- char *err_name)
-{
- int i;
- bool found = false;
-
- for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
- int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
-
- if (err_hist->tstamp[p] == 0)
- continue;
- dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
- found = true;
- }
-
- if (!found)
- dev_err(hba->dev, "No record of %s\n", err_name);
-}
-
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
- hba->ufs_version, hba->capabilities);
- dev_err(hba->dev,
- "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
- (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
- dev_err(hba->dev,
- "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
- hba->ufs_stats.hibern8_exit_cnt);
-
- ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
- "auto_hibern8_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
- "link_startup_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
- "suspend_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
-
- ufshcd_print_clk_freqs(hba);
-
- ufshcd_vops_dbg_register_dump(hba);
-}
-
-static
-void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
-{
- struct ufshcd_lrb *lrbp;
- int prdt_length;
- int tag;
-
- for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
-
- dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, ktime_to_us(lrbp->issue_time_stamp));
- dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, ktime_to_us(lrbp->compl_time_stamp));
- dev_err(hba->dev,
- "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
- tag, (u64)lrbp->utrd_dma_addr);
-
- ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
- sizeof(struct utp_transfer_req_desc));
- dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
- (u64)lrbp->ucd_req_dma_addr);
- ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
- sizeof(struct utp_upiu_req));
- dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
- (u64)lrbp->ucd_rsp_dma_addr);
- ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
- sizeof(struct utp_upiu_rsp));
-
- prdt_length = le16_to_cpu(
- lrbp->utr_descriptor_ptr->prd_table_length);
- dev_err(hba->dev,
- "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
- tag, prdt_length,
- (u64)lrbp->ucd_prdt_dma_addr);
-
- if (pr_prdt)
- ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
- }
-}
-
-static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
-{
- int tag;
-
- for_each_set_bit(tag, &bitmap, hba->nutmrs) {
- struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
-
- dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
- ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
- }
-}
-
-static void ufshcd_print_host_state(struct ufs_hba *hba)
-{
- dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
- dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
- hba->saved_err, hba->saved_uic_err);
- dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
- hba->pm_op_in_progress, hba->is_sys_suspended);
- dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
- hba->auto_bkops_enabled, hba->host->host_self_blocked);
- dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
- dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
- hba->eh_flags, hba->req_abort_count);
- dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
- hba->capabilities, hba->caps);
- dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
- hba->dev_quirks);
-}
-
-/**
- * ufshcd_print_pwr_info - print power params as saved in hba
- * power info
- * @hba: per-adapter instance
- */
-static void ufshcd_print_pwr_info(struct ufs_hba *hba)
-{
- static const char * const names[] = {
- "INVALID MODE",
- "FAST MODE",
- "SLOW_MODE",
- "INVALID MODE",
- "FASTAUTO_MODE",
- "SLOWAUTO_MODE",
- "INVALID MODE",
- };
-
- dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
- __func__,
- hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
- hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
- names[hba->pwr_info.pwr_rx],
- names[hba->pwr_info.pwr_tx],
- hba->pwr_info.hs_rate);
-}
-
-/**
- * ufshcd_wait_for_register - wait for a register value to change
- * @hba: per-adapter interface
- * @reg: mmio register offset
- * @mask: mask to apply to the read register value
- * @val: wait condition
- * @interval_us: polling interval in microsecs
- * @timeout_ms: timeout in millisecs
- * @can_sleep: perform sleep or just spin
- *
- * Returns -ETIMEDOUT on error, zero on success
- */
-int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep)
-{
- int err = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-
- /* ignore bits that we don't intend to wait on */
- val = val & mask;
-
- while ((ufshcd_readl(hba, reg) & mask) != val) {
- if (can_sleep)
- usleep_range(interval_us, interval_us + 50);
- else
- udelay(interval_us);
- if (time_after(jiffies, timeout)) {
- if ((ufshcd_readl(hba, reg) & mask) != val)
- err = -ETIMEDOUT;
- break;
- }
- }
-
- return err;
-}
-
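A representative call, polling until the controller-enable bit reads back as zero (register/mask names from ufshci.h, timing values purely illustrative):

/* Sketch: poll every 10 us, give up after 1000 ms, sleeping allowed. */
int err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				   CONTROLLER_ENABLE, 0 /* wait for clear */,
				   10, 1000, true);
if (err == -ETIMEDOUT)
	dev_err(hba->dev, "controller did not go idle\n");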
-/**
- * ufshcd_get_intr_mask - Get the interrupt bit mask
- * @hba: Pointer to adapter instance
- *
- * Returns interrupt bit mask per version
- */
-static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
-{
- u32 intr_mask = 0;
-
- switch (hba->ufs_version) {
- case UFSHCI_VERSION_10:
- intr_mask = INTERRUPT_MASK_ALL_VER_10;
- break;
- case UFSHCI_VERSION_11:
- case UFSHCI_VERSION_20:
- intr_mask = INTERRUPT_MASK_ALL_VER_11;
- break;
- case UFSHCI_VERSION_21:
- default:
- intr_mask = INTERRUPT_MASK_ALL_VER_21;
- break;
- }
-
- return intr_mask;
-}
-
-/**
- * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
- * @hba: Pointer to adapter instance
- *
- * Returns UFSHCI version supported by the controller
- */
-static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
-{
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
- return ufshcd_vops_get_ufs_hci_version(hba);
-
- return ufshcd_readl(hba, REG_UFS_VERSION);
-}
-
-/**
- * ufshcd_is_device_present - Check if any device is connected to
- * the host controller
- * @hba: pointer to adapter instance
- *
- * Returns true if a device is present, false if no device is detected
- */
-static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
-{
-	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
-}
-
-/**
- * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
- * @lrbp: pointer to local command reference block
- *
- * This function is used to get the OCS field from UTRD
- * Returns the OCS field in the UTRD
- */
-static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
-{
- return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
-}
-
-/**
- * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
- * @hba: per adapter instance
- * @pos: position of the bit to be cleared
- */
-static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
-{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos),
- REG_UTP_TRANSFER_REQ_LIST_CLEAR);
-}
-
-/**
- * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
- * @hba: per adapter instance
- * @pos: position of the bit to be cleared
- */
-static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
-{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
-}
-
-/**
- * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
- * @hba: per adapter instance
- * @tag: position of the bit to be cleared
- */
-static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
-{
- __clear_bit(tag, &hba->outstanding_reqs);
-}
-
-/**
- * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
- * @reg: Register value of host controller status
- *
- * Returns 0 on success and a positive value on failure
- */
-static inline int ufshcd_get_lists_status(u32 reg)
-{
- return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
-}
-
-/**
- * ufshcd_get_uic_cmd_result - Get the UIC command result
- * @hba: Pointer to adapter instance
- *
- * This function gets the result of UIC command completion
- * Returns 0 on success, non-zero value on error
- */
-static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
- MASK_UIC_COMMAND_RESULT;
-}
-
-/**
- * ufshcd_get_dme_attr_val - Get the value of the attribute returned by a UIC command
- * @hba: Pointer to adapter instance
- *
- * This function reads UIC command argument 3 and returns the
- * attribute value.
- */
-static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
-}
-
-/**
- * ufshcd_get_req_rsp - returns the TR response transaction type
- * @ucd_rsp_ptr: pointer to response UPIU
- */
-static inline int
-ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
-}
-
-/**
- * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * This function gets the response status and scsi_status from response UPIU
- * Returns the response result code.
- */
-static inline int
-ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
-}
-
-/**
- * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
- * from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * Return the data segment length.
- */
-static inline unsigned int
-ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_UPIU_DATA_SEG_LEN;
-}
-
-/**
- * ufshcd_is_exception_event - Check if the device raised an exception event
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * The function checks if the device raised an exception event indicated in
- * the Device Information field of response UPIU.
- *
- * Returns true if exception is raised, false otherwise.
- */
-static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_EXCEPTION_EVENT ? true : false;
-}
-
-/**
- * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
- * @hba: per adapter instance
- */
-static inline void
-ufshcd_reset_intr_aggr(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, INT_AGGR_ENABLE |
- INT_AGGR_COUNTER_AND_TIMER_RESET,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-}
-
-/**
- * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
- * @hba: per adapter instance
- * @cnt: Interrupt aggregation counter threshold
- * @tmout: Interrupt aggregation timeout value
- */
-static inline void
-ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
-{
- ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
- INT_AGGR_COUNTER_THLD_VAL(cnt) |
- INT_AGGR_TIMEOUT_VAL(tmout),
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-}
-
-/**
- * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
- * @hba: per adapter instance
- */
-static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-}
-
-/**
- * ufshcd_enable_run_stop_reg - Enable run-stop registers.
- * Setting the run-stop registers to 1 indicates to the
- * host controller that it can process requests.
- * @hba: per adapter instance
- */
-static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
- REG_UTP_TASK_REQ_LIST_RUN_STOP);
- ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
- REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
-}
-
-/**
- * ufshcd_hba_start - Start controller initialization sequence
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_start(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
-}
-
-/**
- * ufshcd_is_hba_active - Get controller state
- * @hba: per adapter instance
- *
- * Returns false if controller is active, true otherwise
- */
-static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
-{
- return !(ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE);
-}
-
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
-{
- /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
- (hba->ufs_version == UFSHCI_VERSION_11))
- return UFS_UNIPRO_VER_1_41;
- else
- return UFS_UNIPRO_VER_1_6;
-}
-EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
-
-static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
-{
- /*
- * If both the host and the device support UniPro ver 1.6 or later,
- * PA layer parameter tuning happens during link startup itself.
- *
- * We can manually tune the PA layer parameters if either the host or
- * the device doesn't support UniPro ver 1.6 or later. But to keep the
- * manual tuning logic simple, we only do manual tuning if the local
- * UniPro version is below ver 1.6.
- */
- return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
-}
-
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
- ktime_t start = ktime_get();
- bool clk_state_changed = false;
-
- if (list_empty(head))
- goto out;
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
- if (ret)
- return ret;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- if (scale_up && clki->max_freq) {
- if (clki->curr_freq == clki->max_freq)
- continue;
-
- clk_state_changed = true;
- ret = clk_set_rate(clki->clk, clki->max_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->max_freq, ret);
- break;
- }
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
- "scaled up", clki->name,
- clki->curr_freq,
- clki->max_freq);
-
- clki->curr_freq = clki->max_freq;
-
- } else if (!scale_up && clki->min_freq) {
- if (clki->curr_freq == clki->min_freq)
- continue;
-
- clk_state_changed = true;
- ret = clk_set_rate(clki->clk, clki->min_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->min_freq, ret);
- break;
- }
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
- "scaled down", clki->name,
- clki->curr_freq,
- clki->min_freq);
- clki->curr_freq = clki->min_freq;
- }
- }
- dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
- clki->name, clk_get_rate(clki->clk));
- }
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
-
-out:
- if (clk_state_changed)
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
- (scale_up ? "up" : "down"),
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- return ret;
-}
-
-/**
- * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
- * @hba: per adapter instance
- * @scale_up: True if scaling up and false if scaling down
- *
- * Returns true if scaling is required, false otherwise.
- */
-static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
- bool scale_up)
-{
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- return false;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- if (scale_up && clki->max_freq) {
- if (clki->curr_freq == clki->max_freq)
- continue;
- return true;
- } else if (!scale_up && clki->min_freq) {
- if (clki->curr_freq == clki->min_freq)
- continue;
- return true;
- }
- }
- }
-
- return false;
-}
-
-static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
- u64 wait_timeout_us)
-{
- unsigned long flags;
- int ret = 0;
- u32 tm_doorbell;
- u32 tr_doorbell;
- bool timeout = false, do_last_check = false;
- ktime_t start;
-
- ufshcd_hold(hba, false);
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * Wait for all the outstanding tasks/transfer requests.
- * Verify by checking the doorbell registers are clear.
- */
- start = ktime_get();
- do {
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
- ret = -EBUSY;
- goto out;
- }
-
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (!tm_doorbell && !tr_doorbell) {
- timeout = false;
- break;
- } else if (do_last_check) {
- break;
- }
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- schedule();
- if (ktime_to_us(ktime_sub(ktime_get(), start)) >
- wait_timeout_us) {
- timeout = true;
- /*
- * We might have been scheduled out for a long time, so
- * make sure to check whether the doorbells have cleared
- * by now.
- */
- do_last_check = true;
- }
- spin_lock_irqsave(hba->host->host_lock, flags);
- } while (tm_doorbell || tr_doorbell);
-
- if (timeout) {
- dev_err(hba->dev,
- "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
- __func__, tm_doorbell, tr_doorbell);
- ret = -EBUSY;
- }
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
- return ret;
-}
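
The function above is an instance of a common poll-until-clear pattern: sample
a monotonic clock at the start, re-check the condition on every iteration, and
perform one final check after the deadline in case the task was scheduled out
across it. A minimal standalone sketch of the same pattern (names are
illustrative, not from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)(ts.tv_nsec / 1000);
}

/* Poll cond() until it holds or wait_us elapses; one last check on timeout. */
static bool poll_until(bool (*cond)(void), uint64_t wait_us)
{
	uint64_t start = now_us();
	bool last_check = false;

	for (;;) {
		if (cond())
			return true;
		if (last_check)
			return false;
		/* we may have been scheduled out past the deadline */
		if (now_us() - start > wait_us)
			last_check = true;
	}
}

static bool always_false(void) { return false; }

int main(void)
{
	/* prints 0 after roughly one millisecond */
	printf("%d\n", poll_until(always_false, 1000));
	return 0;
}
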
-
-/**
- * ufshcd_scale_gear - scale up/down UFS gear
- * @hba: per adapter instance
- * @scale_up: True for scaling up gear and false for scaling down
- *
- * Returns 0 for success,
- * Returns -EBUSY if scaling can't happen at this time
- * Returns non-zero for any other errors
- */
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
-{
- #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
- int ret = 0;
- struct ufs_pa_layer_attr new_pwr_info;
-
- if (scale_up) {
- memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
- sizeof(struct ufs_pa_layer_attr));
- } else {
- memcpy(&new_pwr_info, &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
-
- if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
- || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
- /* save the current power mode */
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
-
- /* scale down gear */
- new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
- new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
- }
- }
-
- /* check if the power mode needs to be changed */
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-
- if (ret)
- dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
- __func__, ret,
- hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
- new_pwr_info.gear_tx, new_pwr_info.gear_rx);
-
- return ret;
-}
-
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
-{
- #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
- int ret = 0;
- /*
- * make sure that there are no outstanding requests when
- * clock scaling is in progress
- */
- ufshcd_scsi_block_requests(hba);
- down_write(&hba->clk_scaling_lock);
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
- ret = -EBUSY;
- up_write(&hba->clk_scaling_lock);
- ufshcd_scsi_unblock_requests(hba);
- }
-
- return ret;
-}
-
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
-{
- up_write(&hba->clk_scaling_lock);
- ufshcd_scsi_unblock_requests(hba);
-}
-
-/**
- * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- *
- * Returns 0 for success,
- * Returns -EBUSY if scaling can't happen at this time
- * Returns non-zero for any other errors
- */
-static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
-
- /* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
-
- ret = ufshcd_clock_scaling_prepare(hba);
- if (ret)
- return ret;
-
- /* scale down the gear before scaling down clocks */
- if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
- if (ret)
- goto out;
- }
-
- ret = ufshcd_scale_clks(hba, scale_up);
- if (ret) {
- if (!scale_up)
- ufshcd_scale_gear(hba, true);
- goto out;
- }
-
- /* scale up the gear after scaling up clocks */
- if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
- if (ret) {
- ufshcd_scale_clks(hba, false);
- goto out;
- }
- }
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
-
-out:
- ufshcd_clock_scaling_unprepare(hba);
- ufshcd_release(hba);
- return ret;
-}
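
The ordering above is the key invariant: the gear is dropped before the clocks
when scaling down, and raised only after the clocks when scaling up, so the
link gear never exceeds what the current clock rate can sustain. A toy
standalone sketch of that ordering (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the order of operations in ufshcd_devfreq_scale() above. */
static void scale(bool scale_up)
{
	if (!scale_up)
		printf("gear down\n");	/* gear first when slowing down */
	printf("clocks %s\n", scale_up ? "up" : "down");
	if (scale_up)
		printf("gear up\n");	/* gear last when speeding up */
}

int main(void)
{
	scale(true);
	scale(false);
	return 0;
}
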
-
-static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_scaling.suspend_work);
- unsigned long irq_flags;
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
- }
- hba->clk_scaling.is_suspended = true;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- __ufshcd_suspend_clkscaling(hba);
-}
-
-static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_scaling.resume_work);
- unsigned long irq_flags;
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
- }
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- devfreq_resume_device(hba->devfreq);
-}
-
-static int ufshcd_devfreq_target(struct device *dev,
- unsigned long *freq, u32 flags)
-{
- int ret = 0;
- struct ufs_hba *hba = dev_get_drvdata(dev);
- ktime_t start;
- bool scale_up, sched_clk_scaling_suspend_work = false;
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
- unsigned long irq_flags;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
-
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
-
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
-
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
- scale_up = *freq == clki->max_freq;
- if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
- }
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- start = ktime_get();
- ret = ufshcd_devfreq_scale(hba, scale_up);
-
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
- (scale_up ? "up" : "down"),
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-
-out:
- if (sched_clk_scaling_suspend_work)
- queue_work(hba->clk_scaling.workq,
- &hba->clk_scaling.suspend_work);
-
- return ret;
-}
-
-static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
-{
- int *busy = priv;
-
- WARN_ON_ONCE(reserved);
- (*busy)++;
- return false;
-}
-
-/* Whether or not any tag is in use by a request that is in progress. */
-static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
-{
- struct request_queue *q = hba->cmd_queue;
- int busy = 0;
-
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
- return busy;
-}
-
-static int ufshcd_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return -EINVAL;
-
- memset(stat, 0, sizeof(*stat));
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!scaling->window_start_t)
- goto start_window;
-
- if (scaling->is_busy_started)
- scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
- scaling->busy_start_t));
-
- stat->total_time = jiffies_to_usecs((long)jiffies -
- (long)scaling->window_start_t);
- stat->busy_time = scaling->tot_busy_t;
-start_window:
- scaling->window_start_t = jiffies;
- scaling->tot_busy_t = 0;
-
- if (hba->outstanding_reqs) {
- scaling->busy_start_t = ktime_get();
- scaling->is_busy_started = true;
- } else {
- scaling->busy_start_t = 0;
- scaling->is_busy_started = false;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return 0;
-}
-
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
-static int ufshcd_devfreq_init(struct ufs_hba *hba)
-{
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
- struct devfreq *devfreq;
- int ret;
-
- /* Skip devfreq if we don't have any clocks in the list */
- if (list_empty(clk_list))
- return 0;
-
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- dev_pm_opp_add(hba->dev, clki->min_freq, 0);
- dev_pm_opp_add(hba->dev, clki->max_freq, 0);
-
- devfreq = devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
- DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
- if (IS_ERR(devfreq)) {
- ret = PTR_ERR(devfreq);
- dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
-
- dev_pm_opp_remove(hba->dev, clki->min_freq);
- dev_pm_opp_remove(hba->dev, clki->max_freq);
- return ret;
- }
-
- hba->devfreq = devfreq;
-
- return 0;
-}
-
-static void ufshcd_devfreq_remove(struct ufs_hba *hba)
-{
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
-
- if (!hba->devfreq)
- return;
-
- devfreq_remove_device(hba->devfreq);
- hba->devfreq = NULL;
-
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- dev_pm_opp_remove(hba->dev, clki->min_freq);
- dev_pm_opp_remove(hba->dev, clki->max_freq);
-}
-
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
-
- devfreq_suspend_device(hba->devfreq);
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
-static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
- bool suspend = false;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (suspend)
- __ufshcd_suspend_clkscaling(hba);
-}
-
-static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
- bool resume = false;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (resume)
- devfreq_resume_device(hba->devfreq);
-}
-
-static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
-}
-
-static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 value;
- int err;
-
- if (kstrtou32(buf, 0, &value))
- return -EINVAL;
-
- value = !!value;
- if (value == hba->clk_scaling.is_allowed)
- goto out;
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
-
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
-
- hba->clk_scaling.is_allowed = value;
-
- if (value) {
- ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
- }
-
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
-out:
- return count;
-}
-
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
-{
- hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
- hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
- sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
- hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
- hba->clk_scaling.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
-}
-
-static void ufshcd_ungate_work(struct work_struct *work)
-{
- int ret;
- unsigned long flags;
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.ungate_work);
-
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto unblock_reqs;
- }
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_setup_clocks(hba, true);
-
- ufshcd_enable_irq(hba);
-
- /* Exit from hibern8 */
- if (ufshcd_can_hibern8_during_gating(hba)) {
- /* Prevent gating in this path */
- hba->clk_gating.is_suspended = true;
- if (ufshcd_is_link_hibern8(hba)) {
- ret = ufshcd_uic_hibern8_exit(hba);
- if (ret)
- dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
- __func__, ret);
- else
- ufshcd_set_link_active(hba);
- }
- hba->clk_gating.is_suspended = false;
- }
-unblock_reqs:
- ufshcd_scsi_unblock_requests(hba);
-}
-
-/**
- * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
- * Also, exit from hibern8 mode and set the link as active.
- * @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
- */
-int ufshcd_hold(struct ufs_hba *hba, bool async)
-{
- int rc = 0;
- unsigned long flags;
-
- if (!ufshcd_is_clkgating_allowed(hba))
- goto out;
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.active_reqs++;
-
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return 0;
- }
-
-start:
- switch (hba->clk_gating.state) {
- case CLKS_ON:
- /*
- * Wait for the ungate work to complete if in progress.
- * Though the clocks may be in ON state, the link could
- * still be in hibern8 state if hibern8 is allowed
- * during clock gating.
- * Make sure we exit hibern8 state also in addition to
- * clocks being ON.
- */
- if (ufshcd_can_hibern8_during_gating(hba) &&
- ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- goto start;
- }
- break;
- case REQ_CLKS_OFF:
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- break;
- }
- /*
- * If we are here, it means gating work is either done or
- * currently running. Hence, fall through to cancel gating
- * work and to enable clocks.
- */
- /* fallthrough */
- case CLKS_OFF:
- ufshcd_scsi_block_requests(hba);
- hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work);
- /*
- * fall through to check if we should wait for this
- * work to be done or not.
- */
- /* fallthrough */
- case REQ_CLKS_ON:
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
- /* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
- goto start;
- default:
- dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
- __func__, hba->clk_gating.state);
- break;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return rc;
-}
-EXPORT_SYMBOL_GPL(ufshcd_hold);
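
A typical caller brackets controller access with a hold/release pair; with
async set to false the call blocks until the clocks are ungated. A minimal
kernel-side sketch (the touch_registers() wrapper is hypothetical, not part of
the driver):

static void touch_registers(struct ufs_hba *hba)
{
	ufshcd_hold(hba, false);	/* clocks guaranteed on from here */
	/* ... access controller registers or issue UIC commands ... */
	ufshcd_release(hba);		/* may arm the delayed gate_work */
}
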
-
-static void ufshcd_gate_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.gate_work.work);
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * If a request to cancel this work arrived, the gating state
- * will have been marked REQ_CLKS_ON. In that case save time by
- * skipping the gating work and exiting after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
-
- if (hba->clk_gating.active_reqs
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
- goto rel_lock;
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* put the link into hibern8 mode before turning off clocks */
- if (ufshcd_can_hibern8_during_gating(hba)) {
- if (ufshcd_uic_hibern8_enter(hba)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto out;
- }
- ufshcd_set_link_hibern8(hba);
- }
-
- ufshcd_disable_irq(hba);
-
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
- else
- /* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
-
- /*
- * If a request to cancel this work arrived, the gating state
- * will have been marked REQ_CLKS_ON. In that case keep the state
- * as REQ_CLKS_ON, which still implies that the clocks are off
- * and a request to turn them on is pending. Doing it this way
- * keeps the state machine intact and ultimately prevents the
- * cancel work from running multiple times when new requests
- * arrive before the current cancel work is done.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == REQ_CLKS_OFF) {
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- }
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
-}
-
-/* host lock must be held before calling this variant */
-static void __ufshcd_release(struct ufs_hba *hba)
-{
- if (!ufshcd_is_clkgating_allowed(hba))
- return;
-
- hba->clk_gating.active_reqs--;
-
- if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done
- || ufshcd_eh_in_progress(hba))
- return;
-
- hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- queue_delayed_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
-}
-
-void ufshcd_release(struct ufs_hba *hba)
-{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- __ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-EXPORT_SYMBOL_GPL(ufshcd_release);
-
-static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
-}
-
-static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags, value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
-}
-
-static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- u32 value;
-
- if (kstrtou32(buf, 0, &value))
- return -EINVAL;
-
- value = !!value;
- if (value == hba->clk_gating.is_enabled)
- goto out;
-
- if (value) {
- ufshcd_release(hba);
- } else {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.active_reqs++;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-
- hba->clk_gating.is_enabled = value;
-out:
- return count;
-}
-
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
-{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
- ufshcd_clkscaling_init_sysfs(hba);
-}
-
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
-{
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
-}
-
-static void ufshcd_init_clk_gating(struct ufs_hba *hba)
-{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
- if (!ufshcd_is_clkgating_allowed(hba))
- return;
-
- hba->clk_gating.delay_ms = 150;
- INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
- INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
-
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM);
-
- hba->clk_gating.is_enabled = true;
-
- hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
- hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
- sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
- hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
- hba->clk_gating.delay_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
-
- hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
- hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
- sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
- hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
- hba->clk_gating.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
-}
-
-static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
-{
- if (!ufshcd_is_clkgating_allowed(hba))
- return;
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
- cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- destroy_workqueue(hba->clk_gating.clk_gating_workq);
-}
-
-/* Must be called with host lock acquired */
-static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
-{
- bool queue_resume_work = false;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- if (!hba->clk_scaling.active_reqs++)
- queue_resume_work = true;
-
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
- return;
-
- if (queue_resume_work)
- queue_work(hba->clk_scaling.workq,
- &hba->clk_scaling.resume_work);
-
- if (!hba->clk_scaling.window_start_t) {
- hba->clk_scaling.window_start_t = jiffies;
- hba->clk_scaling.tot_busy_t = 0;
- hba->clk_scaling.is_busy_started = false;
- }
-
- if (!hba->clk_scaling.is_busy_started) {
- hba->clk_scaling.busy_start_t = ktime_get();
- hba->clk_scaling.is_busy_started = true;
- }
-}
-
-static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
-{
- struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- if (!hba->outstanding_reqs && scaling->is_busy_started) {
- scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
- scaling->busy_start_t));
- scaling->busy_start_t = 0;
- scaling->is_busy_started = false;
- }
-}
-
-/**
- * ufshcd_send_command - Send SCSI or device management commands
- * @hba: per adapter instance
- * @task_tag: Task tag of the command
- */
-static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
-{
- hba->lrb[task_tag].issue_time_stamp = ktime_get();
- hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
- ufshcd_add_command_trace(hba, task_tag, "send");
- ufshcd_clk_scaling_start_busy(hba);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* Make sure that doorbell is committed immediately */
- wmb();
-}
-
-/**
- * ufshcd_copy_sense_data - Copy sense data in case of check condition
- * @lrbp: pointer to local reference block
- */
-static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
-{
- int len;
- if (lrbp->sense_buffer &&
- ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
- int len_to_copy;
-
- len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
- len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
-
- memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
- len_to_copy);
- }
-}
-
-/**
- * ufshcd_copy_query_response() - Copy the Query Response and the data
- * descriptor
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static
-int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
-
- memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
-
- /* Get the descriptor */
- if (hba->dev_cmd.query.descriptor &&
- lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
- u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
- GENERAL_UPIU_REQUEST_SIZE;
- u16 resp_len;
- u16 buf_len;
-
- /* data segment length */
- resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
- buf_len = be16_to_cpu(
- hba->dev_cmd.query.request.upiu_req.length);
- if (likely(buf_len >= resp_len)) {
- memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
- } else {
- dev_warn(hba->dev,
- "%s: rsp size %d is bigger than buffer size %d",
- __func__, resp_len, buf_len);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
- * ufshcd_hba_capabilities - Read controller capabilities
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
-{
- hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
-
- /* nutrs and nutmrs are 0 based values */
- hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
- hba->nutmrs =
- ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
-}
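
Because NUTRS and NUTMRS are stored 0-based, the raw fields must be
incremented after masking. A standalone worked example (the sample register
value is made up; the mask values 0x1F and 0x70000 are assumed from the UFSHCI
capabilities layout):

#include <stdint.h>
#include <stdio.h>

#define MASK_TRANSFER_REQUESTS_SLOTS		0x0000001F
#define MASK_TASK_MANAGEMENT_REQUEST_SLOTS	0x00070000

int main(void)
{
	uint32_t capabilities = 0x0107001F;	/* sample value */
	int nutrs  = (capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	int nutmrs = ((capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;

	printf("nutrs=%d nutmrs=%d\n", nutrs, nutmrs);	/* 32 and 8 */
	return 0;
}
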
-
-/**
- * ufshcd_ready_for_uic_cmd - Check if controller is ready
- * to accept UIC commands
- * @hba: per adapter instance
- * Return true on success, else false
- */
-static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
-}
-
-/**
- * ufshcd_get_upmcrs - Get the power mode change request status
- * @hba: Pointer to adapter instance
- *
- * This function gets the UPMCRS field of HCS register
- * Returns value of UPMCRS field
- */
-static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
-{
- return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
-}
-
-/**
- * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- *
- * Mutex must be held.
- */
-static inline void
-ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
-{
- WARN_ON(hba->active_uic_cmd);
-
- hba->active_uic_cmd = uic_cmd;
-
- /* Write Args */
- ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
- ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
- ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
-
- /* Write UIC Cmd */
- ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
- REG_UIC_COMMAND);
-}
-
-/**
- * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- *
- * Must be called with mutex held.
- * Returns 0 only if success.
- */
-static int
-ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
-{
- int ret;
- unsigned long flags;
-
- if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT)))
- ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
- else
- ret = -ETIMEDOUT;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->active_uic_cmd = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return ret;
-}
-
-/**
- * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- * @completion: initialize the completion only if this is set to true
- *
- * Identical to ufshcd_send_uic_cmd() except for the locking. Must be
- * called with the uic_cmd_mutex held and the host_lock locked.
- * Returns 0 only if success.
- */
-static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
- bool completion)
-{
- if (!ufshcd_ready_for_uic_cmd(hba)) {
- dev_err(hba->dev,
- "Controller not ready to accept UIC commands\n");
- return -EIO;
- }
-
- if (completion)
- init_completion(&uic_cmd->done);
-
- ufshcd_dispatch_uic_cmd(hba, uic_cmd);
-
- return 0;
-}
-
-/**
- * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- *
- * Returns 0 only if success.
- */
-int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
-{
- int ret;
- unsigned long flags;
-
- ufshcd_hold(hba, false);
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (!ret)
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
- mutex_unlock(&hba->uic_cmd_mutex);
-
- ufshcd_release(hba);
- return ret;
-}
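
A caller fills a struct uic_command and passes it in; for example, reading a
MIB attribute via DME_GET looks roughly like this (a sketch only;
UIC_CMD_DME_GET and the UIC_ARG_MIB() encoding are assumed from the UFSHCI
definitions the driver uses):

static int read_mib_attr(struct ufs_hba *hba, u32 attr, u32 *val)
{
	struct uic_command uic_cmd = {
		.command   = UIC_CMD_DME_GET,
		.argument1 = UIC_ARG_MIB(attr),	/* attribute ID in arg1 */
	};
	int ret = ufshcd_send_uic_cmd(hba, &uic_cmd);

	if (!ret)
		*val = ufshcd_get_dme_attr_val(hba);	/* result in arg3 */
	return ret;
}
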
-
-/**
- * ufshcd_map_sg - Map scatter-gather list to prdt
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- *
- * Returns 0 in case of success, non-zero value in case of failure
- */
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshcd_sg_entry *prd_table;
- struct scatterlist *sg;
- struct scsi_cmnd *cmd;
- int sg_segments;
- int i;
-
- cmd = lrbp->cmd;
- sg_segments = scsi_dma_map(cmd);
- if (sg_segments < 0)
- return sg_segments;
-
- if (sg_segments) {
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16)(sg_segments *
- sizeof(struct ufshcd_sg_entry)));
- else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
-
- prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
-
- scsi_for_each_sg(cmd, sg, sg_segments, i) {
- prd_table[i].size =
- cpu_to_le32(((u32) sg_dma_len(sg))-1);
- prd_table[i].base_addr =
- cpu_to_le32(lower_32_bits(sg->dma_address));
- prd_table[i].upper_addr =
- cpu_to_le32(upper_32_bits(sg->dma_address));
- prd_table[i].reserved = 0;
- }
- } else {
- lrbp->utr_descriptor_ptr->prd_table_length = 0;
- }
-
- return 0;
-}
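
Note the off-by-one convention in the PRDT: the hardware expects each entry's
size field to hold the segment byte count minus one, and a 64-bit DMA address
split into lower and upper halves. A standalone illustration of that
arithmetic (values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sg_len = 4096;			/* bytes in this segment */
	uint32_t prd_size = sg_len - 1;		/* 0x0fff as programmed */
	uint64_t dma = 0x00000001fffff000ULL;	/* sample DMA address */

	printf("size=0x%x base=0x%08x upper=0x%08x\n",
	       (unsigned int)prd_size, (unsigned int)dma,
	       (unsigned int)(dma >> 32));
	return 0;
}
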
-
-/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- if (hba->ufs_version == UFSHCI_VERSION_10) {
- u32 rw;
- rw = set & INTERRUPT_MASK_RW_VER_10;
- set = rw | ((set ^ intrs) & intrs);
- } else {
- set |= intrs;
- }
-
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- if (hba->ufs_version == UFSHCI_VERSION_10) {
- u32 rw;
- rw = (set & INTERRUPT_MASK_RW_VER_10) &
- ~(intrs & INTERRUPT_MASK_RW_VER_10);
- set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
-
- } else {
- set &= ~intrs;
- }
-
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor
- * header according to the request
- * @lrbp: pointer to local reference block
- * @upiu_flags: flags required in the header
- * @cmd_dir: request's data direction
- */
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u32 *upiu_flags, enum dma_data_direction cmd_dir)
-{
- struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
- u32 data_direction;
- u32 dword_0;
-
- if (cmd_dir == DMA_FROM_DEVICE) {
- data_direction = UTP_DEVICE_TO_HOST;
- *upiu_flags = UPIU_CMD_FLAGS_READ;
- } else if (cmd_dir == DMA_TO_DEVICE) {
- data_direction = UTP_HOST_TO_DEVICE;
- *upiu_flags = UPIU_CMD_FLAGS_WRITE;
- } else {
- data_direction = UTP_NO_DATA_TRANSFER;
- *upiu_flags = UPIU_CMD_FLAGS_NONE;
- }
-
- dword_0 = data_direction | (lrbp->command_type
- << UPIU_COMMAND_TYPE_OFFSET);
- if (lrbp->intr_cmd)
- dword_0 |= UTP_REQ_DESC_INT_CMD;
-
- /* Transfer request descriptor header fields */
- req_desc->header.dword_0 = cpu_to_le32(dword_0);
- /* dword_1 is reserved, hence it is set to 0 */
- req_desc->header.dword_1 = 0;
- /*
- * Assign an invalid value to the command status. The controller
- * updates the OCS field with the actual command status on
- * completion.
- */
- req_desc->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
- /* dword_3 is reserved, hence it is set to 0 */
- req_desc->header.dword_3 = 0;
-
- req_desc->prd_table_length = 0;
-}
-
-/**
- * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
- * for SCSI commands
- * @lrbp: local reference block pointer
- * @upiu_flags: flags
- */
-static
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
-{
- struct scsi_cmnd *cmd = lrbp->cmd;
- struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
- unsigned short cdb_len;
-
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_COMMAND, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
-
- /* Total EHS length and Data segment length will be zero */
- ucd_req_ptr->header.dword_2 = 0;
-
- ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
-
- cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
- memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-}
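
UPIU_HEADER_DWORD() packs four header bytes with the transaction code in the
most significant byte before the big-endian conversion. A standalone sketch of
that packing (the byte order is inferred from its use above; 0x01 for
UPIU_TRANSACTION_COMMAND and 0x40 for UPIU_CMD_FLAGS_READ are assumed spec
values):

#include <stdint.h>
#include <stdio.h>

static uint32_t upiu_header_dword(uint8_t b3, uint8_t b2, uint8_t b1, uint8_t b0)
{
	/* the driver additionally wraps this in cpu_to_be32() */
	return ((uint32_t)b3 << 24) | ((uint32_t)b2 << 16) |
	       ((uint32_t)b1 << 8) | b0;
}

int main(void)
{
	/* transaction=COMMAND(0x01), flags=READ(0x40), lun=0, tag=5 */
	printf("dword_0=0x%08x\n", upiu_header_dword(0x01, 0x40, 0x00, 0x05));
	return 0;	/* prints dword_0=0x01400005 */
}
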
-
-/**
- * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
- * for query requests
- * @hba: UFS hba
- * @lrbp: local reference block pointer
- * @upiu_flags: flags
- */
-static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, u32 upiu_flags)
-{
- struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
- struct ufs_query *query = &hba->dev_cmd.query;
- u16 len = be16_to_cpu(query->request.upiu_req.length);
-
- /* Query request header */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- 0, query->request.query_func, 0, 0);
-
- /* Data segment length is only needed for WRITE_DESC */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- ucd_req_ptr->header.dword_2 =
- UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
- else
- ucd_req_ptr->header.dword_2 = 0;
-
- /* Copy the Query Request buffer as is */
- memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
- QUERY_OSF_SIZE);
-
- /* Copy the Descriptor */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- memcpy(ucd_req_ptr + 1, query->descriptor, len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-}
-
-static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
-{
- struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
-
- memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
-
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 =
- UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
- /* clear rest of the fields of basic header */
- ucd_req_ptr->header.dword_1 = 0;
- ucd_req_ptr->header.dword_2 = 0;
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-}
-
-/**
- * ufshcd_comp_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
- * for device management purposes
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- u32 upiu_flags;
- int ret = 0;
-
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
- (hba->ufs_version == UFSHCI_VERSION_11))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
- if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
- ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
- else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
- ufshcd_prepare_utp_nop_upiu(lrbp);
- else
- ret = -EINVAL;
-
- return ret;
-}
-
-/**
- * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
- * for SCSI purposes
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- u32 upiu_flags;
- int ret = 0;
-
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
- (hba->ufs_version == UFSHCI_VERSION_11))
- lrbp->command_type = UTP_CMD_TYPE_SCSI;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-/**
- * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
- * @upiu_wlun_id: UPIU W-LUN id
- *
- * Returns SCSI W-LUN id
- */
-static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
-{
- return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
-}
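
For example, the UFS Device W-LUN (0xD0 in UPIU terms, i.e. 0x50 with the
W-LUN indicator bit set) maps to SCSI W-LUN 0xC150. A standalone check
(UFS_UPIU_WLUN_ID = 0x80 and SCSI_W_LUN_BASE = 0xC100 are assumed values):

#include <stdint.h>
#include <stdio.h>

#define UFS_UPIU_WLUN_ID	0x80	/* assumed: W-LUN indicator bit */
#define SCSI_W_LUN_BASE		0xc100	/* assumed: well-known LUN base */

static uint16_t upiu_wlun_to_scsi_wlun(uint8_t upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

int main(void)
{
	/* prints 0xc150 */
	printf("0x%04x\n", (unsigned int)upiu_wlun_to_scsi_wlun(0xd0));
	return 0;
}
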
-
-/**
- * ufshcd_queuecommand - main entry point for SCSI requests
- * @host: SCSI host pointer
- * @cmd: command from SCSI Midlayer
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
-{
- struct ufshcd_lrb *lrbp;
- struct ufs_hba *hba;
- unsigned long flags;
- int tag;
- int err = 0;
-
- hba = shost_priv(host);
-
- tag = cmd->request->tag;
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
-
- if (!down_read_trylock(&hba->clk_scaling_lock))
- return SCSI_MLQUEUE_HOST_BUSY;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED:
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out_unlock;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
-
- /* if error handling is in progress, don't issue commands */
- if (ufshcd_eh_in_progress(hba)) {
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- hba->req_abort_count = 0;
-
- err = ufshcd_hold(hba, true);
- if (err) {
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- WARN_ON(hba->clk_gating.state != CLKS_ON);
-
- lrbp = &hba->lrb[tag];
-
- WARN_ON(lrbp->cmd);
- lrbp->cmd = cmd;
- lrbp->sense_bufflen = UFS_SENSE_SIZE;
- lrbp->sense_buffer = cmd->sense_buffer;
- lrbp->task_tag = tag;
- lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
- lrbp->req_abort_skip = false;
-
- ufshcd_comp_scsi_upiu(hba, lrbp);
-
- err = ufshcd_map_sg(hba, lrbp);
- if (err) {
- lrbp->cmd = NULL;
- ufshcd_release(hba);
- goto out;
- }
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
-
- /* issue command to the controller */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
- ufshcd_send_command(hba, tag);
-out_unlock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- up_read(&hba->clk_scaling_lock);
- return err;
-}
-
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
-{
- lrbp->cmd = NULL;
- lrbp->sense_bufflen = 0;
- lrbp->sense_buffer = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0; /* device management cmd is not specific to any LUN */
- lrbp->intr_cmd = true; /* No interrupt aggregation */
- hba->dev_cmd.type = cmd_type;
-
- return ufshcd_comp_devman_upiu(hba, lrbp);
-}
-
-static int
-ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
-{
- int err = 0;
- unsigned long flags;
- u32 mask = 1 << tag;
-
- /* clear outstanding transaction before retry */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_utrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /*
- * wait for the h/w to clear the corresponding bit in the
- * doorbell. The max. wait is 1 sec.
- */
- err = ufshcd_wait_for_register(hba,
- REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000, true);
-
- return err;
-}
-
-static int
-ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
-
- /* Get the UPIU response */
- query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
- UPIU_RSP_CODE_OFFSET;
- return query_res->response;
-}
-
-/**
- * ufshcd_dev_cmd_completion() - handles device management command responses
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static int
-ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- int resp;
- int err = 0;
-
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
- switch (resp) {
- case UPIU_TRANSACTION_NOP_IN:
- if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
- err = -EINVAL;
- dev_err(hba->dev, "%s: unexpected response %x\n",
- __func__, resp);
- }
- break;
- case UPIU_TRANSACTION_QUERY_RSP:
- err = ufshcd_check_query_response(hba, lrbp);
- if (!err)
- err = ufshcd_copy_query_response(hba, lrbp);
- break;
- case UPIU_TRANSACTION_REJECT_UPIU:
- /* TODO: handle Reject UPIU Response */
- err = -EPERM;
- dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
- __func__);
- break;
- default:
- err = -EINVAL;
- dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
- __func__, resp);
- break;
- }
-
- return err;
-}
-
-static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, int max_timeout)
-{
- int err = 0;
- unsigned long time_left;
- unsigned long flags;
-
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
- msecs_to_jiffies(max_timeout));
-
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->dev_cmd.complete = NULL;
- if (likely(time_left)) {
- err = ufshcd_get_tr_ocs(lrbp);
- if (!err)
- err = ufshcd_dev_cmd_completion(hba, lrbp);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (!time_left) {
- err = -ETIMEDOUT;
- dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
- __func__, lrbp->task_tag);
- if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
- /* successfully cleared the command, retry if needed */
- err = -EAGAIN;
- /*
- * in case of an error, after clearing the doorbell,
- * we also need to clear the outstanding_request
- * field in hba
- */
- ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
- }
-
- return err;
-}
-
-/**
- * ufshcd_exec_dev_cmd - API for sending device management requests
- * @hba: UFS hba
- * @cmd_type: specifies the type (NOP, Query...)
- * @timeout: timeout in milliseconds
- *
- * NOTE: Since there is only one available tag for device management commands,
- * it is expected you hold the hba->dev_cmd.lock mutex.
- */
-static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
- enum dev_cmd_type cmd_type, int timeout)
-{
- struct request_queue *q = hba->cmd_queue;
- struct request *req;
- struct ufshcd_lrb *lrbp;
- int err;
- int tag;
- struct completion wait;
- unsigned long flags;
-
- down_read(&hba->clk_scaling_lock);
-
- /*
- * Get a free slot; blk_get_request() sleeps if none is
- * available, and the maximum wait time is bounded by the
- * SCSI request timeout.
- */
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
-
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
- err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
- if (unlikely(err))
- goto out_put_tag;
-
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, tag, "query_send");
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
-
- ufshcd_add_query_upiu_trace(hba, tag,
- err ? "query_complete_err" : "query_complete");
-
-out_put_tag:
- blk_put_request(req);
-out_unlock:
- up_read(&hba->clk_scaling_lock);
- return err;
-}
-
-/**
- * ufshcd_init_query() - init the query response and request parameters
- * @hba: per-adapter instance
- * @request: address of the request pointer to be initialized
- * @response: address of the response pointer to be initialized
- * @opcode: operation to perform
- * @idn: flag idn to access
- * @index: LU number to access
- * @selector: query/flag/descriptor further identification
- */
-static inline void ufshcd_init_query(struct ufs_hba *hba,
- struct ufs_query_req **request, struct ufs_query_res **response,
- enum query_opcode opcode, u8 idn, u8 index, u8 selector)
-{
- *request = &hba->dev_cmd.query.request;
- *response = &hba->dev_cmd.query.response;
- memset(*request, 0, sizeof(struct ufs_query_req));
- memset(*response, 0, sizeof(struct ufs_query_res));
- (*request)->upiu_req.opcode = opcode;
- (*request)->upiu_req.idn = idn;
- (*request)->upiu_req.index = index;
- (*request)->upiu_req.selector = selector;
-}
-
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
-{
- int ret;
- int retries;
-
- for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
- if (ret)
- dev_dbg(hba->dev,
- "%s: failed with error %d, retries %d\n",
- __func__, ret, retries);
- else
- break;
- }
-
- if (ret)
- dev_err(hba->dev,
- "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
- __func__, opcode, idn, ret, retries);
- return ret;
-}
-
-/**
- * ufshcd_query_flag() - API function for sending flag query requests
- * @hba: per-adapter instance
- * @opcode: flag query to perform
- * @idn: flag idn to access
- * @flag_res: the flag value after the query request completes
- *
- * Returns 0 for success, non-zero in case of failure
- */
-int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res)
-{
- struct ufs_query_req *request = NULL;
- struct ufs_query_res *response = NULL;
- int err, index = 0, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
-
- BUG_ON(!hba);
-
- ufshcd_hold(hba, false);
- mutex_lock(&hba->dev_cmd.lock);
- ufshcd_init_query(hba, &request, &response, opcode, idn, index,
- selector);
-
- switch (opcode) {
- case UPIU_QUERY_OPCODE_SET_FLAG:
- case UPIU_QUERY_OPCODE_CLEAR_FLAG:
- case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- break;
- case UPIU_QUERY_OPCODE_READ_FLAG:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- if (!flag_res) {
- /* No dummy reads */
- dev_err(hba->dev, "%s: Invalid argument for read request\n",
- __func__);
- err = -EINVAL;
- goto out_unlock;
- }
- break;
- default:
- dev_err(hba->dev,
- "%s: Expected query flag opcode but got = %d\n",
- __func__, opcode);
- err = -EINVAL;
- goto out_unlock;
- }
-
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
-
- if (err) {
- dev_err(hba->dev,
- "%s: Sending flag query for idn %d failed, err = %d\n",
- __func__, idn, err);
- goto out_unlock;
- }
-
- if (flag_res)
- *flag_res = (be32_to_cpu(response->upiu_res.value) &
- MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
-
-out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
- return err;
-}
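
Typical usage reads or toggles a boolean device flag; for instance, polling
fDeviceInit after device initialization looks roughly like this (a sketch;
QUERY_FLAG_IDN_FDEVICEINIT is the flag IDN the UFS spec defines for this
purpose):

static int check_fdeviceinit(struct ufs_hba *hba, bool *done)
{
	bool flag_res = true;
	int err;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
	if (!err)
		*done = !flag_res;	/* device clears the flag when done */
	return err;
}
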
-
-/**
- * ufshcd_query_attr - API function for sending attribute requests
- * @hba: per-adapter instance
- * @opcode: attribute opcode
- * @idn: attribute idn to access
- * @index: index field
- * @selector: selector field
- * @attr_val: the attribute value after the query request completes
- *
- * Returns 0 for success, non-zero in case of failure
- */
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
-{
- struct ufs_query_req *request = NULL;
- struct ufs_query_res *response = NULL;
- int err;
-
- BUG_ON(!hba);
-
- ufshcd_hold(hba, false);
- if (!attr_val) {
- dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
- __func__, opcode);
- err = -EINVAL;
- goto out;
- }
-
- mutex_lock(&hba->dev_cmd.lock);
- ufshcd_init_query(hba, &request, &response, opcode, idn, index,
- selector);
-
- switch (opcode) {
- case UPIU_QUERY_OPCODE_WRITE_ATTR:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- request->upiu_req.value = cpu_to_be32(*attr_val);
- break;
- case UPIU_QUERY_OPCODE_READ_ATTR:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- break;
- default:
- dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
- __func__, opcode);
- err = -EINVAL;
- goto out_unlock;
- }
-
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
-
- if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
- __func__, opcode, idn, index, err);
- goto out_unlock;
- }
-
- *attr_val = be32_to_cpu(response->upiu_res.value);
-
-out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
-out:
- ufshcd_release(hba);
- return err;
-}
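-
-/*
- * Editor's sketch (not part of the original driver): reading a u32 device
- * attribute with ufshcd_query_attr(). The helper name is hypothetical;
- * QUERY_ATTR_IDN_EE_CONTROL is the real idn used by the exception-event
- * helpers later in this file.
- */
-static int ufshcd_example_read_ee_control(struct ufs_hba *hba, u32 *val)
-{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EE_CONTROL, 0, 0, val);
-}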
-
-/**
- * ufshcd_query_attr_retry() - API function for sending query
- * attribute with retries
- * @hba: per-adapter instance
- * @opcode: attribute opcode
- * @idn: attribute idn to access
- * @index: index field
- * @selector: selector field
- * @attr_val: the attribute value after the query request
- * completes
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
- u32 *attr_val)
-{
- int ret = 0;
- u32 retries;
-
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- ret = ufshcd_query_attr(hba, opcode, idn, index,
- selector, attr_val);
- if (ret)
- dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
- __func__, ret, retries);
- else
- break;
- }
-
- if (ret)
- dev_err(hba->dev,
- "%s: query attribute, idn %d, failed with error %d after %d retires\n",
- __func__, idn, ret, QUERY_REQ_RETRIES);
- return ret;
-}
-
-static int __ufshcd_query_descriptor(struct ufs_hba *hba,
- enum query_opcode opcode, enum desc_idn idn, u8 index,
- u8 selector, u8 *desc_buf, int *buf_len)
-{
- struct ufs_query_req *request = NULL;
- struct ufs_query_res *response = NULL;
- int err;
-
- BUG_ON(!hba);
-
- ufshcd_hold(hba, false);
- if (!desc_buf) {
- dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
- __func__, opcode);
- err = -EINVAL;
- goto out;
- }
-
- if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
- dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
- __func__, *buf_len);
- err = -EINVAL;
- goto out;
- }
-
- mutex_lock(&hba->dev_cmd.lock);
- ufshcd_init_query(hba, &request, &response, opcode, idn, index,
- selector);
- hba->dev_cmd.query.descriptor = desc_buf;
- request->upiu_req.length = cpu_to_be16(*buf_len);
-
- switch (opcode) {
- case UPIU_QUERY_OPCODE_WRITE_DESC:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- break;
- case UPIU_QUERY_OPCODE_READ_DESC:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- break;
- default:
- dev_err(hba->dev,
- "%s: Expected query descriptor opcode but got = 0x%.2x\n",
- __func__, opcode);
- err = -EINVAL;
- goto out_unlock;
- }
-
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
-
- if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
- __func__, opcode, idn, index, err);
- goto out_unlock;
- }
-
- *buf_len = be16_to_cpu(response->upiu_res.length);
-
-out_unlock:
- hba->dev_cmd.query.descriptor = NULL;
- mutex_unlock(&hba->dev_cmd.lock);
-out:
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_query_descriptor_retry - API function for sending descriptor requests
- * @hba: per-adapter instance
- * @opcode: attribute opcode
- * @idn: attribute idn to access
- * @index: index field
- * @selector: selector field
- * @desc_buf: the buffer that contains the descriptor
- * @buf_len: length parameter passed to the device
- *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len)
-{
- int err;
- int retries;
-
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- err = __ufshcd_query_descriptor(hba, opcode, idn, index,
- selector, desc_buf, buf_len);
- if (!err || err == -EINVAL)
- break;
- }
-
- return err;
-}
-
-/**
- * ufshcd_read_desc_length - read the specified descriptor length from header
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_index: descriptor index
- * @desc_length: pointer to variable to read the length of descriptor
- *
- * Return 0 in case of success, non-zero otherwise
- */
-static int ufshcd_read_desc_length(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- int *desc_length)
-{
- int ret;
- u8 header[QUERY_DESC_HDR_SIZE];
- int header_len = QUERY_DESC_HDR_SIZE;
-
- if (desc_id >= QUERY_DESC_IDN_MAX)
- return -EINVAL;
-
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, header,
- &header_len);
-
- if (ret) {
- dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
- __func__, desc_id);
- return ret;
- } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
- dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
- __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
- desc_id);
- ret = -EINVAL;
- }
-
- *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
- return ret;
-
-}
-
-/**
- * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_len: mapped desc length (out)
- *
- * Return 0 in case of success, non-zero otherwise
- */
-int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
- enum desc_idn desc_id, int *desc_len)
-{
- switch (desc_id) {
- case QUERY_DESC_IDN_DEVICE:
- *desc_len = hba->desc_size.dev_desc;
- break;
- case QUERY_DESC_IDN_POWER:
- *desc_len = hba->desc_size.pwr_desc;
- break;
- case QUERY_DESC_IDN_GEOMETRY:
- *desc_len = hba->desc_size.geom_desc;
- break;
- case QUERY_DESC_IDN_CONFIGURATION:
- *desc_len = hba->desc_size.conf_desc;
- break;
- case QUERY_DESC_IDN_UNIT:
- *desc_len = hba->desc_size.unit_desc;
- break;
- case QUERY_DESC_IDN_INTERCONNECT:
- *desc_len = hba->desc_size.interc_desc;
- break;
- case QUERY_DESC_IDN_STRING:
- *desc_len = QUERY_DESC_MAX_SIZE;
- break;
- case QUERY_DESC_IDN_HEALTH:
- *desc_len = hba->desc_size.hlth_desc;
- break;
- case QUERY_DESC_IDN_RFU_0:
- case QUERY_DESC_IDN_RFU_1:
- *desc_len = 0;
- break;
- default:
- *desc_len = 0;
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
-
-/**
- * ufshcd_read_desc_param - read the specified descriptor parameter
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_index: descriptor index
- * @param_offset: offset of the parameter to read
- * @param_read_buf: pointer to buffer where parameter would be read
- * @param_size: sizeof(param_read_buf)
- *
- * Return 0 in case of success, non-zero otherwise
- */
-int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size)
-{
- int ret;
- u8 *desc_buf;
- int buff_len;
- bool is_kmalloc = true;
-
- /* Safety check */
- if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
- return -EINVAL;
-
- /*
- * Get the max length of the descriptor from the structure filled up at
- * probe time.
- */
- ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
-
- /* Sanity checks */
- if (ret || !buff_len) {
- dev_err(hba->dev, "%s: Failed to get full descriptor length",
- __func__);
- return ret;
- }
-
- /* Check whether we need temp memory */
- if (param_offset != 0 || param_size < buff_len) {
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
- if (!desc_buf)
- return -ENOMEM;
- } else {
- desc_buf = param_read_buf;
- is_kmalloc = false;
- }
-
- /* Request for full descriptor */
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0,
- desc_buf, &buff_len);
-
- if (ret) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
- __func__, desc_id, desc_index, param_offset, ret);
- goto out;
- }
-
- /* Sanity check */
- if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
- dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
- __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
-
- /* Make sure we don't copy more data than is available */
- if (is_kmalloc && param_size > buff_len)
- param_size = buff_len;
-
- if (is_kmalloc)
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
-out:
- if (is_kmalloc)
- kfree(desc_buf);
- return ret;
-}
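-
-/*
- * Editor's sketch (not part of the original driver): reading two bytes out
- * of the device descriptor with ufshcd_read_desc_param(). The helper name
- * is hypothetical and DEVICE_DESC_PARAM_SPEC_VER is assumed to be the usual
- * offset constant from ufs.h; descriptor fields are big-endian on the wire.
- */
-static int ufshcd_example_read_spec_ver(struct ufs_hba *hba, u16 *spec_ver)
-{
- u8 raw[2];
- int err;
-
- err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
- DEVICE_DESC_PARAM_SPEC_VER,
- raw, sizeof(raw));
- if (!err)
- *spec_ver = (raw[0] << 8) | raw[1];
- return err;
-}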
-
-static inline int ufshcd_read_desc(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- void *buf,
- u32 size)
-{
- return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
-}
-
-
-/**
- * struct uc_string_id - unicode string
- *
- * @len: size of this descriptor inclusive
- * @type: descriptor type
- * @uc: unicode string character
- */
-struct uc_string_id {
- u8 len;
- u8 type;
- wchar_t uc[0];
-} __packed;
-
-/* replace non-printable or non-ASCII characters with spaces */
-static inline char ufshcd_remove_non_printable(u8 ch)
-{
- return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
-}
-
-/**
- * ufshcd_read_string_desc - read string descriptor
- * @hba: pointer to adapter instance
- * @desc_index: descriptor index
- * @buf: pointer to buffer where descriptor would be read,
- * the caller should free the memory.
- * @ascii: if true convert from unicode to ascii characters
- * null terminated string.
- *
- * Return:
- * * string size on success.
- * * -ENOMEM: on allocation failure
- * * -EINVAL: on a wrong parameter
- */
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii)
-{
- struct uc_string_id *uc_str;
- u8 *str;
- int ret;
-
- if (!buf)
- return -EINVAL;
-
- uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
- if (!uc_str)
- return -ENOMEM;
-
- ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
- desc_index, uc_str,
- QUERY_DESC_MAX_SIZE);
- if (ret < 0) {
- dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
- QUERY_REQ_RETRIES, ret);
- str = NULL;
- goto out;
- }
-
- if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
- dev_dbg(hba->dev, "String Desc is of zero length\n");
- str = NULL;
- ret = 0;
- goto out;
- }
-
- if (ascii) {
- ssize_t ascii_len;
- int i;
- /* remove the header and divide by 2 to move from UTF-16 to UTF-8; +1 for NUL */
- ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
- str = kzalloc(ascii_len, GFP_KERNEL);
- if (!str) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * The descriptor contains the string in UTF-16 format.
- * We need to convert it to UTF-8 so it can be displayed.
- */
- ret = utf16s_to_utf8s(uc_str->uc,
- uc_str->len - QUERY_DESC_HDR_SIZE,
- UTF16_BIG_ENDIAN, str, ascii_len);
-
- /* replace non-printable or non-ASCII characters with spaces */
- for (i = 0; i < ret; i++)
- str[i] = ufshcd_remove_non_printable(str[i]);
-
- str[ret++] = '\0';
-
- } else {
- str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
- if (!str) {
- ret = -ENOMEM;
- goto out;
- }
- ret = uc_str->len;
- }
-out:
- *buf = str;
- kfree(uc_str);
- return ret;
-}
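-
-/*
- * Editor's sketch (not part of the original driver): reading a string
- * descriptor as an ASCII C-string and releasing it. The helper name is
- * hypothetical; a real caller would obtain desc_index from the device
- * descriptor (e.g. the product-name string index).
- */
-static void ufshcd_example_log_string_desc(struct ufs_hba *hba, u8 desc_index)
-{
- u8 *str = NULL;
- int len;
-
- len = ufshcd_read_string_desc(hba, desc_index, &str, true);
- if (len > 0)
- dev_info(hba->dev, "string descriptor %u: %s\n",
- desc_index, str);
- kfree(str); /* caller owns the returned buffer; kfree(NULL) is a no-op */
-}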
-
-/**
- * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
- * @hba: Pointer to adapter instance
- * @lun: lun id
- * @param_offset: offset of the parameter to read
- * @param_read_buf: pointer to buffer where parameter would be read
- * @param_size: sizeof(param_read_buf)
- *
- * Return 0 in case of success, non-zero otherwise
- */
-static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
- int lun,
- enum unit_desc_param param_offset,
- u8 *param_read_buf,
- u32 param_size)
-{
- /*
- * Unit descriptors are only available for general purpose LUs (LUN id
- * from 0 to 7) and RPMB Well known LU.
- */
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
- return -EOPNOTSUPP;
-
- return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
- param_offset, param_read_buf, param_size);
-}
-
-/**
- * ufshcd_memory_alloc - allocate memory for host memory space data structures
- * @hba: per adapter instance
- *
- * 1. Allocate DMA memory for Command Descriptor array
- * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
- * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
- * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
- * (UTMRDL)
- * 4. Allocate memory for the local reference block (lrb).
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_memory_alloc(struct ufs_hba *hba)
-{
- size_t utmrdl_size, utrdl_size, ucdl_size;
-
- /* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
- ucdl_size,
- &hba->ucdl_dma_addr,
- GFP_KERNEL);
-
- /*
- * UFSHCI requires UTP command descriptors to be 128 byte aligned.
- * We make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
- * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
- * be aligned to 128 bytes as well.
- */
- if (!hba->ucdl_base_addr ||
- WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(hba->dev,
- "Command Descriptor Memory allocation failed\n");
- goto out;
- }
-
- /*
- * Allocate memory for UTP Transfer descriptors
- * UFSHCI requires 1024 byte alignment of UTRD
- */
- utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
- utrdl_size,
- &hba->utrdl_dma_addr,
- GFP_KERNEL);
- if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(hba->dev,
- "Transfer Descriptor Memory allocation failed\n");
- goto out;
- }
-
- /*
- * Allocate memory for UTP Task Management descriptors
- * UFSHCI requires 1024 byte alignment of UTMRD
- */
- utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
- utmrdl_size,
- &hba->utmrdl_dma_addr,
- GFP_KERNEL);
- if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(hba->dev,
- "Task Management Descriptor Memory allocation failed\n");
- goto out;
- }
-
- /* Allocate memory for local reference block */
- hba->lrb = devm_kcalloc(hba->dev,
- hba->nutrs, sizeof(struct ufshcd_lrb),
- GFP_KERNEL);
- if (!hba->lrb) {
- dev_err(hba->dev, "LRB Memory allocation failed\n");
- goto out;
- }
- return 0;
-out:
- return -ENOMEM;
-}
-
-/**
- * ufshcd_host_memory_configure - configure local reference block with
- * memory offsets
- * @hba: per adapter instance
- *
- * Configure Host memory space
- * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
- * address.
- * 2. Update each UTRD with Response UPIU offset, Response UPIU length
- * and PRDT offset.
- * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
- * into local reference block.
- */
-static void ufshcd_host_memory_configure(struct ufs_hba *hba)
-{
- struct utp_transfer_cmd_desc *cmd_descp;
- struct utp_transfer_req_desc *utrdlp;
- dma_addr_t cmd_desc_dma_addr;
- dma_addr_t cmd_desc_element_addr;
- u16 response_offset;
- u16 prdt_offset;
- int cmd_desc_size;
- int i;
-
- utrdlp = hba->utrdl_base_addr;
- cmd_descp = hba->ucdl_base_addr;
-
- response_offset =
- offsetof(struct utp_transfer_cmd_desc, response_upiu);
- prdt_offset =
- offsetof(struct utp_transfer_cmd_desc, prd_table);
-
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
- cmd_desc_dma_addr = hba->ucdl_dma_addr;
-
- for (i = 0; i < hba->nutrs; i++) {
- /* Configure UTRD with command descriptor base address */
- cmd_desc_element_addr =
- (cmd_desc_dma_addr + (cmd_desc_size * i));
- utrdlp[i].command_desc_base_addr_lo =
- cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
- utrdlp[i].command_desc_base_addr_hi =
- cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
-
- /*
- * Response UPIU and PRDT offsets are expressed in dwords
- * unless the byte-granularity quirk is set.
- */
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16(response_offset);
- utrdlp[i].prd_table_offset =
- cpu_to_le16(prdt_offset);
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE);
- } else {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
- cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
- }
-
- hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
- hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
- (i * sizeof(struct utp_transfer_req_desc));
- hba->lrb[i].ucd_req_ptr =
- (struct utp_upiu_req *)(cmd_descp + i);
- hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
- hba->lrb[i].ucd_rsp_ptr =
- (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
- hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
- response_offset;
- hba->lrb[i].ucd_prdt_ptr =
- (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
- hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
- prdt_offset;
- }
-}
-
-/**
- * ufshcd_dme_link_startup - Notify Unipro to perform link startup
- * @hba: per adapter instance
- *
- * The UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro layer
- * in order to initialize the UniPro link startup procedure.
- * Once the UniPro links are up, the device connected to the controller
- * is detected.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_link_startup(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_dbg(hba->dev,
- "dme-link-startup: error code %d\n", ret);
- return ret;
-}
-
-/**
- * ufshcd_dme_reset - UIC command for DME_RESET
- * @hba: per adapter instance
- *
- * The DME_RESET command is issued in order to reset the UniPro stack.
- * This function currently handles a cold reset.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_reset(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_RESET;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
-
-/**
- * ufshcd_dme_enable - UIC command for DME_ENABLE
- * @hba: per adapter instance
- *
- * The DME_ENABLE command is issued in order to enable the UniPro stack.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_enable(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_ENABLE;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
-
-static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
-{
- #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
- unsigned long min_sleep_time_us;
-
- if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
- return;
-
- /*
- * last_dme_cmd_tstamp will be 0 only for 1st call to
- * this function
- */
- if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
- min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
- } else {
- unsigned long delta =
- (unsigned long) ktime_to_us(
- ktime_sub(ktime_get(),
- hba->last_dme_cmd_tstamp));
-
- if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
- min_sleep_time_us =
- MIN_DELAY_BEFORE_DME_CMDS_US - delta;
- else
- return; /* no more delay required */
- }
-
- /* allow sleep for extra 50us if needed */
- usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
-}
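-
-/*
- * Editor's worked example for the delay above: with the quirk set and the
- * last DME command issued 400 us ago, delta = 400 < 1000, so the function
- * sleeps for the remaining 600 us (up to 650 us with the 50 us slack). If
- * the last command was issued more than 1000 us ago it returns immediately.
- */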
-
-/**
- * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
- * @hba: per adapter instance
- * @attr_sel: uic command argument1
- * @attr_set: attribute set type as uic command argument2
- * @mib_val: setting value as uic command argument3
- * @peer: indicate whether peer or local
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
- u8 attr_set, u32 mib_val, u8 peer)
-{
- struct uic_command uic_cmd = {0};
- static const char *const action[] = {
- "dme-set",
- "dme-peer-set"
- };
- const char *set = action[!!peer];
- int ret;
- int retries = UFS_UIC_COMMAND_RETRIES;
-
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
- uic_cmd.argument1 = attr_sel;
- uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
- uic_cmd.argument3 = mib_val;
-
- do {
- /* for peer attributes we retry upon failure */
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
- } while (ret && peer && --retries);
-
- if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- UFS_UIC_COMMAND_RETRIES - retries);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
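-
-/*
- * Editor's sketch (not part of the original driver): the ufshcd_dme_set()
- * and ufshcd_dme_peer_set() wrappers in ufshcd.h expand to the function
- * above with ATTR_SET_NOR and the local/peer selector filled in, so a
- * local attribute write looks like this (helper name hypothetical):
- */
-static int ufshcd_example_set_rx_gear(struct ufs_hba *hba, u32 gear)
-{
- /* same as ufshcd_dme_set_attr(hba, ..., ATTR_SET_NOR, gear, DME_LOCAL) */
- return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear);
-}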
-
-/**
- * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
- * @hba: per adapter instance
- * @attr_sel: uic command argument1
- * @mib_val: the value of the attribute as returned by the UIC command
- * @peer: indicate whether peer or local
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
- u32 *mib_val, u8 peer)
-{
- struct uic_command uic_cmd = {0};
- static const char *const action[] = {
- "dme-get",
- "dme-peer-get"
- };
- const char *get = action[!!peer];
- int ret;
- int retries = UFS_UIC_COMMAND_RETRIES;
- struct ufs_pa_layer_attr orig_pwr_info;
- struct ufs_pa_layer_attr temp_pwr_info;
- bool pwr_mode_change = false;
-
- if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
- orig_pwr_info = hba->pwr_info;
- temp_pwr_info = orig_pwr_info;
-
- if (orig_pwr_info.pwr_tx == FAST_MODE ||
- orig_pwr_info.pwr_rx == FAST_MODE) {
- temp_pwr_info.pwr_tx = FASTAUTO_MODE;
- temp_pwr_info.pwr_rx = FASTAUTO_MODE;
- pwr_mode_change = true;
- } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
- orig_pwr_info.pwr_rx == SLOW_MODE) {
- temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
- temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
- pwr_mode_change = true;
- }
- if (pwr_mode_change) {
- ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
- if (ret)
- goto out;
- }
- }
-
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
- uic_cmd.argument1 = attr_sel;
-
- do {
- /* for peer attributes we retry upon failure */
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
- get, UIC_GET_ATTR_ID(attr_sel), ret);
- } while (ret && peer && --retries);
-
- if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel),
- UFS_UIC_COMMAND_RETRIES - retries);
-
- if (mib_val && !ret)
- *mib_val = uic_cmd.argument3;
-
- if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
- && pwr_mode_change)
- ufshcd_change_power_mode(hba, &orig_pwr_info);
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
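-
-/*
- * Editor's sketch (not part of the original driver): reading the same MIB
- * attribute from the local and the peer (device) side via the
- * ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers from ufshcd.h. The
- * helper name is hypothetical; PA_MAXRXHSGEAR is queried the same way in
- * ufshcd_get_max_pwr_mode() below.
- */
-static int ufshcd_example_read_max_hs_gear(struct ufs_hba *hba,
- u32 *local, u32 *peer)
-{
- int err;
-
- err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), local);
- if (err)
- return err;
- return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), peer);
-}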
-
-/**
- * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
- * state) and waits for it to take effect.
- *
- * @hba: per adapter instance
- * @cmd: UIC command to execute
- *
- * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
- * DME_HIBERNATE_EXIT commands take some time to take effect on both the
- * host and device UniPro link, hence their final completion is indicated by
- * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
- * in addition to the normal UIC command completion status (UCCS). This
- * function only returns after the relevant status bits indicate completion.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
-{
- struct completion uic_async_done;
- unsigned long flags;
- u8 status;
- int ret;
- bool reenable_intr = false;
-
- mutex_lock(&hba->uic_cmd_mutex);
- init_completion(&uic_async_done);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- wmb();
- reenable_intr = true;
- }
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ret) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
- cmd->command, cmd->argument3, ret);
- goto out;
- }
-
- if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
- cmd->command, cmd->argument3);
- ret = -ETIMEDOUT;
- goto out;
- }
-
- status = ufshcd_get_upmcrs(hba);
- if (status != PWR_LOCAL) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
- cmd->command, status);
- ret = (status != PWR_OK) ? status : -1;
- }
-out:
- if (ret) {
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
- }
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->active_uic_cmd = NULL;
- hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- mutex_unlock(&hba->uic_cmd_mutex);
-
- return ret;
-}
-
-/**
- * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
- * using DME_SET primitives.
- * @hba: per adapter instance
- * @mode: power mode value
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
- ret = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
- if (ret) {
- dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
- __func__, ret);
- goto out;
- }
- }
-
- uic_cmd.command = UIC_CMD_DME_SET;
- uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
- uic_cmd.argument3 = mode;
- ufshcd_hold(hba, false);
- ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- ufshcd_release(hba);
-
-out:
- return ret;
-}
-
-static int ufshcd_link_recovery(struct ufs_hba *hba)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* Reset the attached device */
- ufshcd_vops_device_reset(hba);
-
- ret = ufshcd_host_reset_and_restore(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- ufshcd_clear_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (ret)
- dev_err(hba->dev, "%s: link recovery failed, err %d",
- __func__, ret);
-
- return ret;
-}
-
-static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
-{
- int ret;
- struct uic_command uic_cmd = {0};
- ktime_t start = ktime_get();
-
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
-
- uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
- ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-
- if (ret) {
- int err;
-
- dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
- __func__, ret);
-
- /*
- * If link recovery fails then return error code returned from
- * ufshcd_link_recovery().
- * If link recovery succeeds then return -EAGAIN to attempt
- * hibern8 enter retry again.
- */
- err = ufshcd_link_recovery(hba);
- if (err) {
- dev_err(hba->dev, "%s: link recovery failed", __func__);
- ret = err;
- } else {
- ret = -EAGAIN;
- }
- } else
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
- POST_CHANGE);
-
- return ret;
-}
-
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
-{
- int ret = 0, retries;
-
- for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
- ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret)
- goto out;
- }
-out:
- return ret;
-}
-
-int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
- ktime_t start = ktime_get();
-
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
-
- uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
- ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-
- if (ret) {
- dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
- __func__, ret);
- ret = ufshcd_link_recovery(hba);
- } else {
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
- POST_CHANGE);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
- hba->ufs_stats.hibern8_exit_cnt++;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
-
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
- unsigned long flags;
- bool update = false;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit) {
- hba->ahit = ahit;
- update = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (update && !pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
- ufshcd_auto_hibern8_enable(hba);
- ufshcd_release(hba);
- pm_runtime_put(hba->dev);
- }
-}
-EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
-
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
-{
- unsigned long flags;
-
- if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
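-
-/*
- * Editor's sketch (not part of the original driver): composing an AHIT
- * value for ufshcd_auto_hibern8_update(). This assumes the
- * UFSHCI_AHIBERN8_TIMER_MASK/UFSHCI_AHIBERN8_SCALE_MASK field definitions
- * from ufshci.h and FIELD_PREP() from <linux/bitfield.h>; the scale field
- * selects the timer unit as a power of ten microseconds (3 -> 1 ms).
- */
-static u32 ufshcd_example_ahit_ms(u32 ms)
-{
- return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, ms) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); /* 1 ms units */
-}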
-
-/**
- * ufshcd_init_pwr_info - setting the POR (power on reset)
- * values in hba power info
- * @hba: per-adapter instance
- */
-static void ufshcd_init_pwr_info(struct ufs_hba *hba)
-{
- hba->pwr_info.gear_rx = UFS_PWM_G1;
- hba->pwr_info.gear_tx = UFS_PWM_G1;
- hba->pwr_info.lane_rx = 1;
- hba->pwr_info.lane_tx = 1;
- hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
- hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
- hba->pwr_info.hs_rate = 0;
-}
-
-/**
- * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
- * @hba: per-adapter instance
- */
-static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
-{
- struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
-
- if (hba->max_pwr_info.is_valid)
- return 0;
-
- pwr_info->pwr_tx = FAST_MODE;
- pwr_info->pwr_rx = FAST_MODE;
- pwr_info->hs_rate = PA_HS_MODE_B;
-
- /* Get the connected lane count */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
- &pwr_info->lane_rx);
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
- &pwr_info->lane_tx);
-
- if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
- dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
- __func__,
- pwr_info->lane_rx,
- pwr_info->lane_tx);
- return -EINVAL;
- }
-
- /*
- * First, get the maximum gears of HS speed.
- * If the value is zero, it means there is no HS gear capability.
- * Then, get the maximum gears of PWM speed.
- */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
- if (!pwr_info->gear_rx) {
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
- &pwr_info->gear_rx);
- if (!pwr_info->gear_rx) {
- dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
- __func__, pwr_info->gear_rx);
- return -EINVAL;
- }
- pwr_info->pwr_rx = SLOW_MODE;
- }
-
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
- &pwr_info->gear_tx);
- if (!pwr_info->gear_tx) {
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
- &pwr_info->gear_tx);
- if (!pwr_info->gear_tx) {
- dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
- __func__, pwr_info->gear_tx);
- return -EINVAL;
- }
- pwr_info->pwr_tx = SLOW_MODE;
- }
-
- hba->max_pwr_info.is_valid = true;
- return 0;
-}
-
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode)
-{
- int ret;
-
- /* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
- pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
- pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
- pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
- pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
- pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
- pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
- dev_dbg(hba->dev, "%s: power already configured\n", __func__);
- return 0;
- }
-
- /*
- * Configure the attributes required for the power mode change:
- * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
- * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
- * - PA_HSSERIES
- */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
- pwr_mode->lane_rx);
- if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
- pwr_mode->pwr_rx == FAST_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
- else
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
- pwr_mode->lane_tx);
- if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
- pwr_mode->pwr_tx == FAST_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
- else
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
-
- if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
- pwr_mode->pwr_tx == FASTAUTO_MODE ||
- pwr_mode->pwr_rx == FAST_MODE ||
- pwr_mode->pwr_tx == FAST_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
- pwr_mode->hs_rate);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
- DL_AFC0ReqTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
- DL_FC1ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
- DL_TC1ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
- DL_AFC1ReqTimeOutVal_Default);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
- DL_AFC0ReqTimeOutVal_Default);
-
- ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
- | pwr_mode->pwr_tx);
-
- if (ret) {
- dev_err(hba->dev,
- "%s: power mode change failed %d\n", __func__, ret);
- } else {
- ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
- pwr_mode);
-
- memcpy(&hba->pwr_info, pwr_mode,
- sizeof(struct ufs_pa_layer_attr));
- }
-
- return ret;
-}
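-
-/*
- * Editor's worked example for the PA_PWRMODE argument above: the UIC
- * command packs the RX mode into the upper nibble and the TX mode into the
- * lower nibble. Assuming the usual UniPro encodings (FAST_MODE = 1,
- * SLOWAUTO_MODE = 5), fast mode in both directions yields
- * (1 << 4) | 1 = 0x11 and slow-auto in both directions yields 0x55.
- */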
-
-/**
- * ufshcd_config_pwr_mode - configure a new power mode
- * @hba: per-adapter instance
- * @desired_pwr_mode: desired power configuration
- */
-int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode)
-{
- struct ufs_pa_layer_attr final_params = { 0 };
- int ret;
-
- ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
- desired_pwr_mode, &final_params);
-
- if (ret)
- memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
-
- ret = ufshcd_change_power_mode(hba, &final_params);
- if (!ret)
- ufshcd_print_pwr_info(hba);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
-
-/**
- * ufshcd_complete_dev_init() - checks device readiness
- * @hba: per-adapter instance
- *
- * Set the fDeviceInit flag and poll until the device clears it.
- */
-static int ufshcd_complete_dev_init(struct ufs_hba *hba)
-{
- int i;
- int err;
- bool flag_res = true;
-
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
- if (err) {
- dev_err(hba->dev,
- "%s setting fDeviceInit flag failed with error %d\n",
- __func__, err);
- goto out;
- }
-
- /* poll for max. 1000 iterations for fDeviceInit flag to clear */
- for (i = 0; i < 1000 && !err && flag_res; i++)
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
-
- if (err)
- dev_err(hba->dev,
- "%s reading fDeviceInit flag failed with error %d\n",
- __func__, err);
- else if (flag_res)
- dev_err(hba->dev,
- "%s fDeviceInit was not cleared by the device\n",
- __func__);
-
-out:
- return err;
-}
-
-/**
- * ufshcd_make_hba_operational - Make UFS controller operational
- * @hba: per adapter instance
- *
- * To bring UFS host controller to operational state,
- * 1. Enable required interrupts
- * 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base address
- * 4. Configure run-stop-registers
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_make_hba_operational(struct ufs_hba *hba)
-{
- int err = 0;
- u32 reg;
-
- /* Enable required interrupts */
- ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
-
- /* Configure interrupt aggregation */
- if (ufshcd_is_intr_aggr_allowed(hba))
- ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
- else
- ufshcd_disable_intr_aggr(hba);
-
- /* Configure UTRL and UTMRL base address registers */
- ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_H);
- ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_H);
-
- /*
- * Make sure base address and interrupt setup are updated before
- * enabling the run/stop registers below.
- */
- wmb();
-
- /*
- * UCRDY, UTMRLDY and UTRLRDY bits must be 1
- */
- reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
- if (!(ufshcd_get_lists_status(reg))) {
- ufshcd_enable_run_stop_reg(hba);
- } else {
- dev_err(hba->dev,
- "Host controller not ready to process requests");
- err = -EIO;
- goto out;
- }
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- * @can_sleep: perform sleep or just spin
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
-{
- int err;
-
- ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
- CONTROLLER_ENABLE, CONTROLLER_DISABLE,
- 10, 1, can_sleep);
- if (err)
- dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
-}
-
-/**
- * ufshcd_hba_execute_hce - initialize the controller
- * @hba: per adapter instance
- *
- * The controller resets itself and the controller firmware initialization
- * sequence kicks off. When the controller is ready, it sets
- * the Host Controller Enable bit to 1.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
-{
- int retry;
-
- if (!ufshcd_is_hba_active(hba))
- /* change controller state to "reset state" */
- ufshcd_hba_stop(hba, true);
-
- /* UniPro link is disabled at this point */
- ufshcd_set_link_off(hba);
-
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
- /* start controller initialization sequence */
- ufshcd_hba_start(hba);
-
- /*
- * To initialize a UFS host controller, the HCE bit must be set to 1.
- * During initialization the HCE bit value changes from 1->0->1.
- * When the host controller completes the initialization sequence
- * it sets the HCE bit back to 1. The same HCE bit is read back
- * to check whether the controller has completed the initialization
- * sequence. So, without this delay, the HCE = 1 value set by the
- * previous instruction might be read back prematurely.
- * This delay can be changed based on the controller.
- */
- usleep_range(1000, 1100);
-
- /* wait for the host controller to complete initialization */
- retry = 10;
- while (ufshcd_is_hba_active(hba)) {
- if (retry) {
- retry--;
- } else {
- dev_err(hba->dev,
- "Controller enable failed\n");
- return -EIO;
- }
- usleep_range(5000, 5100);
- }
-
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
-
- ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
-
- return 0;
-}
-
-int ufshcd_hba_enable(struct ufs_hba *hba)
-{
- int ret;
-
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
- ufshcd_set_link_off(hba);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
- ret = ufshcd_dme_reset(hba);
- if (!ret) {
- ret = ufshcd_dme_enable(hba);
- if (!ret)
- ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
- if (ret)
- dev_err(hba->dev,
- "Host controller enable failed with non-hce\n");
- }
- } else {
- ret = ufshcd_hba_execute_hce(hba);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
-
-static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
-{
- int tx_lanes, i, err = 0;
-
- if (!peer)
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
- &tx_lanes);
- else
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
- &tx_lanes);
- for (i = 0; i < tx_lanes; i++) {
- if (!peer)
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
- 0);
- else
- err = ufshcd_dme_peer_set(hba,
- UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
- 0);
- if (err) {
- dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
- __func__, peer, i, err);
- break;
- }
- }
-
- return err;
-}
-
-static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
-{
- return ufshcd_disable_tx_lcc(hba, true);
-}
-
-void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg)
-{
- reg_hist->reg[reg_hist->pos] = reg;
- reg_hist->tstamp[reg_hist->pos] = ktime_get();
- reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
-}
-EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
-
-/**
- * ufshcd_link_startup - Initialize unipro link startup
- * @hba: per adapter instance
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_link_startup(struct ufs_hba *hba)
-{
- int ret;
- int retries = DME_LINKSTARTUP_RETRIES;
- bool link_startup_again = false;
-
- /*
- * If the UFS device isn't active then we will have to issue link startup
- * twice to make sure the device state moves to active.
- */
- if (!ufshcd_is_ufs_dev_active(hba))
- link_startup_again = true;
-
-link_startup:
- do {
- ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
-
- ret = ufshcd_dme_link_startup(hba);
-
- /* check if device is detected by inter-connect layer */
- if (!ret && !ufshcd_is_device_present(hba)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
- 0);
- dev_err(hba->dev, "%s: Device not present\n", __func__);
- ret = -ENXIO;
- goto out;
- }
-
- /*
- * DME link lost indication is only received when link is up,
- * but we can't be sure if the link is up until link startup
- * succeeds. So reset the local UniPro and try again.
- */
- if (ret && ufshcd_hba_enable(hba)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
- (u32)ret);
- goto out;
- }
- } while (ret && retries--);
-
- if (ret) {
- /* failed to get the link up... retire */
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
- (u32)ret);
- goto out;
- }
-
- if (link_startup_again) {
- link_startup_again = false;
- retries = DME_LINKSTARTUP_RETRIES;
- goto link_startup;
- }
-
- /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
- ufshcd_init_pwr_info(hba);
- ufshcd_print_pwr_info(hba);
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
- ret = ufshcd_disable_device_tx_lcc(hba);
- if (ret)
- goto out;
- }
-
- /* Include any host controller configuration via UIC commands */
- ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
- if (ret)
- goto out;
-
- ret = ufshcd_make_hba_operational(hba);
-out:
- if (ret) {
- dev_err(hba->dev, "link startup failed %d\n", ret);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
- }
- return ret;
-}
-
-/**
- * ufshcd_verify_dev_init() - Verify device initialization
- * @hba: per-adapter instance
- *
- * Send NOP OUT UPIU and wait for NOP IN response to check whether the
- * device Transport Protocol (UTP) layer is ready after a reset.
- * If the UTP layer at the device side is not initialized, it may
- * not respond with a NOP IN UPIU within the timeout of %NOP_OUT_TIMEOUT,
- * in which case we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
- */
-static int ufshcd_verify_dev_init(struct ufs_hba *hba)
-{
- int err = 0;
- int retries;
-
- ufshcd_hold(hba, false);
- mutex_lock(&hba->dev_cmd.lock);
- for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
- NOP_OUT_TIMEOUT);
-
- if (!err || err == -ETIMEDOUT)
- break;
-
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
- }
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
-
- if (err)
- dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
- return err;
-}
-
-/**
- * ufshcd_set_queue_depth - set lun queue depth
- * @sdev: pointer to SCSI device
- *
- * Read bLUQueueDepth value and activate scsi tagged command
- * queueing. For WLUN, queue depth is set to 1. For best-effort
- * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
- * value that the host can queue.
- */
-static void ufshcd_set_queue_depth(struct scsi_device *sdev)
-{
- int ret = 0;
- u8 lun_qdepth;
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
-
- lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
-
- /* Some WLUNs don't support the unit descriptor */
- if (ret == -EOPNOTSUPP)
- lun_qdepth = 1;
- else if (!lun_qdepth)
- /* eventually, we can figure out the real queue depth */
- lun_qdepth = hba->nutrs;
- else
- lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
-
- dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
- __func__, lun_qdepth);
- scsi_change_queue_depth(sdev, lun_qdepth);
-}
-
-/**
- * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
- * @hba: per-adapter instance
- * @lun: UFS device lun id
- * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
- *
- * Returns 0 in case of success and the b_lu_write_protect status is
- * returned in the @b_lu_write_protect parameter.
- * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
- * Returns -EINVAL in case of invalid parameters passed to this function.
- */
-static int ufshcd_get_lu_wp(struct ufs_hba *hba,
- u8 lun,
- u8 *b_lu_write_protect)
-{
- int ret;
-
- if (!b_lu_write_protect)
- ret = -EINVAL;
- /*
- * According to UFS device spec, RPMB LU can't be write
- * protected so skip reading bLUWriteProtect parameter for
- * it. For other W-LUs, UNIT DESCRIPTOR is not available.
- */
- else if (lun >= hba->dev_info.max_lu_supported)
- ret = -ENOTSUPP;
- else
- ret = ufshcd_read_unit_desc_param(hba,
- lun,
- UNIT_DESC_PARAM_LU_WR_PROTECT,
- b_lu_write_protect,
- sizeof(*b_lu_write_protect));
- return ret;
-}
-
-/**
- * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
- * status
- * @hba: per-adapter instance
- * @sdev: pointer to SCSI device
- *
- */
-static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
- struct scsi_device *sdev)
-{
- if (hba->dev_info.f_power_on_wp_en &&
- !hba->dev_info.is_lu_power_on_wp) {
- u8 b_lu_write_protect;
-
- if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
- &b_lu_write_protect) &&
- (b_lu_write_protect == UFS_LU_POWER_ON_WP))
- hba->dev_info.is_lu_power_on_wp = true;
- }
-}
-
-/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
- * @sdev: pointer to SCSI device
- *
- * Returns success
- */
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
-{
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
-
- /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
- sdev->use_10_for_ms = 1;
-
- /* DBD field should be set to 1 in mode sense(10) */
- sdev->set_dbd_for_ms = 1;
-
- /* allow SCSI layer to restart the device in case of errors */
- sdev->allow_restart = 1;
-
- /* REPORT SUPPORTED OPERATION CODES is not supported */
- sdev->no_report_opcodes = 1;
-
- /* WRITE_SAME command is not supported */
- sdev->no_write_same = 1;
-
- ufshcd_set_queue_depth(sdev);
-
- ufshcd_get_lu_power_on_wp_status(hba, sdev);
-
- return 0;
-}
-
-/**
- * ufshcd_change_queue_depth - change queue depth
- * @sdev: pointer to SCSI device
- * @depth: required depth to set
- *
- * Change queue depth and make sure the max. limits are not crossed.
- */
-static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
-{
- struct ufs_hba *hba = shost_priv(sdev->host);
-
- if (depth > hba->nutrs)
- depth = hba->nutrs;
- return scsi_change_queue_depth(sdev, depth);
-}
-
-/**
- * ufshcd_slave_configure - adjust SCSI device configurations
- * @sdev: pointer to SCSI device
- */
-static int ufshcd_slave_configure(struct scsi_device *sdev)
-{
- struct ufs_hba *hba = shost_priv(sdev->host);
- struct request_queue *q = sdev->request_queue;
-
- blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
-
- if (ufshcd_is_rpm_autosuspend_allowed(hba))
- sdev->rpm_autosuspend = 1;
-
- return 0;
-}
-
-/**
- * ufshcd_slave_destroy - remove SCSI device configurations
- * @sdev: pointer to SCSI device
- */
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
-{
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
- /* Drop the reference as it won't be needed anymore */
- if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->sdev_ufs_device = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-}
-
-/**
- * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
- * @lrbp: pointer to local reference block of completed command
- * @scsi_status: SCSI command status
- *
- * Returns a value based on the SCSI command status
- */
-static inline int
-ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
-{
- int result = 0;
-
- switch (scsi_status) {
- case SAM_STAT_CHECK_CONDITION:
- ufshcd_copy_sense_data(lrbp);
- /* fallthrough */
- case SAM_STAT_GOOD:
- result |= DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- scsi_status;
- break;
- case SAM_STAT_TASK_SET_FULL:
- case SAM_STAT_BUSY:
- case SAM_STAT_TASK_ABORTED:
- ufshcd_copy_sense_data(lrbp);
- result |= scsi_status;
- break;
- default:
- result |= DID_ERROR << 16;
- break;
- } /* end of switch */
-
- return result;
-}
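-
-/*
- * Editor's worked example for the result word composed above: the SCSI
- * mid-layer unpacks it as host byte (bits 16..23), message byte
- * (bits 8..15) and status byte (bits 0..7). A GOOD completion is thus
- * (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_GOOD, and
- * host_byte(result) recovers DID_OK from it.
- */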
-
-/**
- * ufshcd_transfer_rsp_status - Get overall status of the response
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block of completed command
- *
- * Returns result of the command to notify SCSI midlayer
- */
-static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- int result = 0;
- int scsi_status;
- int ocs;
-
- /* overall command status of utrd */
- ocs = ufshcd_get_tr_ocs(lrbp);
-
- switch (ocs) {
- case OCS_SUCCESS:
- result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- switch (result) {
- case UPIU_TRANSACTION_RESPONSE:
- /*
- * get the response UPIU result to extract
- * the SCSI command status
- */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
- /*
- * get the result based on SCSI status response
- * to notify the SCSI midlayer of the command status
- */
- scsi_status = result & MASK_SCSI_STATUS;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
-
- /*
- * Currently we are only supporting BKOPs exception
- * events hence we can ignore BKOPs exception event
- * during power management callbacks. BKOPs exception
- * event is not expected to be raised in runtime suspend
- * callback as it allows the urgent bkops.
- * During system suspend, we are anyway forcefully
- * disabling the bkops and if urgent bkops is needed
- * it will be enabled on system resume. Long term
- * solution could be to abort the system suspend if
- * UFS device needs urgent BKOPs.
- */
- if (!hba->pm_op_in_progress &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
- schedule_work(&hba->eeh_work);
- break;
- case UPIU_TRANSACTION_REJECT_UPIU:
- /* TODO: handle Reject UPIU Response */
- result = DID_ERROR << 16;
- dev_err(hba->dev,
- "Reject UPIU not fully implemented\n");
- break;
- default:
- dev_err(hba->dev,
- "Unexpected request response code = %x\n",
- result);
- result = DID_ERROR << 16;
- break;
- }
- break;
- case OCS_ABORTED:
- result |= DID_ABORT << 16;
- break;
- case OCS_INVALID_COMMAND_STATUS:
- result |= DID_REQUEUE << 16;
- break;
- case OCS_INVALID_CMD_TABLE_ATTR:
- case OCS_INVALID_PRDT_ATTR:
- case OCS_MISMATCH_DATA_BUF_SIZE:
- case OCS_MISMATCH_RESP_UPIU_SIZE:
- case OCS_PEER_COMM_FAILURE:
- case OCS_FATAL_ERROR:
- default:
- result |= DID_ERROR << 16;
- dev_err(hba->dev,
- "OCS error from controller = %x for tag %d\n",
- ocs, lrbp->task_tag);
- ufshcd_print_host_regs(hba);
- ufshcd_print_host_state(hba);
- break;
- } /* end of switch */
-
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
- ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
- return result;
-}
-
-/**
- * ufshcd_uic_cmd_compl - handle completion of uic command
- * @hba: per adapter instance
- * @intr_status: interrupt status generated by the controller
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
-{
- irqreturn_t retval = IRQ_NONE;
-
- if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
- hba->active_uic_cmd->argument2 |=
- ufshcd_get_uic_cmd_result(hba);
- hba->active_uic_cmd->argument3 =
- ufshcd_get_dme_attr_val(hba);
- complete(&hba->active_uic_cmd->done);
- retval = IRQ_HANDLED;
- }
-
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
- complete(hba->uic_async_done);
- retval = IRQ_HANDLED;
- }
- return retval;
-}
-
-/**
- * __ufshcd_transfer_req_compl - handle SCSI and query command completion
- * @hba: per adapter instance
- * @completed_reqs: requests to complete
- */
-static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
-{
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
- int result;
- int index;
-
- for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- lrbp = &hba->lrb[index];
- cmd = lrbp->cmd;
- if (cmd) {
- ufshcd_add_command_trace(hba, index, "complete");
- result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
- cmd->result = result;
- /* Mark completed command as NULL in LRB */
- lrbp->cmd = NULL;
- lrbp->compl_time_stamp = ktime_get();
- /* Do not touch lrbp after scsi done */
- cmd->scsi_done(cmd);
- __ufshcd_release(hba);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- lrbp->compl_time_stamp = ktime_get();
- if (hba->dev_cmd.complete) {
- ufshcd_add_command_trace(hba, index,
- "dev_complete");
- complete(hba->dev_cmd.complete);
- }
- }
- if (ufshcd_is_clkscaling_supported(hba))
- hba->clk_scaling.active_reqs--;
- }
-
- /* clear corresponding bits of completed commands */
- hba->outstanding_reqs ^= completed_reqs;
-
- ufshcd_clk_scaling_update_busy(hba);
-}
-
-/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
- * @hba: per adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
-{
- unsigned long completed_reqs;
- u32 tr_doorbell;
-
- /*
- * Resetting the interrupt aggregation counters first and reading the
- * DOOR_BELL afterward allows us to handle all the completed requests.
- * In order to prevent starvation of other interrupts, the DB is read once
- * after reset. The downside of this solution is the possibility of a
- * false interrupt if the device completes another request after resetting
- * aggregation and before reading the DB.
- */
- if (ufshcd_is_intr_aggr_allowed(hba) &&
- !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
- ufshcd_reset_intr_aggr(hba);
-
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
- if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
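The XOR in ufshcd_transfer_req_compl() deserves a worked example. Every bit set in the doorbell is, by construction, also set in outstanding_reqs, so XOR-ing the two leaves exactly the bits that were outstanding but are no longer pending, i.e. the completed slots. A sketch with illustrative values:

	unsigned long outstanding = 0x2d;	/* tags 0, 2, 3 and 5 in flight */
	u32 tr_doorbell = 0x24;			/* tags 2 and 5 still pending */
	unsigned long completed = tr_doorbell ^ outstanding;
						/* 0x09: tags 0 and 3 are done */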
-
-/**
- * ufshcd_disable_ee - disable exception event
- * @hba: per-adapter instance
- * @mask: exception event to disable
- *
- * Disables exception event in the device so that the EVENT_ALERT
- * bit is not set.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
-{
- int err = 0;
- u32 val;
-
- if (!(hba->ee_ctrl_mask & mask))
- goto out;
-
- val = hba->ee_ctrl_mask & ~mask;
- val &= MASK_EE_STATUS;
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
- if (!err)
- hba->ee_ctrl_mask &= ~mask;
-out:
- return err;
-}
-
-/**
- * ufshcd_enable_ee - enable exception event
- * @hba: per-adapter instance
- * @mask: exception event to enable
- *
- * Enable the corresponding exception event in the device to allow the
- * device to alert the host in critical scenarios.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
-{
- int err = 0;
- u32 val;
-
- if (hba->ee_ctrl_mask & mask)
- goto out;
-
- val = hba->ee_ctrl_mask | mask;
- val &= MASK_EE_STATUS;
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
- if (!err)
- hba->ee_ctrl_mask |= mask;
-out:
- return err;
-}
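Together, ufshcd_disable_ee() and ufshcd_enable_ee() implement a cached read-modify-write of the exception event control attribute. A hedged usage sketch (the call site below is illustrative, not taken from this hunk):

	/* Ask the device to raise EVENT_ALERT for urgent BKOPS ... */
	if (ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS))
		dev_warn(hba->dev, "could not enable urgent BKOPS exception\n");

	/* ... and stop it again once the host no longer cares. */
	if (ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS))
		dev_warn(hba->dev, "could not disable urgent BKOPS exception\n");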
-
-/**
- * ufshcd_enable_auto_bkops - Allow device managed BKOPS
- * @hba: per-adapter instance
- *
- * Allow device to manage background operations on its own. Enabling
- * this might lead to inconsistent latencies during normal data transfers
- * as the device is allowed to manage its own way of handling background
- * operations.
- *
- * Returns zero on success, non-zero on failure.
- */
-static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (hba->auto_bkops_enabled)
- goto out;
-
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
- if (err) {
- dev_err(hba->dev, "%s: failed to enable bkops %d\n",
- __func__, err);
- goto out;
- }
-
- hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
-
- /* No need of URGENT_BKOPS exception from the device */
- err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
- if (err)
- dev_err(hba->dev, "%s: failed to disable exception event %d\n",
- __func__, err);
-out:
- return err;
-}
-
-/**
- * ufshcd_disable_auto_bkops - block device in doing background operations
- * @hba: per-adapter instance
- *
- * Disabling background operations improves command response latency but
- * has the drawback of the device moving into a critical state in which it
- * is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
- * the host is idle so that BKOPS are managed effectively without any
- * negative impact.
- *
- * Returns zero on success, non-zero on failure.
- */
-static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (!hba->auto_bkops_enabled)
- goto out;
-
- /*
- * If host assisted BKOPs is to be enabled, make sure
- * urgent bkops exception is allowed.
- */
- err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
- if (err) {
- dev_err(hba->dev, "%s: failed to enable exception event %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
- if (err) {
- dev_err(hba->dev, "%s: failed to disable bkops %d\n",
- __func__, err);
- ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
- goto out;
- }
-
- hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
- hba->is_urgent_bkops_lvl_checked = false;
-out:
- return err;
-}
-
-/**
- * ufshcd_force_reset_auto_bkops - force reset auto bkops state
- * @hba: per adapter instance
- *
- * After a device reset the device may toggle the BKOPS_EN flag back
- * to its default value. The s/w tracking variables should be updated
- * as well. This function changes the auto-bkops state based on
- * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
- */
-static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
-{
- if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
- hba->auto_bkops_enabled = false;
- hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
- ufshcd_enable_auto_bkops(hba);
- } else {
- hba->auto_bkops_enabled = true;
- hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
- ufshcd_disable_auto_bkops(hba);
- }
- hba->is_urgent_bkops_lvl_checked = false;
-}
-
-static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
-{
- return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
-}
-
-/**
- * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
- * @hba: per-adapter instance
- * @status: bkops_status value
- *
- * Read bkops_status from the UFS device and enable the fBackgroundOpsEn
- * flag in the device to permit background operations if the device's
- * bkops_status is greater than or equal to the "status" argument passed
- * to this function; disable it otherwise.
- *
- * Returns 0 for success, non-zero in case of failure.
- *
- * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
- * to know whether auto bkops is enabled or disabled after this function
- * returns control to it.
- */
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
-{
- int err;
- u32 curr_status = 0;
-
- err = ufshcd_get_bkops_status(hba, &curr_status);
- if (err) {
- dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
- __func__, err);
- goto out;
- } else if (curr_status > BKOPS_STATUS_MAX) {
- dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
- __func__, curr_status);
- err = -EINVAL;
- goto out;
- }
-
- if (curr_status >= status)
- err = ufshcd_enable_auto_bkops(hba);
- else
- err = ufshcd_disable_auto_bkops(hba);
- hba->urgent_bkops_lvl = curr_status;
-out:
- return err;
-}
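As a usage sketch, a power management path could keep auto-BKOPS enabled only while the device reports at least a non-critical need for it; the threshold below is an assumed example rather than a call taken from this hunk:

	/* Enable auto-BKOPS if bkops_status >= BKOPS_STATUS_NON_CRITICAL,
	 * disable it otherwise; hba->auto_bkops_enabled reflects the result. */
	err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
	if (err)
		dev_err(hba->dev, "bkops control failed: %d\n", err);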
-
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is
- * not enabled, and a negative error value for any other failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
-static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
-{
- return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
-}
-
-static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
-{
- int err;
- u32 curr_status = 0;
-
- if (hba->is_urgent_bkops_lvl_checked)
- goto enable_auto_bkops;
-
- err = ufshcd_get_bkops_status(hba, &curr_status);
- if (err) {
- dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
- __func__, err);
- goto out;
- }
-
- /*
- * We are seeing that some devices raise the urgent bkops exception
- * event even when the BKOPS status doesn't indicate performance
- * impacted or critical. Handle these devices by determining their
- * urgent bkops status at runtime.
- */
- if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
- dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
- __func__, curr_status);
- /* update the current status as the urgent bkops level */
- hba->urgent_bkops_lvl = curr_status;
- hba->is_urgent_bkops_lvl_checked = true;
- }
-
-enable_auto_bkops:
- err = ufshcd_enable_auto_bkops(hba);
-out:
- if (err < 0)
- dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
- __func__, err);
-}
-
-/**
- * ufshcd_exception_event_handler - handle exceptions raised by device
- * @work: pointer to work data
- *
- * Read bExceptionEventStatus attribute from the device and handle the
- * exception event accordingly.
- */
-static void ufshcd_exception_event_handler(struct work_struct *work)
-{
- struct ufs_hba *hba;
- int err;
- u32 status = 0;
- hba = container_of(work, struct ufs_hba, eeh_work);
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_scsi_block_requests(hba);
- err = ufshcd_get_ee_status(hba, &status);
- if (err) {
- dev_err(hba->dev, "%s: failed to get exception status %d\n",
- __func__, err);
- goto out;
- }
-
- status &= hba->ee_ctrl_mask;
-
- if (status & MASK_EE_URGENT_BKOPS)
- ufshcd_bkops_exception_event_handler(hba);
-
-out:
- ufshcd_scsi_unblock_requests(hba);
- pm_runtime_put_sync(hba->dev);
- return;
-}
-
-/* Complete requests that have door-bell cleared */
-static void ufshcd_complete_requests(struct ufs_hba *hba)
-{
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
-}
-
-/**
- * ufshcd_quirk_dl_nac_errors - check whether error handling is required
- * to recover from DL NAC errors
- * @hba: per-adapter instance
- *
- * Returns true if error handling is required, false otherwise
- */
-static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
-{
- unsigned long flags;
- bool err_handling = true;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
- * device fatal error and/or DL NAC & REPLAY timeout errors.
- */
- if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
- goto out;
-
- if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
- goto out;
-
- if ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
- int err;
- /*
- * Wait for 50 ms to see whether any other errors show up.
- */
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- msleep(50);
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- /*
- * Now check whether we have received any severe errors other
- * than the DL NAC error.
- */
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
- goto out;
-
- /*
- * As DL NAC is the only error received so far, send out a NOP
- * command to confirm whether the link is still active.
- * - If we don't get any response then do error recovery.
- * - If we get a response then clear the DL NAC error bit.
- */
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- err = ufshcd_verify_dev_init(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- if (err)
- goto out;
-
- /* Link seems to be alive hence ignore the DL NAC errors */
- if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
- hba->saved_err &= ~UIC_ERROR;
- /* clear NAC error */
- hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- if (!hba->saved_uic_err) {
- err_handling = false;
- goto out;
- }
- }
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return err_handling;
-}
-
-/**
- * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @work: pointer to work structure
- */
-static void ufshcd_err_handler(struct work_struct *work)
-{
- struct ufs_hba *hba;
- unsigned long flags;
- u32 err_xfer = 0;
- u32 err_tm = 0;
- int err = 0;
- int tag;
- bool needs_reset = false;
-
- hba = container_of(work, struct ufs_hba, eh_work);
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- goto out;
-
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
-
- /* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- bool ret;
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
- ret = ufshcd_quirk_dl_nac_errors(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!ret)
- goto skip_err_handling;
- }
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
- (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
- needs_reset = true;
-
- /*
- * if host reset is required then skip clearing the pending
- * transfers forcefully because they will get cleared during
- * host reset and restore
- */
- if (needs_reset)
- goto skip_pending_xfer_clear;
-
- /* release lock as clear command might sleep */
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- if (ufshcd_clear_cmd(hba, tag)) {
- err_xfer = true;
- goto lock_skip_pending_xfer_clear;
- }
- }
-
- /* Clear pending task management requests */
- for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
- if (ufshcd_clear_tm_cmd(hba, tag)) {
- err_tm = true;
- goto lock_skip_pending_xfer_clear;
- }
- }
-
-lock_skip_pending_xfer_clear:
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- /* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
-
- if (err_xfer || err_tm)
- needs_reset = true;
-
-skip_pending_xfer_clear:
- /* Fatal errors need reset */
- if (needs_reset) {
- unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
-
- /*
- * ufshcd_reset_and_restore() does the link reinitialization
- * which will need at least one empty doorbell slot to send the
- * device management commands (NOP and query commands).
- * If no slot is empty at this moment then free up the last
- * slot forcefully.
- */
- if (hba->outstanding_reqs == max_doorbells)
- __ufshcd_transfer_req_compl(hba,
- (1UL << (hba->nutrs - 1)));
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- err = ufshcd_reset_and_restore(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (err) {
- dev_err(hba->dev, "%s: reset and restore failed\n",
- __func__);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- /*
- * Inform the SCSI mid-layer that we did a reset and allow it
- * to handle Unit Attention properly.
- */
- scsi_report_bus_reset(hba->host, 0);
- hba->saved_err = 0;
- hba->saved_uic_err = 0;
- }
-
-skip_err_handling:
- if (!needs_reset) {
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- if (hba->saved_err || hba->saved_uic_err)
- dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
- __func__, hba->saved_err, hba->saved_uic_err);
- }
-
- ufshcd_clear_eh_in_progress(hba);
-
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_scsi_unblock_requests(hba);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
-}
-
-/**
- * ufshcd_update_uic_error - check and set fatal UIC error flags.
- * @hba: per-adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
-{
- u32 reg;
- irqreturn_t retval = IRQ_NONE;
-
- /* PHY layer lane error */
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
- /* Ignore LINERESET indication, as this is not an error */
- if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
- /*
- * To know whether this error is fatal or not, the DB timeout
- * must be checked, but that error is handled separately.
- */
- dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
- ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
- retval |= IRQ_HANDLED;
- }
-
- /* PA_INIT_ERROR is fatal and needs UIC reset */
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
- (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
-
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
- }
- retval |= IRQ_HANDLED;
- }
-
- /* UIC NL/TL/DME errors need a software retry */
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if ((reg & UIC_NETWORK_LAYER_ERROR) &&
- (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
- hba->uic_error |= UFSHCD_UIC_NL_ERROR;
- retval |= IRQ_HANDLED;
- }
-
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
- (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
- hba->uic_error |= UFSHCD_UIC_TL_ERROR;
- retval |= IRQ_HANDLED;
- }
-
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if ((reg & UIC_DME_ERROR) &&
- (reg & UIC_DME_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
- hba->uic_error |= UFSHCD_UIC_DME_ERROR;
- retval |= IRQ_HANDLED;
- }
-
- dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
- __func__, hba->uic_error);
- return retval;
-}
-
-static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
- u32 intr_mask)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return false;
-
- if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
- return false;
-
- if (hba->active_uic_cmd &&
- (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
- hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
- return false;
-
- return true;
-}
-
-/**
- * ufshcd_check_errors - Check for errors that need s/w attention
- * @hba: per-adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
-{
- bool queue_eh_work = false;
- irqreturn_t retval = IRQ_NONE;
-
- if (hba->errors & INT_FATAL_ERRORS) {
- ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
- queue_eh_work = true;
- }
-
- if (hba->errors & UIC_ERROR) {
- hba->uic_error = 0;
- retval = ufshcd_update_uic_error(hba);
- if (hba->uic_error)
- queue_eh_work = true;
- }
-
- if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
- dev_err(hba->dev,
- "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
- __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
- "Enter" : "Exit",
- hba->errors, ufshcd_get_upmcrs(hba));
- ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
- hba->errors);
- queue_eh_work = true;
- }
-
- if (queue_eh_work) {
- /*
- * Update the transfer error masks to sticky bits; do this
- * irrespective of the current ufshcd_state.
- */
- hba->saved_err |= hba->errors;
- hba->saved_uic_err |= hba->uic_error;
-
- /* handle fatal errors only when link is functional */
- if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
- /* block commands from scsi mid-layer */
- ufshcd_scsi_block_requests(hba);
-
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
-
- /* dump controller state before resetting */
- if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
- bool pr_prdt = !!(hba->saved_err &
- SYSTEM_BUS_FATAL_ERROR);
-
- dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
- __func__, hba->saved_err,
- hba->saved_uic_err);
-
- ufshcd_print_host_regs(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_tmrs(hba, hba->outstanding_tasks);
- ufshcd_print_trs(hba, hba->outstanding_reqs,
- pr_prdt);
- }
- schedule_work(&hba->eh_work);
- }
- retval |= IRQ_HANDLED;
- }
- /*
- * if (!queue_eh_work) -
- * Other errors are either non-fatal where host recovers
- * itself without s/w intervention or errors that will be
- * handled by the SCSI core layer.
- */
- return retval;
-}
-
-struct ctm_info {
- struct ufs_hba *hba;
- unsigned long pending;
- unsigned int ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
- struct ctm_info *const ci = priv;
- struct completion *c;
-
- WARN_ON_ONCE(reserved);
- if (test_bit(req->tag, &ci->pending))
- return true;
- ci->ncpl++;
- c = req->end_io_data;
- if (c)
- complete(c);
- return true;
-}
-
-/**
- * ufshcd_tmc_handler - handle task management function completion
- * @hba: per adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
-{
- struct request_queue *q = hba->tmf_queue;
- struct ctm_info ci = {
- .hba = hba,
- .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
- };
-
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
- return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/**
- * ufshcd_sl_intr - Interrupt service routine
- * @hba: per adapter instance
- * @intr_status: contains interrupts generated by the controller
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
-{
- irqreturn_t retval = IRQ_NONE;
-
- hba->errors = UFSHCD_ERROR_MASK & intr_status;
-
- if (ufshcd_is_auto_hibern8_error(hba, intr_status))
- hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
-
- if (hba->errors)
- retval |= ufshcd_check_errors(hba);
-
- if (intr_status & UFSHCD_UIC_MASK)
- retval |= ufshcd_uic_cmd_compl(hba, intr_status);
-
- if (intr_status & UTP_TASK_REQ_COMPL)
- retval |= ufshcd_tmc_handler(hba);
-
- if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba);
-
- return retval;
-}
-
-/**
- * ufshcd_intr - Main interrupt service routine
- * @irq: irq number
- * @__hba: pointer to adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
-{
- u32 intr_status, enabled_intr_status;
- irqreturn_t retval = IRQ_NONE;
- struct ufs_hba *hba = __hba;
- int retries = hba->nutrs;
-
- spin_lock(hba->host->host_lock);
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-
- /*
- * There can be at most hba->nutrs requests in flight. In the worst
- * case they finish one by one after the interrupt status has been
- * read, so re-check the interrupt status in a loop and keep handling
- * requests until all of them have been processed before returning.
- */
- do {
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- if (enabled_intr_status)
- retval |= ufshcd_sl_intr(hba, enabled_intr_status);
-
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- } while (intr_status && --retries);
-
- if (retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
- ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- }
-
- spin_unlock(hba->host->host_lock);
- return retval;
-}
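For context, a handler with this signature is normally registered as a shared interrupt with the hba as the cookie that arrives in __hba. A minimal probe-side sketch (assumed, not part of this hunk):

	err = devm_request_irq(hba->dev, hba->irq, ufshcd_intr,
			       IRQF_SHARED, UFSHCD, hba);
	if (err)
		dev_err(hba->dev, "request irq %d failed: %d\n", hba->irq, err);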
-
-static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
-{
- int err = 0;
- u32 mask = 1 << tag;
- unsigned long flags;
-
- if (!test_bit(tag, &hba->outstanding_tasks))
- goto out;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_utmrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* poll for at most 1 sec for the doorbell register to be cleared by h/w */
- err = ufshcd_wait_for_register(hba,
- REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000, true);
-out:
- return err;
-}
-
-static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
- struct utp_task_req_desc *treq, u8 tm_function)
-{
- struct request_queue *q = hba->tmf_queue;
- struct Scsi_Host *host = hba->host;
- DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
- unsigned long flags;
- int free_slot, task_tag, err;
-
- /*
- * Get a free slot; the reserved-tag request allocation below sleeps
- * if no slot is available. Even though the allocation can block,
- * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
- */
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
- req->end_io_data = &wait;
- free_slot = req->tag;
- WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
- ufshcd_hold(hba, false);
-
- spin_lock_irqsave(host->host_lock, flags);
- task_tag = hba->nutrs + free_slot;
-
- treq->req_header.dword_0 |= cpu_to_be32(task_tag);
-
- memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
- ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
-
- /* send command to the controller */
- __set_bit(free_slot, &hba->outstanding_tasks);
-
- /* Make sure descriptors are ready before ringing the task doorbell */
- wmb();
-
- ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
- /* Make sure that doorbell is committed immediately */
- wmb();
-
- spin_unlock_irqrestore(host->host_lock, flags);
-
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
-
- /* wait until the task management command is completed */
- err = wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(TM_CMD_TIMEOUT));
- if (!err) {
- /*
- * Make sure that ufshcd_compl_tm() does not trigger a
- * use-after-free.
- */
- req->end_io_data = NULL;
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
- dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
- __func__, tm_function);
- if (ufshcd_clear_tm_cmd(hba, free_slot))
- dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
- __func__, free_slot);
- err = -ETIMEDOUT;
- } else {
- err = 0;
- memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
-
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
- }
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- __clear_bit(free_slot, &hba->outstanding_tasks);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- blk_put_request(req);
-
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
- u8 tm_function, u8 *tm_response)
-{
- struct utp_task_req_desc treq = { { 0 }, };
- int ocs_value, err;
-
- /* Configure task request descriptor */
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* Configure task request UPIU */
- treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
- cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
- treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
-
- /*
- * The host shall provide the same value for LUN field in the basic
- * header and for Input Parameter.
- */
- treq.input_param1 = cpu_to_be32(lun_id);
- treq.input_param2 = cpu_to_be32(task_id);
-
- err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
- if (err == -ETIMEDOUT)
- return err;
-
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
- if (ocs_value != OCS_SUCCESS)
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
- __func__, ocs_value);
- else if (tm_response)
- *tm_response = be32_to_cpu(treq.output_param1) &
- MASK_TM_SERVICE_RESP;
- return err;
-}
-
-/**
- * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
- * @hba: per-adapter instance
- * @req_upiu: upiu request
- * @rsp_upiu: upiu reply
- * @desc_buff: pointer to descriptor buffer, NULL if NA
- * @buff_len: descriptor size, 0 if NA
- * @cmd_type: specifies the type (NOP, Query...)
- * @desc_op: descriptor operation
- *
- * These types of requests use the UTP Transfer Request Descriptor (utrd).
- * Therefore, they "ride" the device management infrastructure: they use its
- * tag and task work queues.
- *
- * Since there is only one available tag for device management commands,
- * the caller is expected to hold the hba->dev_cmd.lock mutex.
- */
-static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- u8 *desc_buff, int *buff_len,
- enum dev_cmd_type cmd_type,
- enum query_opcode desc_op)
-{
- struct request_queue *q = hba->cmd_queue;
- struct request *req;
- struct ufshcd_lrb *lrbp;
- int err = 0;
- int tag;
- struct completion wait;
- unsigned long flags;
- u32 upiu_flags;
-
- down_read(&hba->clk_scaling_lock);
-
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
-
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
-
- lrbp->cmd = NULL;
- lrbp->sense_bufflen = 0;
- lrbp->sense_buffer = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0;
- lrbp->intr_cmd = true;
- hba->dev_cmd.type = cmd_type;
-
- switch (hba->ufs_version) {
- case UFSHCI_VERSION_10:
- case UFSHCI_VERSION_11:
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- break;
- default:
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- break;
- }
-
- /* update the task tag in the request upiu */
- req_upiu->header.dword_0 |= cpu_to_be32(tag);
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
-
- /* just copy the upiu request as it is */
- memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
- if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
- /* The Data Segment Area is optional depending upon the query
- * function value. For WRITE DESCRIPTOR, the data segment
- * follows right after the tsf.
- */
- memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
- *buff_len = 0;
- }
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-
- hba->dev_cmd.complete = &wait;
-
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /*
- * Ignore the return value here - ufshcd_check_query_response is
- * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
- * Read the response directly, ignoring all errors.
- */
- ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
-
- /* just copy the upiu response as it is */
- memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
- if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
- u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
- u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
-
- if (*buff_len >= resp_len) {
- memcpy(desc_buff, descp, resp_len);
- *buff_len = resp_len;
- } else {
- dev_warn(hba->dev,
- "%s: rsp size %d is bigger than buffer size %d",
- __func__, resp_len, *buff_len);
- *buff_len = 0;
- err = -EINVAL;
- }
- }
-
- blk_put_request(req);
-out_unlock:
- up_read(&hba->clk_scaling_lock);
- return err;
-}
-
-/**
- * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
- * @hba: per-adapter instance
- * @req_upiu: upiu request
- * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
- * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
- * @desc_buff: pointer to descriptor buffer, NULL if NA
- * @buff_len: descriptor size, 0 if NA
- * @desc_op: descriptor operation
- *
- * Supports UTP Transfer requests (nop and query), and UTP Task
- * Management requests.
- * It is up to the caller to fill the upiu content properly, as it will
- * be copied without any further input validation.
- */
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op)
-{
- int err;
- enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
- struct utp_task_req_desc treq = { { 0 }, };
- int ocs_value;
- u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
-
- switch (msgcode) {
- case UPIU_TRANSACTION_NOP_OUT:
- cmd_type = DEV_CMD_TYPE_NOP;
- /* fall through */
- case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba, false);
- mutex_lock(&hba->dev_cmd.lock);
- err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
- desc_buff, buff_len,
- cmd_type, desc_op);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
-
- break;
- case UPIU_TRANSACTION_TASK_REQ:
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
-
- err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
- if (err == -ETIMEDOUT)
- break;
-
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
- if (ocs_value != OCS_SUCCESS) {
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
- ocs_value);
- break;
- }
-
- memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
-
- break;
- default:
- err = -EINVAL;
-
- break;
- }
-
- return err;
-}
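A hedged caller sketch for the transfer-request path of ufshcd_exec_raw_upiu_cmd(), issuing a NOP OUT and reading back the NOP IN; the surrounding BSG plumbing is omitted and the values are illustrative:

	struct utp_upiu_req req_upiu = {}, rsp_upiu = {};
	int buff_len = 0;
	int err;

	/* NOP OUT: transaction code in the top byte of dword_0 (here 0) */
	req_upiu.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
	err = ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
				       UPIU_TRANSACTION_NOP_OUT,
				       NULL, &buff_len,
				       UPIU_QUERY_OPCODE_NOP);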
-
-/**
- * ufshcd_eh_device_reset_handler - device reset handler registered to
- * scsi layer.
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- unsigned int tag;
- u32 pos;
- int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
- unsigned long flags;
-
- host = cmd->device->host;
- hba = shost_priv(host);
- tag = cmd->request->tag;
-
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
- err = resp;
- goto out;
- }
-
- /* clear the commands that were pending for corresponding LUN */
- for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
- err = ufshcd_clear_cmd(hba, pos);
- if (err)
- break;
- }
- }
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- spin_unlock_irqrestore(host->host_lock, flags);
-
-out:
- hba->req_abort_count = 0;
- ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
- if (!err) {
- err = SUCCESS;
- } else {
- dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
- err = FAILED;
- }
- return err;
-}
-
-static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
-{
- struct ufshcd_lrb *lrbp;
- int tag;
-
- for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
- lrbp->req_abort_skip = true;
- }
-}
-
-/**
- * ufshcd_abort - abort a specific command
- * @cmd: SCSI command pointer
- *
- * Abort the pending command in the device by sending the UFS_ABORT_TASK task
- * management command, and in the host controller by clearing the door-bell
- * register. There can be a race between the controller sending the command to
- * the device and the abort being issued. To avoid that, first issue
- * UFS_QUERY_TASK to check whether the command was really issued and only then
- * try to abort it.
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_abort(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- unsigned long flags;
- unsigned int tag;
- int err = 0;
- int poll_cnt;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
- u32 reg;
-
- host = cmd->device->host;
- hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
-
- /*
- * Task abort to the device W-LUN is illegal. When this command
- * fails, due to the spec violation, the next SCSI error handling
- * step will be to send an LU reset which, again, is a spec
- * violation. To avoid these unnecessary/illegal steps we skip
- * straight to the last error handling stage: reset and restore.
- */
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
- return ufshcd_eh_host_reset_handler(cmd);
-
- ufshcd_hold(hba, false);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return SUCCESS */
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
- dev_err(hba->dev,
- "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
- __func__, tag, hba->outstanding_reqs, reg);
- goto out;
- }
-
- if (!(reg & (1 << tag))) {
- dev_err(hba->dev,
- "%s: cmd was completed, but without a notifying intr, tag = %d",
- __func__, tag);
- }
-
- /* Print Transfer Request of aborted task */
- dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
-
- /*
- * Print detailed info about aborted request.
- * As more than one request might get aborted at the same time,
- * print full information only for the first aborted request in order
- * to reduce repeated printouts. For other aborted requests only print
- * basic details.
- */
- scsi_print_command(hba->lrb[tag].cmd);
- if (!hba->req_abort_count) {
- ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
- ufshcd_print_host_regs(hba);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_trs(hba, 1 << tag, true);
- } else {
- ufshcd_print_trs(hba, 1 << tag, false);
- }
- hba->req_abort_count++;
-
- /* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip) {
- err = -EIO;
- goto out;
- }
-
- for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
- if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
- /* cmd pending in the device */
- dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
- __func__, tag);
- break;
- } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- /*
- * cmd not pending in the device, check if it is
- * in transition.
- */
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
- __func__, tag);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto out;
- } else {
- dev_err(hba->dev,
- "%s: no response from device. tag = %d, err %d\n",
- __func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
- }
- }
-
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
-
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err) {
- err = resp; /* service response error */
- dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
- __func__, tag, err);
- }
- goto out;
- }
-
- err = ufshcd_clear_cmd(hba, tag);
- if (err) {
- dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
- __func__, tag, err);
- goto out;
- }
-
- scsi_dma_unmap(cmd);
-
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_outstanding_req_clear(hba, tag);
- hba->lrb[tag].cmd = NULL;
- spin_unlock_irqrestore(host->host_lock, flags);
-
-out:
- if (!err) {
- err = SUCCESS;
- } else {
- dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
- ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
- err = FAILED;
- }
-
- /*
- * This ufshcd_release() corresponds to the original scsi cmd that got
- * aborted here (as we won't get any IRQ for it).
- */
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_host_reset_and_restore - reset and restore host controller
- * @hba: per-adapter instance
- *
- * Note that a host controller reset may issue DME_RESET to the
- * local and remote (device) UniPro stacks, and the attributes
- * are reset to their default state.
- *
- * Returns zero on success, non-zero on failure
- */
-static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
-{
- int err;
- unsigned long flags;
-
- /*
- * Stop the host controller and complete the requests
- * cleared by h/w
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba, false);
- hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
- hba->silence_err_logs = false;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, true);
-
- err = ufshcd_hba_enable(hba);
- if (err)
- goto out;
-
- /* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba, false);
-
- if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
- err = -EIO;
-out:
- if (err)
- dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
- ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
- return err;
-}
-
-/**
- * ufshcd_reset_and_restore - reset and re-initialize host/device
- * @hba: per-adapter instance
- *
- * Reset and recover the device and host, and re-establish the link.
- * This helps recover communication in fatal error conditions.
- *
- * Returns zero on success, non-zero on failure
- */
-static int ufshcd_reset_and_restore(struct ufs_hba *hba)
-{
- int err = 0;
- int retries = MAX_HOST_RESET_RETRIES;
-
- do {
- /* Reset the attached device */
- ufshcd_vops_device_reset(hba);
-
- err = ufshcd_host_reset_and_restore(hba);
- } while (err && --retries);
-
- return err;
-}
-
-/**
- * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
-{
- int err;
- unsigned long flags;
- struct ufs_hba *hba;
-
- hba = shost_priv(cmd->device->host);
-
- ufshcd_hold(hba, false);
- /*
- * Check if there is any race with fatal error handling.
- * If so, wait for it to complete. Even though fatal error
- * handling does reset and restore in some cases, don't assume
- * anything out of it. We are just avoiding a race here.
- */
- do {
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!(work_pending(&hba->eh_work) ||
- hba->ufshcd_state == UFSHCD_STATE_RESET ||
- hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
- break;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
- flush_work(&hba->eh_work);
- } while (1);
-
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- err = ufshcd_reset_and_restore(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!err) {
- err = SUCCESS;
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- } else {
- err = FAILED;
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- ufshcd_clear_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_get_max_icc_level - calculate the ICC level
- * @sup_curr_uA: max. current supported by the regulator
- * @start_scan: row of the descriptor table to start the scan from
- * @buff: power descriptor buffer
- *
- * Returns calculated max ICC level for specific regulator
- */
-static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
-{
- int i;
- int curr_uA;
- u16 data;
- u16 unit;
-
- for (i = start_scan; i >= 0; i--) {
- data = be16_to_cpup((__be16 *)&buff[2 * i]);
- unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
- ATTR_ICC_LVL_UNIT_OFFSET;
- curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
- switch (unit) {
- case UFSHCD_NANO_AMP:
- curr_uA = curr_uA / 1000;
- break;
- case UFSHCD_MILI_AMP:
- curr_uA = curr_uA * 1000;
- break;
- case UFSHCD_AMP:
- curr_uA = curr_uA * 1000 * 1000;
- break;
- case UFSHCD_MICRO_AMP:
- default:
- break;
- }
- if (sup_curr_uA >= curr_uA)
- break;
- }
- if (i < 0) {
- i = 0;
- pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
- }
-
- return (u32)i;
-}
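Each 16-bit entry scanned by ufshcd_get_max_icc_level() packs a unit field and a value field; a worked example with illustrative numbers shows how the switch scales an entry before comparing it against the regulator limit:

	int curr_uA = 400;		/* value field of one table entry */

	curr_uA = curr_uA * 1000;	/* milli-amp unit -> 400000 uA */
	/* a regulator with sup_curr_uA >= 400000 stops the scan at this row */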
-
-/**
- * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
- * @hba: per-adapter instance
- * @desc_buf: power descriptor buffer to extract ICC levels from.
- * @len: length of desc_buf
- *
- * In case the regulators are not initialized, 0 is returned.
- *
- * Returns calculated ICC level
- */
-static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- u8 *desc_buf, int len)
-{
- u32 icc_level = 0;
-
- if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
- !hba->vreg_info.vccq2) {
- dev_err(hba->dev,
- "%s: Regulator capability was not set, actvIccLevel=%d",
- __func__, icc_level);
- goto out;
- }
-
- if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
- icc_level = ufshcd_get_max_icc_level(
- hba->vreg_info.vcc->max_uA,
- POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
- &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
-
- if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
- icc_level = ufshcd_get_max_icc_level(
- hba->vreg_info.vccq->max_uA,
- icc_level,
- &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
-
- if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
- icc_level = ufshcd_get_max_icc_level(
- hba->vreg_info.vccq2->max_uA,
- icc_level,
- &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
-out:
- return icc_level;
-}
-
-static void ufshcd_init_icc_levels(struct ufs_hba *hba)
-{
- int ret;
- int buff_len = hba->desc_size.pwr_desc;
- u8 *desc_buf;
-
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
- if (!desc_buf)
- return;
-
- ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
- desc_buf, buff_len);
- if (ret) {
- dev_err(hba->dev,
- "%s: Failed reading power descriptor.len = %d ret = %d",
- __func__, buff_len, ret);
- goto out;
- }
-
- hba->init_prefetch_data.icc_level =
- ufshcd_find_max_sup_active_icc_level(hba,
- desc_buf, buff_len);
- dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
- __func__, hba->init_prefetch_data.icc_level);
-
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
-
- if (ret)
- dev_err(hba->dev,
- "%s: Failed configuring bActiveICCLevel = %d ret = %d",
- __func__, hba->init_prefetch_data.icc_level, ret);
-
-out:
- kfree(desc_buf);
-}
-
-/**
- * ufshcd_scsi_add_wlus - Adds required W-LUs
- * @hba: per-adapter instance
- *
- * UFS device specification requires the UFS devices to support 4 well known
- * logical units:
- * "REPORT_LUNS" (address: 01h)
- * "UFS Device" (address: 50h)
- * "RPMB" (address: 44h)
- * "BOOT" (address: 30h)
- * The UFS device's power management needs to be controlled by the "POWER
- * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
- * condition" field will take effect only when it is sent to the "UFS
- * device" well known logical unit, hence we require a scsi_device instance
- * representing this logical unit in order for the UFS host driver to send
- * the SSU command for power management.
- *
- * We also require a scsi_device instance for the "RPMB" (Replay Protected
- * Memory Block) LU so a user space process can control this LU. User space
- * may also want to have access to the BOOT LU.
- *
- * This function adds scsi device instances for each of the well known LUs
- * (except the "REPORT LUNS" LU).
- *
- * Returns zero on success (all required W-LUs are added successfully),
- * non-zero error value on failure (if failed to add any of the required W-LU).
- */
-static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
-{
- int ret = 0;
- struct scsi_device *sdev_rpmb;
- struct scsi_device *sdev_boot;
-
- hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
- if (IS_ERR(hba->sdev_ufs_device)) {
- ret = PTR_ERR(hba->sdev_ufs_device);
- hba->sdev_ufs_device = NULL;
- goto out;
- }
- scsi_device_put(hba->sdev_ufs_device);
-
- sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(sdev_rpmb)) {
- ret = PTR_ERR(sdev_rpmb);
- goto remove_sdev_ufs_device;
- }
- scsi_device_put(sdev_rpmb);
-
- sdev_boot = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
- if (IS_ERR(sdev_boot))
- dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
- else
- scsi_device_put(sdev_boot);
- goto out;
-
-remove_sdev_ufs_device:
- scsi_remove_device(hba->sdev_ufs_device);
-out:
- return ret;
-}
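The translation helper used above maps a UFS well known LUN into the SCSI W-LUN range so the mid layer can address it like an ordinary LU. A hedged sketch of a standalone call:

	/* UFS Device W-LUN (address 50h) as seen by the SCSI midlayer */
	u64 scsi_lun = ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);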
-
-static int ufs_get_device_desc(struct ufs_hba *hba)
-{
- int err;
- size_t buff_len;
- u8 model_index;
- u8 *desc_buf;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- buff_len = max_t(size_t, hba->desc_size.dev_desc,
- QUERY_DESC_MAX_SIZE + 1);
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
- if (!desc_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
- hba->desc_size.dev_desc);
- if (err) {
- dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
- __func__, err);
- goto out;
- }
-
- /*
- * getting vendor (manufacturerID) and Bank Index in big endian
- * format
- */
- dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
- desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
-
- model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
- err = ufshcd_read_string_desc(hba, model_index,
- &dev_info->model, SD_ASCII_STD);
- if (err < 0) {
- dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
- __func__, err);
- goto out;
- }
-
- /*
- * ufshcd_read_string_desc() returns the size of the string;
- * reset the error value.
- */
- err = 0;
-
-out:
- kfree(desc_buf);
- return err;
-}
-
-static void ufs_put_device_desc(struct ufs_hba *hba)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- kfree(dev_info->model);
- dev_info->model = NULL;
-}
-
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
- struct ufs_dev_fix *f;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- for (f = ufs_fixups; f->quirk; f++) {
- if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
- f->wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_info->model &&
- STR_PRFX_EQUAL(f->model, dev_info->model)) ||
- !strcmp(f->model, UFS_ANY_MODEL)))
- hba->dev_quirks |= f->quirk;
- }
-}
-
-/**
- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
- * @hba: per-adapter instance
- *
- * The PA_TActivate parameter can be tuned manually if the UniPro version is
- * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
- * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
- * help reduce the hibern8 exit latency.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(
- RX_MIN_ACTIVATETIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_min_activatetime);
- if (ret)
- goto out;
-
- /* make sure proper unit conversion is applied */
- tuned_pa_tactivate =
- ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
- / PA_TACTIVATE_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- tuned_pa_tactivate);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
- * @hba: per-adapter instance
- *
- * The PA_Hibern8Time parameter can be tuned manually if the UniPro version
- * is less than 1.61. PA_Hibern8Time needs to be the maximum of the local
- * M-PHY's TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's
- * RX_HIBERN8TIME_CAPABILITY. This optimal value can help reduce the
- * hibern8 exit latency.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
- u32 max_hibern8_time, tuned_pa_hibern8time;
-
- ret = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &local_tx_hibern8_time_cap);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_hibern8_time_cap);
- if (ret)
- goto out;
-
- max_hibern8_time = max(local_tx_hibern8_time_cap,
- peer_rx_hibern8_time_cap);
- /* make sure proper unit conversion is applied */
- tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
- / PA_HIBERN8_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- tuned_pa_hibern8time);
-out:
- return ret;
-}
-
-/**
- * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
- * less than device PA_TACTIVATE time.
- * @hba: per-adapter instance
- *
- * Some UFS devices require host PA_TACTIVATE to be lower than device
- * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
- * for such devices.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 granularity, peer_granularity;
- u32 pa_tactivate, peer_pa_tactivate;
- u32 pa_tactivate_us, peer_pa_tactivate_us;
- u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &granularity);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &peer_granularity);
- if (ret)
- goto out;
-
- if ((granularity < PA_GRANULARITY_MIN_VAL) ||
- (granularity > PA_GRANULARITY_MAX_VAL)) {
- dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
- __func__, granularity);
- return -EINVAL;
- }
-
- if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
- (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
- dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
- __func__, peer_granularity);
- return -EINVAL;
- }
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
- &peer_pa_tactivate);
- if (ret)
- goto out;
-
- pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
- peer_pa_tactivate_us = peer_pa_tactivate *
- gran_to_us_table[peer_granularity - 1];
-
- if (pa_tactivate_us > peer_pa_tactivate_us) {
- u32 new_peer_pa_tactivate;
-
- new_peer_pa_tactivate = pa_tactivate_us /
- gran_to_us_table[peer_granularity - 1];
- new_peer_pa_tactivate++;
- ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- new_peer_pa_tactivate);
- }
-
-out:
- return ret;
-}
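A worked example of the granularity arithmetic above, with illustrative numbers: per gran_to_us_table[], host granularity 4 maps to 16 us per unit and device granularity 1 maps to 1 us per unit.

	u32 pa_tactivate_us = 2 * 16;		/* host: 2 units * 16 us = 32 us */
	u32 peer_pa_tactivate_us = 10 * 1;	/* device: 10 units * 1 us = 10 us */
	/* 32 us > 10 us, so the device side is raised to 32 / 1 + 1 = 33 units */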
-
-static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
-{
- if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
- ufshcd_tune_pa_tactivate(hba);
- ufshcd_tune_pa_hibern8time(hba);
- }
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
- /* set 1ms timeout for PA_TACTIVATE */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
- ufshcd_quirk_tune_host_pa_tactivate(hba);
-
- ufshcd_vops_apply_dev_quirks(hba);
-}
-
-static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
-{
- hba->ufs_stats.hibern8_exit_cnt = 0;
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- hba->req_abort_count = 0;
-}
-
-static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
-{
- int err;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
- &hba->desc_size.dev_desc);
- if (err)
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
- &hba->desc_size.pwr_desc);
- if (err)
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
- &hba->desc_size.interc_desc);
- if (err)
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
- &hba->desc_size.conf_desc);
- if (err)
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
- &hba->desc_size.unit_desc);
- if (err)
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
- &hba->desc_size.geom_desc);
- if (err)
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
-
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
- &hba->desc_size.hlth_desc);
- if (err)
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
-static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
-{
- int err;
- size_t buff_len;
- u8 *desc_buf;
-
- buff_len = hba->desc_size.geom_desc;
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
- if (!desc_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
- desc_buf, buff_len);
- if (err) {
- dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
- __func__, err);
- goto out;
- }
-
- if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
- hba->dev_info.max_lu_supported = 32;
- else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
- hba->dev_info.max_lu_supported = 8;
-
-out:
- kfree(desc_buf);
- return err;
-}
-
-static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
- {19200000, REF_CLK_FREQ_19_2_MHZ},
- {26000000, REF_CLK_FREQ_26_MHZ},
- {38400000, REF_CLK_FREQ_38_4_MHZ},
- {52000000, REF_CLK_FREQ_52_MHZ},
- {0, REF_CLK_FREQ_INVAL},
-};
-
-static enum ufs_ref_clk_freq
-ufs_get_bref_clk_from_hz(unsigned long freq)
-{
- int i;
-
- for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
- if (ufs_ref_clk_freqs[i].freq_hz == freq)
- return ufs_ref_clk_freqs[i].val;
-
- return REF_CLK_FREQ_INVAL;
-}
-
-void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
-{
- unsigned long freq;
-
- freq = clk_get_rate(refclk);
-
-	hba->dev_ref_clk_freq = ufs_get_bref_clk_from_hz(freq);
-
- if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
- dev_err(hba->dev,
- "invalid ref_clk setting = %ld\n", freq);
-}
-
-static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
-{
- int err;
- u32 ref_clk;
- u32 freq = hba->dev_ref_clk_freq;
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
-
- if (err) {
- dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
- err);
- goto out;
- }
-
- if (ref_clk == freq)
- goto out; /* nothing to update */
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
-
- if (err) {
- dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
- ufs_ref_clk_freqs[freq].freq_hz);
- goto out;
- }
-
- dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
- ufs_ref_clk_freqs[freq].freq_hz);
-
-out:
- return err;
-}
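-
-/*
- * Note that the enum ufs_ref_clk_freq value written above doubles as an
- * index into ufs_ref_clk_freqs[] (hence the ufs_ref_clk_freqs[freq].freq_hz
- * lookups in the log messages). Assuming the attribute encoding implied by
- * that table (0 = 19.2 MHz, 1 = 26 MHz, 2 = 38.4 MHz, 3 = 52 MHz), telling
- * the device about a 26 MHz reference clock amounts to:
- */
-#if 0	/* illustrative sketch only */
-	u32 freq = REF_CLK_FREQ_26_MHZ;	/* index 1 in ufs_ref_clk_freqs[] */
-
-	ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-				QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
-#endif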
-
-static int ufshcd_device_params_init(struct ufs_hba *hba)
-{
- bool flag;
- int ret;
-
- /* Clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-
- /* Init check for device descriptor sizes */
- ufshcd_init_desc_sizes(hba);
-
- /* Init UFS geometry descriptor related parameters */
- ret = ufshcd_device_geo_params_init(hba);
- if (ret)
- goto out;
-
- /* Check and apply UFS device quirks */
- ret = ufs_get_device_desc(hba);
- if (ret) {
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
- __func__, ret);
- goto out;
- }
-
- ufs_fixup_device_setup(hba);
-
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
- hba->dev_info.f_power_on_wp_en = flag;
-
- /* Probe maximum power mode co-supported by both UFS host and device */
- if (ufshcd_get_max_pwr_mode(hba))
- dev_err(hba->dev,
- "%s: Failed getting max supported power mode\n",
- __func__);
-out:
- return ret;
-}
-
-/**
- * ufshcd_add_lus - probe and add UFS logical units
- * @hba: per-adapter instance
- */
-static int ufshcd_add_lus(struct ufs_hba *hba)
-{
- int ret;
-
- ufshcd_init_icc_levels(hba);
-
- /* Add required well known logical units to scsi mid layer */
- ret = ufshcd_scsi_add_wlus(hba);
- if (ret)
- goto out;
-
- /* Initialize devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
-
- hba->clk_scaling.is_allowed = true;
- }
-
- ufs_bsg_probe(hba);
- scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_probe_hba - probe hba to detect device and initialize
- * @hba: per-adapter instance
- * @async: asynchronous execution or not
- *
- * Execute link-startup and verify device initialization
- */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
-{
- int ret;
- ktime_t start = ktime_get();
-
- ret = ufshcd_link_startup(hba);
- if (ret)
- goto out;
-
- /* set the default level for urgent bkops */
- hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
- hba->is_urgent_bkops_lvl_checked = false;
-
- /* Debug counters initialization */
- ufshcd_clear_dbg_ufs_stats(hba);
-
- /* UniPro link is active now */
- ufshcd_set_link_active(hba);
-
- /* Verify device initialization by sending NOP OUT UPIU */
- ret = ufshcd_verify_dev_init(hba);
- if (ret)
- goto out;
-
-	/* Initiate UFS initialization, and wait until it completes */
- ret = ufshcd_complete_dev_init(hba);
- if (ret)
- goto out;
-
- /*
-	 * Initialize the UFS device parameters used by the driver;
-	 * these parameters are associated with UFS descriptors.
- */
- if (async) {
- ret = ufshcd_device_params_init(hba);
- if (ret)
- goto out;
- }
-
- ufshcd_tune_unipro_params(hba);
-
- /* UFS device is also active now */
- ufshcd_set_ufs_dev_active(hba);
- ufshcd_force_reset_auto_bkops(hba);
- hba->wlun_dev_clr_ua = true;
-
- /* Gear up to HS gear if supported */
- if (hba->max_pwr_info.is_valid) {
- /*
- * Set the right value to bRefClkFreq before attempting to
- * switch to HS gears.
- */
- if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
- ufshcd_set_dev_ref_clk(hba);
- ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret) {
- dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
- __func__, ret);
- goto out;
- }
- }
-
- /* set the state as operational after switching to desired gear */
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
-out:
-
- trace_ufshcd_init(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-
-/**
- * ufshcd_async_scan - asynchronous execution for probing hba
- * @data: data pointer to pass to this function
- * @cookie: cookie data
- */
-static void ufshcd_async_scan(void *data, async_cookie_t cookie)
-{
- struct ufs_hba *hba = (struct ufs_hba *)data;
- int ret;
-
- /* Initialize hba, detect and initialize UFS device */
- ret = ufshcd_probe_hba(hba, true);
- if (ret)
- goto out;
-
- /* Probe and add UFS logical units */
- ret = ufshcd_add_lus(hba);
-out:
- /*
- * If we failed to initialize the device or the device is not
- * present, turn off the power/clocks etc.
- */
- if (ret) {
- pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
- ufshcd_hba_exit(hba);
- }
-}
-
-static const struct attribute_group *ufshcd_driver_groups[] = {
- &ufs_sysfs_unit_descriptor_group,
- &ufs_sysfs_lun_attributes_group,
- NULL,
-};
-
-static struct scsi_host_template ufshcd_driver_template = {
- .module = THIS_MODULE,
- .name = UFSHCD,
- .proc_name = UFSHCD,
- .queuecommand = ufshcd_queuecommand,
- .slave_alloc = ufshcd_slave_alloc,
- .slave_configure = ufshcd_slave_configure,
- .slave_destroy = ufshcd_slave_destroy,
- .change_queue_depth = ufshcd_change_queue_depth,
- .eh_abort_handler = ufshcd_abort,
- .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
- .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = UFSHCD_CMD_PER_LUN,
- .can_queue = UFSHCD_CAN_QUEUE,
- .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
- .max_host_blocked = 1,
- .track_queue_depth = 1,
- .sdev_groups = ufshcd_driver_groups,
- .dma_boundary = PAGE_SIZE - 1,
- .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
-};
-
-static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
- int ua)
-{
- int ret;
-
- if (!vreg)
- return 0;
-
- /*
-	 * The "set_load" operation is only required on regulators that
-	 * have a specifically configured current limitation. Otherwise
-	 * a zero max_uA may cause unexpected behavior when the regulator
-	 * is enabled or set to high power mode.
- */
- if (!vreg->max_uA)
- return 0;
-
- ret = regulator_set_load(vreg->reg, ua);
- if (ret < 0) {
- dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
- __func__, vreg->name, ua, ret);
- }
-
- return ret;
-}
-
-static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
- struct ufs_vreg *vreg)
-{
- return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
-}
-
-static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
- struct ufs_vreg *vreg)
-{
- if (!vreg)
- return 0;
-
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
-}
-
-static int ufshcd_config_vreg(struct device *dev,
- struct ufs_vreg *vreg, bool on)
-{
- int ret = 0;
- struct regulator *reg;
- const char *name;
- int min_uV, uA_load;
-
- BUG_ON(!vreg);
-
- reg = vreg->reg;
- name = vreg->name;
-
- if (regulator_count_voltages(reg) > 0) {
- if (vreg->min_uV && vreg->max_uV) {
- min_uV = on ? vreg->min_uV : 0;
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
- dev_err(dev,
- "%s: %s set voltage failed, err=%d\n",
- __func__, name, ret);
- goto out;
- }
- }
-
- uA_load = on ? vreg->max_uA : 0;
- ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
- if (ret)
- goto out;
- }
-out:
- return ret;
-}
-
-static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || vreg->enabled)
- goto out;
-
- ret = ufshcd_config_vreg(dev, vreg, true);
- if (!ret)
- ret = regulator_enable(vreg->reg);
-
- if (!ret)
- vreg->enabled = true;
- else
- dev_err(dev, "%s: %s enable failed, err=%d\n",
- __func__, vreg->name, ret);
-out:
- return ret;
-}
-
-static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || !vreg->enabled)
- goto out;
-
- ret = regulator_disable(vreg->reg);
-
- if (!ret) {
- /* ignore errors on applying disable config */
- ufshcd_config_vreg(dev, vreg, false);
- vreg->enabled = false;
- } else {
- dev_err(dev, "%s: %s disable failed, err=%d\n",
- __func__, vreg->name, ret);
- }
-out:
- return ret;
-}
-
-static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
-{
- int ret = 0;
- struct device *dev = hba->dev;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- ret = ufshcd_toggle_vreg(dev, info->vcc, on);
- if (ret)
- goto out;
-
- ret = ufshcd_toggle_vreg(dev, info->vccq, on);
- if (ret)
- goto out;
-
- ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
- if (ret)
- goto out;
-
-out:
- if (ret) {
- ufshcd_toggle_vreg(dev, info->vccq2, false);
- ufshcd_toggle_vreg(dev, info->vccq, false);
- ufshcd_toggle_vreg(dev, info->vcc, false);
- }
- return ret;
-}
-
-static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
-{
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
-}
-
-static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg)
- goto out;
-
- vreg->reg = devm_regulator_get(dev, vreg->name);
- if (IS_ERR(vreg->reg)) {
- ret = PTR_ERR(vreg->reg);
- dev_err(dev, "%s: %s get failed, err=%d\n",
- __func__, vreg->name, ret);
- }
-out:
- return ret;
-}
-
-static int ufshcd_init_vreg(struct ufs_hba *hba)
-{
- int ret = 0;
- struct device *dev = hba->dev;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- ret = ufshcd_get_vreg(dev, info->vcc);
- if (ret)
- goto out;
-
- ret = ufshcd_get_vreg(dev, info->vccq);
- if (ret)
- goto out;
-
- ret = ufshcd_get_vreg(dev, info->vccq2);
-out:
- return ret;
-}
-
-static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
-{
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- if (info)
- return ufshcd_get_vreg(hba->dev, info->vdd_hba);
-
- return 0;
-}
-
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
- ktime_t start = ktime_get();
- bool clk_state_changed = false;
-
- if (list_empty(head))
- goto out;
-
- /*
- * vendor specific setup_clocks ops may depend on clocks managed by
-	 * this standard driver, hence call the vendor specific setup_clocks
- * before disabling the clocks managed here.
- */
- if (!on) {
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
- }
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
- continue;
-
- clk_state_changed = on ^ clki->enabled;
- if (on && !clki->enabled) {
- ret = clk_prepare_enable(clki->clk);
- if (ret) {
- dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
- __func__, clki->name, ret);
- goto out;
- }
- } else if (!on && clki->enabled) {
- clk_disable_unprepare(clki->clk);
- }
- clki->enabled = on;
- dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
- clki->name, on ? "en" : "dis");
- }
- }
-
- /*
- * vendor specific setup_clocks ops may depend on clocks managed by
-	 * this standard driver, hence call the vendor specific setup_clocks
- * after enabling the clocks managed here.
- */
- if (on) {
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
- }
-
-out:
- if (ret) {
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
- clk_disable_unprepare(clki->clk);
- }
- } else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-
- if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
- (on ? "on" : "off"),
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- return ret;
-}
-
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
-{
- return __ufshcd_setup_clocks(hba, on, false);
-}
-
-static int ufshcd_init_clocks(struct ufs_hba *hba)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct device *dev = hba->dev;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- goto out;
-
- list_for_each_entry(clki, head, list) {
- if (!clki->name)
- continue;
-
- clki->clk = devm_clk_get(dev, clki->name);
- if (IS_ERR(clki->clk)) {
- ret = PTR_ERR(clki->clk);
- dev_err(dev, "%s: %s clk get failed, %d\n",
- __func__, clki->name, ret);
- goto out;
- }
-
- /*
- * Parse device ref clk freq as per device tree "ref_clk".
- * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
- * in ufshcd_alloc_host().
- */
- if (!strcmp(clki->name, "ref_clk"))
- ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
-
- if (clki->max_freq) {
- ret = clk_set_rate(clki->clk, clki->max_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->max_freq, ret);
- goto out;
- }
- clki->curr_freq = clki->max_freq;
- }
- dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
- clki->name, clk_get_rate(clki->clk));
- }
-out:
- return ret;
-}
-
-static int ufshcd_variant_hba_init(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (!hba->vops)
- goto out;
-
- err = ufshcd_vops_init(hba);
- if (err)
- goto out;
-
- err = ufshcd_vops_setup_regulators(hba, true);
- if (err)
- goto out_exit;
-
- goto out;
-
-out_exit:
- ufshcd_vops_exit(hba);
-out:
- if (err)
- dev_err(hba->dev, "%s: variant %s init failed err %d\n",
- __func__, ufshcd_get_var_name(hba), err);
- return err;
-}
-
-static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
-{
- if (!hba->vops)
- return;
-
- ufshcd_vops_setup_regulators(hba, false);
-
- ufshcd_vops_exit(hba);
-}
-
-static int ufshcd_hba_init(struct ufs_hba *hba)
-{
- int err;
-
- /*
-	 * Handle host controller power separately from the UFS device power
-	 * rails, as this makes it easier to control host controller power
-	 * collapse, which is different from UFS device power collapse.
-	 * Also, enable the host controller power before going ahead with the
-	 * rest of the initialization here.
- */
- err = ufshcd_init_hba_vreg(hba);
- if (err)
- goto out;
-
- err = ufshcd_setup_hba_vreg(hba, true);
- if (err)
- goto out;
-
- err = ufshcd_init_clocks(hba);
- if (err)
- goto out_disable_hba_vreg;
-
- err = ufshcd_setup_clocks(hba, true);
- if (err)
- goto out_disable_hba_vreg;
-
- err = ufshcd_init_vreg(hba);
- if (err)
- goto out_disable_clks;
-
- err = ufshcd_setup_vreg(hba, true);
- if (err)
- goto out_disable_clks;
-
- err = ufshcd_variant_hba_init(hba);
- if (err)
- goto out_disable_vreg;
-
- hba->is_powered = true;
- goto out;
-
-out_disable_vreg:
- ufshcd_setup_vreg(hba, false);
-out_disable_clks:
- ufshcd_setup_clocks(hba, false);
-out_disable_hba_vreg:
- ufshcd_setup_hba_vreg(hba, false);
-out:
- return err;
-}
-
-static void ufshcd_hba_exit(struct ufs_hba *hba)
-{
- if (hba->is_powered) {
- ufshcd_variant_hba_exit(hba);
- ufshcd_setup_vreg(hba, false);
- ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- if (hba->devfreq)
- ufshcd_suspend_clkscaling(hba);
- ufshcd_setup_clocks(hba, false);
- ufshcd_setup_hba_vreg(hba, false);
- hba->is_powered = false;
- ufs_put_device_desc(hba);
- }
-}
-
-static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
-{
- unsigned char cmd[6] = {REQUEST_SENSE,
- 0,
- 0,
- 0,
- UFS_SENSE_SIZE,
- 0};
- char *buffer;
- int ret;
-
- buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
- UFS_SENSE_SIZE, NULL, NULL,
- msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
- if (ret)
- pr_err("%s: failed with err %d\n", __func__, ret);
-
- kfree(buffer);
-out:
- return ret;
-}
-
-/**
- * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
- * power mode
- * @hba: per adapter instance
- * @pwr_mode: device power mode to set
- *
- * Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
- */
-static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
- enum ufs_dev_pwr_mode pwr_mode)
-{
- unsigned char cmd[6] = { START_STOP };
- struct scsi_sense_hdr sshdr;
- struct scsi_device *sdp;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- sdp = hba->sdev_ufs_device;
- if (sdp) {
- ret = scsi_device_get(sdp);
- if (!ret && !scsi_device_online(sdp)) {
- ret = -ENODEV;
- scsi_device_put(sdp);
- }
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (ret)
- return ret;
-
- /*
- * If scsi commands fail, the scsi mid-layer schedules scsi error-
-	 * handling, which would wait for the host to be resumed. Since we
-	 * know we are functional while we are here, skip host resume in the
-	 * error handling context.
- */
- hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua) {
- ret = ufshcd_send_request_sense(hba, sdp);
- if (ret)
- goto out;
- /* Unit attention condition is cleared now */
- hba->wlun_dev_clr_ua = false;
- }
-
- cmd[4] = pwr_mode << 4;
-
- /*
-	 * This function is generally called from the power management
-	 * callbacks, hence set the RQF_PM flag so that it doesn't resume
-	 * already suspended children.
- */
- ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
- if (ret) {
- sdev_printk(KERN_WARNING, sdp,
- "START_STOP failed for power mode: %d, result %x\n",
- pwr_mode, ret);
- if (driver_byte(ret) == DRIVER_SENSE)
- scsi_print_sense_hdr(sdp, NULL, &sshdr);
- }
-
- if (!ret)
- hba->curr_dev_pwr_mode = pwr_mode;
-out:
- scsi_device_put(sdp);
- hba->host->eh_noresume = 0;
- return ret;
-}
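-
-/*
- * For reference, cmd[4] above is the START STOP UNIT POWER CONDITION field
- * (upper nibble of CDB byte 4). Assuming the usual ufs_dev_pwr_mode
- * encoding (UFS_ACTIVE_PWR_MODE = 1, UFS_SLEEP_PWR_MODE = 2,
- * UFS_POWERDOWN_PWR_MODE = 3), the byte works out as:
- *
- *	cmd[4] = UFS_SLEEP_PWR_MODE << 4;	yields 0x20
- *	cmd[4] = UFS_POWERDOWN_PWR_MODE << 4;	yields 0x30
- */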
-
-static int ufshcd_link_state_transition(struct ufs_hba *hba,
- enum uic_link_state req_link_state,
- int check_for_bkops)
-{
- int ret = 0;
-
- if (req_link_state == hba->uic_link_state)
- return 0;
-
- if (req_link_state == UIC_LINK_HIBERN8_STATE) {
- ret = ufshcd_uic_hibern8_enter(hba);
- if (!ret)
- ufshcd_set_link_hibern8(hba);
- else
- goto out;
- }
- /*
- * If autobkops is enabled, link can't be turned off because
- * turning off the link would also turn off the device.
- */
- else if ((req_link_state == UIC_LINK_OFF_STATE) &&
- (!check_for_bkops || !hba->auto_bkops_enabled)) {
- /*
-		 * Make sure that the link is in low power mode; we currently
-		 * do this by putting the link in Hibern8. Another way to put
-		 * the link in low power mode is to send a DME end point reset
-		 * to the device and then send the DME reset command to the
-		 * local UniPro, but putting the link in Hibern8 is much faster.
- */
- ret = ufshcd_uic_hibern8_enter(hba);
- if (ret)
- goto out;
- /*
- * Change controller state to "reset state" which
- * should also put the link in off/reset state
- */
- ufshcd_hba_stop(hba, true);
- /*
- * TODO: Check if we need any delay to make sure that
- * controller is reset
- */
- ufshcd_set_link_off(hba);
- }
-
-out:
- return ret;
-}
-
-static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
-{
- /*
-	 * It seems some UFS devices may keep drawing more than the sleep
-	 * current (at least for 500us) from the UFS rails (especially from
-	 * the VCCQ rail). To avoid this situation, add a 2ms delay before
-	 * putting these UFS rails in LPM mode.
- */
- if (!ufshcd_is_link_active(hba) &&
- hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
- usleep_range(2000, 2100);
-
- /*
-	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
-	 * save some power.
-	 *
-	 * If the UFS device and link are in OFF state, all power supplies
-	 * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is
-	 * not required. If the UFS link is inactive (Hibern8 or OFF state)
-	 * and the device is in sleep state, put the VCCQ & VCCQ2 rails in
-	 * LPM mode.
-	 *
-	 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
-	 * anyway in a low power state, which saves some power.
- */
- if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
- !hba->dev_info.is_lu_power_on_wp) {
- ufshcd_setup_vreg(hba, false);
- } else if (!ufshcd_is_ufs_dev_active(hba)) {
- ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
- if (!ufshcd_is_link_active(hba)) {
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
- }
- }
-}
-
-static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
-{
- int ret = 0;
-
- if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
- !hba->dev_info.is_lu_power_on_wp) {
- ret = ufshcd_setup_vreg(hba, true);
- } else if (!ufshcd_is_ufs_dev_active(hba)) {
- if (!ret && !ufshcd_is_link_active(hba)) {
- ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
- if (ret)
- goto vcc_disable;
- ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
- if (ret)
- goto vccq_lpm;
- }
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
- }
- goto out;
-
-vccq_lpm:
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
-vcc_disable:
- ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
-out:
- return ret;
-}
-
-static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
-{
- if (ufshcd_is_link_off(hba))
- ufshcd_setup_hba_vreg(hba, false);
-}
-
-static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
-{
- if (ufshcd_is_link_off(hba))
- ufshcd_setup_hba_vreg(hba, true);
-}
-
-/**
- * ufshcd_suspend - helper function for suspend operations
- * @hba: per adapter instance
- * @pm_op: desired low power operation type
- *
- * This function will try to put the UFS device and link into low power
- * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
- * (System PM level).
- *
- * If this function is called during shutdown, it will make sure that
- * both the UFS device and UFS link are powered off.
- *
- * NOTE: UFS device & link must be active before we enter this function.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- int ret = 0;
- enum ufs_pm_level pm_lvl;
- enum ufs_dev_pwr_mode req_dev_pwr_mode;
- enum uic_link_state req_link_state;
-
- hba->pm_op_in_progress = 1;
- if (!ufshcd_is_shutdown_pm(pm_op)) {
- pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
- hba->rpm_lvl : hba->spm_lvl;
- req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
- req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
- } else {
- req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
- req_link_state = UIC_LINK_OFF_STATE;
- }
-
- /*
- * If we can't transition into any of the low power modes
- * just gate the clocks.
- */
- ufshcd_hold(hba, false);
- hba->clk_gating.is_suspended = true;
-
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
- ufshcd_suspend_clkscaling(hba);
- }
-
- if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
- req_link_state == UIC_LINK_ACTIVE_STATE) {
- goto disable_clks;
- }
-
- if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
- (req_link_state == hba->uic_link_state))
- goto enable_gating;
-
-	/* UFS device & link must be active before we enter this function */
- if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
- ret = -EINVAL;
- goto enable_gating;
- }
-
- if (ufshcd_is_runtime_pm(pm_op)) {
- if (ufshcd_can_autobkops_during_suspend(hba)) {
- /*
-			 * The device is idle with no requests in the queue;
-			 * allow background operations if the bkops status
-			 * shows that performance might be impacted.
- */
- ret = ufshcd_urgent_bkops(hba);
- if (ret)
- goto enable_gating;
- } else {
- /* make sure that auto bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- }
- }
-
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op))) {
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_gating;
- }
-
- ret = ufshcd_link_state_transition(hba, req_link_state, 1);
- if (ret)
- goto set_dev_active;
-
- ufshcd_vreg_set_lpm(hba);
-
-disable_clks:
- /*
-	 * Call vendor specific suspend callback. As these callbacks may
-	 * access vendor specific host controller register space, call them
-	 * while the host clocks are still ON (before they are turned off).
- */
- ret = ufshcd_vops_suspend(hba, pm_op);
- if (ret)
- goto set_link_active;
- /*
-	 * Disable the host irq, as there won't be any host controller
-	 * transactions expected till resume.
- */
- ufshcd_disable_irq(hba);
-
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
- else
- /* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
-
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
-
- /* Put the host controller in low power mode if possible */
- ufshcd_hba_vreg_set_lpm(hba);
- goto out;
-
-set_link_active:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
- ufshcd_vreg_set_hpm(hba);
- if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
- ufshcd_set_link_active(hba);
- else if (ufshcd_is_link_off(hba))
- ufshcd_host_reset_and_restore(hba);
-set_dev_active:
- if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
- ufshcd_disable_auto_bkops(hba);
-enable_gating:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
- hba->clk_gating.is_suspended = false;
- ufshcd_release(hba);
-out:
- hba->pm_op_in_progress = 0;
- if (ret)
- ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
- return ret;
-}
-
-/**
- * ufshcd_resume - helper function for resume operations
- * @hba: per adapter instance
- * @pm_op: runtime PM or system PM
- *
- * This function basically brings the UFS device, UniPro link and controller
- * to active state.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- int ret;
- enum uic_link_state old_link_state;
-
- hba->pm_op_in_progress = 1;
- old_link_state = hba->uic_link_state;
-
- ufshcd_hba_vreg_set_hpm(hba);
- /* Make sure clocks are enabled before accessing controller */
- ret = ufshcd_setup_clocks(hba, true);
- if (ret)
- goto out;
-
- /* enable the host irq as host controller would be active soon */
- ufshcd_enable_irq(hba);
-
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
-
- /*
- * Call vendor specific resume callback. As these callbacks may access
-	 * vendor specific host controller register space, call them when the
- * host clocks are ON.
- */
- ret = ufshcd_vops_resume(hba, pm_op);
- if (ret)
- goto disable_vreg;
-
- if (ufshcd_is_link_hibern8(hba)) {
- ret = ufshcd_uic_hibern8_exit(hba);
- if (!ret)
- ufshcd_set_link_active(hba);
- else
- goto vendor_suspend;
- } else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
- /*
- * ufshcd_host_reset_and_restore() should have already
- * set the link state as active
- */
- if (ret || !ufshcd_is_link_active(hba))
- goto vendor_suspend;
- }
-
- if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
- if (ret)
- goto set_old_link_state;
- }
-
- if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
- ufshcd_enable_auto_bkops(hba);
- else
- /*
- * If BKOPs operations are urgently needed at this moment then
- * keep auto-bkops enabled or else disable it.
- */
- ufshcd_urgent_bkops(hba);
-
- hba->clk_gating.is_suspended = false;
-
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
-
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
- goto out;
-
-set_old_link_state:
- ufshcd_link_state_transition(hba, old_link_state, 0);
-vendor_suspend:
- ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
- ufshcd_vreg_set_lpm(hba);
-disable_irq_and_vops_clks:
- ufshcd_disable_irq(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_suspend_clkscaling(hba);
- ufshcd_setup_clocks(hba, false);
-out:
- hba->pm_op_in_progress = 0;
- if (ret)
- ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
- return ret;
-}
-
-/**
- * ufshcd_system_suspend - system suspend routine
- * @hba: per adapter instance
- *
- * Check the description of ufshcd_suspend() function for more details.
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_system_suspend(struct ufs_hba *hba)
-{
- int ret = 0;
- ktime_t start = ktime_get();
-
- if (!hba || !hba->is_powered)
- return 0;
-
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) &&
- (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state))
- goto out;
-
- if (pm_runtime_suspended(hba->dev)) {
- /*
-		 * The UFS device and/or UFS link low power states during
-		 * runtime suspend seem to be different from what is expected
-		 * during system suspend. Hence runtime resume the device &
-		 * link and let the system suspend low power states take
-		 * effect.
-		 * TODO: If resume takes a long time, we might optimize it in
-		 * the future by not resuming everything if possible.
- */
- ret = ufshcd_runtime_resume(hba);
- if (ret)
- goto out;
- }
-
- ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
-out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = true;
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_system_suspend);
-
-/**
- * ufshcd_system_resume - system resume routine
- * @hba: per adapter instance
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_system_resume(struct ufs_hba *hba)
-{
- int ret = 0;
- ktime_t start = ktime_get();
-
- if (!hba)
- return -EINVAL;
-
- if (!hba->is_powered || pm_runtime_suspended(hba->dev))
- /*
- * Let the runtime resume take care of resuming
- * if runtime suspended.
- */
- goto out;
- else
- ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
-out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = false;
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_system_resume);
-
-/**
- * ufshcd_runtime_suspend - runtime suspend routine
- * @hba: per adapter instance
- *
- * Check the description of ufshcd_suspend() function for more details.
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_runtime_suspend(struct ufs_hba *hba)
-{
- int ret = 0;
- ktime_t start = ktime_get();
-
- if (!hba)
- return -EINVAL;
-
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
-out:
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_runtime_suspend);
-
-/**
- * ufshcd_runtime_resume - runtime resume routine
- * @hba: per adapter instance
- *
- * This function basically brings the UFS device, UniPro link and controller
- * to active state. The following operations are done in this function:
- *
- * 1. Turn on all the controller related clocks
- * 2. Bring the UniPro link out of Hibernate state
- * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
- * to active state.
- * 4. If auto-bkops is enabled on the device, disable it.
- *
- * So the following would be the possible power state after this function
- * returns successfully:
- * S1: UFS device in Active state with VCC rail ON
- * UniPro link in Active state
- * All the UFS/UniPro controller clocks are ON
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_runtime_resume(struct ufs_hba *hba)
-{
- int ret = 0;
- ktime_t start = ktime_get();
-
- if (!hba)
- return -EINVAL;
-
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
-out:
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_runtime_resume);
-
-int ufshcd_runtime_idle(struct ufs_hba *hba)
-{
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_runtime_idle);
-
-/**
- * ufshcd_shutdown - shutdown routine
- * @hba: per adapter instance
- *
- * This function would power off both UFS device and UFS link.
- *
- * Returns 0 always to allow force shutdown even in case of errors.
- */
-int ufshcd_shutdown(struct ufs_hba *hba)
-{
- int ret = 0;
-
- if (!hba->is_powered)
- goto out;
-
- if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
- goto out;
-
- if (pm_runtime_suspended(hba->dev)) {
- ret = ufshcd_runtime_resume(hba);
- if (ret)
- goto out;
- }
-
- ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
-out:
- if (ret)
- dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
- /* allow force shutdown even in case of errors */
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_shutdown);
-
-/**
- * ufshcd_remove - de-allocate the SCSI host and host memory space
- *		data structures
- * @hba: per adapter instance
- */
-void ufshcd_remove(struct ufs_hba *hba)
-{
- ufs_bsg_remove(hba);
- ufs_sysfs_remove_nodes(hba->dev);
- blk_cleanup_queue(hba->tmf_queue);
- blk_mq_free_tag_set(&hba->tmf_tag_set);
- blk_cleanup_queue(hba->cmd_queue);
- scsi_remove_host(hba->host);
- /* disable interrupts */
- ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba, true);
-
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
- ufshcd_hba_exit(hba);
-}
-EXPORT_SYMBOL_GPL(ufshcd_remove);
-
-/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @hba: per adapter instance
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
- if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
- if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
- return 0;
- }
- return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
-}
-
-/**
- * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
- * @dev: pointer to device handle
- * @hba_handle: driver private handle
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
-{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- int err = 0;
-
- if (!dev) {
- dev_err(dev,
-			"Invalid memory reference: dev is NULL\n");
- err = -ENODEV;
- goto out_error;
- }
-
- host = scsi_host_alloc(&ufshcd_driver_template,
- sizeof(struct ufs_hba));
- if (!host) {
- dev_err(dev, "scsi_host_alloc failed\n");
- err = -ENOMEM;
- goto out_error;
- }
- hba = shost_priv(host);
- hba->host = host;
- hba->dev = dev;
- *hba_handle = hba;
- hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
-
- INIT_LIST_HEAD(&hba->clk_list_head);
-
-out_error:
- return err;
-}
-EXPORT_SYMBOL(ufshcd_alloc_host);
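-
-/*
- * Typical usage from a bus glue driver, along the lines of the platform/PCI
- * wrappers (a minimal sketch; error handling abbreviated, and where
- * mmio_base and irq come from depends on the bus):
- */
-#if 0	/* illustrative sketch only */
-	struct ufs_hba *hba;
-	int err;
-
-	err = ufshcd_alloc_host(dev, &hba);
-	if (err)
-		return err;
-
-	err = ufshcd_init(hba, mmio_base, irq);
-	if (err)
-		ufshcd_dealloc_host(hba);
-#endif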
-
-/* This function exists because blk_mq_alloc_tag_set() requires this. */
-static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *qd)
-{
- WARN_ON_ONCE(true);
- return BLK_STS_NOTSUPP;
-}
-
-static const struct blk_mq_ops ufshcd_tmf_ops = {
- .queue_rq = ufshcd_queue_tmf,
-};
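-
-/*
- * The stub above is never meant to run: the tag set exists purely so that
- * blk-mq can hand out hba->nutmrs unique tags to use as task management
- * slot numbers. A sketch of how such a tag is obtained and released (along
- * the lines of the TMF issue path elsewhere in this file):
- */
-#if 0	/* illustrative sketch only */
-	struct request *req;
-	int free_slot;
-
-	req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
-	free_slot = req->tag;	/* 0 <= free_slot < hba->nutmrs */
-	/* ... program UTMRD slot free_slot and ring the doorbell ... */
-	blk_put_request(req);
-#endif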
-
-/**
- * ufshcd_init - Driver initialization routine
- * @hba: per-adapter instance
- * @mmio_base: base register address
- * @irq: Interrupt line of device
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
-{
- int err;
- struct Scsi_Host *host = hba->host;
- struct device *dev = hba->dev;
-
- if (!mmio_base) {
- dev_err(hba->dev,
-			"Invalid memory reference: mmio_base is NULL\n");
- err = -ENODEV;
- goto out_error;
- }
-
- hba->mmio_base = mmio_base;
- hba->irq = irq;
-
- err = ufshcd_hba_init(hba);
- if (err)
- goto out_error;
-
- /* Read capabilities registers */
- ufshcd_hba_capabilities(hba);
-
- /* Get UFS version supported by the controller */
- hba->ufs_version = ufshcd_get_ufs_version(hba);
-
- if ((hba->ufs_version != UFSHCI_VERSION_10) &&
- (hba->ufs_version != UFSHCI_VERSION_11) &&
- (hba->ufs_version != UFSHCI_VERSION_20) &&
- (hba->ufs_version != UFSHCI_VERSION_21))
- dev_err(hba->dev, "invalid UFS version 0x%x\n",
- hba->ufs_version);
-
- /* Get Interrupt bit mask per version */
- hba->intr_mask = ufshcd_get_intr_mask(hba);
-
- err = ufshcd_set_dma_mask(hba);
- if (err) {
- dev_err(hba->dev, "set dma mask failed\n");
- goto out_disable;
- }
-
- /* Allocate memory for host memory space */
- err = ufshcd_memory_alloc(hba);
- if (err) {
- dev_err(hba->dev, "Memory allocation failed\n");
- goto out_disable;
- }
-
- /* Configure LRB */
- ufshcd_host_memory_configure(hba);
-
- host->can_queue = hba->nutrs;
- host->cmd_per_lun = hba->nutrs;
- host->max_id = UFSHCD_MAX_ID;
- host->max_lun = UFS_MAX_LUNS;
- host->max_channel = UFSHCD_MAX_CHANNEL;
- host->unique_id = host->host_no;
- host->max_cmd_len = UFS_CDB_SIZE;
-
- hba->max_pwr_info.is_valid = false;
-
- /* Initialize work queues */
- INIT_WORK(&hba->eh_work, ufshcd_err_handler);
- INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
-
- /* Initialize UIC command mutex */
- mutex_init(&hba->uic_cmd_mutex);
-
- /* Initialize mutex for device management commands */
- mutex_init(&hba->dev_cmd.lock);
-
- init_rwsem(&hba->clk_scaling_lock);
-
- ufshcd_init_clk_gating(hba);
-
- ufshcd_init_clk_scaling(hba);
-
- /*
- * In order to avoid any spurious interrupt immediately after
- * registering UFS controller interrupt handler, clear any pending UFS
- * interrupt status and disable all the UFS interrupts.
- */
- ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
- REG_INTERRUPT_STATUS);
- ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
- /*
- * Make sure that UFS interrupts are disabled and any pending interrupt
- * status is cleared before registering UFS interrupt handler.
- */
- mb();
-
- /* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
- if (err) {
- dev_err(hba->dev, "request irq failed\n");
- goto exit_gating;
- } else {
- hba->is_irq_enabled = true;
- }
-
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto exit_gating;
- }
-
- hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
- if (IS_ERR(hba->cmd_queue)) {
- err = PTR_ERR(hba->cmd_queue);
- goto out_remove_scsi_host;
- }
-
- hba->tmf_tag_set = (struct blk_mq_tag_set) {
- .nr_hw_queues = 1,
- .queue_depth = hba->nutmrs,
- .ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
- };
- err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
- if (err < 0)
- goto free_cmd_queue;
- hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
- if (IS_ERR(hba->tmf_queue)) {
- err = PTR_ERR(hba->tmf_queue);
- goto free_tmf_tag_set;
- }
-
- /* Reset the attached device */
- ufshcd_vops_device_reset(hba);
-
- /* Host controller enable */
- err = ufshcd_hba_enable(hba);
- if (err) {
- dev_err(hba->dev, "Host controller enable failed\n");
- ufshcd_print_host_regs(hba);
- ufshcd_print_host_state(hba);
- goto free_tmf_queue;
- }
-
- /*
- * Set the default power management level for runtime and system PM.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
-	/* Set the default auto-hibernate idle timer value to 150 ms */
- if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
- }
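-	/*
-	 * With the usual UFSHCI AHIT field layout (idle timer value in the
-	 * low bits, timer scale field above it), scale 3 selects 1 ms units,
-	 * so the value built above encodes 150 x 1 ms = 150 ms of idle time
-	 * before the link auto-enters Hibern8.
-	 */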
-
- /* Hold auto suspend until async scan completes */
- pm_runtime_get_sync(dev);
- atomic_set(&hba->scsi_block_reqs_cnt, 0);
- /*
-	 * We assume that the device wasn't put into a sleep/power-down
-	 * state exclusively during the boot stage before the kernel.
- * This assumption helps avoid doing link startup twice during
- * ufshcd_probe_hba().
- */
- ufshcd_set_ufs_dev_active(hba);
-
- async_schedule(ufshcd_async_scan, hba);
- ufs_sysfs_add_nodes(hba->dev);
-
- return 0;
-
-free_tmf_queue:
- blk_cleanup_queue(hba->tmf_queue);
-free_tmf_tag_set:
- blk_mq_free_tag_set(&hba->tmf_tag_set);
-free_cmd_queue:
- blk_cleanup_queue(hba->cmd_queue);
-out_remove_scsi_host:
- scsi_remove_host(hba->host);
-exit_gating:
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
-out_disable:
- hba->is_irq_enabled = false;
- ufshcd_hba_exit(hba);
-out_error:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufshcd_init);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
-MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver Core");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
deleted file mode 100644
index 2ae6c7c8528c..000000000000
--- a/drivers/scsi/ufs/ufshcd.h
+++ /dev/null
@@ -1,1115 +0,0 @@
-/*
- * Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufshcd.h
- * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- */
-
-#ifndef _UFSHCD_H
-#define _UFSHCD_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/regulator/consumer.h>
-#include "unipro.h"
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.2"
-
-struct ufs_hba;
-
-enum dev_cmd_type {
- DEV_CMD_TYPE_NOP = 0x0,
- DEV_CMD_TYPE_QUERY = 0x1,
-};
-
-/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- * @done: UIC command completion
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- int result;
- struct completion done;
-};
-
-/* Used to differentiate the power management options */
-enum ufs_pm_op {
- UFS_RUNTIME_PM,
- UFS_SYSTEM_PM,
- UFS_SHUTDOWN_PM,
-};
-
-#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
-#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
-#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
-
-/* Host <-> Device UniPro Link state */
-enum uic_link_state {
- UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
- UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
- UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
-};
-
-#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
-#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
- UIC_LINK_ACTIVE_STATE)
-#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
- UIC_LINK_HIBERN8_STATE)
-#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
-#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
- UIC_LINK_ACTIVE_STATE)
-#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
- UIC_LINK_HIBERN8_STATE)
-
-/*
- * UFS Power management levels.
- * Each level is in increasing order of power savings.
- */
-enum ufs_pm_level {
- UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
- UFS_PM_LVL_MAX
-};
-
-struct ufs_pm_lvl_states {
- enum ufs_dev_pwr_mode dev_state;
- enum uic_link_state link_state;
-};
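-
-/*
- * The per-level comments on enum ufs_pm_level above imply a level-to-states
- * mapping table of this shape (a sketch; the driver keeps the actual table
- * in ufshcd.c):
- */
-#if 0	/* illustrative sketch only */
-static const struct ufs_pm_lvl_states ufs_pm_lvl_states_sketch[] = {
-	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},	  /* UFS_PM_LVL_0 */
-	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},	  /* UFS_PM_LVL_1 */
-	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},	  /* UFS_PM_LVL_2 */
-	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},	  /* UFS_PM_LVL_3 */
-	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, /* UFS_PM_LVL_4 */
-	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},	  /* UFS_PM_LVL_5 */
-};
-#endif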
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_req_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @utrd_dma_addr: UTRD dma address for debug
- * @ucd_prdt_dma_addr: PRDT dma address for debug
- * @ucd_rsp_dma_addr: UPIU response dma address for debug
- * @ucd_req_dma_addr: UPIU request dma address for debug
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
- * @issue_time_stamp: time stamp for debug purposes
- * @compl_time_stamp: time stamp for statistics
- * @req_abort_skip: skip request abort task flag
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_req *ucd_req_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- dma_addr_t utrd_dma_addr;
- dma_addr_t ucd_req_dma_addr;
- dma_addr_t ucd_rsp_dma_addr;
- dma_addr_t ucd_prdt_dma_addr;
-
- struct scsi_cmnd *cmd;
- u8 *sense_buffer;
- unsigned int sense_bufflen;
- int scsi_status;
-
- int command_type;
- int task_tag;
- u8 lun; /* UPIU LUN id field is only 8-bit wide */
- bool intr_cmd;
- ktime_t issue_time_stamp;
- ktime_t compl_time_stamp;
-
- bool req_abort_skip;
-};
-
-/**
- * struct ufs_query - holds relevant data structures for query request
- * @request: request upiu and function
- * @descriptor: buffer for sending/receiving descriptor
- * @response: response upiu and response
- */
-struct ufs_query {
- struct ufs_query_req request;
- u8 *descriptor;
- struct ufs_query_res response;
-};
-
-/**
- * struct ufs_dev_cmd - all fields associated with device management commands
- * @type: device management command type - Query, NOP OUT
- * @lock: lock to allow one command at a time
- * @complete: internal commands completion
- */
-struct ufs_dev_cmd {
- enum dev_cmd_type type;
- struct mutex lock;
- struct completion *complete;
- struct ufs_query query;
-};
-
-struct ufs_desc_size {
- int dev_desc;
- int pwr_desc;
- int geom_desc;
- int interc_desc;
- int unit_desc;
- int conf_desc;
- int hlth_desc;
-};
-
-/**
- * struct ufs_clk_info - UFS clock related info
- * @list: list headed by hba->clk_list_head
- * @clk: clock node
- * @name: clock name
- * @max_freq: maximum frequency supported by the clock
- * @min_freq: min frequency that can be used for clock scaling
- * @curr_freq: the frequency that the clock is currently set to
- * @enabled: variable to check against multiple enable/disable
- */
-struct ufs_clk_info {
- struct list_head list;
- struct clk *clk;
- const char *name;
- u32 max_freq;
- u32 min_freq;
- u32 curr_freq;
- bool enabled;
-};
-
-enum ufs_notify_change_status {
- PRE_CHANGE,
- POST_CHANGE,
-};
-
-struct ufs_pa_layer_attr {
- u32 gear_rx;
- u32 gear_tx;
- u32 lane_rx;
- u32 lane_tx;
- u32 pwr_rx;
- u32 pwr_tx;
- u32 hs_rate;
-};
-
-struct ufs_pwr_mode_info {
- bool is_valid;
- struct ufs_pa_layer_attr info;
-};
-
-/**
- * struct ufs_hba_variant_ops - variant specific callbacks
- * @name: variant name
- * @init: called when the driver is initialized
- * @exit: called to cleanup everything done in init
- * @get_ufs_hci_version: called to get UFS HCI version
- * @clk_scale_notify: notifies that clks are scaled up/down
- * @setup_clocks: called before touching any of the controller registers
- * @setup_regulators: called before accessing the host controller
- * @hce_enable_notify: called before and after HCE enable bit is set to allow
- * variant specific Uni-Pro initialization.
- * @link_startup_notify: called before and after Link startup is carried out
- * to allow variant specific Uni-Pro initialization.
- * @pwr_change_notify: called before and after a power mode change
- *			is carried out to allow vendor specific capabilities
- * to be set.
- * @setup_xfer_req: called before any transfer request is issued,
- *                  to allow variant specific configuration
- * @setup_task_mgmt: called before any task management request is issued,
- *                  to allow variant specific configuration
- * @hibern8_notify: called around hibern8 enter/exit
- * @apply_dev_quirks: called to apply device specific quirks
- * @suspend: called during host controller PM callback
- * @resume: called during host controller PM callback
- * @dbg_register_dump: used to dump controller debug information
- * @phy_initialization: used to initialize phys
- * @device_reset: called to issue a reset pulse on the UFS device
- */
-struct ufs_hba_variant_ops {
- const char *name;
- int (*init)(struct ufs_hba *);
- void (*exit)(struct ufs_hba *);
- u32 (*get_ufs_hci_version)(struct ufs_hba *);
- int (*clk_scale_notify)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
- int (*setup_regulators)(struct ufs_hba *, bool);
- int (*hce_enable_notify)(struct ufs_hba *,
- enum ufs_notify_change_status);
- int (*link_startup_notify)(struct ufs_hba *,
- enum ufs_notify_change_status);
- int (*pwr_change_notify)(struct ufs_hba *,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *,
- struct ufs_pa_layer_attr *);
- void (*setup_xfer_req)(struct ufs_hba *, int, bool);
- void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
- void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *hba);
- int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
- int (*resume)(struct ufs_hba *, enum ufs_pm_op);
- void (*dbg_register_dump)(struct ufs_hba *hba);
- int (*phy_initialization)(struct ufs_hba *);
- void (*device_reset)(struct ufs_hba *hba);
-};
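-
-/*
- * A vendor driver fills in only the callbacks it needs. A minimal
- * hypothetical variant (all "example_*" names are invented for
- * illustration) could look like:
- */
-#if 0	/* illustrative sketch only */
-static int example_hce_enable_notify(struct ufs_hba *hba,
-				     enum ufs_notify_change_status status)
-{
-	if (status == PRE_CHANGE) {
-		/* program vendor PHY/controller registers here */
-	}
-	return 0;
-}
-
-static const struct ufs_hba_variant_ops example_vops = {
-	.name = "example",
-	.hce_enable_notify = example_hce_enable_notify,
-};
-#endif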
-
-/* clock gating state */
-enum clk_gating_state {
- CLKS_OFF,
- CLKS_ON,
- REQ_CLKS_OFF,
- REQ_CLKS_ON,
-};
-
-/**
- * struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
- * @ungate_work: worker to turn on clocks when ungating is requested from
- * interrupt context
- * @state: the current clocks state
- * @delay_ms: gating delay in ms
- * @is_suspended: clk gating is suspended when set to 1 which can be used
- * during suspend/resume
- * @delay_attr: sysfs attribute to control delay_ms
- * @enable_attr: sysfs attribute to enable/disable clock gating
- * @is_enabled: Indicates the current status of clock gating
- * @active_reqs: number of requests that are pending and should be waited for
- * completion before gating clocks.
- */
-struct ufs_clk_gating {
- struct delayed_work gate_work;
- struct work_struct ungate_work;
- enum clk_gating_state state;
- unsigned long delay_ms;
- bool is_suspended;
- struct device_attribute delay_attr;
- struct device_attribute enable_attr;
- bool is_enabled;
- int active_reqs;
- struct workqueue_struct *clk_gating_workq;
-};
-
-struct ufs_saved_pwr_info {
- struct ufs_pa_layer_attr info;
- bool is_valid;
-};
-
-/**
- * struct ufs_clk_scaling - UFS clock scaling related data
- * @active_reqs: number of requests that are pending. If this is zero when
- * devfreq ->target() function is called then schedule "suspend_work" to
- * suspend devfreq.
- * @tot_busy_t: Total busy time in current polling window
- * @window_start_t: Start time (in jiffies) of the current polling window
- * @busy_start_t: Start time of current busy period
- * @enable_attr: sysfs attribute to enable/disable clock scaling
- * @saved_pwr_info: UFS power mode may also be changed during scaling, and
- * this keeps track of the previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
- * @is_allowed: tracks if scaling is currently allowed or not
- * @is_busy_started: tracks if busy period has started or not
- * @is_suspended: tracks if devfreq is suspended or not
- */
-struct ufs_clk_scaling {
- int active_reqs;
- unsigned long tot_busy_t;
- unsigned long window_start_t;
- ktime_t busy_start_t;
- struct device_attribute enable_attr;
- struct ufs_saved_pwr_info saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
- bool is_allowed;
- bool is_busy_started;
- bool is_suspended;
-};
-
-/**
- * struct ufs_init_prefetch - contains data that is pre-fetched once during
- * initialization
- * @icc_level: icc level which was read during initialization
- */
-struct ufs_init_prefetch {
- u32 icc_level;
-};
-
-#define UFS_ERR_REG_HIST_LENGTH 8
-/**
- * struct ufs_err_reg_hist - keeps history of errors
- * @pos: index to indicate cyclic buffer position
- * @reg: cyclic buffer for registers value
- * @tstamp: cyclic buffer for time stamp
- */
-struct ufs_err_reg_hist {
- int pos;
- u32 reg[UFS_ERR_REG_HIST_LENGTH];
- ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
-};
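
The ufshcd_update_reg_hist() helper declared later in this header presumably advances this ring; one plausible body, matching the field semantics above:

	reg_hist->reg[reg_hist->pos] = reg;
	reg_hist->tstamp[reg_hist->pos] = ktime_get();
	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;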
-
-/**
- * struct ufs_stats - keeps usage/err statistics
- * @hibern8_exit_cnt: Counter to keep track of the number of hibern8 exits;
- * reset after link startup.
- * @last_hibern8_exit_tstamp: Time stamp of the last hibern8 exit;
- * cleared after the first successful command completion.
- * @pa_err: tracks pa-uic errors
- * @dl_err: tracks dl-uic errors
- * @nl_err: tracks nl-uic errors
- * @tl_err: tracks tl-uic errors
- * @dme_err: tracks dme errors
- * @auto_hibern8_err: tracks auto-hibernate errors
- * @fatal_err: tracks fatal errors
- * @link_startup_err: tracks link-startup errors
- * @resume_err: tracks resume errors
- * @suspend_err: tracks suspend errors
- * @dev_reset: tracks device reset events
- * @host_reset: tracks host reset events
- * @task_abort: tracks task abort events
- */
-struct ufs_stats {
- u32 hibern8_exit_cnt;
- ktime_t last_hibern8_exit_tstamp;
-
- /* uic specific errors */
- struct ufs_err_reg_hist pa_err;
- struct ufs_err_reg_hist dl_err;
- struct ufs_err_reg_hist nl_err;
- struct ufs_err_reg_hist tl_err;
- struct ufs_err_reg_hist dme_err;
-
- /* fatal errors */
- struct ufs_err_reg_hist auto_hibern8_err;
- struct ufs_err_reg_hist fatal_err;
- struct ufs_err_reg_hist link_startup_err;
- struct ufs_err_reg_hist resume_err;
- struct ufs_err_reg_hist suspend_err;
-
- /* abnormal events */
- struct ufs_err_reg_hist dev_reset;
- struct ufs_err_reg_hist host_reset;
- struct ufs_err_reg_hist task_abort;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @dev: device handle
- * @lrb: local reference block
- * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @ufs_version: UFS Version to which controller complies
- * @vops: pointer to variant specific operations
- * @priv: pointer to variant specific private data
- * @irq: Irq number of the controller
- * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for uic command
- * @tmf_tag_set: TMF tag set.
- * @tmf_queue: Used to allocate TMF tags.
- * @pwr_done: completion for power mode change
- * @ufshcd_state: UFSHCD states
- * @eh_flags: Error handling flags
- * @intr_mask: Interrupt Mask Bits
- * @ee_ctrl_mask: Exception event control mask
- * @is_powered: flag to check if HBA is powered
- * @init_prefetch_data: data pre-fetched during initialization
- * @eh_work: Worker to handle UFS errors that require s/w attention
- * @eeh_work: Worker to handle exception events
- * @errors: HBA errors
- * @uic_error: UFS interconnect layer error status
- * @saved_err: sticky error mask
- * @saved_uic_err: sticky UIC error mask
- * @silence_err_logs: flag to silence error logs
- * @dev_cmd: ufs device management command information
- * @last_dme_cmd_tstamp: time stamp of the last completed DME command
- * @auto_bkops_enabled: tracks whether bkops is enabled in the device
- * @vreg_info: UFS device voltage regulator information
- * @clk_list_head: UFS host controller clocks list node head
- * @pwr_info: holds current power mode
- * @max_pwr_info: keeps the device's max valid power mode
- * @desc_size: descriptor sizes reported by device
- * @urgent_bkops_lvl: keeps track of urgent bkops level for device
- * @is_urgent_bkops_lvl_checked: tracks whether the urgent bkops level for
- * the device is known or not.
- * @scsi_block_reqs_cnt: reference counting for scsi block requests
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct device *dev;
- struct request_queue *cmd_queue;
- /*
- * This field keeps a reference to the "scsi_device" corresponding to
- * the "UFS device" W-LU.
- */
- struct scsi_device *sdev_ufs_device;
-
- enum ufs_dev_pwr_mode curr_dev_pwr_mode;
- enum uic_link_state uic_link_state;
- /* Desired UFS power management level during runtime PM */
- enum ufs_pm_level rpm_lvl;
- /* Desired UFS power management level during system PM */
- enum ufs_pm_level spm_lvl;
- struct device_attribute rpm_lvl_attr;
- struct device_attribute spm_lvl_attr;
- int pm_op_in_progress;
-
- /* Auto-Hibernate Idle Timer register value */
- u32 ahit;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 ufs_version;
- const struct ufs_hba_variant_ops *vops;
- void *priv;
- unsigned int irq;
- bool is_irq_enabled;
- enum ufs_ref_clk_freq dev_ref_clk_freq;
-
- /* Interrupt aggregation support is broken */
- #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
-
- /*
- * a delay before each DME command is required, as the UniPro
- * layer has shown instabilities
- */
- #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2
-
- /*
- * Enable this quirk if the UFS host controller has issues processing
- * LCC (Line Control Command) coming from the device. When this quirk
- * is enabled, the host controller driver should disable LCC
- * transmission on the UFS device (by clearing the device's
- * TX_LCC_ENABLE attribute to 0).
- */
- #define UFSHCD_QUIRK_BROKEN_LCC 0x4
-
- /*
- * The attribute PA_RXHSUNTERMCAP specifies whether or not the
- * inbound link supports an unterminated line in HS mode. Setting this
- * attribute to 1 fixes switching to HS gear.
- */
- #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8
-
- /*
- * This quirk needs to be enabled if the host controller only allows
- * accessing the peer dme attributes in AUTO mode (FAST AUTO or
- * SLOW AUTO).
- */
- #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10
-
- /*
- * This quirk needs to be enabled if the host controller doesn't
- * advertise the correct version in UFS_VER register. If this quirk
- * is enabled, standard UFS host driver will call the vendor specific
- * ops (get_ufs_hci_version) to get the correct version.
- */
- #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20
-
- /*
- * This quirk needs to be enabled if the host controller interprets
- * the values of PRDTO and PRDTL in the UTRD with byte granularity.
- */
- #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
-
- /*
- * Clear handling for the transfer/task request list registers is
- * inverted on this controller.
- */
- #define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR 0x100
-
- /*
- * This quirk needs to be enabled if the host controller doesn't allow
- * the interrupt aggregation timer and counter to be reset by s/w.
- */
- #define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR 0x200
-
- /*
- * This quirk needs to be enabled if the host controller cannot be
- * enabled via HCE register.
- */
- #define UFSHCI_QUIRK_BROKEN_HCE 0x400
- unsigned int quirks; /* Deviations from standard UFSHCI spec. */
-
- /* Device deviations from standard UFS device spec. */
- unsigned int dev_quirks;
-
- struct blk_mq_tag_set tmf_tag_set;
- struct request_queue *tmf_queue;
-
- struct uic_command *active_uic_cmd;
- struct mutex uic_cmd_mutex;
- struct completion *uic_async_done;
-
- u32 ufshcd_state;
- u32 eh_flags;
- u32 intr_mask;
- u16 ee_ctrl_mask;
- bool is_powered;
- struct ufs_init_prefetch init_prefetch_data;
-
- /* Work Queues */
- struct work_struct eh_work;
- struct work_struct eeh_work;
-
- /* HBA Errors */
- u32 errors;
- u32 uic_error;
- u32 saved_err;
- u32 saved_uic_err;
- struct ufs_stats ufs_stats;
- bool silence_err_logs;
-
- /* Device management request data */
- struct ufs_dev_cmd dev_cmd;
- ktime_t last_dme_cmd_tstamp;
-
- /* Keeps information of the UFS device connected to this host */
- struct ufs_dev_info dev_info;
- bool auto_bkops_enabled;
- struct ufs_vreg_info vreg_info;
- struct list_head clk_list_head;
-
- bool wlun_dev_clr_ua;
-
- /* Number of request aborts */
- int req_abort_count;
-
- /* Number of lanes available (1 or 2) for Rx/Tx */
- u32 lanes_per_direction;
- struct ufs_pa_layer_attr pwr_info;
- struct ufs_pwr_mode_info max_pwr_info;
-
- struct ufs_clk_gating clk_gating;
- /* Control to enable/disable host capabilities */
- u32 caps;
- /* Allow dynamic clk gating */
-#define UFSHCD_CAP_CLK_GATING (1 << 0)
- /* Allow hibern8 with clk gating */
-#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
- /* Allow dynamic clk scaling */
-#define UFSHCD_CAP_CLK_SCALING (1 << 2)
- /* Allow auto bkops to be enabled during runtime suspend */
-#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
- /*
- * This capability allows the host controller driver to use the UFS HCI's
- * interrupt aggregation capability.
- * CAUTION: Enabling this might reduce overall UFS throughput.
- */
-#define UFSHCD_CAP_INTR_AGGR (1 << 4)
- /*
- * This capability allows the device auto-bkops to be always enabled
- * except during suspend (both runtime and system suspend).
- * Enabling this capability means that the device will always be allowed
- * to do background operations when it's active, but it might degrade
- * the performance of ongoing read/write operations.
- */
-#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
- /*
- * This capability allows the host controller driver to enable
- * runtime power management automatically, instead of waiting
- * for userspace to control the power management.
- */
-#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
-
- struct devfreq *devfreq;
- struct ufs_clk_scaling clk_scaling;
- bool is_sys_suspended;
-
- enum bkops_status urgent_bkops_lvl;
- bool is_urgent_bkops_lvl_checked;
-
- struct rw_semaphore clk_scaling_lock;
- struct ufs_desc_size desc_size;
- atomic_t scsi_block_reqs_cnt;
-
- struct device bsg_dev;
- struct request_queue *bsg_queue;
-};
-
-/* Returns true if clocks can be gated. Otherwise false */
-static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_CLK_GATING;
-}
-static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
-}
-static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_CLK_SCALING;
-}
-static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
-}
-static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
-}
-
-static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
-{
-/* The DWC UFS core has the interrupt aggregation feature, but it is not detectable */
-#ifndef CONFIG_SCSI_UFS_DWC
- if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
- return true;
- else
- return false;
-#else
-	return true;
-#endif
-}
-
-static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
-{
- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
-}
-
-#define ufshcd_writel(hba, val, reg) \
- writel((val), (hba)->mmio_base + (reg))
-#define ufshcd_readl(hba, reg) \
- readl((hba)->mmio_base + (reg))
-
-/**
- * ufshcd_rmwl - read-modify-write a register
- * @hba - per adapter instance
- * @mask - mask to apply on read value
- * @val - actual value to write
- * @reg - register address
- */
-static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
-{
- u32 tmp;
-
- tmp = ufshcd_readl(hba, reg);
- tmp &= ~mask;
- tmp |= (val & mask);
- ufshcd_writel(hba, tmp, reg);
-}
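
As a usage illustration (the field value is made up), updating only the 10-bit timer field of the auto-hibernate register without touching the scale bits could be written as:

	ufshcd_rmwl(hba, UFSHCI_AHIBERN8_TIMER_MASK, 150,
		    REG_AUTO_HIBERNATE_IDLE_TIMER);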
-
-int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
-int ufshcd_hba_enable(struct ufs_hba *hba);
-int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
-int ufshcd_make_hba_operational(struct ufs_hba *hba);
-void ufshcd_remove(struct ufs_hba *);
-int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep);
-void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
-void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg);
-
-static inline void check_upiu_size(void)
-{
- BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
- GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
-}
-
-/**
- * ufshcd_set_variant - set variant specific data to the hba
- * @hba - per adapter instance
- * @variant - pointer to variant specific data
- */
-static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
-{
- BUG_ON(!hba);
- hba->priv = variant;
-}
-
-/**
- * ufshcd_get_variant - get variant specific data from the hba
- * @hba - per adapter instance
- */
-static inline void *ufshcd_get_variant(struct ufs_hba *hba)
-{
- BUG_ON(!hba);
- return hba->priv;
-}
-static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
- struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
-}
-
-extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
-extern int ufshcd_runtime_resume(struct ufs_hba *hba);
-extern int ufshcd_runtime_idle(struct ufs_hba *hba);
-extern int ufshcd_system_suspend(struct ufs_hba *hba);
-extern int ufshcd_system_resume(struct ufs_hba *hba);
-extern int ufshcd_shutdown(struct ufs_hba *hba);
-extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
- u8 attr_set, u32 mib_val, u8 peer);
-extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
- u32 *mib_val, u8 peer);
-extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
-
-/* UIC command interfaces for DME primitives */
-#define DME_LOCAL 0
-#define DME_PEER 1
-#define ATTR_SET_NOR 0 /* NORMAL */
-#define ATTR_SET_ST 1 /* STATIC */
-
-static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
- mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
- mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
- mib_val, DME_PEER);
-}
-
-static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
- mib_val, DME_PEER);
-}
-
-static inline int ufshcd_dme_get(struct ufs_hba *hba,
- u32 attr_sel, u32 *mib_val)
-{
- return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
- u32 attr_sel, u32 *mib_val)
-{
- return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
-}
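
A hedged usage sketch of these DME wrappers (the PA_* attributes come from unipro.h further below; UFS_HS_G3 is an illustrative gear value):

	u32 peer_rx_gear;
	int ret;

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G3);
	if (!ret)
		ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
					  &peer_rx_gear);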
-
-static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
-{
- return (pwr_info->pwr_rx == FAST_MODE ||
- pwr_info->pwr_rx == FASTAUTO_MODE) &&
- (pwr_info->pwr_tx == FAST_MODE ||
- pwr_info->pwr_tx == FASTAUTO_MODE);
-}
-
-/* Expose Query-Request API */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len);
-int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size);
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
-int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res);
-
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii);
-
-int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
-
-int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
-
-int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
-
-/* Wrapper functions for safely calling variant operations */
-static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
-{
- if (hba->vops)
- return hba->vops->name;
- return "";
-}
-
-static inline int ufshcd_vops_init(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->init)
- return hba->vops->init(hba);
-
- return 0;
-}
-
-static inline void ufshcd_vops_exit(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->exit)
- return hba->vops->exit(hba);
-}
-
-static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->get_ufs_hci_version)
- return hba->vops->get_ufs_hci_version(hba);
-
- return ufshcd_readl(hba, REG_UFS_VERSION);
-}
-
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
- bool up, enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
- return 0;
-}
-
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on, status);
- return 0;
-}
-
-static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
-{
- if (hba->vops && hba->vops->setup_regulators)
- return hba->vops->setup_regulators(hba, status);
-
- return 0;
-}
-
-static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
- bool status)
-{
- if (hba->vops && hba->vops->hce_enable_notify)
- return hba->vops->hce_enable_notify(hba, status);
-
- return 0;
-}
-static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
- bool status)
-{
- if (hba->vops && hba->vops->link_startup_notify)
- return hba->vops->link_startup_notify(hba, status);
-
- return 0;
-}
-
-static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- bool status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- if (hba->vops && hba->vops->pwr_change_notify)
- return hba->vops->pwr_change_notify(hba, status,
- dev_max_params, dev_req_params);
-
- return -ENOTSUPP;
-}
-
-static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
- bool is_scsi_cmd)
-{
- if (hba->vops && hba->vops->setup_xfer_req)
- return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
-}
-
-static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
- int tag, u8 tm_function)
-{
- if (hba->vops && hba->vops->setup_task_mgmt)
- return hba->vops->setup_task_mgmt(hba, tag, tm_function);
-}
-
-static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
- enum uic_cmd_dme cmd,
- enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->hibern8_notify)
- return hba->vops->hibern8_notify(hba, cmd, status);
-}
-
-static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->apply_dev_quirks)
- return hba->vops->apply_dev_quirks(hba);
- return 0;
-}
-
-static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
-{
- if (hba->vops && hba->vops->suspend)
- return hba->vops->suspend(hba, op);
-
- return 0;
-}
-
-static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
-{
- if (hba->vops && hba->vops->resume)
- return hba->vops->resume(hba, op);
-
- return 0;
-}
-
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->dbg_register_dump)
- hba->vops->dbg_register_dump(hba);
-}
-
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->device_reset) {
- hba->vops->device_reset(hba);
- ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
- }
-}
-
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
-
-/*
- * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
- * @scsi_lun: scsi LUN id
- *
- * Returns UPIU LUN id
- */
-static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
-{
- if (scsi_is_wlun(scsi_lun))
- return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
- | UFS_UPIU_WLUN_ID;
- else
- return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
-}
-
-int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix);
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/scsi/ufs/ufshci-dwc.h
deleted file mode 100644
index 6c290e272106..000000000000
--- a/drivers/scsi/ufs/ufshci-dwc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UFS Host driver for Synopsys DesignWare Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#ifndef _UFSHCI_DWC_H
-#define _UFSHCI_DWC_H
-
-/* DWC HC UFSHCI specific Registers */
-enum dwc_specific_registers {
- DWC_UFS_REG_HCLKDIV = 0xFC,
-};
-
-/* Clock Divider Values: Hex equivalent of frequency in MHz */
-enum clk_div_values {
- DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e,
- DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d,
- DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8,
-};
-
-/* Selector Index */
-enum selector_index {
- SELIND_LN0_TX = 0x00,
- SELIND_LN1_TX = 0x01,
- SELIND_LN0_RX = 0x04,
- SELIND_LN1_RX = 0x05,
-};
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
deleted file mode 100644
index c2961d37cc1c..000000000000
--- a/drivers/scsi/ufs/ufshci.h
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- */
-
-#ifndef _UFSHCI_H
-#define _UFSHCI_H
-
-enum {
- TASK_REQ_UPIU_SIZE_DWORDS = 8,
- TASK_RSP_UPIU_SIZE_DWORDS = 8,
- ALIGNED_UPIU_SIZE = 512,
-};
-
-/* UFSHCI Registers */
-enum {
- REG_CONTROLLER_CAPABILITIES = 0x00,
- REG_UFS_VERSION = 0x08,
- REG_CONTROLLER_DEV_ID = 0x10,
- REG_CONTROLLER_PROD_ID = 0x14,
- REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
- REG_INTERRUPT_STATUS = 0x20,
- REG_INTERRUPT_ENABLE = 0x24,
- REG_CONTROLLER_STATUS = 0x30,
- REG_CONTROLLER_ENABLE = 0x34,
- REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
- REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
- REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
- REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
- REG_UIC_ERROR_CODE_DME = 0x48,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
- REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
- REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
- REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
- REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
- REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
- REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
- REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
- REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
- REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
- REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
- REG_UIC_COMMAND = 0x90,
- REG_UIC_COMMAND_ARG_1 = 0x94,
- REG_UIC_COMMAND_ARG_2 = 0x98,
- REG_UIC_COMMAND_ARG_3 = 0x9C,
-
- UFSHCI_REG_SPACE_SIZE = 0xA0,
-
- REG_UFS_CCAP = 0x100,
- REG_UFS_CRYPTOCAP = 0x104,
-
- UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
-};
-
-/* Controller capability masks */
-enum {
- MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
- MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
- MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
- MASK_64_ADDRESSING_SUPPORT = 0x01000000,
- MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
- MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
-};
-
-#define UFS_MASK(mask, offset) ((mask) << (offset))
-
-/* UFS Version 08h */
-#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
-#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
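
Decoding the VER register with these masks might look like this (a sketch, not the driver's code):

	u32 ver = ufshcd_readl(hba, REG_UFS_VERSION);
	u16 major = (ver & MAJOR_VERSION_NUM_MASK) >> 16;
	u16 minor = ver & MINOR_VERSION_NUM_MASK;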
-
-/* Controller UFSHCI version */
-enum {
- UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
- UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
- UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
- UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
-};
-
-/*
- * HCDDID - Host Controller Identification Descriptor
- * - Device ID and Device Class 10h
- */
-#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
-#define DEVICE_ID UFS_MASK(0xFF, 24)
-
-/*
- * HCPMID - Host Controller Identification Descriptor
- * - Product/Manufacturer ID 14h
- */
-#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
-#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
-
-/* AHIT - Auto-Hibernate Idle Timer */
-#define UFSHCI_AHIBERN8_TIMER_MASK GENMASK(9, 0)
-#define UFSHCI_AHIBERN8_SCALE_MASK GENMASK(12, 10)
-#define UFSHCI_AHIBERN8_SCALE_FACTOR 10
-#define UFSHCI_AHIBERN8_MAX (1023 * 100000)
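
Composing an AHIT value from these fields could use the generic bitfield helpers (assuming <linux/bitfield.h> is available; 150 and 3 are illustrative timer and scale values):

	hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);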
-
-/*
- * IS - Interrupt Status - 20h
- */
-#define UTP_TRANSFER_REQ_COMPL 0x1
-#define UIC_DME_END_PT_RESET 0x2
-#define UIC_ERROR 0x4
-#define UIC_TEST_MODE 0x8
-#define UIC_POWER_MODE 0x10
-#define UIC_HIBERNATE_EXIT 0x20
-#define UIC_HIBERNATE_ENTER 0x40
-#define UIC_LINK_LOST 0x80
-#define UIC_LINK_STARTUP 0x100
-#define UTP_TASK_REQ_COMPL 0x200
-#define UIC_COMMAND_COMPL 0x400
-#define DEVICE_FATAL_ERROR 0x800
-#define CONTROLLER_FATAL_ERROR 0x10000
-#define SYSTEM_BUS_FATAL_ERROR 0x20000
-
-#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
- UIC_HIBERNATE_EXIT)
-
-#define UFSHCD_UIC_PWR_MASK (UFSHCD_UIC_HIBERN8_MASK |\
- UIC_POWER_MODE)
-
-#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
-
-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
- DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR)
-
-#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR)
-
-/* HCS - Host Controller Status 30h */
-#define DEVICE_PRESENT 0x1
-#define UTP_TRANSFER_REQ_LIST_READY 0x2
-#define UTP_TASK_REQ_LIST_READY 0x4
-#define UIC_COMMAND_READY 0x8
-#define HOST_ERROR_INDICATOR 0x10
-#define DEVICE_ERROR_INDICATOR 0x20
-#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
-
-#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
- UTP_TASK_REQ_LIST_READY |\
- UIC_COMMAND_READY)
-
-enum {
- PWR_OK = 0x0,
- PWR_LOCAL = 0x01,
- PWR_REMOTE = 0x02,
- PWR_BUSY = 0x03,
- PWR_ERROR_CAP = 0x04,
- PWR_FATAL_ERROR = 0x05,
-};
-
-/* HCE - Host Controller Enable 34h */
-#define CONTROLLER_ENABLE 0x1
-#define CONTROLLER_DISABLE 0x0
-#define CRYPTO_GENERAL_ENABLE 0x2
-
-/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
-#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
-#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
-#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
-
-/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
-#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
-#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
-#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
-#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
-#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
-#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
-#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
-#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
-
-/* UECN - Host UIC Error Code Network Layer 40h */
-#define UIC_NETWORK_LAYER_ERROR 0x80000000
-#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
-#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
-#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
-#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
-
-/* UECT - Host UIC Error Code Transport Layer 44h */
-#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
-#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
-#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
-#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
-#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
-#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
-#define UIC_TRANSPORT_BAD_TC 0x10
-#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
-#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
-
-/* UECDME - Host UIC Error Code DME 48h */
-#define UIC_DME_ERROR 0x80000000
-#define UIC_DME_ERROR_CODE_MASK 0x1
-
-/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
-#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
-#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
-#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
-#define INT_AGGR_STATUS_BIT 0x100000
-#define INT_AGGR_PARAM_WRITE 0x1000000
-#define INT_AGGR_ENABLE 0x80000000
-
-/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
-#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
-
-/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
-#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
-
-/* UICCMD - UIC Command */
-#define COMMAND_OPCODE_MASK 0xFF
-#define GEN_SELECTOR_INDEX_MASK 0xFFFF
-
-#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
-#define RESET_LEVEL 0xFF
-
-#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
-#define CONFIG_RESULT_CODE_MASK 0xFF
-#define GENERIC_ERROR_CODE_MASK 0xFF
-
-/* GenSelectorIndex calculation macros for M-PHY attributes */
-#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
-#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
-
-#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
- ((sel) & 0xFFFF))
-#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
-#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
-#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
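
To address a per-lane M-PHY attribute, these macros combine with the GenSelectorIndex helpers above, e.g. reading the local TX gear on lane 0 (a sketch; `val` is a u32):

	ufshcd_dme_get(hba,
		       UIC_ARG_MIB_SEL(TX_HSGEAR,
				       UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
		       &val);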
-
-/* Link Status */
-enum link_status {
- UFSHCD_LINK_IS_DOWN = 1,
- UFSHCD_LINK_IS_UP = 2,
-};
-
-/* UIC Commands */
-enum uic_cmd_dme {
- UIC_CMD_DME_GET = 0x01,
- UIC_CMD_DME_SET = 0x02,
- UIC_CMD_DME_PEER_GET = 0x03,
- UIC_CMD_DME_PEER_SET = 0x04,
- UIC_CMD_DME_POWERON = 0x10,
- UIC_CMD_DME_POWEROFF = 0x11,
- UIC_CMD_DME_ENABLE = 0x12,
- UIC_CMD_DME_RESET = 0x14,
- UIC_CMD_DME_END_PT_RST = 0x15,
- UIC_CMD_DME_LINK_STARTUP = 0x16,
- UIC_CMD_DME_HIBER_ENTER = 0x17,
- UIC_CMD_DME_HIBER_EXIT = 0x18,
- UIC_CMD_DME_TEST_MODE = 0x1A,
-};
-
-/* UIC Config result code / Generic error code */
-enum {
- UIC_CMD_RESULT_SUCCESS = 0x00,
- UIC_CMD_RESULT_INVALID_ATTR = 0x01,
- UIC_CMD_RESULT_FAILURE = 0x01,
- UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
- UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
- UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
- UIC_CMD_RESULT_BAD_INDEX = 0x05,
- UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
- UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
- UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
- UIC_CMD_RESULT_BUSY = 0x09,
- UIC_CMD_RESULT_DME_FAILURE = 0x0A,
-};
-
-#define MASK_UIC_COMMAND_RESULT 0xFF
-
-#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
-#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
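
Programming the aggregation control register from these helpers might then be a single write (threshold and timeout values are illustrative; cf. UFSHCD_CAP_INTR_AGGR in ufshcd.h):

	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
			   INT_AGGR_COUNTER_THLD_VAL(31) |
			   INT_AGGR_TIMEOUT_VAL(0x14),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);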
-
-/* Interrupt disable masks */
-enum {
- /* Interrupt disable mask for UFSHCI v1.0 */
- INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
- INTERRUPT_MASK_RW_VER_10 = 0x30000,
-
- /* Interrupt disable mask for UFSHCI v1.1 */
- INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
-
- /* Interrupt disable mask for UFSHCI v2.1 */
- INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
-};
-
-/*
- * Request Descriptor Definitions
- */
-
-/* Transfer request command type */
-enum {
- UTP_CMD_TYPE_SCSI = 0x0,
- UTP_CMD_TYPE_UFS = 0x1,
- UTP_CMD_TYPE_DEV_MANAGE = 0x2,
-};
-
-/* Command type required to accommodate UFS 2.0 */
-enum {
- UTP_CMD_TYPE_UFS_STORAGE = 0x1,
-};
-
-enum {
- UTP_SCSI_COMMAND = 0x00000000,
- UTP_NATIVE_UFS_COMMAND = 0x10000000,
- UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
-};
-
-/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
-};
-
-/* Overall command status values */
-enum {
- OCS_SUCCESS = 0x0,
- OCS_INVALID_CMD_TABLE_ATTR = 0x1,
- OCS_INVALID_PRDT_ATTR = 0x2,
- OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
- OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
- OCS_PEER_COMM_FAILURE = 0x5,
- OCS_ABORTED = 0x6,
- OCS_FATAL_ERROR = 0x7,
- OCS_INVALID_COMMAND_STATUS = 0x0F,
- MASK_OCS = 0x0F,
-};
-
-/* The data byte count field in a PRDT entry can describe at most 256KB */
-#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024)
-/* The granularity of the data byte count field in the PRDT is 32-bit (4 bytes) */
-#define PRDT_DATA_BYTE_COUNT_PAD 4
-
-/**
- * struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @base_addr: Lower 32bit physical address DW-0
- * @upper_addr: Upper 32bit physical address DW-1
- * @reserved: Reserved for future use DW-2
- * @size: size of physical segment DW-3
- */
-struct ufshcd_sg_entry {
- __le32 base_addr;
- __le32 upper_addr;
- __le32 reserved;
- __le32 size;
-};
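
Filling the PRDT from a DMA-mapped scatterlist could follow this pattern (a sketch; whether @size stores the byte count or the byte count minus one is controller-dependent, cf. UFSHCD_QUIRK_PRDT_BYTE_GRAN in ufshcd.h):

	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, sg_segments, i) {
		u64 addr = sg_dma_address(sg);

		prd[i].base_addr  = cpu_to_le32(lower_32_bits(addr));
		prd[i].upper_addr = cpu_to_le32(upper_32_bits(addr));
		prd[i].reserved   = 0;
		prd[i].size       = cpu_to_le32(sg_dma_len(sg) - 1);
	}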
-
-/**
- * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
- * @command_upiu: Command UPIU Frame address
- * @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
- */
-struct utp_transfer_cmd_desc {
- u8 command_upiu[ALIGNED_UPIU_SIZE];
- u8 response_upiu[ALIGNED_UPIU_SIZE];
- struct ufshcd_sg_entry prd_table[SG_ALL];
-};
-
-/**
- * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword_0: Descriptor Header DW0
- * @dword_1: Descriptor Header DW1
- * @dword_2: Descriptor Header DW2
- * @dword_3: Descriptor Header DW3
- */
-struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
-};
-
-/**
- * struct utp_transfer_req_desc - UTRD structure
- * @header: UTRD header DW-0 to DW-3
- * @command_desc_base_addr_lo: UCD base address low DW-4
- * @command_desc_base_addr_hi: UCD base address high DW-5
- * @response_upiu_length: response UPIU length DW-6
- * @response_upiu_offset: response UPIU offset DW-6
- * @prd_table_length: Physical region descriptor length DW-7
- * @prd_table_offset: Physical region descriptor offset DW-7
- */
-struct utp_transfer_req_desc {
-
- /* DW 0-3 */
- struct request_desc_header header;
-
- /* DW 4-5 */
- __le32 command_desc_base_addr_lo;
- __le32 command_desc_base_addr_hi;
-
- /* DW 6 */
- __le16 response_upiu_length;
- __le16 response_upiu_offset;
-
- /* DW 7 */
- __le16 prd_table_length;
- __le16 prd_table_offset;
-};
-
-/*
- * UTMRD structure.
- */
-struct utp_task_req_desc {
- /* DW 0-3 */
- struct request_desc_header header;
-
- /* DW 4-11 - Task request UPIU structure */
- struct utp_upiu_header req_header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 __reserved1[2];
-
- /* DW 12-19 - Task Management Response UPIU structure */
- struct utp_upiu_header rsp_header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 __reserved2[3];
-};
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
deleted file mode 100644
index 3dc4d8b76509..000000000000
--- a/drivers/scsi/ufs/unipro.h
+++ /dev/null
@@ -1,287 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * drivers/scsi/ufs/unipro.h
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- */
-
-#ifndef _UNIPRO_H_
-#define _UNIPRO_H_
-
-/*
- * M-TX Configuration Attributes
- */
-#define TX_HIBERN8TIME_CAPABILITY 0x000F
-#define TX_MODE 0x0021
-#define TX_HSRATE_SERIES 0x0022
-#define TX_HSGEAR 0x0023
-#define TX_PWMGEAR 0x0024
-#define TX_AMPLITUDE 0x0025
-#define TX_HS_SLEWRATE 0x0026
-#define TX_SYNC_SOURCE 0x0027
-#define TX_HS_SYNC_LENGTH 0x0028
-#define TX_HS_PREPARE_LENGTH 0x0029
-#define TX_LS_PREPARE_LENGTH 0x002A
-#define TX_HIBERN8_CONTROL 0x002B
-#define TX_LCC_ENABLE 0x002C
-#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
-#define TX_BYPASS_8B10B_ENABLE 0x002E
-#define TX_DRIVER_POLARITY 0x002F
-#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
-#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
-#define TX_LCC_SEQUENCER 0x0032
-#define TX_MIN_ACTIVATETIME 0x0033
-#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
-#define TX_REFCLKFREQ 0x00EB
-#define TX_CFGCLKFREQVAL 0x00EC
-#define CFGEXTRATTR 0x00F0
-#define DITHERCTRL2 0x00F1
-
-/*
- * M-RX Configuration Attributes
- */
-#define RX_MODE 0x00A1
-#define RX_HSRATE_SERIES 0x00A2
-#define RX_HSGEAR 0x00A3
-#define RX_PWMGEAR 0x00A4
-#define RX_LS_TERMINATED_ENABLE 0x00A5
-#define RX_HS_UNTERMINATED_ENABLE 0x00A6
-#define RX_ENTER_HIBERN8 0x00A7
-#define RX_BYPASS_8B10B_ENABLE 0x00A8
-#define RX_TERMINATION_FORCE_ENABLE 0x00A9
-#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
-#define RX_HIBERN8TIME_CAPABILITY 0x0092
-#define RX_REFCLKFREQ 0x00EB
-#define RX_CFGCLKFREQVAL 0x00EC
-#define CFGWIDEINLN 0x00F0
-#define CFGRXCDR8 0x00BA
-#define ENARXDIRECTCFG4 0x00F2
-#define CFGRXOVR8 0x00BD
-#define RXDIRECTCTRL2 0x00C7
-#define ENARXDIRECTCFG3 0x00F3
-#define RXCALCTRL 0x00B4
-#define ENARXDIRECTCFG2 0x00F4
-#define CFGRXOVR4 0x00E9
-#define RXSQCTRL 0x00B5
-#define CFGRXOVR6 0x00BF
-
-#define is_mphy_tx_attr(attr) ((attr) < RX_MODE)
-#define RX_MIN_ACTIVATETIME_UNIT_US 100
-#define HIBERN8TIME_UNIT_US 100
-
-/*
- * Common Block Attributes
- */
-#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B)
-#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF)
-#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD)
-#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6)
-#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA)
-#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0)
-#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1)
-#define CBDCOCTRL5 UNIPRO_CB_OFFSET(0x00F3)
-#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8)
-#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB)
-
-#define UNIPRO_CB_OFFSET(x) (0x8000 | (x))
-
-/*
- * PHY Adapter attributes
- */
-#define PA_ACTIVETXDATALANES 0x1560
-#define PA_ACTIVERXDATALANES 0x1580
-#define PA_TXTRAILINGCLOCKS 0x1564
-#define PA_PHY_TYPE 0x1500
-#define PA_AVAILTXDATALANES 0x1520
-#define PA_AVAILRXDATALANES 0x1540
-#define PA_MINRXTRAILINGCLOCKS 0x1543
-#define PA_TXPWRSTATUS 0x1567
-#define PA_RXPWRSTATUS 0x1582
-#define PA_TXFORCECLOCK 0x1562
-#define PA_TXPWRMODE 0x1563
-#define PA_LEGACYDPHYESCDL 0x1570
-#define PA_MAXTXSPEEDFAST 0x1521
-#define PA_MAXTXSPEEDSLOW 0x1522
-#define PA_MAXRXSPEEDFAST 0x1541
-#define PA_MAXRXSPEEDSLOW 0x1542
-#define PA_TXLINKSTARTUPHS 0x1544
-#define PA_LOCAL_TX_LCC_ENABLE 0x155E
-#define PA_TXSPEEDFAST 0x1565
-#define PA_TXSPEEDSLOW 0x1566
-#define PA_REMOTEVERINFO 0x15A0
-#define PA_TXGEAR 0x1568
-#define PA_TXTERMINATION 0x1569
-#define PA_HSSERIES 0x156A
-#define PA_PWRMODE 0x1571
-#define PA_RXGEAR 0x1583
-#define PA_RXTERMINATION 0x1584
-#define PA_MAXRXPWMGEAR 0x1586
-#define PA_MAXRXHSGEAR 0x1587
-#define PA_RXHSUNTERMCAP 0x15A5
-#define PA_RXLSTERMCAP 0x15A6
-#define PA_GRANULARITY 0x15AA
-#define PA_PACPREQTIMEOUT 0x1590
-#define PA_PACPREQEOBTIMEOUT 0x1591
-#define PA_HIBERN8TIME 0x15A7
-#define PA_LOCALVERINFO 0x15A9
-#define PA_TACTIVATE 0x15A8
-#define PA_PACPFRAMECOUNT 0x15C0
-#define PA_PACPERRORCOUNT 0x15C1
-#define PA_PHYTESTCONTROL 0x15C2
-#define PA_PWRMODEUSERDATA0 0x15B0
-#define PA_PWRMODEUSERDATA1 0x15B1
-#define PA_PWRMODEUSERDATA2 0x15B2
-#define PA_PWRMODEUSERDATA3 0x15B3
-#define PA_PWRMODEUSERDATA4 0x15B4
-#define PA_PWRMODEUSERDATA5 0x15B5
-#define PA_PWRMODEUSERDATA6 0x15B6
-#define PA_PWRMODEUSERDATA7 0x15B7
-#define PA_PWRMODEUSERDATA8 0x15B8
-#define PA_PWRMODEUSERDATA9 0x15B9
-#define PA_PWRMODEUSERDATA10 0x15BA
-#define PA_PWRMODEUSERDATA11 0x15BB
-#define PA_CONNECTEDTXDATALANES 0x1561
-#define PA_CONNECTEDRXDATALANES 0x1581
-#define PA_LOGICALLANEMAP 0x15A1
-#define PA_SLEEPNOCONFIGTIME 0x15A2
-#define PA_STALLNOCONFIGTIME 0x15A3
-#define PA_SAVECONFIGTIME 0x15A4
-
-#define PA_TACTIVATE_TIME_UNIT_US 10
-#define PA_HIBERN8_TIME_UNIT_US 100
-
-/* Other attributes */
-#define VS_MPHYCFGUPDT 0xD085
-#define VS_DEBUGOMC 0xD09E
-#define VS_POWERSTATE 0xD083
-
-#define PA_GRANULARITY_MIN_VAL 1
-#define PA_GRANULARITY_MAX_VAL 6
-
-/* PHY Adapter Protocol Constants */
-#define PA_MAXDATALANES 4
-
-#define DL_FC0ProtectionTimeOutVal_Default 8191
-#define DL_TC0ReplayTimeOutVal_Default 65535
-#define DL_AFC0ReqTimeOutVal_Default 32767
-#define DL_FC1ProtectionTimeOutVal_Default 8191
-#define DL_TC1ReplayTimeOutVal_Default 65535
-#define DL_AFC1ReqTimeOutVal_Default 32767
-
-#define DME_LocalFC0ProtectionTimeOutVal 0xD041
-#define DME_LocalTC0ReplayTimeOutVal 0xD042
-#define DME_LocalAFC0ReqTimeOutVal 0xD043
-
-/* PA power modes */
-enum {
- FAST_MODE = 1,
- SLOW_MODE = 2,
- FASTAUTO_MODE = 4,
- SLOWAUTO_MODE = 5,
- UNCHANGED = 7,
-};
-
-/* PA TX/RX Frequency Series */
-enum {
- PA_HS_MODE_A = 1,
- PA_HS_MODE_B = 2,
-};
-
-enum ufs_pwm_gear_tag {
- UFS_PWM_DONT_CHANGE, /* Don't change Gear */
- UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
- UFS_PWM_G2, /* PWM Gear 2 */
- UFS_PWM_G3, /* PWM Gear 3 */
- UFS_PWM_G4, /* PWM Gear 4 */
- UFS_PWM_G5, /* PWM Gear 5 */
- UFS_PWM_G6, /* PWM Gear 6 */
- UFS_PWM_G7, /* PWM Gear 7 */
-};
-
-enum ufs_hs_gear_tag {
- UFS_HS_DONT_CHANGE, /* Don't change Gear */
- UFS_HS_G1, /* HS Gear 1 (default for reset) */
- UFS_HS_G2, /* HS Gear 2 */
- UFS_HS_G3, /* HS Gear 3 */
-};
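
Putting the mode, rate, and gear values together, a power-mode request might be assembled as below (the ufs_pa_layer_attr field names are assumed from the driver core and the values are illustrative):

	struct ufs_pa_layer_attr pwr = {
		.gear_rx = UFS_HS_G3, .gear_tx = UFS_HS_G3,
		.lane_rx = 2, .lane_tx = 2,
		.pwr_rx = FAST_MODE, .pwr_tx = FAST_MODE,
		.hs_rate = PA_HS_MODE_B,
	};
	int ret = ufshcd_config_pwr_mode(hba, &pwr);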
-
-enum ufs_unipro_ver {
- UFS_UNIPRO_VER_RESERVED = 0,
- UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
- UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
- UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
- UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
- /* UniPro version field mask in PA_LOCALVERINFO */
- UFS_UNIPRO_VER_MASK = 0xF,
-};
-
-/*
- * Data Link Layer Attributes
- */
-#define DL_TC0TXFCTHRESHOLD 0x2040
-#define DL_FC0PROTTIMEOUTVAL 0x2041
-#define DL_TC0REPLAYTIMEOUTVAL 0x2042
-#define DL_AFC0REQTIMEOUTVAL 0x2043
-#define DL_AFC0CREDITTHRESHOLD 0x2044
-#define DL_TC0OUTACKTHRESHOLD 0x2045
-#define DL_TC1TXFCTHRESHOLD 0x2060
-#define DL_FC1PROTTIMEOUTVAL 0x2061
-#define DL_TC1REPLAYTIMEOUTVAL 0x2062
-#define DL_AFC1REQTIMEOUTVAL 0x2063
-#define DL_AFC1CREDITTHRESHOLD 0x2064
-#define DL_TC1OUTACKTHRESHOLD 0x2065
-#define DL_TXPREEMPTIONCAP 0x2000
-#define DL_TC0TXMAXSDUSIZE 0x2001
-#define DL_TC0RXINITCREDITVAL 0x2002
-#define DL_TC0TXBUFFERSIZE 0x2005
-#define DL_PEERTC0PRESENT 0x2046
-#define DL_PEERTC0RXINITCREVAL 0x2047
-#define DL_TC1TXMAXSDUSIZE 0x2003
-#define DL_TC1RXINITCREDITVAL 0x2004
-#define DL_TC1TXBUFFERSIZE 0x2006
-#define DL_PEERTC1PRESENT 0x2066
-#define DL_PEERTC1RXINITCREVAL 0x2067
-
-/*
- * Network Layer Attributes
- */
-#define N_DEVICEID 0x3000
-#define N_DEVICEID_VALID 0x3001
-#define N_TC0TXMAXSDUSIZE 0x3020
-#define N_TC1TXMAXSDUSIZE 0x3021
-
-/*
- * Transport Layer Attributes
- */
-#define T_NUMCPORTS 0x4000
-#define T_NUMTESTFEATURES 0x4001
-#define T_CONNECTIONSTATE 0x4020
-#define T_PEERDEVICEID 0x4021
-#define T_PEERCPORTID 0x4022
-#define T_TRAFFICCLASS 0x4023
-#define T_PROTOCOLID 0x4024
-#define T_CPORTFLAGS 0x4025
-#define T_TXTOKENVALUE 0x4026
-#define T_RXTOKENVALUE 0x4027
-#define T_LOCALBUFFERSPACE 0x4028
-#define T_PEERBUFFERSPACE 0x4029
-#define T_CREDITSTOSEND 0x402A
-#define T_CPORTMODE 0x402B
-#define T_TC0TXMAXSDUSIZE 0x4060
-#define T_TC1TXMAXSDUSIZE 0x4061
-
-#ifdef FALSE
-#undef FALSE
-#endif
-
-#ifdef TRUE
-#undef TRUE
-#endif
-
-/* Boolean attribute values */
-enum {
- FALSE = 0,
- TRUE,
-};
-
-#endif /* _UNIPRO_H_ */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index bfec84aacd90..2a79ab16134b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -22,6 +22,7 @@
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -97,10 +98,10 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
if (resid)
- scsi_set_resid(sc, resid);
+ scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}
-/**
+/*
* virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
*
* Called with vq_lock held.
@@ -140,15 +141,15 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
break;
case VIRTIO_SCSI_S_TARGET_FAILURE:
- set_host_byte(sc, DID_TARGET_FAILURE);
+ set_host_byte(sc, DID_BAD_TARGET);
break;
case VIRTIO_SCSI_S_NEXUS_FAILURE:
- set_host_byte(sc, DID_NEXUS_FAILURE);
+ set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
break;
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
resp->response);
- /* fall through */
+ fallthrough;
case VIRTIO_SCSI_S_FAILURE:
set_host_byte(sc, DID_ERROR);
break;
@@ -156,16 +157,14 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
VIRTIO_SCSI_SENSE_SIZE);
- if (sc->sense_buffer) {
+ if (resp->sense_len) {
memcpy(sc->sense_buffer, resp->sense,
min_t(u32,
virtio32_to_cpu(vscsi->vdev, resp->sense_len),
VIRTIO_SCSI_SENSE_SIZE));
- if (resp->sense_len)
- set_driver_byte(sc, DRIVER_SENSE);
}
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -284,7 +283,12 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
- scsi_add_device(shost, 0, target, lun);
+ if (lun == 0) {
+ scsi_scan_target(&shost->shost_gendev, 0, target,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ } else {
+ scsi_add_device(shost, 0, target, lun);
+ }
break;
case VIRTIO_SCSI_EVT_RESET_REMOVED:
sdev = scsi_device_lookup(shost, 0, target, lun);
@@ -297,7 +301,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
}
break;
default:
- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
}
}
@@ -350,6 +354,14 @@ static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
if (result == 0 && inq_result[0] >> 5) {
/* PQ indicates the LUN is not attached */
scsi_remove_device(sdev);
+ } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
+ /*
+ * If all LUNs of a virtio-scsi device are unplugged
+ * it will respond with BAD TARGET on any INQUIRY
+ * command.
+ * Remove the device in this case as well.
+ */
+ scsi_remove_device(sdev);
}
}
@@ -381,7 +393,7 @@ static void virtscsi_handle_event(struct work_struct *work)
virtscsi_handle_param_change(vscsi, event);
break;
default:
- pr_err("Unsupport virtio scsi event %x\n", event->event);
+ pr_err("Unsupported virtio scsi event %x\n", event->event);
}
virtscsi_kick_event(vscsi, event_node);
}
@@ -508,7 +520,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
- struct request *rq = sc->request;
+ struct request *rq = scsi_cmd_to_rq(sc);
struct blk_integrity *bi;
virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
@@ -516,7 +528,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (!rq || !scsi_prot_sg_count(sc))
return;
- bi = blk_get_integrity(rq->rq_disk);
+ bi = blk_get_integrity(rq->q->disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
@@ -532,7 +544,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
struct scsi_cmnd *sc)
{
- u32 tag = blk_mq_unique_tag(sc->request);
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
u16 hwq = blk_mq_unique_tag_to_hwq(tag);
return &vscsi->req_vqs[hwq];
@@ -608,9 +620,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
* we're using independent interrupts (e.g. MSI). Poll the
* virtqueues once.
*
- * In the abort case, sc->scsi_done will do nothing, because
- * the block layer must have detected a timeout and as a result
- * REQ_ATOM_COMPLETE has been set.
+ * In the abort case, scsi_done() will do nothing, because the
+ * command timed out and hence SCMD_STATE_COMPLETE has been set.
*/
virtscsi_poll_requests(vscsi);
@@ -700,12 +711,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+ blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
@@ -742,19 +753,18 @@ static struct scsi_host_template virtscsi_host_template = {
.dma_boundary = UINT_MAX,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
- .force_blk_mq = 1,
};
#define virtscsi_config_get(vdev, fld) \
({ \
- typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val; \
virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
do { \
- typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
} while(0)
@@ -768,7 +778,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
@@ -978,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1003,25 +1013,21 @@ static int __init init(void)
return 0;
error:
- if (virtscsi_cmd_pool) {
- mempool_destroy(virtscsi_cmd_pool);
- virtscsi_cmd_pool = NULL;
- }
- if (virtscsi_cmd_cache) {
- kmem_cache_destroy(virtscsi_cmd_cache);
- virtscsi_cmd_cache = NULL;
- }
+ mempool_destroy(virtscsi_cmd_pool);
+ virtscsi_cmd_pool = NULL;
+ kmem_cache_destroy(virtscsi_cmd_cache);
+ virtscsi_cmd_cache = NULL;
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c3f010df641e..f88ecdb93a8a 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,8 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Jim Gill <jgill@vmware.com>
- *
*/
#include <linux/kernel.h>
@@ -578,16 +576,22 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
cmd->result = (DID_RESET << 16);
} else {
cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION &&
- cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
}
} else
switch (btstat) {
case BTSTAT_SUCCESS:
case BTSTAT_LINKED_COMMAND_COMPLETED:
case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
- /* If everything went fine, let's move on.. */
+ /*
+ * Commands like INQUIRY may transfer less data than
+ * requested by the initiator via bufflen. Set the residual
+ * count to make the upper layer aware of the actual amount
+ * of data returned. There are cases when the controller
+ * returns zero dataLen with non-zero data - do not set
+ * the residual count in that case.
+ */
+ if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_OK << 16);
break;
@@ -606,9 +610,6 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
case BTSTAT_LUNMISMATCH:
case BTSTAT_TAGREJECT:
case BTSTAT_BADMSG:
- cmd->result = (DRIVER_INVALID << 24);
- /* fall through */
-
case BTSTAT_HAHARDWARE:
case BTSTAT_INVPHASE:
case BTSTAT_HATIMEOUT:
@@ -645,7 +646,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/*
@@ -770,7 +771,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
return 0;
}
-static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int pvscsi_queue_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
@@ -788,7 +789,6 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
return SCSI_MLQUEUE_HOST_BUSY;
}
- cmd->scsi_done = done;
op = cmd->cmnd[0];
dev_dbg(&cmd->device->sdev_gendev,
@@ -862,7 +862,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
* Successfully aborted the command.
*/
cmd->result = (DID_ABORT << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
out:
spin_unlock_irqrestore(&adapter->hw_lock, flags);
@@ -889,7 +889,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
pvscsi_patch_sense(cmd);
pvscsi_release_context(adapter, ctx);
cmd->result = (DID_RESET << 16);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
}
}
@@ -908,7 +908,7 @@ static int pvscsi_host_reset(struct scsi_cmnd *cmd)
use_msg = adapter->use_msg;
if (use_msg) {
- adapter->use_msg = 0;
+ adapter->use_msg = false;
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/*
@@ -1324,7 +1324,6 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
* indicate success.
*/
header = config_page;
- memset(header, 0, sizeof *header);
header->hostStatus = BTSTAT_INVPARAM;
header->scsiStatus = SDSTAT_CHECK;
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 75966d3f326e..9d16cf925483 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,8 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Jim Gill <jgill@vmware.com>
- *
*/
#ifndef _VMW_PVSCSI_H_
@@ -333,8 +331,8 @@ struct PVSCSIRingReqDesc {
u8 tag;
u8 bus;
u8 target;
- u8 vcpuHint;
- u8 unused[59];
+ u16 vcpuHint;
+ u8 unused[58];
} __packed;
/*
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index f81046f0e68a..e4fafc77bd20 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -162,65 +162,6 @@ module_param(setup_strings, charp, 0);
static void wd33c93_execute(struct Scsi_Host *instance);
-#ifdef CONFIG_WD33C93_PIO
-static inline uchar
-read_wd33c93(const wd33c93_regs regs, uchar reg_num)
-{
- uchar data;
-
- outb(reg_num, regs.SASR);
- data = inb(regs.SCMD);
- return data;
-}
-
-static inline unsigned long
-read_wd33c93_count(const wd33c93_regs regs)
-{
- unsigned long value;
-
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- value = inb(regs.SCMD) << 16;
- value |= inb(regs.SCMD) << 8;
- value |= inb(regs.SCMD);
- return value;
-}
-
-static inline uchar
-read_aux_stat(const wd33c93_regs regs)
-{
- return inb(regs.SASR);
-}
-
-static inline void
-write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value)
-{
- outb(reg_num, regs.SASR);
- outb(value, regs.SCMD);
-}
-
-static inline void
-write_wd33c93_count(const wd33c93_regs regs, unsigned long value)
-{
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- outb((value >> 16) & 0xff, regs.SCMD);
- outb((value >> 8) & 0xff, regs.SCMD);
- outb( value & 0xff, regs.SCMD);
-}
-
-#define write_wd33c93_cmd(regs, cmd) \
- write_wd33c93((regs), WD_COMMAND, (cmd))
-
-static inline void
-write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
-{
- int i;
-
- outb(WD_CDB_1, regs.SASR);
- for (i=0; i<len; i++)
- outb(cmnd[i], regs.SCMD);
-}
-
-#else /* CONFIG_WD33C93_PIO */
static inline uchar
read_wd33c93(const wd33c93_regs regs, uchar reg_num)
{
@@ -287,7 +228,6 @@ write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
for (i = 0; i < len; i++)
*regs.SCMD = cmnd[i];
}
-#endif /* CONFIG_WD33C93_PIO */
static inline uchar
read_1_byte(const wd33c93_regs regs)
@@ -362,10 +302,9 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast,
msg[1] = offset;
}
-static int
-wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct WD33C93_hostdata *hostdata;
struct scsi_cmnd *tmp;
@@ -376,11 +315,9 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
/* Set up a few fields in the scsi_cmnd structure for our own use:
* - host_scribble is the pointer to the next cmd in the input queue
- * - scsi_done points to the routine we call when a cmd is finished
* - result is what you'd expect
*/
cmd->host_scribble = NULL;
- cmd->scsi_done = done;
cmd->result = 0;
/* We use the Scsi_Pointer structure that's included with each command
@@ -399,15 +336,15 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
*/
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->buffers_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
}
/* WD docs state that at the conclusion of a "LEVEL2" command, the
@@ -427,7 +364,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
* status byte is stored.
*/
- cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+ scsi_pointer->Status = ILLEGAL_STATUS_BYTE;
/*
* Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE
@@ -474,6 +411,7 @@ DEF_SCSI_QCMD(wd33c93_queuecommand)
static void
wd33c93_execute(struct Scsi_Host *instance)
{
+ struct scsi_pointer *scsi_pointer;
struct WD33C93_hostdata *hostdata =
(struct WD33C93_hostdata *) instance->hostdata;
const wd33c93_regs regs = hostdata->regs;
@@ -550,7 +488,8 @@ wd33c93_execute(struct Scsi_Host *instance)
* to change around and experiment with for now.
*/
- cmd->SCp.phase = 0; /* assume no disconnect */
+ scsi_pointer = WD33C93_scsi_pointer(cmd);
+ scsi_pointer->phase = 0; /* assume no disconnect */
if (hostdata->disconnect == DIS_NEVER)
goto no;
if (hostdata->disconnect == DIS_ALWAYS)
@@ -567,7 +506,7 @@ wd33c93_execute(struct Scsi_Host *instance)
(prev->device->lun != cmd->device->lun)) {
for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev;
prev = (struct scsi_cmnd *) prev->host_scribble)
- prev->SCp.phase = 1;
+ WD33C93_scsi_pointer(prev)->phase = 1;
goto yes;
}
}
@@ -575,7 +514,7 @@ wd33c93_execute(struct Scsi_Host *instance)
goto no;
yes:
- cmd->SCp.phase = 1;
+ scsi_pointer->phase = 1;
#ifdef PROC_STATISTICS
hostdata->disc_allowed_cnt[cmd->device->id]++;
@@ -583,7 +522,7 @@ wd33c93_execute(struct Scsi_Host *instance)
no:
- write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
+ write_wd33c93(regs, WD_SOURCE_ID, scsi_pointer->phase ? SRCID_ER : 0);
write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun);
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
@@ -652,14 +591,14 @@ wd33c93_execute(struct Scsi_Host *instance)
* up ahead of time.
*/
- if ((cmd->SCp.phase == 0) && (hostdata->no_dma == 0)) {
+ if (scsi_pointer->phase == 0 && hostdata->no_dma == 0) {
if (hostdata->dma_setup(cmd,
(cmd->sc_data_direction == DMA_TO_DEVICE) ?
DATA_OUT_DIR : DATA_IN_DIR))
write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */
else {
write_wd33c93_count(regs,
- cmd->SCp.this_residual);
+ scsi_pointer->this_residual);
write_wd33c93(regs, WD_CONTROL,
CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
hostdata->dma = D_DMA_RUNNING;
@@ -679,7 +618,7 @@ wd33c93_execute(struct Scsi_Host *instance)
*/
DB(DB_EXECUTE,
- printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
+ printk("%s)EX-2 ", scsi_pointer->phase ? "d:" : ""))
}
static void
@@ -721,6 +660,7 @@ static void
transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
int data_in_dir)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct WD33C93_hostdata *hostdata;
unsigned long length;
@@ -734,13 +674,13 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
* now we need to setup the next scatter-gather buffer as the
* source or destination for THIS transfer.
*/
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (!scsi_pointer->this_residual && scsi_pointer->buffers_residual) {
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ --scsi_pointer->buffers_residual;
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
}
- if (!cmd->SCp.this_residual) /* avoid bogus setups */
+ if (!scsi_pointer->this_residual) /* avoid bogus setups */
return;
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
@@ -754,11 +694,12 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
#ifdef PROC_STATISTICS
hostdata->pio_cnt++;
#endif
- transfer_pio(regs, (uchar *) cmd->SCp.ptr,
- cmd->SCp.this_residual, data_in_dir, hostdata);
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_wd33c93_count(regs);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ transfer_pio(regs, (uchar *) scsi_pointer->ptr,
+ scsi_pointer->this_residual, data_in_dir,
+ hostdata);
+ length = scsi_pointer->this_residual;
+ scsi_pointer->this_residual = read_wd33c93_count(regs);
+ scsi_pointer->ptr += length - scsi_pointer->this_residual;
}
/* We are able to do DMA (in fact, the Amiga hardware is
@@ -775,10 +716,10 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
hostdata->dma_cnt++;
#endif
write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
- write_wd33c93_count(regs, cmd->SCp.this_residual);
+ write_wd33c93_count(regs, scsi_pointer->this_residual);
if ((hostdata->level2 >= L2_DATA) ||
- (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ (hostdata->level2 == L2_BASIC && scsi_pointer->phase == 0)) {
write_wd33c93(regs, WD_COMMAND_PHASE, 0x45);
write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
@@ -792,6 +733,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
void
wd33c93_intr(struct Scsi_Host *instance)
{
+ struct scsi_pointer *scsi_pointer;
struct WD33C93_hostdata *hostdata =
(struct WD33C93_hostdata *) instance->hostdata;
const wd33c93_regs regs = hostdata->regs;
@@ -810,6 +752,7 @@ wd33c93_intr(struct Scsi_Host *instance)
#endif
cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */
+ scsi_pointer = WD33C93_scsi_pointer(cmd);
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */
phs = read_wd33c93(regs, WD_COMMAND_PHASE);
@@ -831,14 +774,14 @@ wd33c93_intr(struct Scsi_Host *instance)
*/
if (hostdata->dma == D_DMA_RUNNING) {
DB(DB_TRANSFER,
- printk("[%p/%d:", cmd->SCp.ptr, cmd->SCp.this_residual))
+ printk("[%p/%d:", scsi_pointer->ptr, scsi_pointer->this_residual))
hostdata->dma_stop(cmd->device->host, cmd, 1);
hostdata->dma = D_DMA_OFF;
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_wd33c93_count(regs);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ length = scsi_pointer->this_residual;
+ scsi_pointer->this_residual = read_wd33c93_count(regs);
+ scsi_pointer->ptr += length - scsi_pointer->this_residual;
DB(DB_TRANSFER,
- printk("%p/%d]", cmd->SCp.ptr, cmd->SCp.this_residual))
+ printk("%p/%d]", scsi_pointer->ptr, scsi_pointer->this_residual))
}
/* Respond to the specific WD3393 interrupt - there are quite a few! */
@@ -856,7 +799,7 @@ wd33c93_intr(struct Scsi_Host *instance)
cmd->result = DID_NO_CONNECT << 16;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
/* From esp.c:
* There is a window of time within the scsi_done() path
@@ -888,7 +831,7 @@ wd33c93_intr(struct Scsi_Host *instance)
/* construct an IDENTIFY message with correct disconnect bit */
hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
- if (cmd->SCp.phase)
+ if (scsi_pointer->phase)
hostdata->outgoing_msg[0] |= 0x40;
if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
@@ -930,8 +873,8 @@ wd33c93_intr(struct Scsi_Host *instance)
case CSR_UNEXP | PHS_DATA_IN:
case CSR_SRV_REQ | PHS_DATA_IN:
DB(DB_INTR,
- printk("IN-%d.%d", cmd->SCp.this_residual,
- cmd->SCp.buffers_residual))
+ printk("IN-%d.%d", scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual))
transfer_bytes(regs, cmd, DATA_IN_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
@@ -942,8 +885,8 @@ wd33c93_intr(struct Scsi_Host *instance)
case CSR_UNEXP | PHS_DATA_OUT:
case CSR_SRV_REQ | PHS_DATA_OUT:
DB(DB_INTR,
- printk("OUT-%d.%d", cmd->SCp.this_residual,
- cmd->SCp.buffers_residual))
+ printk("OUT-%d.%d", scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual))
transfer_bytes(regs, cmd, DATA_OUT_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
@@ -966,8 +909,8 @@ wd33c93_intr(struct Scsi_Host *instance)
case CSR_UNEXP | PHS_STATUS:
case CSR_SRV_REQ | PHS_STATUS:
DB(DB_INTR, printk("STATUS="))
- cmd->SCp.Status = read_1_byte(regs);
- DB(DB_INTR, printk("%02x", cmd->SCp.Status))
+ scsi_pointer->Status = read_1_byte(regs);
+ DB(DB_INTR, printk("%02x", scsi_pointer->Status))
if (hostdata->level2 >= L2_BASIC) {
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
udelay(7);
@@ -995,7 +938,7 @@ wd33c93_intr(struct Scsi_Host *instance)
else
hostdata->incoming_ptr = 0;
- cmd->SCp.Message = msg;
+ scsi_pointer->Message = msg;
switch (msg) {
case COMMAND_COMPLETE:
@@ -1167,23 +1110,23 @@ wd33c93_intr(struct Scsi_Host *instance)
write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
if (phs == 0x60) {
DB(DB_INTR, printk("SX-DONE"))
- cmd->SCp.Message = COMMAND_COMPLETE;
+ scsi_pointer->Message = COMMAND_COMPLETE;
lun = read_wd33c93(regs, WD_TARGET_LUN);
- DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
+ DB(DB_INTR, printk(":%d.%d", scsi_pointer->Status, lun))
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
- cmd->SCp.Status = lun;
+ if (scsi_pointer->Status == ILLEGAL_STATUS_BYTE)
+ scsi_pointer->Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != GOOD)
- cmd->result =
- (cmd->
- result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result =
- cmd->SCp.Status | (cmd->SCp.Message << 8);
- cmd->scsi_done(cmd);
+ && scsi_pointer->Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
+ }
+ scsi_done(cmd);
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
@@ -1262,12 +1205,15 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
- cmd->result =
- (cmd->result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- cmd->scsi_done(cmd);
+ if (cmd->cmnd[0] == REQUEST_SENSE &&
+ scsi_pointer->Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
+ }
+ scsi_done(cmd);
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
@@ -1294,16 +1240,16 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- DB(DB_INTR, printk(":%d", cmd->SCp.Status))
- if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != GOOD)
- cmd->result =
- (cmd->
- result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result =
- cmd->SCp.Status | (cmd->SCp.Message << 8);
- cmd->scsi_done(cmd);
+ DB(DB_INTR, printk(":%d", scsi_pointer->Status))
+ if (cmd->cmnd[0] == REQUEST_SENSE
+ && scsi_pointer->Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
+ }
+ scsi_done(cmd);
break;
case S_PRE_TMP_DISC:
case S_RUNNING_LEVEL2:
@@ -1633,7 +1579,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
("scsi%d: Abort - removing command from input_Q. ",
instance->host_no);
enable_irq(cmd->device->host->irq);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
prev = tmp;
@@ -1708,7 +1654,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
wd33c93_execute(instance);
enable_irq(cmd->device->host->irq);
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return SUCCESS;
}
@@ -1854,7 +1800,7 @@ round_4(unsigned int x)
case 1: --x;
break;
case 2: ++x;
- /* fall through */
+ fallthrough;
case 3: ++x;
}
return x;
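Several hunks above replace the open-coded `cmd->result = Status | (Message << 8)` with the result accessors; scsi_msg_to_host_byte() folds a non-COMMAND_COMPLETE message byte into DID_ERROR while leaving a clean completion at DID_OK. A condensed sketch of the sequence the wd33c93 interrupt paths now use (function name illustrative, helpers real):

#include <scsi/scsi_cmnd.h>

/* Build cmd->result from the saved message and status bytes without
 * hand-rolled shifts, then complete the command.
 */
static void example_finish(struct scsi_cmnd *cmd, u8 message, u8 status)
{
	set_host_byte(cmd, DID_OK);
	scsi_msg_to_host_byte(cmd, message); /* DID_ERROR unless COMMAND_COMPLETE */
	set_status_byte(cmd, status);
	scsi_done(cmd);
}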
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 2edec34c5a42..e5e4254b1477 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -180,13 +180,8 @@
/* This is what the 3393 chip looks like to us */
typedef struct {
-#ifdef CONFIG_WD33C93_PIO
- unsigned int SASR;
- unsigned int SCMD;
-#else
volatile unsigned char *SASR;
volatile unsigned char *SCMD;
-#endif
} wd33c93_regs;
@@ -262,6 +257,10 @@ struct WD33C93_hostdata {
#endif
};
+static inline struct scsi_pointer *WD33C93_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
/* defines for hostdata->chip */
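The new WD33C93_scsi_pointer() accessor returns valid memory only if the host drivers layered on this library reserve the per-command area, which the midlayer allocates behind each scsi_cmnd according to the template's cmd_size. A sketch of that pairing; everything beyond cmd_size is an assumption for illustration:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Each command gets sizeof(struct scsi_pointer) of private space, which
 * scsi_cmd_priv() then hands back to the accessor above.
 */
static struct scsi_host_template example_wd33c93_template = {
	.name		= "example",	/* illustrative */
	.cmd_size	= sizeof(struct scsi_pointer),
	/* ... queuecommand, error handlers, etc. ... */
};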
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index edc8a139a60d..ff1b22077251 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -196,11 +196,11 @@ static void wd719x_finish_cmd(struct wd719x_scb *scb, int result)
dma_unmap_single(&wd->pdev->dev, scb->phys,
sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL);
scsi_dma_unmap(cmd);
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ dma_unmap_single(&wd->pdev->dev, scb->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
cmd->result = result << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
}
/* Build a SCB and send it to the card */
@@ -229,11 +229,11 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* map sense buffer */
scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
- cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
+ scb->dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&wd->pdev->dev, scb->dma_handle))
goto out_unmap_scb;
- scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
+ scb->sense_buf = cpu_to_le32(scb->dma_handle);
/* request autosense */
scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;
@@ -288,14 +288,14 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return 0;
out_unmap_sense:
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ dma_unmap_single(&wd->pdev->dev, scb->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
out_unmap_scb:
dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb),
DMA_BIDIRECTIONAL);
out_error:
cmd->result = DID_ERROR << 16;
- cmd->scsi_done(cmd);
+ scsi_done(cmd);
return 0;
}
@@ -466,14 +466,16 @@ static int wd719x_abort(struct scsi_cmnd *cmd)
unsigned long flags;
struct wd719x_scb *scb = scsi_cmd_priv(cmd);
struct wd719x *wd = shost_priv(cmd->device->host);
+ struct device *dev = &wd->pdev->dev;
- dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
+ dev_info(dev, "abort command, tag: %x\n", scsi_cmd_to_rq(cmd)->tag);
- action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT;
+ action = WD719X_CMD_ABORT;
spin_lock_irqsave(wd->sh->host_lock, flags);
result = wd719x_direct_cmd(wd, action, cmd->device->id,
- cmd->device->lun, cmd->tag, scb->phys, 0);
+ cmd->device->lun, scsi_cmd_to_rq(cmd)->tag,
+ scb->phys, 0);
wd719x_finish_cmd(scb, DID_ABORT);
spin_unlock_irqrestore(wd->sh->host_lock, flags);
if (result)
@@ -902,7 +904,8 @@ static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
if (err)
goto fail;
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
goto disable_device;
}
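With cmd->SCp gone, wd719x keeps the sense-buffer DMA handle in its own SCB instead. A minimal sketch of the map-and-check discipline the hunks above implement (the helper itself is illustrative; the calls and SCSI_SENSE_BUFFERSIZE are real):

#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

/* Map the midlayer's sense buffer for device-to-host DMA and store the
 * handle in driver-owned state; the mapping must be checked for failure
 * before the address is handed to the card.
 */
static int example_map_sense(struct device *dev, struct scsi_cmnd *cmd,
			     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, cmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}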
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index abaabd419a54..966ab0fb4621 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -56,6 +56,7 @@ struct wd719x_scb {
u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */
/* everything below is for driver use (not used by card) */
dma_addr_t phys; /* bus address of the SCB */
+ dma_addr_t dma_handle;
struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */
struct list_head list;
struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index f0068e96a177..66b316d173b0 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -58,9 +58,6 @@
#include <asm/xen/hypervisor.h>
-
-#define GRANT_INVALID_REF 0
-
#define VSCSIFRONT_OP_ADD_LUN 1
#define VSCSIFRONT_OP_DEL_LUN 2
#define VSCSIFRONT_OP_READD_LUN 3
@@ -83,6 +80,8 @@ struct vscsifrnt_shadow {
uint16_t rqid;
uint16_t ref_rqid;
+ bool inflight;
+
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg; /* scatter/gather elements */
struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
@@ -104,7 +103,11 @@ struct vscsifrnt_info {
struct xenbus_device *dev;
struct Scsi_Host *host;
- int host_active;
+ enum {
+ STATE_INACTIVE,
+ STATE_ACTIVE,
+ STATE_ERROR
+ } host_active;
unsigned int evtchn;
unsigned int irq;
@@ -212,11 +215,13 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
- ring_req->timeout_per_command = sc->request->timeout / HZ;
+ ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;
for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
ring_req->seg[i] = shadow->seg[i];
+ shadow->inflight = true;
+
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
@@ -224,6 +229,13 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
return 0;
}
+static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg)
+{
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n"
+ "Disabling device for further use\n", msg);
+ info->host_active = STATE_ERROR;
+}
+
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
struct vscsifrnt_shadow *shadow)
{
@@ -233,17 +245,57 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
return;
for (i = 0; i < shadow->nr_grants; i++) {
- if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
- shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
- "grant still in use by backend\n");
- BUG();
+ if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
+ scsifront_set_error(info, "grant still in use by backend");
+ return;
}
- gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
kfree(shadow->sg);
}
+static unsigned int scsifront_host_byte(int32_t rslt)
+{
+ switch (XEN_VSCSIIF_RSLT_HOST(rslt)) {
+ case XEN_VSCSIIF_RSLT_HOST_OK:
+ return DID_OK;
+ case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT:
+ return DID_NO_CONNECT;
+ case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY:
+ return DID_BUS_BUSY;
+ case XEN_VSCSIIF_RSLT_HOST_TIME_OUT:
+ return DID_TIME_OUT;
+ case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET:
+ return DID_BAD_TARGET;
+ case XEN_VSCSIIF_RSLT_HOST_ABORT:
+ return DID_ABORT;
+ case XEN_VSCSIIF_RSLT_HOST_PARITY:
+ return DID_PARITY;
+ case XEN_VSCSIIF_RSLT_HOST_ERROR:
+ return DID_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_RESET:
+ return DID_RESET;
+ case XEN_VSCSIIF_RSLT_HOST_BAD_INTR:
+ return DID_BAD_INTR;
+ case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH:
+ return DID_PASSTHROUGH;
+ case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR:
+ return DID_SOFT_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY:
+ return DID_IMM_RETRY;
+ case XEN_VSCSIIF_RSLT_HOST_REQUEUE:
+ return DID_REQUEUE;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED:
+ return DID_TRANSPORT_DISRUPTED;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
+ return DID_TRANSPORT_FAILFAST;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
+ return DID_TRANSPORT_MARGINAL;
+ default:
+ return DID_ERROR;
+ }
+}
+
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
@@ -259,9 +311,12 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
BUG_ON(sc == NULL);
scsifront_gnttab_done(info, shadow);
+ if (info->host_active == STATE_ERROR)
+ return;
scsifront_put_rqid(info, id);
- sc->result = ring_rsp->rslt;
+ set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt));
+ set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt));
scsi_set_resid(sc, ring_rsp->residual_len);
sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
@@ -270,7 +325,7 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
if (sense_len)
memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);
- sc->scsi_done(sc);
+ scsi_done(sc);
}
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
@@ -285,7 +340,10 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
shadow->wait_reset = 1;
switch (shadow->rslt_reset) {
case RSLT_RESET_WAITING:
- shadow->rslt_reset = ring_rsp->rslt;
+ if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS)
+ shadow->rslt_reset = SUCCESS;
+ else
+ shadow->rslt_reset = FAILED;
break;
case RSLT_RESET_ERR:
kick = _scsifront_put_rqid(info, id);
@@ -295,9 +353,7 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
scsifront_wake_up(info);
return;
default:
- shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
- "bad reset state %d, possibly leaking %u\n",
- shadow->rslt_reset, id);
+ scsifront_set_error(info, "bad reset state");
break;
}
spin_unlock_irqrestore(&info->shadow_lock, flags);
@@ -308,28 +364,41 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
static void scsifront_do_response(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
- if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
- test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
- "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
+ struct vscsifrnt_shadow *shadow;
+
+ if (ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ !info->shadow[ring_rsp->rqid]->inflight) {
+ scsifront_set_error(info, "illegal rqid returned by backend!");
return;
+ }
+ shadow = info->shadow[ring_rsp->rqid];
+ shadow->inflight = false;
- if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ if (shadow->act == VSCSIIF_ACT_SCSI_CDB)
scsifront_cdb_cmd_done(info, ring_rsp);
else
scsifront_sync_cmd_done(info, ring_rsp);
}
-static int scsifront_ring_drain(struct vscsifrnt_info *info)
+static int scsifront_ring_drain(struct vscsifrnt_info *info,
+ unsigned int *eoiflag)
{
- struct vscsiif_response *ring_rsp;
+ struct vscsiif_response ring_rsp;
RING_IDX i, rp;
int more_to_do = 0;
- rp = info->ring.sring->rsp_prod;
- rmb(); /* ordering required respective to dom0 */
+ rp = READ_ONCE(info->ring.sring->rsp_prod);
+ virt_rmb(); /* ordering required respective to backend */
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
+ scsifront_set_error(info, "illegal number of responses");
+ return 0;
+ }
for (i = info->ring.rsp_cons; i != rp; i++) {
- ring_rsp = RING_GET_RESPONSE(&info->ring, i);
- scsifront_do_response(info, ring_rsp);
+ RING_COPY_RESPONSE(&info->ring, i, &ring_rsp);
+ scsifront_do_response(info, &ring_rsp);
+ if (info->host_active == STATE_ERROR)
+ return 0;
+ *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
}
info->ring.rsp_cons = i;
@@ -342,14 +411,15 @@ static int scsifront_ring_drain(struct vscsifrnt_info *info)
return more_to_do;
}
-static int scsifront_cmd_done(struct vscsifrnt_info *info)
+static int scsifront_cmd_done(struct vscsifrnt_info *info,
+ unsigned int *eoiflag)
{
int more_to_do;
unsigned long flags;
spin_lock_irqsave(info->host->host_lock, flags);
- more_to_do = scsifront_ring_drain(info);
+ more_to_do = scsifront_ring_drain(info, eoiflag);
info->wait_ring_available = 0;
@@ -363,20 +433,28 @@ static int scsifront_cmd_done(struct vscsifrnt_info *info)
static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
struct vscsifrnt_info *info = dev_id;
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (info->host_active == STATE_ERROR) {
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ return IRQ_HANDLED;
+ }
- while (scsifront_cmd_done(info))
+ while (scsifront_cmd_done(info, &eoiflag))
/* Yield point for this unbounded loop. */
cond_resched();
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
static void scsifront_finish_all(struct vscsifrnt_info *info)
{
- unsigned i;
+ unsigned int i, dummy;
struct vscsiif_response resp;
- scsifront_ring_drain(info);
+ scsifront_ring_drain(info, &dummy);
for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
if (test_bit(i, info->shadow_free_bitmap))
@@ -533,6 +611,9 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
unsigned long flags;
int err;
+ if (info->host_active == STATE_ERROR)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
sc->result = 0;
shadow->sc = sc;
@@ -552,7 +633,7 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
if (err == -ENOMEM)
return SCSI_MLQUEUE_HOST_BUSY;
sc->result = DID_ERROR << 16;
- sc->scsi_done(sc);
+ scsi_done(sc);
return 0;
}
@@ -585,6 +666,9 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
int err = 0;
+ if (info->host_active == STATE_ERROR)
+ return FAILED;
+
shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
if (!shadow)
return FAILED;
@@ -656,6 +740,9 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
+ if (info->host_active == STATE_ERROR)
+ return -EIO;
+
if (info && current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
@@ -703,27 +790,15 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
struct xenbus_device *dev = info->dev;
struct vscsiif_sring *sring;
- grant_ref_t gref;
- int err = -ENOMEM;
+ int err;
/***** Frontend to Backend ring start *****/
- sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
- if (!sring) {
- xenbus_dev_fatal(dev, err,
- "fail to allocate shared ring (Front to Back)");
+ err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1,
+ &info->ring_ref);
+ if (err)
return err;
- }
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = xenbus_grant_ring(dev, sring, 1, &gref);
- if (err < 0) {
- free_page((unsigned long)sring);
- xenbus_dev_fatal(dev, err,
- "fail to grant shared ring (Front to Back)");
- return err;
- }
- info->ring_ref = gref;
+ XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err) {
@@ -731,7 +806,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
goto free_gnttab;
}
- err = bind_evtchn_to_irq(info->evtchn);
+ err = bind_evtchn_to_irq_lateeoi(info->evtchn);
if (err <= 0) {
xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
goto free_gnttab;
@@ -752,8 +827,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
free_irq:
unbind_from_irqhandler(info->irq, info);
free_gnttab:
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
+ xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref);
return err;
}
@@ -761,8 +835,7 @@ free_gnttab:
static void scsifront_free_ring(struct vscsifrnt_info *info)
{
unbind_from_irqhandler(info->irq, info);
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
+ xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref);
}
static int scsifront_init_ring(struct vscsifrnt_info *info)
@@ -861,7 +934,7 @@ static int scsifront_probe(struct xenbus_device *dev,
goto free_sring;
}
info->host = host;
- info->host_active = 1;
+ info->host_active = STATE_ACTIVE;
xenbus_switch_state(dev, XenbusStateInitialised);
@@ -929,10 +1002,10 @@ static int scsifront_remove(struct xenbus_device *dev)
pr_debug("%s: %s removed\n", __func__, dev->nodename);
mutex_lock(&scsifront_mutex);
- if (info->host_active) {
+ if (info->host_active != STATE_INACTIVE) {
/* Scsi_host not yet removed */
scsi_remove_host(info->host);
- info->host_active = 0;
+ info->host_active = STATE_INACTIVE;
}
mutex_unlock(&scsifront_mutex);
@@ -956,9 +1029,9 @@ static void scsifront_disconnect(struct vscsifrnt_info *info)
*/
mutex_lock(&scsifront_mutex);
- if (info->host_active) {
+ if (info->host_active != STATE_INACTIVE) {
scsi_remove_host(host);
- info->host_active = 0;
+ info->host_active = STATE_INACTIVE;
}
mutex_unlock(&scsifront_mutex);
@@ -976,6 +1049,9 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
unsigned int hst, chn, tgt, lun;
struct scsi_device *sdev;
+ if (info->host_active == STATE_ERROR)
+ return;
+
dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
if (IS_ERR(dir))
return;
@@ -1111,7 +1187,7 @@ static void scsifront_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's Closing state */
+ fallthrough; /* Missed the backend's Closing state */
case XenbusStateClosing:
scsifront_disconnect(info);
break;
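The interrupt rework above moves the frontend to Xen's lateeoi event-channel discipline (paired with bind_evtchn_to_irq_lateeoi() earlier in the diff): the handler starts from XEN_EOI_FLAG_SPURIOUS and clears it only when a response was actually consumed, so a backend that raises events without producing work gets throttled. A stripped-down sketch with the ring-drain body elided:

#include <linux/interrupt.h>
#include <xen/events.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	/* drain the response ring here; for each response actually
	 * consumed:	eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
	 */

	xen_irq_lateeoi(irq, eoiflag);
	return IRQ_HANDLED;
}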
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 7eac76cccc4c..22d412cab91d 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -81,6 +81,7 @@ lasi_scsi_clock(void * hpa, int defaultclock)
static struct scsi_host_template zalon7xx_template = {
.module = THIS_MODULE,
.proc_name = "zalon7xx",
+ .cmd_size = sizeof(struct ncr_cmd_priv),
};
static int __init
@@ -168,15 +169,13 @@ static const struct parisc_device_id zalon_tbl[] __initconst = {
MODULE_DEVICE_TABLE(parisc, zalon_tbl);
-static int __exit zalon_remove(struct parisc_device *dev)
+static void __exit zalon_remove(struct parisc_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
scsi_remove_host(host);
ncr53c8xx_release(host);
free_irq(dev->irq, host);
-
- return 0;
}
static struct parisc_driver zalon_driver __refdata = {
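zalon_remove() loses its return value as part of the tree-wide change making bus remove() callbacks return void: device removal cannot fail, and any returned error was ignored anyway. The converted shape, as an illustrative skeleton:

#include <linux/init.h>
#include <asm/parisc-device.h>

static void __exit example_remove(struct parisc_device *dev)
{
	/* tear down the host and free resources; nothing to return */
}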
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 27b9e2baab1a..7acf9193a9e8 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
scsi_remove_host(host);
NCR_700_release(host);
+ if (host->base > 0x01000000)
+ iounmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
zorro_release_device(z);
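The zorro7xx fix mirrors probe's mapping logic on the remove path: Zorro II space is permanently mapped at early startup and must not be iounmap()ed, so only boards whose base sits above the 16 MiB Zorro II window (host->base > 0x01000000) get unmapped. An illustrative helper capturing the rule:

#include <linux/io.h>

/* Unmap only what probe actually ioremap()ed; pre-mapped Zorro II
 * regions are left alone.
 */
static void example_unmap_regs(void __iomem *regs, bool was_ioremapped)
{
	if (was_ioremapped)
		iounmap(regs);
}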
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bdd82e497d5f..928c8adf5cb3 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -34,9 +34,9 @@
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
@@ -801,8 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
/* additional setup required for Fastlane */
if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
/* map full address space up to ESP base for DMA */
- zep->board_base = ioremap(board,
- FASTLANE_ESP_ADDR-1);
+ zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
if (!zep->board_base) {
pr_err("Cannot allocate board address space\n");
err = -ENOMEM;
@@ -843,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
* dma_registers size if adding any more
*/
esp->dma_regs = ioremap(dmaaddr,
- sizeof(struct fastlane_dma_registers));
+ sizeof(struct fastlane_dma_registers));
} else
/* ZorroII address space remapped nocache by early startup */
esp->dma_regs = ZTWO_VADDR(dmaaddr);